summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.gitignore3
-rw-r--r--deps/v8/.vpython32
-rw-r--r--deps/v8/.ycm_extra_conf.py2
-rw-r--r--deps/v8/AUTHORS9
-rw-r--r--deps/v8/BUILD.gn321
-rw-r--r--deps/v8/ChangeLog1970
-rw-r--r--deps/v8/DEPS79
-rw-r--r--deps/v8/OWNERS3
-rw-r--r--deps/v8/PRESUBMIT.py2
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h2
-rw-r--r--deps/v8/gni/isolate.gni7
-rw-r--r--deps/v8/gni/v8.gni10
-rw-r--r--deps/v8/gypfiles/all.gyp2
-rw-r--r--deps/v8/gypfiles/features.gypi2
-rw-r--r--deps/v8/gypfiles/isolate.gypi1
-rw-r--r--deps/v8/gypfiles/standalone.gypi6
-rw-r--r--deps/v8/include/libplatform/libplatform.h29
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h15
-rw-r--r--deps/v8/include/v8-inspector.h19
-rw-r--r--deps/v8/include/v8-platform.h15
-rw-r--r--deps/v8/include/v8-profiler.h10
-rw-r--r--deps/v8/include/v8-util.h7
-rw-r--r--deps/v8/include/v8-version-string.h7
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h307
-rw-r--r--deps/v8/infra/config/cq.cfg30
-rw-r--r--deps/v8/infra/mb/mb_config.pyl139
-rw-r--r--deps/v8/infra/testing/OWNERS4
-rw-r--r--deps/v8/infra/testing/README.md50
-rw-r--r--deps/v8/infra/testing/client.v8.pyl13
-rw-r--r--deps/v8/infra/testing/tryserver.v8.pyl10
-rw-r--r--deps/v8/samples/hello-world.cc5
-rw-r--r--deps/v8/samples/process.cc6
-rw-r--r--deps/v8/samples/shell.cc9
-rw-r--r--deps/v8/snapshot_toolchain.gni12
-rw-r--r--deps/v8/src/OWNERS3
-rw-r--r--deps/v8/src/accessors.cc345
-rw-r--r--deps/v8/src/accessors.h109
-rw-r--r--deps/v8/src/address-map.cc2
-rw-r--r--deps/v8/src/allocation.cc173
-rw-r--r--deps/v8/src/allocation.h39
-rw-r--r--deps/v8/src/api-arguments.h10
-rw-r--r--deps/v8/src/api-natives.cc27
-rw-r--r--deps/v8/src/api.cc754
-rw-r--r--deps/v8/src/api.h44
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h17
-rw-r--r--deps/v8/src/arm/assembler-arm.cc134
-rw-r--r--deps/v8/src/arm/assembler-arm.h18
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc733
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h220
-rw-r--r--deps/v8/src/arm/codegen-arm.cc138
-rw-r--r--deps/v8/src/arm/codegen-arm.h33
-rw-r--r--deps/v8/src/arm/constants-arm.cc17
-rw-r--r--deps/v8/src/arm/constants-arm.h5
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc7
-rw-r--r--deps/v8/src/arm/disasm-arm.cc12
-rw-r--r--deps/v8/src/arm/frame-constants-arm.cc5
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc20
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc280
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h81
-rw-r--r--deps/v8/src/arm/simulator-arm.cc286
-rw-r--r--deps/v8/src/arm/simulator-arm.h20
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h23
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc74
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h63
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc738
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.h232
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc96
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h32
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc4
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h18
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc252
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc64
-rw-r--r--deps/v8/src/arm64/frame-constants-arm64.cc8
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc6
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h6
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc11
-rw-r--r--deps/v8/src/arm64/instrument-arm64.h5
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc20
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h36
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc514
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h178
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc62
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h11
-rw-r--r--deps/v8/src/arm64/simulator-logic-arm64.cc2
-rw-r--r--deps/v8/src/arm64/utils-arm64.cc6
-rw-r--r--deps/v8/src/asmjs/asm-js.cc125
-rw-r--r--deps/v8/src/asmjs/asm-js.h3
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc42
-rw-r--r--deps/v8/src/asmjs/asm-scanner.cc6
-rw-r--r--deps/v8/src/asmjs/asm-types.cc6
-rw-r--r--deps/v8/src/asmjs/asm-types.h4
-rw-r--r--deps/v8/src/assembler.cc126
-rw-r--r--deps/v8/src/assembler.h96
-rw-r--r--deps/v8/src/assert-scope.cc6
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc410
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h53
-rw-r--r--deps/v8/src/ast/ast-numbering.cc178
-rw-r--r--deps/v8/src/ast/ast-numbering.h9
-rw-r--r--deps/v8/src/ast/ast-source-ranges.h42
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h35
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc167
-rw-r--r--deps/v8/src/ast/ast-value-factory.h256
-rw-r--r--deps/v8/src/ast/ast.cc479
-rw-r--r--deps/v8/src/ast/ast.h923
-rw-r--r--deps/v8/src/ast/context-slot-cache.h4
-rw-r--r--deps/v8/src/ast/modules.h17
-rw-r--r--deps/v8/src/ast/prettyprinter.cc283
-rw-r--r--deps/v8/src/ast/prettyprinter.h16
-rw-r--r--deps/v8/src/ast/scopes.cc78
-rw-r--r--deps/v8/src/ast/scopes.h29
-rw-r--r--deps/v8/src/ast/variables.h4
-rw-r--r--deps/v8/src/bailout-reason.cc2
-rw-r--r--deps/v8/src/bailout-reason.h4
-rw-r--r--deps/v8/src/base.isolate4
-rw-r--r--deps/v8/src/base/bits.cc4
-rw-r--r--deps/v8/src/base/bits.h187
-rw-r--r--deps/v8/src/base/cpu.cc84
-rw-r--r--deps/v8/src/base/debug/stack_trace.cc2
-rw-r--r--deps/v8/src/base/debug/stack_trace_android.cc2
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc28
-rw-r--r--deps/v8/src/base/debug/stack_trace_win.cc16
-rw-r--r--deps/v8/src/base/division-by-constant.cc2
-rw-r--r--deps/v8/src/base/logging.h21
-rw-r--r--deps/v8/src/base/once.h3
-rw-r--r--deps/v8/src/base/platform/OWNERS6
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc2
-rw-r--r--deps/v8/src/base/platform/elapsed-timer.h4
-rw-r--r--deps/v8/src/base/platform/mutex.cc2
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc106
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc180
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc104
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc104
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc116
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc113
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc111
-rw-r--r--deps/v8/src/base/platform/platform-posix-time.cc2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc350
-rw-r--r--deps/v8/src/base/platform/platform-posix.h2
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc105
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc104
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc366
-rw-r--r--deps/v8/src/base/platform/platform.h92
-rw-r--r--deps/v8/src/base/platform/semaphore.cc8
-rw-r--r--deps/v8/src/base/platform/time.cc18
-rw-r--r--deps/v8/src/base/platform/time.h2
-rw-r--r--deps/v8/src/base/sys-info.cc8
-rw-r--r--deps/v8/src/base/template-utils.h43
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc87
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h20
-rw-r--r--deps/v8/src/bignum-dtoa.cc20
-rw-r--r--deps/v8/src/bignum.cc30
-rw-r--r--deps/v8/src/bootstrapper.cc725
-rw-r--r--deps/v8/src/bootstrapper.h2
-rw-r--r--deps/v8/src/boxed-float.h60
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc119
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc926
-rw-r--r--deps/v8/src/builtins/builtins-api.cc3
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc678
-rw-r--r--deps/v8/src/builtins/builtins-array.cc79
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc7
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc56
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc13
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc63
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc13
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc1245
-rw-r--r--deps/v8/src/builtins/builtins-console.cc66
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc191
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h5
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc37
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-date.cc2
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h57
-rw-r--r--deps/v8/src/builtins/builtins-error.cc13
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-function.cc2
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc49
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc61
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc2
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc25
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.cc43
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.h36
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc316
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc307
-rw-r--r--deps/v8/src/builtins/builtins-object.cc42
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc161
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.h14
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h6
-rw-r--r--deps/v8/src/builtins/builtins-reflect-gen.cc25
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc37
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc367
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc802
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h19
-rw-r--r--deps/v8/src/builtins/builtins-string.cc61
-rw-r--r--deps/v8/src/builtins/builtins-typedarray-gen.cc47
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc6
-rw-r--r--deps/v8/src/builtins/builtins-utils.h6
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins.cc38
-rw-r--r--deps/v8/src/builtins/builtins.h2
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc158
-rw-r--r--deps/v8/src/builtins/mips/OWNERS5
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc76
-rw-r--r--deps/v8/src/builtins/mips64/OWNERS5
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc84
-rw-r--r--deps/v8/src/builtins/ppc/OWNERS3
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc81
-rw-r--r--deps/v8/src/builtins/s390/OWNERS3
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc82
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc95
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc150
-rw-r--r--deps/v8/src/cached-powers.cc2
-rw-r--r--deps/v8/src/code-events.h6
-rw-r--r--deps/v8/src/code-factory.cc75
-rw-r--r--deps/v8/src/code-factory.h11
-rw-r--r--deps/v8/src/code-stub-assembler.cc3337
-rw-r--r--deps/v8/src/code-stub-assembler.h445
-rw-r--r--deps/v8/src/code-stubs.cc80
-rw-r--r--deps/v8/src/code-stubs.h143
-rw-r--r--deps/v8/src/codegen.cc205
-rw-r--r--deps/v8/src/codegen.h75
-rw-r--r--deps/v8/src/collector.h10
-rw-r--r--deps/v8/src/compilation-cache.cc8
-rw-r--r--deps/v8/src/compilation-cache.h2
-rw-r--r--deps/v8/src/compilation-dependencies.cc13
-rw-r--r--deps/v8/src/compilation-dependencies.h3
-rw-r--r--deps/v8/src/compilation-info.cc45
-rw-r--r--deps/v8/src/compilation-info.h73
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc2
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h65
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc115
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h42
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc35
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc17
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc278
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h71
-rw-r--r--deps/v8/src/compiler.cc720
-rw-r--r--deps/v8/src/compiler.h56
-rw-r--r--deps/v8/src/compiler/access-builder.cc14
-rw-r--r--deps/v8/src/compiler/access-builder.h3
-rw-r--r--deps/v8/src/compiler/access-info.cc18
-rw-r--r--deps/v8/src/compiler/all-nodes.h4
-rw-r--r--deps/v8/src/compiler/allocation-builder.h98
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc113
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc18
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc153
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc43
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc5
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h3
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc4
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc198
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h3
-rw-r--r--deps/v8/src/compiler/c-linkage.cc2
-rw-r--r--deps/v8/src/compiler/code-assembler.cc107
-rw-r--r--deps/v8/src/compiler/code-assembler.h497
-rw-r--r--deps/v8/src/compiler/code-generator.cc123
-rw-r--r--deps/v8/src/compiler/code-generator.h26
-rw-r--r--deps/v8/src/compiler/common-operator.cc15
-rw-r--r--deps/v8/src/compiler/common-operator.h5
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc172
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h26
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc691
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h24
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc12
-rw-r--r--deps/v8/src/compiler/frame-states.cc6
-rw-r--r--deps/v8/src/compiler/frame-states.h2
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc4
-rw-r--r--deps/v8/src/compiler/graph-assembler.h5
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc562
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h76
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc74
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc50
-rw-r--r--deps/v8/src/compiler/instruction-codes.h2
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc2
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc74
-rw-r--r--deps/v8/src/compiler/instruction-selector.h4
-rw-r--r--deps/v8/src/compiler/instruction.h3
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc102
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc507
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h5
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc1001
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h32
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc7
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc130
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc50
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-graph.cc3
-rw-r--r--deps/v8/src/compiler/js-graph.h7
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc21
-rw-r--r--deps/v8/src/compiler/js-inlining.cc62
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc29
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc554
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h38
-rw-r--r--deps/v8/src/compiler/js-operator.cc100
-rw-r--r--deps/v8/src/compiler/js-operator.h49
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc78
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h5
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc387
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h11
-rw-r--r--deps/v8/src/compiler/linkage.cc29
-rw-r--r--deps/v8/src/compiler/linkage.h23
-rw-r--r--deps/v8/src/compiler/load-elimination.cc95
-rw-r--r--deps/v8/src/compiler/load-elimination.h9
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc9
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc1
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc11
-rw-r--r--deps/v8/src/compiler/machine-operator.h4
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc23
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h2
-rw-r--r--deps/v8/src/compiler/mips/OWNERS5
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc68
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc8
-rw-r--r--deps/v8/src/compiler/mips64/OWNERS5
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc81
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc16
-rw-r--r--deps/v8/src/compiler/node-properties.cc111
-rw-r--r--deps/v8/src/compiler/node-properties.h12
-rw-r--r--deps/v8/src/compiler/opcodes.h74
-rw-r--r--deps/v8/src/compiler/operation-typer.cc220
-rw-r--r--deps/v8/src/compiler/operation-typer.h12
-rw-r--r--deps/v8/src/compiler/operator-properties.cc9
-rw-r--r--deps/v8/src/compiler/operator.cc4
-rw-r--r--deps/v8/src/compiler/operator.h4
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc4
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h3
-rw-r--r--deps/v8/src/compiler/pipeline.cc512
-rw-r--r--deps/v8/src/compiler/pipeline.h26
-rw-r--r--deps/v8/src/compiler/ppc/OWNERS3
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc144
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc23
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc17
-rw-r--r--deps/v8/src/compiler/property-access-builder.h2
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc2
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h1
-rw-r--r--deps/v8/src/compiler/representation-change.cc27
-rw-r--r--deps/v8/src/compiler/s390/OWNERS3
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc176
-rw-r--r--deps/v8/src/compiler/s390/instruction-codes-s390.h1
-rw-r--r--deps/v8/src/compiler/s390/instruction-scheduler-s390.cc1
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc40
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc227
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h13
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc351
-rw-r--r--deps/v8/src/compiler/simplified-operator.h75
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc9
-rw-r--r--deps/v8/src/compiler/type-cache.h1
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc170
-rw-r--r--deps/v8/src/compiler/typed-optimization.h6
-rw-r--r--deps/v8/src/compiler/typer.cc270
-rw-r--r--deps/v8/src/compiler/types.cc59
-rw-r--r--deps/v8/src/compiler/types.h20
-rw-r--r--deps/v8/src/compiler/verifier.cc171
-rw-r--r--deps/v8/src/compiler/verifier.h4
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc982
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h161
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc30
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc77
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc6
-rw-r--r--deps/v8/src/contexts-inl.h3
-rw-r--r--deps/v8/src/contexts.cc7
-rw-r--r--deps/v8/src/contexts.h7
-rw-r--r--deps/v8/src/conversions-inl.h15
-rw-r--r--deps/v8/src/conversions.cc200
-rw-r--r--deps/v8/src/conversions.h15
-rw-r--r--deps/v8/src/counters-inl.h8
-rw-r--r--deps/v8/src/counters.cc36
-rw-r--r--deps/v8/src/counters.h222
-rw-r--r--deps/v8/src/d8-console.cc64
-rw-r--r--deps/v8/src/d8-console.h6
-rw-r--r--deps/v8/src/d8-posix.cc49
-rw-r--r--deps/v8/src/d8.cc406
-rw-r--r--deps/v8/src/d8.h44
-rw-r--r--deps/v8/src/date.cc26
-rw-r--r--deps/v8/src/date.h4
-rw-r--r--deps/v8/src/dateparser-inl.h2
-rw-r--r--deps/v8/src/dateparser.cc2
-rw-r--r--deps/v8/src/debug/OWNERS2
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc2
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc3
-rw-r--r--deps/v8/src/debug/debug-coverage.cc50
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc232
-rw-r--r--deps/v8/src/debug/debug-evaluate.h6
-rw-r--r--deps/v8/src/debug/debug-frames.cc45
-rw-r--r--deps/v8/src/debug/debug-frames.h4
-rw-r--r--deps/v8/src/debug/debug-interface.h19
-rw-r--r--deps/v8/src/debug/debug-scopes.cc4
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc35
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.h2
-rw-r--r--deps/v8/src/debug/debug-type-profile.cc67
-rw-r--r--deps/v8/src/debug/debug.cc126
-rw-r--r--deps/v8/src/debug/debug.h6
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc2
-rw-r--r--deps/v8/src/debug/interface-types.h8
-rw-r--r--deps/v8/src/debug/liveedit.cc64
-rw-r--r--deps/v8/src/debug/liveedit.h5
-rw-r--r--deps/v8/src/debug/mips/OWNERS5
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc2
-rw-r--r--deps/v8/src/debug/mips64/OWNERS5
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc2
-rw-r--r--deps/v8/src/debug/mirrors.js39
-rw-r--r--deps/v8/src/debug/ppc/OWNERS3
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc2
-rw-r--r--deps/v8/src/debug/s390/OWNERS3
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc2
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc2
-rw-r--r--deps/v8/src/deoptimize-reason.h6
-rw-r--r--deps/v8/src/deoptimizer.cc540
-rw-r--r--deps/v8/src/deoptimizer.h36
-rw-r--r--deps/v8/src/disassembler.cc21
-rw-r--r--deps/v8/src/disassembler.h2
-rw-r--r--deps/v8/src/diy-fp.h2
-rw-r--r--deps/v8/src/double.h8
-rw-r--r--deps/v8/src/eh-frame.cc24
-rw-r--r--deps/v8/src/elements.cc270
-rw-r--r--deps/v8/src/elements.h6
-rw-r--r--deps/v8/src/execution.cc25
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc2
-rw-r--r--deps/v8/src/external-reference-table.cc22
-rw-r--r--deps/v8/src/factory-inl.h10
-rw-r--r--deps/v8/src/factory.cc720
-rw-r--r--deps/v8/src/factory.h156
-rw-r--r--deps/v8/src/fast-dtoa.cc10
-rw-r--r--deps/v8/src/feedback-vector-inl.h13
-rw-r--r--deps/v8/src/feedback-vector.cc150
-rw-r--r--deps/v8/src/feedback-vector.h70
-rw-r--r--deps/v8/src/field-index-inl.h46
-rw-r--r--deps/v8/src/field-index.h75
-rw-r--r--deps/v8/src/fixed-dtoa.cc6
-rw-r--r--deps/v8/src/flag-definitions.h162
-rw-r--r--deps/v8/src/flags.cc45
-rw-r--r--deps/v8/src/frame-constants.h11
-rw-r--r--deps/v8/src/frames-inl.h5
-rw-r--r--deps/v8/src/frames.cc502
-rw-r--r--deps/v8/src/frames.h40
-rw-r--r--deps/v8/src/futex-emulation.cc2
-rw-r--r--deps/v8/src/gdb-jit.cc69
-rw-r--r--deps/v8/src/global-handles.cc179
-rw-r--r--deps/v8/src/global-handles.h17
-rw-r--r--deps/v8/src/globals.h121
-rw-r--r--deps/v8/src/handles-inl.h4
-rw-r--r--deps/v8/src/handles.cc6
-rw-r--r--deps/v8/src/handles.h42
-rw-r--r--deps/v8/src/heap-symbols.h400
-rw-r--r--deps/v8/src/heap/array-buffer-collector.cc58
-rw-r--r--deps/v8/src/heap/array-buffer-collector.h51
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h23
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc44
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h13
-rw-r--r--deps/v8/src/heap/code-stats.cc6
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc62
-rw-r--r--deps/v8/src/heap/gc-tracer.cc5
-rw-r--r--deps/v8/src/heap/heap-inl.h79
-rw-r--r--deps/v8/src/heap/heap.cc755
-rw-r--r--deps/v8/src/heap/heap.h278
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc2
-rw-r--r--deps/v8/src/heap/incremental-marking.cc333
-rw-r--r--deps/v8/src/heap/incremental-marking.h61
-rw-r--r--deps/v8/src/heap/local-allocator.h11
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h324
-rw-r--r--deps/v8/src/heap/mark-compact.cc649
-rw-r--r--deps/v8/src/heap/mark-compact.h202
-rw-r--r--deps/v8/src/heap/memory-reducer.cc2
-rw-r--r--deps/v8/src/heap/object-stats.cc67
-rw-r--r--deps/v8/src/heap/object-stats.h52
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h230
-rw-r--r--deps/v8/src/heap/objects-visiting.cc45
-rw-r--r--deps/v8/src/heap/objects-visiting.h40
-rw-r--r--deps/v8/src/heap/remembered-set.h4
-rw-r--r--deps/v8/src/heap/scavenger-inl.h4
-rw-r--r--deps/v8/src/heap/scavenger.cc28
-rw-r--r--deps/v8/src/heap/scavenger.h28
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc95
-rw-r--r--deps/v8/src/heap/slot-set.h2
-rw-r--r--deps/v8/src/heap/spaces-inl.h12
-rw-r--r--deps/v8/src/heap/spaces.cc372
-rw-r--r--deps/v8/src/heap/spaces.h69
-rw-r--r--deps/v8/src/heap/store-buffer.cc8
-rw-r--r--deps/v8/src/heap/sweeper.cc498
-rw-r--r--deps/v8/src/heap/sweeper.h167
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h24
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc60
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h60
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc786
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h306
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc146
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h34
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc1
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc22
-rw-r--r--deps/v8/src/ia32/frame-constants-ia32.cc5
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc36
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc281
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h156
-rw-r--r--deps/v8/src/ic/access-compiler-data.h50
-rw-r--r--deps/v8/src/ic/access-compiler.cc57
-rw-r--r--deps/v8/src/ic/access-compiler.h62
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc664
-rw-r--r--deps/v8/src/ic/accessor-assembler.h12
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc43
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc434
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc50
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc431
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc94
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h7
-rw-r--r--deps/v8/src/ic/call-optimization.cc15
-rw-r--r--deps/v8/src/ic/call-optimization.h5
-rw-r--r--deps/v8/src/ic/handler-compiler.cc154
-rw-r--r--deps/v8/src/ic/handler-compiler.h191
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h106
-rw-r--r--deps/v8/src/ic/handler-configuration.cc188
-rw-r--r--deps/v8/src/ic/handler-configuration.h112
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc40
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc449
-rw-r--r--deps/v8/src/ic/ic-inl.h8
-rw-r--r--deps/v8/src/ic/ic.cc588
-rw-r--r--deps/v8/src/ic/ic.h57
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc62
-rw-r--r--deps/v8/src/ic/mips/OWNERS3
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc41
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc413
-rw-r--r--deps/v8/src/ic/mips64/OWNERS3
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc41
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc413
-rw-r--r--deps/v8/src/ic/ppc/OWNERS6
-rw-r--r--deps/v8/src/ic/ppc/access-compiler-ppc.cc41
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc429
-rw-r--r--deps/v8/src/ic/s390/OWNERS6
-rw-r--r--deps/v8/src/ic/s390/access-compiler-s390.cc42
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc412
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc41
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc421
-rw-r--r--deps/v8/src/icu_util.cc6
-rw-r--r--deps/v8/src/inspector/injected-script-source.js30
-rw-r--r--deps/v8/src/inspector/injected-script.cc52
-rw-r--r--deps/v8/src/inspector/injected-script.h9
-rw-r--r--deps/v8/src/inspector/injected_script_externs.js7
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json2
-rw-r--r--deps/v8/src/inspector/js_protocol-1.3.json1205
-rw-r--r--deps/v8/src/inspector/js_protocol.json137
-rw-r--r--deps/v8/src/inspector/string-16.cc24
-rw-r--r--deps/v8/src/inspector/string-16.h1
-rw-r--r--deps/v8/src/inspector/string-util.cc28
-rw-r--r--deps/v8/src/inspector/string-util.h4
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc10
-rw-r--r--deps/v8/src/inspector/v8-console.cc18
-rw-r--r--deps/v8/src/inspector/v8-console.h4
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc132
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h16
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc221
-rw-r--r--deps/v8/src/inspector/v8-debugger.h44
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc19
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc37
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.h2
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc13
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h4
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc122
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h23
-rw-r--r--deps/v8/src/inspector/v8-value-utils.cc3
-rw-r--r--deps/v8/src/interface-descriptors.cc12
-rw-r--r--deps/v8/src/interface-descriptors.h36
-rw-r--r--deps/v8/src/interpreter/block-coverage-builder.h13
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc8
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc168
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h36
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc10
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc6
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1070
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h78
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h10
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc4
-rw-r--r--deps/v8/src/interpreter/bytecodes.h56
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc94
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h29
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc27
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h22
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc443
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h31
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc1114
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc43
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h2
-rw-r--r--deps/v8/src/interpreter/interpreter.cc144
-rw-r--r--deps/v8/src/interpreter/interpreter.h24
-rw-r--r--deps/v8/src/interpreter/setup-interpreter-internal.cc38
-rw-r--r--deps/v8/src/interpreter/setup-interpreter.h4
-rw-r--r--deps/v8/src/isolate-inl.h2
-rw-r--r--deps/v8/src/isolate.cc608
-rw-r--r--deps/v8/src/isolate.h129
-rw-r--r--deps/v8/src/js/array.js39
-rw-r--r--deps/v8/src/js/intl.js75
-rw-r--r--deps/v8/src/js/macros.py37
-rw-r--r--deps/v8/src/js/string.js151
-rw-r--r--deps/v8/src/js/typedarray.js57
-rw-r--r--deps/v8/src/js/v8natives.js2
-rw-r--r--deps/v8/src/js/weak-collection.js127
-rw-r--r--deps/v8/src/json-parser.cc28
-rw-r--r--deps/v8/src/json-parser.h3
-rw-r--r--deps/v8/src/json-stringifier.cc42
-rw-r--r--deps/v8/src/keys.cc8
-rw-r--r--deps/v8/src/label.h36
-rw-r--r--deps/v8/src/layout-descriptor-inl.h6
-rw-r--r--deps/v8/src/layout-descriptor.cc11
-rw-r--r--deps/v8/src/libplatform/default-background-task-runner.cc59
-rw-r--r--deps/v8/src/libplatform/default-background-task-runner.h45
-rw-r--r--deps/v8/src/libplatform/default-foreground-task-runner.cc115
-rw-r--r--deps/v8/src/libplatform/default-foreground-task-runner.h84
-rw-r--r--deps/v8/src/libplatform/default-platform.cc250
-rw-r--r--deps/v8/src/libplatform/default-platform.h47
-rw-r--r--deps/v8/src/libplatform/task-queue.cc13
-rw-r--r--deps/v8/src/libplatform/task-queue.h8
-rw-r--r--deps/v8/src/libplatform/tracing/trace-buffer.cc4
-rw-r--r--deps/v8/src/libplatform/tracing/trace-config.cc2
-rw-r--r--deps/v8/src/libplatform/tracing/trace-object.cc16
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.cc2
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc35
-rw-r--r--deps/v8/src/libplatform/worker-thread.cc3
-rw-r--r--deps/v8/src/libsampler/sampler.cc4
-rw-r--r--deps/v8/src/locked-queue-inl.h4
-rw-r--r--deps/v8/src/log-utils.cc322
-rw-r--r--deps/v8/src/log-utils.h88
-rw-r--r--deps/v8/src/log.cc794
-rw-r--r--deps/v8/src/log.h79
-rw-r--r--deps/v8/src/lookup-cache-inl.h2
-rw-r--r--deps/v8/src/lookup-cache.cc2
-rw-r--r--deps/v8/src/lookup-cache.h4
-rw-r--r--deps/v8/src/lookup.cc24
-rw-r--r--deps/v8/src/machine-type.h95
-rw-r--r--deps/v8/src/map-updater.cc12
-rw-r--r--deps/v8/src/messages.cc40
-rw-r--r--deps/v8/src/messages.h18
-rw-r--r--deps/v8/src/mips/OWNERS5
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h12
-rw-r--r--deps/v8/src/mips/assembler-mips.cc63
-rw-r--r--deps/v8/src/mips/assembler-mips.h71
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc737
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h252
-rw-r--r--deps/v8/src/mips/codegen-mips.cc134
-rw-r--r--deps/v8/src/mips/codegen-mips.h33
-rw-r--r--deps/v8/src/mips/constants-mips.cc18
-rw-r--r--deps/v8/src/mips/constants-mips.h40
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc3
-rw-r--r--deps/v8/src/mips/disasm-mips.cc10
-rw-r--r--deps/v8/src/mips/frame-constants-mips.cc5
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc34
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc284
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h119
-rw-r--r--deps/v8/src/mips/simulator-mips.cc1059
-rw-r--r--deps/v8/src/mips64/OWNERS5
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h11
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc261
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h74
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc738
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.h257
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc133
-rw-r--r--deps/v8/src/mips64/codegen-mips64.h33
-rw-r--r--deps/v8/src/mips64/constants-mips64.cc18
-rw-r--r--deps/v8/src/mips64/constants-mips64.h44
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc5
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc10
-rw-r--r--deps/v8/src/mips64/frame-constants-mips64.cc5
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc34
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc310
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h133
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc998
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h27
-rw-r--r--deps/v8/src/objects-debug.cc113
-rw-r--r--deps/v8/src/objects-inl.h1611
-rw-r--r--deps/v8/src/objects-printer.cc271
-rw-r--r--deps/v8/src/objects.cc1285
-rw-r--r--deps/v8/src/objects.h1798
-rw-r--r--deps/v8/src/objects/arguments-inl.h27
-rw-r--r--deps/v8/src/objects/arguments.h4
-rw-r--r--deps/v8/src/objects/bigint-inl.h56
-rw-r--r--deps/v8/src/objects/bigint.cc1381
-rw-r--r--deps/v8/src/objects/bigint.h237
-rw-r--r--deps/v8/src/objects/code-inl.h714
-rw-r--r--deps/v8/src/objects/code.h947
-rw-r--r--deps/v8/src/objects/compilation-cache-inl.h2
-rw-r--r--deps/v8/src/objects/compilation-cache.h1
-rw-r--r--deps/v8/src/objects/debug-objects.cc4
-rw-r--r--deps/v8/src/objects/debug-objects.h2
-rw-r--r--deps/v8/src/objects/descriptor-array.h19
-rw-r--r--deps/v8/src/objects/dictionary.h70
-rw-r--r--deps/v8/src/objects/frame-array.h5
-rw-r--r--deps/v8/src/objects/hash-table-inl.h9
-rw-r--r--deps/v8/src/objects/hash-table.h33
-rw-r--r--deps/v8/src/objects/intl-objects.cc164
-rw-r--r--deps/v8/src/objects/js-array-inl.h243
-rw-r--r--deps/v8/src/objects/js-array.h357
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h84
-rw-r--r--deps/v8/src/objects/js-regexp.h164
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h51
-rw-r--r--deps/v8/src/objects/literal-objects.cc532
-rw-r--r--deps/v8/src/objects/literal-objects.h75
-rw-r--r--deps/v8/src/objects/map.h250
-rw-r--r--deps/v8/src/objects/module-inl.h1
-rw-r--r--deps/v8/src/objects/module.cc4
-rw-r--r--deps/v8/src/objects/module.h8
-rw-r--r--deps/v8/src/objects/name-inl.h2
-rw-r--r--deps/v8/src/objects/name.h7
-rw-r--r--deps/v8/src/objects/object-macros-undef.h2
-rw-r--r--deps/v8/src/objects/object-macros.h30
-rw-r--r--deps/v8/src/objects/scope-info.cc16
-rw-r--r--deps/v8/src/objects/scope-info.h4
-rw-r--r--deps/v8/src/objects/script-inl.h4
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h49
-rw-r--r--deps/v8/src/objects/shared-function-info.h58
-rw-r--r--deps/v8/src/objects/string-inl.h56
-rw-r--r--deps/v8/src/objects/string-table.h5
-rw-r--r--deps/v8/src/objects/string.h26
-rw-r--r--deps/v8/src/objects/template-objects.cc7
-rw-r--r--deps/v8/src/parsing/background-parsing-task.cc (renamed from deps/v8/src/background-parsing-task.cc)29
-rw-r--r--deps/v8/src/parsing/background-parsing-task.h (renamed from deps/v8/src/background-parsing-task.h)16
-rw-r--r--deps/v8/src/parsing/expression-classifier.h2
-rw-r--r--deps/v8/src/parsing/expression-scope-reparenter.cc12
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc2
-rw-r--r--deps/v8/src/parsing/parse-info.cc29
-rw-r--r--deps/v8/src/parsing/parse-info.h32
-rw-r--r--deps/v8/src/parsing/parser-base.h402
-rw-r--r--deps/v8/src/parsing/parser.cc578
-rw-r--r--deps/v8/src/parsing/parser.h163
-rw-r--r--deps/v8/src/parsing/parsing.cc6
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc10
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc98
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.h18
-rw-r--r--deps/v8/src/parsing/preparser.cc42
-rw-r--r--deps/v8/src/parsing/preparser.h155
-rw-r--r--deps/v8/src/parsing/rewriter.cc11
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc8
-rw-r--r--deps/v8/src/parsing/scanner.cc59
-rw-r--r--deps/v8/src/parsing/scanner.h30
-rw-r--r--deps/v8/src/parsing/token.h397
-rw-r--r--deps/v8/src/pending-compilation-error-handler.cc98
-rw-r--r--deps/v8/src/pending-compilation-error-handler.h91
-rw-r--r--deps/v8/src/perf-jit.cc9
-rw-r--r--deps/v8/src/ppc/OWNERS3
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h20
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc37
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h20
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc710
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h242
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc117
-rw-r--r--deps/v8/src/ppc/codegen-ppc.h28
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc3
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc14
-rw-r--r--deps/v8/src/ppc/frame-constants-ppc.cc5
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc18
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc401
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h96
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc130
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc8
-rw-r--r--deps/v8/src/profiler/circular-queue-inl.h4
-rw-r--r--deps/v8/src/profiler/circular-queue.h7
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h6
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc64
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h2
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc18
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator-inl.h2
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc232
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h21
-rw-r--r--deps/v8/src/profiler/profile-generator.cc26
-rw-r--r--deps/v8/src/profiler/profile-generator.h4
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc66
-rw-r--r--deps/v8/src/profiler/profiler-listener.h8
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc5
-rw-r--r--deps/v8/src/profiler/strings-storage.cc6
-rw-r--r--deps/v8/src/profiler/tick-sample.cc2
-rw-r--r--deps/v8/src/profiler/unbound-queue-inl.h8
-rw-r--r--deps/v8/src/property-details.h14
-rw-r--r--deps/v8/src/prototype.h6
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc8
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc20
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h2
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc8
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h2
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h6
-rw-r--r--deps/v8/src/regexp/jsregexp.cc479
-rw-r--r--deps/v8/src/regexp/jsregexp.h153
-rw-r--r--deps/v8/src/regexp/mips/OWNERS5
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc10
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h2
-rw-r--r--deps/v8/src/regexp/mips64/OWNERS5
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc10
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h2
-rw-r--r--deps/v8/src/regexp/ppc/OWNERS3
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc8
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h2
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc30
-rw-r--r--deps/v8/src/regexp/regexp-ast.h55
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.h15
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc4
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h8
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc362
-rw-r--r--deps/v8/src/regexp/regexp-parser.h103
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc8
-rw-r--r--deps/v8/src/regexp/regexp-stack.h4
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc33
-rw-r--r--deps/v8/src/regexp/regexp-utils.h6
-rw-r--r--deps/v8/src/regexp/s390/OWNERS3
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc8
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h2
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc10
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h2
-rw-r--r--deps/v8/src/register-configuration.cc5
-rw-r--r--deps/v8/src/reglist.h25
-rw-r--r--deps/v8/src/runtime-profiler.cc5
-rw-r--r--deps/v8/src/runtime/runtime-array.cc54
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc117
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc565
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc28
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc60
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc69
-rw-r--r--deps/v8/src/runtime/runtime-function.cc21
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc11
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc57
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc49
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc5
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc1
-rw-r--r--deps/v8/src/runtime/runtime-module.cc7
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc31
-rw-r--r--deps/v8/src/runtime/runtime-object.cc73
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc12
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc5
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc113
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc37
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc98
-rw-r--r--deps/v8/src/runtime/runtime-test.cc238
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc10
-rw-r--r--deps/v8/src/runtime/runtime-utils.h6
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc124
-rw-r--r--deps/v8/src/runtime/runtime.cc4
-rw-r--r--deps/v8/src/runtime/runtime.h118
-rw-r--r--deps/v8/src/s390/OWNERS3
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h18
-rw-r--r--deps/v8/src/s390/assembler-s390.cc29
-rw-r--r--deps/v8/src/s390/assembler-s390.h20
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc683
-rw-r--r--deps/v8/src/s390/code-stubs-s390.h367
-rw-r--r--deps/v8/src/s390/codegen-s390.cc119
-rw-r--r--deps/v8/src/s390/codegen-s390.h30
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc5
-rw-r--r--deps/v8/src/s390/disasm-s390.cc10
-rw-r--r--deps/v8/src/s390/frame-constants-s390.cc5
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc18
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc388
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h118
-rw-r--r--deps/v8/src/s390/simulator-s390.cc135
-rw-r--r--deps/v8/src/safepoint-table.cc39
-rw-r--r--deps/v8/src/safepoint-table.h16
-rw-r--r--deps/v8/src/setup-isolate-deserialize.cc6
-rw-r--r--deps/v8/src/setup-isolate-full.cc6
-rw-r--r--deps/v8/src/snapshot/OWNERS1
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer-allocator.cc289
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer-allocator.h132
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.cc254
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.h82
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.cc67
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.h52
-rw-r--r--deps/v8/src/snapshot/builtin-serializer.cc66
-rw-r--r--deps/v8/src/snapshot/builtin-serializer.h33
-rw-r--r--deps/v8/src/snapshot/builtin-snapshot-utils.cc67
-rw-r--r--deps/v8/src/snapshot/builtin-snapshot-utils.h56
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc12
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.cc246
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.h102
-rw-r--r--deps/v8/src/snapshot/default-serializer-allocator.cc7
-rw-r--r--deps/v8/src/snapshot/default-serializer-allocator.h1
-rw-r--r--deps/v8/src/snapshot/deserializer.cc430
-rw-r--r--deps/v8/src/snapshot/deserializer.h94
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc20
-rw-r--r--deps/v8/src/snapshot/natives-common.cc2
-rw-r--r--deps/v8/src/snapshot/natives-external.cc14
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc10
-rw-r--r--deps/v8/src/snapshot/object-deserializer.h2
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc20
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.h5
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc20
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h3
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc10
-rw-r--r--deps/v8/src/snapshot/serializer-common.h11
-rw-r--r--deps/v8/src/snapshot/serializer.cc119
-rw-r--r--deps/v8/src/snapshot/serializer.h19
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc48
-rw-r--r--deps/v8/src/snapshot/snapshot-empty.cc3
-rw-r--r--deps/v8/src/snapshot/snapshot-external.cc4
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc2
-rw-r--r--deps/v8/src/snapshot/snapshot.h6
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc18
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.h4
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc70
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h11
-rw-r--r--deps/v8/src/source-position-table.cc21
-rw-r--r--deps/v8/src/source-position-table.h13
-rw-r--r--deps/v8/src/source-position.cc8
-rw-r--r--deps/v8/src/source-position.h9
-rw-r--r--deps/v8/src/splay-tree-inl.h30
-rw-r--r--deps/v8/src/splay-tree.h13
-rw-r--r--deps/v8/src/string-builder.h16
-rw-r--r--deps/v8/src/string-case.cc2
-rw-r--r--deps/v8/src/string-hasher-inl.h4
-rw-r--r--deps/v8/src/string-search.h8
-rw-r--r--deps/v8/src/string-stream.cc10
-rw-r--r--deps/v8/src/strtod.cc22
-rw-r--r--deps/v8/src/tracing/trace-event.h97
-rw-r--r--deps/v8/src/transitions-inl.h12
-rw-r--r--deps/v8/src/transitions.cc21
-rw-r--r--deps/v8/src/transitions.h27
-rw-r--r--deps/v8/src/trap-handler/handler-inside.cc14
-rw-r--r--deps/v8/src/trap-handler/handler-outside.cc13
-rw-r--r--deps/v8/src/trap-handler/handler-shared.cc17
-rw-r--r--deps/v8/src/trap-handler/trap-handler-internal.h7
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h5
-rw-r--r--deps/v8/src/type-hints.cc87
-rw-r--r--deps/v8/src/type-hints.h27
-rw-r--r--deps/v8/src/unicode-decoder.cc4
-rw-r--r--deps/v8/src/unicode-decoder.h10
-rw-r--r--deps/v8/src/uri.cc4
-rw-r--r--deps/v8/src/utils.cc27
-rw-r--r--deps/v8/src/utils.h70
-rw-r--r--deps/v8/src/v8.cc7
-rw-r--r--deps/v8/src/v8.gyp104
-rw-r--r--deps/v8/src/v8threads.cc47
-rw-r--r--deps/v8/src/v8threads.h2
-rw-r--r--deps/v8/src/value-serializer.cc125
-rw-r--r--deps/v8/src/value-serializer.h6
-rw-r--r--deps/v8/src/vector.h33
-rw-r--r--deps/v8/src/version.cc2
-rw-r--r--deps/v8/src/visitors.cc2
-rw-r--r--deps/v8/src/visitors.h39
-rw-r--r--deps/v8/src/wasm/baseline/DEPS5
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h65
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h65
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h177
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc389
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h378
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc550
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h65
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h65
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h65
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h65
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h190
-rw-r--r--deps/v8/src/wasm/decoder.h76
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h679
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc298
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h7
-rw-r--r--deps/v8/src/wasm/module-compiler.cc2138
-rw-r--r--deps/v8/src/wasm/module-compiler.h35
-rw-r--r--deps/v8/src/wasm/module-decoder.cc33
-rw-r--r--deps/v8/src/wasm/module-decoder.h2
-rw-r--r--deps/v8/src/wasm/signature-map.cc5
-rw-r--r--deps/v8/src/wasm/signature-map.h11
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc55
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h28
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc131
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.h11
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.cc38
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.h38
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc149
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc4
-rw-r--r--deps/v8/src/wasm/wasm-heap.cc785
-rw-r--r--deps/v8/src/wasm/wasm-heap.h323
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc833
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h9
-rw-r--r--deps/v8/src/wasm/wasm-js.cc43
-rw-r--r--deps/v8/src/wasm/wasm-limits.h8
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc11
-rw-r--r--deps/v8/src/wasm/wasm-memory.h2
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc2
-rw-r--r--deps/v8/src/wasm/wasm-module.cc143
-rw-r--r--deps/v8/src/wasm/wasm-module.h24
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h36
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc612
-rw-r--r--deps/v8/src/wasm/wasm-objects.h146
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc82
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h6
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc687
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h96
-rw-r--r--deps/v8/src/wasm/wasm-text.cc32
-rw-r--r--deps/v8/src/wasm/wasm-value.h85
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h17
-rw-r--r--deps/v8/src/x64/assembler-x64.cc86
-rw-r--r--deps/v8/src/x64/assembler-x64.h54
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc694
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h300
-rw-r--r--deps/v8/src/x64/codegen-x64.cc155
-rw-r--r--deps/v8/src/x64/codegen-x64.h99
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc3
-rw-r--r--deps/v8/src/x64/disasm-x64.cc17
-rw-r--r--deps/v8/src/x64/frame-constants-x64.cc5
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc22
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc610
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h260
-rw-r--r--deps/v8/src/zone/zone-handle-set.h59
-rw-r--r--deps/v8/src/zone/zone-list-inl.h2
-rw-r--r--deps/v8/src/zone/zone.cc2
-rw-r--r--deps/v8/src/zone/zone.h17
-rw-r--r--deps/v8/test/benchmarks/testcfg.py54
-rw-r--r--deps/v8/test/cctest/BUILD.gn16
-rw-r--r--deps/v8/test/cctest/OWNERS5
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.cc33
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.h28
-rw-r--r--deps/v8/test/cctest/cctest.cc39
-rw-r--r--deps/v8/test/cctest/cctest.gyp17
-rw-r--r--deps/v8/test/cctest/cctest.h35
-rw-r--r--deps/v8/test/cctest/cctest.status13
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h13
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc2
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h9
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc8
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h16
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc996
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-visualizer.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc16
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc16
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc9
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc43
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc121
-rw-r--r--deps/v8/test/cctest/compiler/test-run-variables.cc66
-rw-r--r--deps/v8/test/cctest/compiler/test-run-wasm-machops.cc18
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.cc21
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h501
-rw-r--r--deps/v8/test/cctest/expression-type-collector-macros.h10
-rw-r--r--deps/v8/test/cctest/gay-fixed.cc2
-rw-r--r--deps/v8/test/cctest/gay-precision.cc2
-rw-r--r--deps/v8/test/cctest/gay-shortest.cc2
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h2
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc14
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc18
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc342
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc11
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc86
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h1
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden1050
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden54
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden171
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden337
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden56
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden76
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden34
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden112
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden46
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden1047
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden1294
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden4218
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden54
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden84
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden37
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden1051
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden1050
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden7
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc13
-rw-r--r--deps/v8/test/cctest/interpreter/source-position-matcher.cc2
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc163
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc10
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc522
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc111
-rw-r--r--deps/v8/test/cctest/log-eq-of-logging-and-traversal.js22
-rw-r--r--deps/v8/test/cctest/parsing/test-preparser.cc329
-rw-r--r--deps/v8/test/cctest/print-extension.cc2
-rw-r--r--deps/v8/test/cctest/profiler-extension.cc2
-rw-r--r--deps/v8/test/cctest/scope-test-helper.h18
-rw-r--r--deps/v8/test/cctest/test-accessors.cc10
-rw-r--r--deps/v8/test/cctest/test-allocation.cc8
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc2
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc33
-rw-r--r--deps/v8/test/cctest/test-api.cc1036
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc346
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc416
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc6
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc2630
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc2653
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc26
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc22
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc298
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc346
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc53
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc54
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc34
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc52
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips64.cc52
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc40
-rw-r--r--deps/v8/test/cctest/test-compiler.cc39
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc120
-rw-r--r--deps/v8/test/cctest/test-date.cc4
-rw-r--r--deps/v8/test/cctest/test-debug.cc489
-rw-r--r--deps/v8/test/cctest/test-decls.cc42
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc36
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc12
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc2
-rw-r--r--deps/v8/test/cctest/test-diy-fp.cc14
-rw-r--r--deps/v8/test/cctest/test-double.cc2
-rw-r--r--deps/v8/test/cctest/test-elements-kind.cc9
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc4
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc22
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc338
-rw-r--r--deps/v8/test/cctest/test-hashmap.cc12
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc37
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc12
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc18
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc2
-rw-r--r--deps/v8/test/cctest/test-log.cc503
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc32
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc26
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc34
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc575
-rw-r--r--deps/v8/test/cctest/test-modules.cc111
-rw-r--r--deps/v8/test/cctest/test-object.cc3
-rw-r--r--deps/v8/test/cctest/test-parsing.cc3678
-rw-r--r--deps/v8/test/cctest/test-platform-linux.cc53
-rw-r--r--deps/v8/test/cctest/test-platform-win32.cc53
-rw-r--r--deps/v8/test/cctest/test-platform.cc56
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc20
-rw-r--r--deps/v8/test/cctest/test-random-number-generator.cc2
-rw-r--r--deps/v8/test/cctest/test-regexp.cc128
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm.cc4
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc4
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x64.cc4
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc14
-rw-r--r--deps/v8/test/cctest/test-serialize.cc339
-rw-r--r--deps/v8/test/cctest/test-strings.cc25
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm.cc (renamed from deps/v8/test/cctest/test-simulator-arm.cc)85
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm64.cc (renamed from deps/v8/test/cctest/test-simulator-arm64.cc)56
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc38
-rw-r--r--deps/v8/test/cctest/test-threads.cc4
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc43
-rw-r--r--deps/v8/test/cctest/test-types.cc21
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc18
-rw-r--r--deps/v8/test/cctest/test-usecounters.cc59
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc6
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h8
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc13
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc8
-rw-r--r--deps/v8/test/cctest/testcfg.py18
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc42
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc27
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc123
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc104
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc90
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc87
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc53
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc185
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc124
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc25
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-codegen.cc101
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc49
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc8
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc200
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h117
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h10
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h51
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc52
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.h13
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-arguments.js60
-rw-r--r--deps/v8/test/debugger/debug/debug-script.js7
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-517592.js2
-rw-r--r--deps/v8/test/debugger/testcfg.py15
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc41
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.h11
-rw-r--r--deps/v8/test/fuzzer/regexp.cc2
-rw-r--r--deps/v8/test/fuzzer/testcfg.py12
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc470
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc91
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h9
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/async-instrumentation-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-await-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt24
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt49
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-for-promise.js27
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt42
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-load-more.js44
-rw-r--r--deps/v8/test/inspector/debugger/change-return-value-expected.txt35
-rw-r--r--deps/v8/test/inspector/debugger/change-return-value.js68
-rw-r--r--deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt18
-rw-r--r--deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js2
-rw-r--r--deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt26
-rw-r--r--deps/v8/test/inspector/debugger/collect-old-async-call-chains.js13
-rw-r--r--deps/v8/test/inspector/debugger/external-stack-trace-expected.txt42
-rw-r--r--deps/v8/test/inspector/debugger/external-stack-trace.js170
-rw-r--r--deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt207
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-async-call.js183
-rw-r--r--deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt395
-rw-r--r--deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js13
-rw-r--r--deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-at-last-line-expected.txt16
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-at-last-line.js28
-rw-r--r--deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt13
-rw-r--r--deps/v8/test/inspector/debugger/step-into-break-on-async-call.js61
-rw-r--r--deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt14
-rw-r--r--deps/v8/test/inspector/debugger/step-into-external-async-task.js107
-rw-r--r--deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt106
-rw-r--r--deps/v8/test/inspector/debugger/this-in-arrow-function.js54
-rw-r--r--deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt7
-rw-r--r--deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js48
-rw-r--r--deps/v8/test/inspector/inspector-test.cc104
-rw-r--r--deps/v8/test/inspector/isolate-data.cc59
-rw-r--r--deps/v8/test/inspector/isolate-data.h18
-rw-r--r--deps/v8/test/inspector/protocol-test.js7
-rw-r--r--deps/v8/test/inspector/runtime/await-promise-expected.txt62
-rw-r--r--deps/v8/test/inspector/runtime/console-time-end-format-expected.txt8
-rw-r--r--deps/v8/test/inspector/runtime/console-time-end-format.js52
-rw-r--r--deps/v8/test/inspector/runtime/create-context-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt33
-rw-r--r--deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor.js29
-rw-r--r--deps/v8/test/inspector/testcfg.py20
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt2
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile-start-stop.js10
-rw-r--r--deps/v8/test/intl/date-format/invalid-time.js20
-rwxr-xr-xdeps/v8/test/intl/number-format/format-currency.js2
-rw-r--r--deps/v8/test/intl/testcfg.py11
-rw-r--r--deps/v8/test/js-perf-test/Collections/common.js49
-rw-r--r--deps/v8/test/js-perf-test/Collections/map.js116
-rw-r--r--deps/v8/test/js-perf-test/Collections/run.js1
-rw-r--r--deps/v8/test/js-perf-test/Collections/set.js76
-rw-r--r--deps/v8/test/js-perf-test/Collections/weakmap.js36
-rw-r--r--deps/v8/test/js-perf-test/Collections/weakset.js9
-rw-r--r--deps/v8/test/js-perf-test/ExpressionDepth/run.js38
-rw-r--r--deps/v8/test/js-perf-test/Inspector/debugger.js19
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json12
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed.json8
-rw-r--r--deps/v8/test/message/console.out15
-rw-r--r--deps/v8/test/message/fail/arrow-bare-rest-param.js (renamed from deps/v8/test/message/arrow-bare-rest-param.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-bare-rest-param.out (renamed from deps/v8/test/message/arrow-bare-rest-param.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-formal-parameters.js (renamed from deps/v8/test/message/arrow-formal-parameters.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-formal-parameters.out (renamed from deps/v8/test/message/arrow-formal-parameters.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-invalid-rest-2.js (renamed from deps/v8/test/message/arrow-invalid-rest-2.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-invalid-rest-2.out (renamed from deps/v8/test/message/arrow-invalid-rest-2.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-invalid-rest.js (renamed from deps/v8/test/message/arrow-invalid-rest.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-invalid-rest.out (renamed from deps/v8/test/message/arrow-invalid-rest.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-missing.js (renamed from deps/v8/test/message/arrow-missing.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-missing.out (renamed from deps/v8/test/message/arrow-missing.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-param-after-rest-2.js (renamed from deps/v8/test/message/arrow-param-after-rest-2.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-param-after-rest-2.out (renamed from deps/v8/test/message/arrow-param-after-rest-2.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-param-after-rest.js (renamed from deps/v8/test/message/arrow-param-after-rest.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-param-after-rest.out (renamed from deps/v8/test/message/arrow-param-after-rest.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-strict-eval-bare-parameter.js (renamed from deps/v8/test/message/arrow-strict-eval-bare-parameter.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-strict-eval-bare-parameter.out (renamed from deps/v8/test/message/arrow-strict-eval-bare-parameter.out)0
-rw-r--r--deps/v8/test/message/fail/arrow-two-rest-params.js (renamed from deps/v8/test/message/arrow-two-rest-params.js)0
-rw-r--r--deps/v8/test/message/fail/arrow-two-rest-params.out (renamed from deps/v8/test/message/arrow-two-rest-params.out)0
-rw-r--r--deps/v8/test/message/fail/async-arrow-invalid-rest-2.js (renamed from deps/v8/test/message/async-arrow-invalid-rest-2.js)0
-rw-r--r--deps/v8/test/message/fail/async-arrow-invalid-rest-2.out (renamed from deps/v8/test/message/async-arrow-invalid-rest-2.out)0
-rw-r--r--deps/v8/test/message/fail/async-arrow-invalid-rest.js (renamed from deps/v8/test/message/async-arrow-invalid-rest.js)0
-rw-r--r--deps/v8/test/message/fail/async-arrow-invalid-rest.out (renamed from deps/v8/test/message/async-arrow-invalid-rest.out)0
-rw-r--r--deps/v8/test/message/fail/async-arrow-param-after-rest.js (renamed from deps/v8/test/message/async-arrow-param-after-rest.js)0
-rw-r--r--deps/v8/test/message/fail/async-arrow-param-after-rest.out (renamed from deps/v8/test/message/async-arrow-param-after-rest.out)0
-rw-r--r--deps/v8/test/message/fail/await-non-async.js (renamed from deps/v8/test/message/await-non-async.js)0
-rw-r--r--deps/v8/test/message/fail/await-non-async.out (renamed from deps/v8/test/message/await-non-async.out)0
-rw-r--r--deps/v8/test/message/fail/call-non-constructable.js (renamed from deps/v8/test/message/call-non-constructable.js)0
-rw-r--r--deps/v8/test/message/fail/call-non-constructable.out (renamed from deps/v8/test/message/call-non-constructable.out)0
-rw-r--r--deps/v8/test/message/fail/call-primitive-constructor.js (renamed from deps/v8/test/message/call-primitive-constructor.js)0
-rw-r--r--deps/v8/test/message/fail/call-primitive-constructor.out (renamed from deps/v8/test/message/call-primitive-constructor.out)0
-rw-r--r--deps/v8/test/message/fail/call-primitive-function.js (renamed from deps/v8/test/message/call-primitive-function.js)0
-rw-r--r--deps/v8/test/message/fail/call-primitive-function.out (renamed from deps/v8/test/message/call-primitive-function.out)0
-rw-r--r--deps/v8/test/message/fail/call-undeclared-constructor.js (renamed from deps/v8/test/message/call-undeclared-constructor.js)0
-rw-r--r--deps/v8/test/message/fail/call-undeclared-constructor.out (renamed from deps/v8/test/message/call-undeclared-constructor.out)0
-rw-r--r--deps/v8/test/message/fail/call-undeclared-function.js (renamed from deps/v8/test/message/call-undeclared-function.js)0
-rw-r--r--deps/v8/test/message/fail/call-undeclared-function.out (renamed from deps/v8/test/message/call-undeclared-function.out)0
-rw-r--r--deps/v8/test/message/fail/class-constructor-accessor.js (renamed from deps/v8/test/message/class-constructor-accessor.js)0
-rw-r--r--deps/v8/test/message/fail/class-constructor-accessor.out (renamed from deps/v8/test/message/class-constructor-accessor.out)0
-rw-r--r--deps/v8/test/message/fail/class-constructor-generator.js (renamed from deps/v8/test/message/class-constructor-generator.js)0
-rw-r--r--deps/v8/test/message/fail/class-constructor-generator.out (renamed from deps/v8/test/message/class-constructor-generator.out)0
-rw-r--r--deps/v8/test/message/fail/class-field-constructor.js9
-rw-r--r--deps/v8/test/message/fail/class-field-constructor.out4
-rw-r--r--deps/v8/test/message/fail/class-field-static-constructor.js9
-rw-r--r--deps/v8/test/message/fail/class-field-static-constructor.out4
-rw-r--r--deps/v8/test/message/fail/class-field-static-prototype.js9
-rw-r--r--deps/v8/test/message/fail/class-field-static-prototype.out4
-rw-r--r--deps/v8/test/message/fail/class-spread-property.js (renamed from deps/v8/test/message/class-spread-property.js)0
-rw-r--r--deps/v8/test/message/fail/class-spread-property.out (renamed from deps/v8/test/message/class-spread-property.out)0
-rw-r--r--deps/v8/test/message/fail/console.js (renamed from deps/v8/test/message/console.js)1
-rw-r--r--deps/v8/test/message/fail/console.out15
-rw-r--r--deps/v8/test/message/fail/const-decl-no-init-sloppy.out (renamed from deps/v8/test/message/const-decl-no-init-sloppy.out)0
-rw-r--r--deps/v8/test/message/fail/const-decl-no-init.js (renamed from deps/v8/test/message/const-decl-no-init.js)0
-rw-r--r--deps/v8/test/message/fail/const-decl-no-init.out (renamed from deps/v8/test/message/const-decl-no-init.out)0
-rw-r--r--deps/v8/test/message/fail/const-decl-no-init2.js (renamed from deps/v8/test/message/const-decl-no-init2.js)0
-rw-r--r--deps/v8/test/message/fail/const-decl-no-init2.out (renamed from deps/v8/test/message/const-decl-no-init2.out)0
-rw-r--r--deps/v8/test/message/fail/default-parameter-tdz-arrow.js (renamed from deps/v8/test/message/default-parameter-tdz-arrow.js)0
-rw-r--r--deps/v8/test/message/fail/default-parameter-tdz-arrow.out (renamed from deps/v8/test/message/default-parameter-tdz-arrow.out)0
-rw-r--r--deps/v8/test/message/fail/default-parameter-tdz.js (renamed from deps/v8/test/message/default-parameter-tdz.js)0
-rw-r--r--deps/v8/test/message/fail/default-parameter-tdz.out (renamed from deps/v8/test/message/default-parameter-tdz.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-number.js (renamed from deps/v8/test/message/destructuring-array-non-iterable-number.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-number.out (renamed from deps/v8/test/message/destructuring-array-non-iterable-number.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal-complex.js (renamed from deps/v8/test/message/destructuring-array-non-iterable-object-literal-complex.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal-complex.out (renamed from deps/v8/test/message/destructuring-array-non-iterable-object-literal-complex.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal.js (renamed from deps/v8/test/message/destructuring-array-non-iterable-object-literal.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal.out (renamed from deps/v8/test/message/destructuring-array-non-iterable-object-literal.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object.js (renamed from deps/v8/test/message/destructuring-array-non-iterable-object.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object.out (renamed from deps/v8/test/message/destructuring-array-non-iterable-object.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-undefined.js (renamed from deps/v8/test/message/destructuring-array-non-iterable-undefined.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-undefined.out (renamed from deps/v8/test/message/destructuring-array-non-iterable-undefined.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-array.js (renamed from deps/v8/test/message/destructuring-decl-no-init-array.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-array.out (renamed from deps/v8/test/message/destructuring-decl-no-init-array.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-array2.js (renamed from deps/v8/test/message/destructuring-decl-no-init-array2.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-array2.out (renamed from deps/v8/test/message/destructuring-decl-no-init-array2.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-obj.js (renamed from deps/v8/test/message/destructuring-decl-no-init-obj.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-obj.out (renamed from deps/v8/test/message/destructuring-decl-no-init-obj.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-obj2.js (renamed from deps/v8/test/message/destructuring-decl-no-init-obj2.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-decl-no-init-obj2.out (renamed from deps/v8/test/message/destructuring-decl-no-init-obj2.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-function-non-iterable.js (renamed from deps/v8/test/message/destructuring-function-non-iterable.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-function-non-iterable.out (renamed from deps/v8/test/message/destructuring-function-non-iterable.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-modify-const.js (renamed from deps/v8/test/message/destructuring-modify-const.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-modify-const.out (renamed from deps/v8/test/message/destructuring-modify-const.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-new-callable-non-iterable.js (renamed from deps/v8/test/message/destructuring-new-callable-non-iterable.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-new-callable-non-iterable.out (renamed from deps/v8/test/message/destructuring-new-callable-non-iterable.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-non-function-non-iterable.js (renamed from deps/v8/test/message/destructuring-non-function-non-iterable.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-non-function-non-iterable.out (renamed from deps/v8/test/message/destructuring-non-function-non-iterable.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-computed-property.js (renamed from deps/v8/test/message/destructuring-undefined-computed-property.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-computed-property.out (renamed from deps/v8/test/message/destructuring-undefined-computed-property.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-number-property.js (renamed from deps/v8/test/message/destructuring-undefined-number-property.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-number-property.out (renamed from deps/v8/test/message/destructuring-undefined-number-property.out)0
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-string-property.js (renamed from deps/v8/test/message/destructuring-undefined-string-property.js)0
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-string-property.out (renamed from deps/v8/test/message/destructuring-undefined-string-property.out)0
-rw-r--r--deps/v8/test/message/fail/dynamic-import-missing-specifier.js (renamed from deps/v8/test/message/regress/regress-4829-1.js)8
-rw-r--r--deps/v8/test/message/fail/dynamic-import-missing-specifier.out4
-rw-r--r--deps/v8/test/message/fail/export-duplicate-as.js (renamed from deps/v8/test/message/export-duplicate-as.js)0
-rw-r--r--deps/v8/test/message/fail/export-duplicate-as.out (renamed from deps/v8/test/message/export-duplicate-as.out)0
-rw-r--r--deps/v8/test/message/fail/export-duplicate-default.js (renamed from deps/v8/test/message/export-duplicate-default.js)0
-rw-r--r--deps/v8/test/message/fail/export-duplicate-default.out (renamed from deps/v8/test/message/export-duplicate-default.out)0
-rw-r--r--deps/v8/test/message/fail/export-duplicate.js (renamed from deps/v8/test/message/export-duplicate.js)0
-rw-r--r--deps/v8/test/message/fail/export-duplicate.out (renamed from deps/v8/test/message/export-duplicate.out)0
-rw-r--r--deps/v8/test/message/fail/for-in-let-loop-initializers-strict.js (renamed from deps/v8/test/message/for-in-let-loop-initializers-strict.js)0
-rw-r--r--deps/v8/test/message/fail/for-in-let-loop-initializers-strict.out (renamed from deps/v8/test/message/for-in-let-loop-initializers-strict.out)0
-rw-r--r--deps/v8/test/message/fail/for-in-loop-initializers-destructuring.js (renamed from deps/v8/test/message/for-in-loop-initializers-destructuring.js)0
-rw-r--r--deps/v8/test/message/fail/for-in-loop-initializers-destructuring.out (renamed from deps/v8/test/message/for-in-loop-initializers-destructuring.out)0
-rw-r--r--deps/v8/test/message/fail/for-in-loop-initializers-strict.js (renamed from deps/v8/test/message/for-in-loop-initializers-strict.js)0
-rw-r--r--deps/v8/test/message/fail/for-in-loop-initializers-strict.out (renamed from deps/v8/test/message/for-in-loop-initializers-strict.out)0
-rw-r--r--deps/v8/test/message/fail/for-loop-invalid-lhs.js (renamed from deps/v8/test/message/for-loop-invalid-lhs.js)0
-rw-r--r--deps/v8/test/message/fail/for-loop-invalid-lhs.out (renamed from deps/v8/test/message/for-loop-invalid-lhs.out)0
-rw-r--r--deps/v8/test/message/fail/for-of-let-loop-initializers.js (renamed from deps/v8/test/message/for-of-let-loop-initializers.js)0
-rw-r--r--deps/v8/test/message/fail/for-of-let-loop-initializers.out (renamed from deps/v8/test/message/for-of-let-loop-initializers.out)0
-rw-r--r--deps/v8/test/message/fail/for-of-loop-initializers-sloppy.js (renamed from deps/v8/test/message/for-of-loop-initializers-sloppy.js)0
-rw-r--r--deps/v8/test/message/fail/for-of-loop-initializers-sloppy.out (renamed from deps/v8/test/message/for-of-loop-initializers-sloppy.out)0
-rw-r--r--deps/v8/test/message/fail/for-of-loop-initializers-strict.js (renamed from deps/v8/test/message/for-of-loop-initializers-strict.js)0
-rw-r--r--deps/v8/test/message/fail/for-of-loop-initializers-strict.out (renamed from deps/v8/test/message/for-of-loop-initializers-strict.out)0
-rw-r--r--deps/v8/test/message/fail/for-of-non-iterable.js (renamed from deps/v8/test/message/for-of-non-iterable.js)0
-rw-r--r--deps/v8/test/message/fail/for-of-non-iterable.out (renamed from deps/v8/test/message/for-of-non-iterable.out)0
-rw-r--r--deps/v8/test/message/fail/for-of-throw-in-body.js (renamed from deps/v8/test/message/for-of-throw-in-body.js)0
-rw-r--r--deps/v8/test/message/fail/for-of-throw-in-body.out (renamed from deps/v8/test/message/for-of-throw-in-body.out)0
-rw-r--r--deps/v8/test/message/fail/formal-parameters-bad-rest.js (renamed from deps/v8/test/message/formal-parameters-bad-rest.js)0
-rw-r--r--deps/v8/test/message/fail/formal-parameters-bad-rest.out (renamed from deps/v8/test/message/formal-parameters-bad-rest.out)0
-rw-r--r--deps/v8/test/message/fail/formal-parameters-strict-body.js (renamed from deps/v8/test/message/formal-parameters-strict-body.js)0
-rw-r--r--deps/v8/test/message/fail/formal-parameters-strict-body.out (renamed from deps/v8/test/message/formal-parameters-strict-body.out)0
-rw-r--r--deps/v8/test/message/fail/formal-parameters-trailing-comma.js (renamed from deps/v8/test/message/formal-parameters-trailing-comma.js)0
-rw-r--r--deps/v8/test/message/fail/formal-parameters-trailing-comma.out (renamed from deps/v8/test/message/formal-parameters-trailing-comma.out)0
-rw-r--r--deps/v8/test/message/fail/func-name-inferrer-arg-1.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-694433.js)8
-rw-r--r--deps/v8/test/message/fail/func-name-inferrer-arg-1.out8
-rw-r--r--deps/v8/test/message/fail/func-name-inferrer-arg.js10
-rw-r--r--deps/v8/test/message/fail/func-name-inferrer-arg.out7
-rw-r--r--deps/v8/test/message/fail/function-param-after-rest.js (renamed from deps/v8/test/message/function-param-after-rest.js)0
-rw-r--r--deps/v8/test/message/fail/function-param-after-rest.out (renamed from deps/v8/test/message/function-param-after-rest.out)0
-rw-r--r--deps/v8/test/message/fail/function-sent-escaped.js (renamed from deps/v8/test/message/function-sent-escaped.js)0
-rw-r--r--deps/v8/test/message/fail/function-sent-escaped.out (renamed from deps/v8/test/message/function-sent-escaped.out)0
-rw-r--r--deps/v8/test/message/fail/generators-throw1.js (renamed from deps/v8/test/message/generators-throw1.js)0
-rw-r--r--deps/v8/test/message/fail/generators-throw1.out (renamed from deps/v8/test/message/generators-throw1.out)0
-rw-r--r--deps/v8/test/message/fail/generators-throw2.js (renamed from deps/v8/test/message/generators-throw2.js)0
-rw-r--r--deps/v8/test/message/fail/generators-throw2.out (renamed from deps/v8/test/message/generators-throw2.out)0
-rw-r--r--deps/v8/test/message/fail/get-iterator-return-non-receiver.js (renamed from deps/v8/test/message/get-iterator-return-non-receiver.js)0
-rw-r--r--deps/v8/test/message/fail/get-iterator-return-non-receiver.out (renamed from deps/v8/test/message/get-iterator-return-non-receiver.out)0
-rw-r--r--deps/v8/test/message/fail/get-iterator1.js (renamed from deps/v8/test/message/get-iterator1.js)0
-rw-r--r--deps/v8/test/message/fail/get-iterator1.out (renamed from deps/v8/test/message/get-iterator1.out)0
-rw-r--r--deps/v8/test/message/fail/import-as-eval.js (renamed from deps/v8/test/message/import-as-eval.js)0
-rw-r--r--deps/v8/test/message/fail/import-as-eval.out (renamed from deps/v8/test/message/import-as-eval.out)0
-rw-r--r--deps/v8/test/message/fail/import-as-redeclaration.js (renamed from deps/v8/test/message/import-as-redeclaration.js)0
-rw-r--r--deps/v8/test/message/fail/import-as-redeclaration.out (renamed from deps/v8/test/message/import-as-redeclaration.out)0
-rw-r--r--deps/v8/test/message/fail/import-as-reserved-word.js (renamed from deps/v8/test/message/import-as-reserved-word.js)0
-rw-r--r--deps/v8/test/message/fail/import-as-reserved-word.out (renamed from deps/v8/test/message/import-as-reserved-word.out)0
-rw-r--r--deps/v8/test/message/fail/import-eval.js (renamed from deps/v8/test/message/import-eval.js)0
-rw-r--r--deps/v8/test/message/fail/import-eval.out (renamed from deps/v8/test/message/import-eval.out)0
-rw-r--r--deps/v8/test/message/fail/import-redeclaration.js (renamed from deps/v8/test/message/import-redeclaration.js)0
-rw-r--r--deps/v8/test/message/fail/import-redeclaration.out (renamed from deps/v8/test/message/import-redeclaration.out)0
-rw-r--r--deps/v8/test/message/fail/import-reserved-word.js (renamed from deps/v8/test/message/import-reserved-word.js)0
-rw-r--r--deps/v8/test/message/fail/import-reserved-word.out (renamed from deps/v8/test/message/import-reserved-word.out)0
-rw-r--r--deps/v8/test/message/fail/instanceof-noncallable.js (renamed from deps/v8/test/message/instanceof-noncallable.js)0
-rw-r--r--deps/v8/test/message/fail/instanceof-noncallable.out (renamed from deps/v8/test/message/instanceof-noncallable.out)0
-rw-r--r--deps/v8/test/message/fail/instanceof-nonobject.js (renamed from deps/v8/test/message/instanceof-nonobject.js)0
-rw-r--r--deps/v8/test/message/fail/instanceof-nonobject.out (renamed from deps/v8/test/message/instanceof-nonobject.out)0
-rw-r--r--deps/v8/test/message/fail/invalid-spread-2.js (renamed from deps/v8/test/message/invalid-spread-2.js)0
-rw-r--r--deps/v8/test/message/fail/invalid-spread-2.out (renamed from deps/v8/test/message/invalid-spread-2.out)0
-rw-r--r--deps/v8/test/message/fail/invalid-spread.js (renamed from deps/v8/test/message/invalid-spread.js)0
-rw-r--r--deps/v8/test/message/fail/invalid-spread.out (renamed from deps/v8/test/message/invalid-spread.out)0
-rw-r--r--deps/v8/test/message/fail/isvar.js (renamed from deps/v8/test/message/isvar.js)0
-rw-r--r--deps/v8/test/message/fail/isvar.out (renamed from deps/v8/test/message/isvar.out)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-declaration-destructuring-brace-in-single-statement.js (renamed from deps/v8/test/message/let-lexical-declaration-destructuring-brace-in-single-statement.js)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-declaration-destructuring-brace-in-single-statement.out (renamed from deps/v8/test/message/let-lexical-declaration-destructuring-brace-in-single-statement.out)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-declaration-destructuring-in-single-statement.js (renamed from deps/v8/test/message/let-lexical-declaration-destructuring-in-single-statement.js)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-declaration-destructuring-in-single-statement.out (renamed from deps/v8/test/message/let-lexical-declaration-destructuring-in-single-statement.out)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-declaration-in-single-statement.js (renamed from deps/v8/test/message/let-lexical-declaration-in-single-statement.js)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-declaration-in-single-statement.out (renamed from deps/v8/test/message/let-lexical-declaration-in-single-statement.out)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-name-in-array-prohibited.js (renamed from deps/v8/test/message/let-lexical-name-in-array-prohibited.js)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-name-in-array-prohibited.out (renamed from deps/v8/test/message/let-lexical-name-in-array-prohibited.out)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-name-in-object-prohibited.js (renamed from deps/v8/test/message/let-lexical-name-in-object-prohibited.js)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-name-in-object-prohibited.out (renamed from deps/v8/test/message/let-lexical-name-in-object-prohibited.out)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-name-prohibited.js (renamed from deps/v8/test/message/let-lexical-name-prohibited.js)0
-rw-r--r--deps/v8/test/message/fail/let-lexical-name-prohibited.out (renamed from deps/v8/test/message/let-lexical-name-prohibited.out)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle1.js (renamed from deps/v8/test/message/modules-cycle1.js)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle1.out (renamed from deps/v8/test/message/modules-cycle1.out)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle2.js (renamed from deps/v8/test/message/modules-cycle2.js)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle2.out (renamed from deps/v8/test/message/modules-cycle2.out)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle3.js (renamed from deps/v8/test/message/modules-cycle3.js)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle3.out (renamed from deps/v8/test/message/modules-cycle3.out)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle4.js (renamed from deps/v8/test/message/modules-cycle4.js)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle4.out (renamed from deps/v8/test/message/modules-cycle4.out)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle5.js (renamed from deps/v8/test/message/modules-cycle5.js)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle5.out (renamed from deps/v8/test/message/modules-cycle5.out)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle6.js (renamed from deps/v8/test/message/modules-cycle6.js)0
-rw-r--r--deps/v8/test/message/fail/modules-cycle6.out (renamed from deps/v8/test/message/modules-cycle6.out)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export1.js (renamed from deps/v8/test/message/modules-duplicate-export1.js)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export1.out (renamed from deps/v8/test/message/modules-duplicate-export1.out)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export2.js (renamed from deps/v8/test/message/modules-duplicate-export2.js)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export2.out (renamed from deps/v8/test/message/modules-duplicate-export2.out)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export3.js (renamed from deps/v8/test/message/modules-duplicate-export3.js)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export3.out (renamed from deps/v8/test/message/modules-duplicate-export3.out)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export4.js (renamed from deps/v8/test/message/modules-duplicate-export4.js)0
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export4.out (renamed from deps/v8/test/message/modules-duplicate-export4.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare1.js (renamed from deps/v8/test/message/modules-import-redeclare1.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare1.out (renamed from deps/v8/test/message/modules-import-redeclare1.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare2.js (renamed from deps/v8/test/message/modules-import-redeclare2.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare2.out (renamed from deps/v8/test/message/modules-import-redeclare2.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare3.js (renamed from deps/v8/test/message/modules-import-redeclare3.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare3.out (renamed from deps/v8/test/message/modules-import-redeclare3.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import1.js (renamed from deps/v8/test/message/modules-import1.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import1.out (renamed from deps/v8/test/message/modules-import1.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import2.js (renamed from deps/v8/test/message/modules-import2.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import2.out (renamed from deps/v8/test/message/modules-import2.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import3.js (renamed from deps/v8/test/message/modules-import3.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import3.out (renamed from deps/v8/test/message/modules-import3.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import4.js (renamed from deps/v8/test/message/modules-import4.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import4.out (renamed from deps/v8/test/message/modules-import4.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import5.js (renamed from deps/v8/test/message/modules-import5.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import5.out (renamed from deps/v8/test/message/modules-import5.out)0
-rw-r--r--deps/v8/test/message/fail/modules-import6.js (renamed from deps/v8/test/message/modules-import6.js)0
-rw-r--r--deps/v8/test/message/fail/modules-import6.out (renamed from deps/v8/test/message/modules-import6.out)0
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle2.js (renamed from deps/v8/test/message/modules-skip-cycle2.js)0
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle3.js (renamed from deps/v8/test/message/modules-skip-cycle3.js)0
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle5.js (renamed from deps/v8/test/message/modules-skip-cycle5.js)0
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle6.js (renamed from deps/v8/test/message/modules-skip-cycle6.js)0
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict1.js (renamed from deps/v8/test/message/modules-star-conflict1.js)2
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict1.out (renamed from deps/v8/test/message/modules-star-conflict1.out)2
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict2.js (renamed from deps/v8/test/message/modules-star-conflict2.js)4
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict2.out (renamed from deps/v8/test/message/modules-star-conflict2.out)2
-rw-r--r--deps/v8/test/message/fail/modules-star-default.js (renamed from deps/v8/test/message/modules-star-default.js)0
-rw-r--r--deps/v8/test/message/fail/modules-star-default.out (renamed from deps/v8/test/message/modules-star-default.out)0
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export1.js (renamed from deps/v8/test/message/modules-undefined-export1.js)0
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export1.out (renamed from deps/v8/test/message/modules-undefined-export1.out)0
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export2.js (renamed from deps/v8/test/message/modules-undefined-export2.js)0
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export2.out (renamed from deps/v8/test/message/modules-undefined-export2.out)0
-rw-r--r--deps/v8/test/message/fail/new-target-assignment.js (renamed from deps/v8/test/message/new-target-assignment.js)0
-rw-r--r--deps/v8/test/message/fail/new-target-assignment.out (renamed from deps/v8/test/message/new-target-assignment.out)0
-rw-r--r--deps/v8/test/message/fail/new-target-escaped.js (renamed from deps/v8/test/message/new-target-escaped.js)0
-rw-r--r--deps/v8/test/message/fail/new-target-escaped.out (renamed from deps/v8/test/message/new-target-escaped.out)0
-rw-r--r--deps/v8/test/message/fail/new-target-for-loop.js (renamed from deps/v8/test/message/new-target-for-loop.js)0
-rw-r--r--deps/v8/test/message/fail/new-target-for-loop.out (renamed from deps/v8/test/message/new-target-for-loop.out)0
-rw-r--r--deps/v8/test/message/fail/new-target-postfix-op.js (renamed from deps/v8/test/message/new-target-postfix-op.js)0
-rw-r--r--deps/v8/test/message/fail/new-target-postfix-op.out (renamed from deps/v8/test/message/new-target-postfix-op.out)0
-rw-r--r--deps/v8/test/message/fail/new-target-prefix-op.js (renamed from deps/v8/test/message/new-target-prefix-op.js)0
-rw-r--r--deps/v8/test/message/fail/new-target-prefix-op.out (renamed from deps/v8/test/message/new-target-prefix-op.out)0
-rw-r--r--deps/v8/test/message/fail/nf-yield-in-generator.js (renamed from deps/v8/test/message/nf-yield-in-generator.js)0
-rw-r--r--deps/v8/test/message/fail/nf-yield-in-generator.out (renamed from deps/v8/test/message/nf-yield-in-generator.out)0
-rw-r--r--deps/v8/test/message/fail/nf-yield-strict-in-generator.js (renamed from deps/v8/test/message/nf-yield-strict-in-generator.js)0
-rw-r--r--deps/v8/test/message/fail/nf-yield-strict-in-generator.out (renamed from deps/v8/test/message/nf-yield-strict-in-generator.out)0
-rw-r--r--deps/v8/test/message/fail/nf-yield-strict.js (renamed from deps/v8/test/message/nf-yield-strict.js)0
-rw-r--r--deps/v8/test/message/fail/nf-yield-strict.out (renamed from deps/v8/test/message/nf-yield-strict.out)0
-rw-r--r--deps/v8/test/message/fail/nfe-yield-generator.js (renamed from deps/v8/test/message/nfe-yield-generator.js)0
-rw-r--r--deps/v8/test/message/fail/nfe-yield-generator.out (renamed from deps/v8/test/message/nfe-yield-generator.out)0
-rw-r--r--deps/v8/test/message/fail/nfe-yield-strict.js (renamed from deps/v8/test/message/nfe-yield-strict.js)0
-rw-r--r--deps/v8/test/message/fail/nfe-yield-strict.out (renamed from deps/v8/test/message/nfe-yield-strict.out)0
-rw-r--r--deps/v8/test/message/fail/non-alphanum.js (renamed from deps/v8/test/message/non-alphanum.js)0
-rw-r--r--deps/v8/test/message/fail/non-alphanum.out (renamed from deps/v8/test/message/non-alphanum.out)4
-rw-r--r--deps/v8/test/message/fail/object-rest-assignment-pattern.js (renamed from deps/v8/test/message/object-rest-assignment-pattern.js)0
-rw-r--r--deps/v8/test/message/fail/object-rest-assignment-pattern.out (renamed from deps/v8/test/message/object-rest-assignment-pattern.out)0
-rw-r--r--deps/v8/test/message/fail/object-rest-binding-pattern.js (renamed from deps/v8/test/message/object-rest-binding-pattern.js)0
-rw-r--r--deps/v8/test/message/fail/object-rest-binding-pattern.out (renamed from deps/v8/test/message/object-rest-binding-pattern.out)0
-rw-r--r--deps/v8/test/message/fail/overwritten-builtins.js (renamed from deps/v8/test/message/overwritten-builtins.js)0
-rw-r--r--deps/v8/test/message/fail/overwritten-builtins.out (renamed from deps/v8/test/message/overwritten-builtins.out)0
-rw-r--r--deps/v8/test/message/fail/paren_in_arg_string.js (renamed from deps/v8/test/message/paren_in_arg_string.js)0
-rw-r--r--deps/v8/test/message/fail/paren_in_arg_string.out (renamed from deps/v8/test/message/paren_in_arg_string.out)0
-rw-r--r--deps/v8/test/message/fail/redeclaration1.js (renamed from deps/v8/test/message/redeclaration1.js)0
-rw-r--r--deps/v8/test/message/fail/redeclaration1.out (renamed from deps/v8/test/message/redeclaration1.out)0
-rw-r--r--deps/v8/test/message/fail/redeclaration2.js (renamed from deps/v8/test/message/redeclaration2.js)0
-rw-r--r--deps/v8/test/message/fail/redeclaration2.out (renamed from deps/v8/test/message/redeclaration2.out)0
-rw-r--r--deps/v8/test/message/fail/redeclaration3.js (renamed from deps/v8/test/message/redeclaration3.js)0
-rw-r--r--deps/v8/test/message/fail/redeclaration3.out (renamed from deps/v8/test/message/redeclaration3.out)0
-rw-r--r--deps/v8/test/message/fail/replacement-marker-as-argument.js (renamed from deps/v8/test/message/replacement-marker-as-argument.js)0
-rw-r--r--deps/v8/test/message/fail/replacement-marker-as-argument.out (renamed from deps/v8/test/message/replacement-marker-as-argument.out)0
-rw-r--r--deps/v8/test/message/fail/rest-param-class-setter-strict.js (renamed from deps/v8/test/message/rest-param-class-setter-strict.js)0
-rw-r--r--deps/v8/test/message/fail/rest-param-class-setter-strict.out (renamed from deps/v8/test/message/rest-param-class-setter-strict.out)0
-rw-r--r--deps/v8/test/message/fail/rest-param-object-setter-sloppy.js (renamed from deps/v8/test/message/rest-param-object-setter-sloppy.js)0
-rw-r--r--deps/v8/test/message/fail/rest-param-object-setter-sloppy.out (renamed from deps/v8/test/message/rest-param-object-setter-sloppy.out)0
-rw-r--r--deps/v8/test/message/fail/rest-param-object-setter-strict.js (renamed from deps/v8/test/message/rest-param-object-setter-strict.js)0
-rw-r--r--deps/v8/test/message/fail/rest-param-object-setter-strict.out (renamed from deps/v8/test/message/rest-param-object-setter-strict.out)0
-rw-r--r--deps/v8/test/message/fail/settimeout.js (renamed from deps/v8/test/message/settimeout.js)0
-rw-r--r--deps/v8/test/message/fail/settimeout.out (renamed from deps/v8/test/message/settimeout.out)0
-rw-r--r--deps/v8/test/message/fail/simple-throw.js (renamed from deps/v8/test/message/simple-throw.js)0
-rw-r--r--deps/v8/test/message/fail/simple-throw.out (renamed from deps/v8/test/message/simple-throw.out)0
-rw-r--r--deps/v8/test/message/fail/strict-formal-parameters.js (renamed from deps/v8/test/message/strict-formal-parameters.js)0
-rw-r--r--deps/v8/test/message/fail/strict-formal-parameters.out (renamed from deps/v8/test/message/strict-formal-parameters.out)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-number.js (renamed from deps/v8/test/message/strict-octal-number.js)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-number.out (renamed from deps/v8/test/message/strict-octal-number.out)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-string.js (renamed from deps/v8/test/message/strict-octal-string.js)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-string.out (renamed from deps/v8/test/message/strict-octal-string.out)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-use-strict-after.js (renamed from deps/v8/test/message/strict-octal-use-strict-after.js)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-use-strict-after.out (renamed from deps/v8/test/message/strict-octal-use-strict-after.out)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-use-strict-before.js (renamed from deps/v8/test/message/strict-octal-use-strict-before.js)0
-rw-r--r--deps/v8/test/message/fail/strict-octal-use-strict-before.out (renamed from deps/v8/test/message/strict-octal-use-strict-before.out)0
-rw-r--r--deps/v8/test/message/fail/strict-with.js (renamed from deps/v8/test/message/strict-with.js)0
-rw-r--r--deps/v8/test/message/fail/strict-with.out (renamed from deps/v8/test/message/strict-with.out)0
-rw-r--r--deps/v8/test/message/fail/super-constructor-extra-statement.js (renamed from deps/v8/test/message/super-constructor-extra-statement.js)0
-rw-r--r--deps/v8/test/message/fail/super-constructor-extra-statement.out (renamed from deps/v8/test/message/super-constructor-extra-statement.out)0
-rw-r--r--deps/v8/test/message/fail/super-constructor.js (renamed from deps/v8/test/message/super-constructor.js)0
-rw-r--r--deps/v8/test/message/fail/super-constructor.out (renamed from deps/v8/test/message/super-constructor.out)0
-rw-r--r--deps/v8/test/message/fail/super-in-function.js (renamed from deps/v8/test/message/super-in-function.js)0
-rw-r--r--deps/v8/test/message/fail/super-in-function.out (renamed from deps/v8/test/message/super-in-function.out)0
-rw-r--r--deps/v8/test/message/fail/tonumber-symbol.js (renamed from deps/v8/test/message/tonumber-symbol.js)0
-rw-r--r--deps/v8/test/message/fail/tonumber-symbol.out (renamed from deps/v8/test/message/tonumber-symbol.out)0
-rw-r--r--deps/v8/test/message/fail/try-catch-finally-throw-in-catch-and-finally.js (renamed from deps/v8/test/message/try-catch-finally-throw-in-catch-and-finally.js)0
-rw-r--r--deps/v8/test/message/fail/try-catch-finally-throw-in-catch-and-finally.out (renamed from deps/v8/test/message/try-catch-finally-throw-in-catch-and-finally.out)0
-rw-r--r--deps/v8/test/message/fail/try-catch-finally-throw-in-catch.js (renamed from deps/v8/test/message/try-catch-finally-throw-in-catch.js)0
-rw-r--r--deps/v8/test/message/fail/try-catch-finally-throw-in-catch.out (renamed from deps/v8/test/message/try-catch-finally-throw-in-catch.out)0
-rw-r--r--deps/v8/test/message/fail/try-catch-finally-throw-in-finally.js (renamed from deps/v8/test/message/try-catch-finally-throw-in-finally.js)0
-rw-r--r--deps/v8/test/message/fail/try-catch-finally-throw-in-finally.out (renamed from deps/v8/test/message/try-catch-finally-throw-in-finally.out)0
-rw-r--r--deps/v8/test/message/fail/try-catch-lexical-conflict.js (renamed from deps/v8/test/message/try-catch-lexical-conflict.js)0
-rw-r--r--deps/v8/test/message/fail/try-catch-lexical-conflict.out (renamed from deps/v8/test/message/try-catch-lexical-conflict.out)0
-rw-r--r--deps/v8/test/message/fail/try-catch-variable-conflict.js (renamed from deps/v8/test/message/try-catch-variable-conflict.js)0
-rw-r--r--deps/v8/test/message/fail/try-catch-variable-conflict.out (renamed from deps/v8/test/message/try-catch-variable-conflict.out)0
-rw-r--r--deps/v8/test/message/fail/try-finally-throw-in-finally.js (renamed from deps/v8/test/message/try-finally-throw-in-finally.js)0
-rw-r--r--deps/v8/test/message/fail/try-finally-throw-in-finally.out (renamed from deps/v8/test/message/try-finally-throw-in-finally.out)0
-rw-r--r--deps/v8/test/message/fail/try-finally-throw-in-try-and-finally.js (renamed from deps/v8/test/message/try-finally-throw-in-try-and-finally.js)0
-rw-r--r--deps/v8/test/message/fail/try-finally-throw-in-try-and-finally.out (renamed from deps/v8/test/message/try-finally-throw-in-try-and-finally.out)0
-rw-r--r--deps/v8/test/message/fail/try-finally-throw-in-try.js (renamed from deps/v8/test/message/try-finally-throw-in-try.js)0
-rw-r--r--deps/v8/test/message/fail/try-finally-throw-in-try.out (renamed from deps/v8/test/message/try-finally-throw-in-try.out)0
-rw-r--r--deps/v8/test/message/fail/typedarray.js (renamed from deps/v8/test/message/typedarray.js)0
-rw-r--r--deps/v8/test/message/fail/typedarray.out (renamed from deps/v8/test/message/typedarray.out)0
-rw-r--r--deps/v8/test/message/fail/undefined-keyed-property.js (renamed from deps/v8/test/message/undefined-keyed-property.js)0
-rw-r--r--deps/v8/test/message/fail/undefined-keyed-property.out (renamed from deps/v8/test/message/undefined-keyed-property.out)0
-rw-r--r--deps/v8/test/message/fail/unicode-escape-invalid-2.js (renamed from deps/v8/test/message/unicode-escape-invalid-2.js)0
-rw-r--r--deps/v8/test/message/fail/unicode-escape-invalid-2.out (renamed from deps/v8/test/message/unicode-escape-invalid-2.out)0
-rw-r--r--deps/v8/test/message/fail/unicode-escape-invalid.js (renamed from deps/v8/test/message/unicode-escape-invalid.js)0
-rw-r--r--deps/v8/test/message/fail/unicode-escape-invalid.out (renamed from deps/v8/test/message/unicode-escape-invalid.out)0
-rw-r--r--deps/v8/test/message/fail/unicode-escape-undefined.js (renamed from deps/v8/test/message/unicode-escape-undefined.js)0
-rw-r--r--deps/v8/test/message/fail/unicode-escape-undefined.out (renamed from deps/v8/test/message/unicode-escape-undefined.out)0
-rw-r--r--deps/v8/test/message/fail/unterminated-arg-list.js (renamed from deps/v8/test/message/unterminated-arg-list.js)0
-rw-r--r--deps/v8/test/message/fail/unterminated-arg-list.out (renamed from deps/v8/test/message/unterminated-arg-list.out)0
-rw-r--r--deps/v8/test/message/fail/var-conflict-in-with.js (renamed from deps/v8/test/message/var-conflict-in-with.js)0
-rw-r--r--deps/v8/test/message/fail/var-conflict-in-with.out (renamed from deps/v8/test/message/var-conflict-in-with.out)0
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.js (renamed from deps/v8/test/message/wasm-function-name.js)0
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.out (renamed from deps/v8/test/message/wasm-function-name.out)0
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.js (renamed from deps/v8/test/message/wasm-module-and-function-name.js)0
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.out (renamed from deps/v8/test/message/wasm-module-and-function-name.out)0
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.js (renamed from deps/v8/test/message/wasm-module-name.js)0
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.out (renamed from deps/v8/test/message/wasm-module-name.out)0
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.js (renamed from deps/v8/test/message/wasm-no-name.js)0
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.out (renamed from deps/v8/test/message/wasm-no-name.out)0
-rw-r--r--deps/v8/test/message/fail/wasm-trap.js (renamed from deps/v8/test/message/wasm-trap.js)0
-rw-r--r--deps/v8/test/message/fail/wasm-trap.out (renamed from deps/v8/test/message/wasm-trap.out)0
-rw-r--r--deps/v8/test/message/fail/yield-in-arrow-param.js (renamed from deps/v8/test/message/yield-in-arrow-param.js)0
-rw-r--r--deps/v8/test/message/fail/yield-in-arrow-param.out (renamed from deps/v8/test/message/yield-in-arrow-param.out)0
-rw-r--r--deps/v8/test/message/fail/yield-in-generator-param.js (renamed from deps/v8/test/message/yield-in-generator-param.js)0
-rw-r--r--deps/v8/test/message/fail/yield-in-generator-param.out (renamed from deps/v8/test/message/yield-in-generator-param.out)0
-rw-r--r--deps/v8/test/message/message.status8
-rw-r--r--deps/v8/test/message/regress/fail/regress-1527.js (renamed from deps/v8/test/message/regress/regress-1527.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-1527.out (renamed from deps/v8/test/message/regress/regress-1527.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-3995.js (renamed from deps/v8/test/message/regress/regress-3995.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-3995.out (renamed from deps/v8/test/message/regress/regress-3995.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-4266.js (renamed from deps/v8/test/message/regress/regress-4266.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-4266.out (renamed from deps/v8/test/message/regress/regress-4266.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-5727.js (renamed from deps/v8/test/message/regress/regress-5727.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-5727.out (renamed from deps/v8/test/message/regress/regress-5727.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-73.js (renamed from deps/v8/test/message/regress/regress-73.js)12
-rw-r--r--deps/v8/test/message/regress/fail/regress-73.out (renamed from deps/v8/test/message/regress/regress-73.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-75.js (renamed from deps/v8/test/message/regress/regress-75.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-75.out (renamed from deps/v8/test/message/regress/regress-75.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-crbug-661579.js (renamed from deps/v8/test/message/regress/regress-crbug-661579.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-crbug-661579.out (renamed from deps/v8/test/message/regress/regress-crbug-661579.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-crbug-669017.js (renamed from deps/v8/test/message/regress/regress-crbug-669017.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-crbug-669017.out (renamed from deps/v8/test/message/regress/regress-crbug-669017.out)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-crbug-691194.js (renamed from deps/v8/test/message/regress/regress-crbug-691194.js)0
-rw-r--r--deps/v8/test/message/regress/fail/regress-crbug-691194.out (renamed from deps/v8/test/message/regress/regress-crbug-691194.out)0
-rw-r--r--deps/v8/test/message/regress/regress-4829-1.out8
-rw-r--r--deps/v8/test/message/regress/regress-4829-2.js9
-rw-r--r--deps/v8/test/message/regress/regress-4829-2.out8
-rw-r--r--deps/v8/test/message/testcfg.py31
-rw-r--r--deps/v8/test/mjsunit/array-lastindexof.js10
-rw-r--r--deps/v8/test/mjsunit/array-sort.js4
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-noopt.js4
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-opt.js3
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block.js195
-rw-r--r--deps/v8/test/mjsunit/compiler-regress-787301.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/function-bind.js209
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/math-ceil.js39
-rw-r--r--deps/v8/test/mjsunit/compiler/math-round.js39
-rw-r--r--deps/v8/test/mjsunit/compiler/math-trunc.js39
-rw-r--r--deps/v8/test/mjsunit/compiler/nary-binary-ops.js150
-rw-r--r--deps/v8/test/mjsunit/compiler/object-is.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-arguments.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-get.js68
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-has.js67
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-7121.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-772420.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-772872.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-773954.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-788539.js37
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-791245.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-799263.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/string-slice.js33
-rw-r--r--deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js51
-rw-r--r--deps/v8/test/mjsunit/console.js28
-rw-r--r--deps/v8/test/mjsunit/es6/class-computed-property-names-super.js92
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js139
-rw-r--r--deps/v8/test/mjsunit/es6/completion.js7
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-sticky.js6
-rw-r--r--deps/v8/test/mjsunit/es6/sloppy-no-duplicate-generators.js (renamed from deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/string-match.js5
-rw-r--r--deps/v8/test/mjsunit/es6/string-search.js5
-rw-r--r--deps/v8/test/mjsunit/es6/templates.js27
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js11
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-character-ranges.js2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js2
-rw-r--r--deps/v8/test/mjsunit/es9/object-rest-basic.js (renamed from deps/v8/test/mjsunit/harmony/object-rest-basic.js)1
-rw-r--r--deps/v8/test/mjsunit/es9/object-spread-basic.js (renamed from deps/v8/test/mjsunit/harmony/object-spread-basic.js)2
-rw-r--r--deps/v8/test/mjsunit/es9/regexp-lookbehind.js (renamed from deps/v8/test/mjsunit/harmony/regexp-lookbehind.js)2
-rw-r--r--deps/v8/test/mjsunit/es9/template-escapes.js (renamed from deps/v8/test/mjsunit/harmony/template-escapes.js)2
-rw-r--r--deps/v8/test/mjsunit/filter-element-kinds.js144
-rw-r--r--deps/v8/test/mjsunit/function-call.js91
-rw-r--r--deps/v8/test/mjsunit/global-accessors.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/array-sort-comparefn.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/add.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/and.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/as-int-n.js300
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/basics.js (renamed from deps/v8/test/mjsunit/harmony/bigint.js)358
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/comparisons.js525
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/dec.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/div.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/inc.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/json.js81
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/mod.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/mul.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/neg.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/not.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/or.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regressions.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/sar.js113
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/shl.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/sub.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/tonumber.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/xor.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-meta.js44
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/public-instance-class-fields.js676
-rw-r--r--deps/v8/test/mjsunit/harmony/public-static-class-fields.js335
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-named-captures.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-binary.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui0.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui1.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui2.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui3.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui4.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui5.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui6.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui7.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui8.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui9.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-6100.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js60
-rw-r--r--deps/v8/test/mjsunit/ignition/dynamic-global-inside-block.js11
-rw-r--r--deps/v8/test/mjsunit/ignition/print-ast.js2
-rw-r--r--deps/v8/test/mjsunit/messages.js118
-rw-r--r--deps/v8/test/mjsunit/migrations.js4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.isolate1
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js22
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status92
-rw-r--r--deps/v8/test/mjsunit/object-literal-modified-object-prototype.js25
-rw-r--r--deps/v8/test/mjsunit/object-literal.js452
-rw-r--r--deps/v8/test/mjsunit/optimized-filter.js440
-rw-r--r--deps/v8/test/mjsunit/optimized-map.js61
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js81
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js74
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-dotall.js27
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-i18n.js138
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers.js146
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1257.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353004.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5902.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599717.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-678917.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6941.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6948.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6970.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6989.js85
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6991.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7014-1.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7014-2.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7026.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-707187.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7115.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7135.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-752764.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-774824.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-775888.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-776309.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-778574.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-778668.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-779407.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-781218.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-782754.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-783051.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-783119.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-784080.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-784862.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-784863.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-784990.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-785804.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-786573.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-791345.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-793793.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-794822.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-794825.js (renamed from deps/v8/test/mjsunit/harmony/global-accessors-strict.js)53
-rw-r--r--deps/v8/test/mjsunit/regress/regress-799690.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-799813.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-465564.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-570241.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-747062.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-766635.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-768875.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-774459.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-774860.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-776511.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-778952.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-779344.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-779367.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-779457.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781583.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-783902.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-784835.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-786020.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-786723.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-791256.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-798026.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-801627.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-6906.js15
-rw-r--r--deps/v8/test/mjsunit/regress/string-compare-memcmp.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-02256.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256b.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-02256b.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02862.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-02862.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5531.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-5531.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5800.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-5800.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5884.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-5884.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-6054.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-6054.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-6164.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-6164.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-643595.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-643595.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-644682.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-644682.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-647649.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-647649.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-648079.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-648079.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-651961.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-651961.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-654377.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-654377.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-663994.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-663994.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-666741.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-666741.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-667745.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-667745.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-670683.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-670683.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-674447.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-674447.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-680938.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-680938.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-684858.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-684858.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-688876.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-688876.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-689450.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-689450.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-6931.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-694433.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-698587.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-698587.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-699485.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-699485.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-702460.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-702460.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-702839.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-702839.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7033.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7035.js31
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-703568.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-703568.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7049.js54
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-708714.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-708714.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-710844.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-710844.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-711203.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-711203.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-715216a.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-715216b.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-717056.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-717056.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-717194.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-717194.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-719175.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-719175.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-722445.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-722445.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724846.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-724846.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724851.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-724851.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724972.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-724972.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-727219.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-727219.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-727222.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-727222.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-727560.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-727560.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-729991.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-729991.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-731351.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-731351.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-734108.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-734108.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-734246.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-734246.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-734345.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-734345.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-736584.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-736584.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-737069.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-737069.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-739768.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-739768.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-753496.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-753496.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-757217.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-757217.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-763439.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-763439.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-763697.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-763697.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-766003.js17
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-769846.js (renamed from deps/v8/test/mjsunit/regress/wasm/regression-769846.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-771243.js39
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-772332.js33
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-775366.js29
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-778917.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-782280.js33
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-784050.js25
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-789952.js40
-rw-r--r--deps/v8/test/mjsunit/splice-proxy.js13
-rw-r--r--deps/v8/test/mjsunit/string-equal.js17
-rw-r--r--deps/v8/test/mjsunit/testcfg.py26
-rw-r--r--deps/v8/test/mjsunit/tools/csvparser.js27
-rw-r--r--deps/v8/test/mjsunit/tools/dumpcpp.js2
-rw-r--r--deps/v8/test/mjsunit/tools/profviz-test.log4966
-rw-r--r--deps/v8/test/mjsunit/tools/profviz.js2
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log16
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test.log22
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js19
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/default-liftoff-setting.js21
-rw-r--r--deps/v8/test/mjsunit/wasm/disallow-codegen.js61
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js58
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/globals.js116
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js105
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js95
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/lazy-compilation.js63
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff.js36
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-external-call.js189
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-value.js322
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-common.js52
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-constructed.js26
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-exported.js34
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-module.js54
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-reexport.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties.js169
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-memory.js69
-rw-r--r--deps/v8/test/mjsunit/whitespaces.js40
-rw-r--r--deps/v8/test/mjsunit/whitespaces0.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces1.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces2.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces3.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces4.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces5.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces6.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces7.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces8.js7
-rw-r--r--deps/v8/test/mjsunit/whitespaces9.js7
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc5
-rw-r--r--deps/v8/test/mkgrokdump/testcfg.py13
-rw-r--r--deps/v8/test/mozilla/mozilla.status19
-rw-r--r--deps/v8/test/mozilla/testcfg.py14
-rw-r--r--deps/v8/test/preparser/testcfg.py7
-rw-r--r--deps/v8/test/promises-aplus/testcfg.py80
-rw-r--r--deps/v8/test/test262/test262.status56
-rw-r--r--deps/v8/test/test262/testcfg.py66
-rw-r--r--deps/v8/test/unittests/BUILD.gn4
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc5
-rw-r--r--deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc4
-rw-r--r--deps/v8/test/unittests/asmjs/asm-types-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/bits-unittest.cc109
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc21
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc9
-rw-r--r--deps/v8/test/unittests/base/template-utils-unittest.cc57
-rw-r--r--deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc201
-rw-r--r--deps/v8/test/unittests/bigint-unittest.cc115
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc34
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc278
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc15
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc270
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc21
-rw-r--r--deps/v8/test/unittests/compiler/code-assembler-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc71
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/node-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/schedule-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc83
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc21
-rw-r--r--deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/counters-unittest.cc294
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc136
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc38
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc120
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-utils.h1
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc106
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc93
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h2
-rw-r--r--deps/v8/test/unittests/libplatform/default-platform-unittest.cc162
-rw-r--r--deps/v8/test/unittests/libplatform/task-queue-unittest.cc7
-rw-r--r--deps/v8/test/unittests/libplatform/worker-thread-unittest.cc16
-rw-r--r--deps/v8/test/unittests/object-unittest.cc59
-rw-r--r--deps/v8/test/unittests/parser/ast-value-unittest.cc51
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc12
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc15
-rw-r--r--deps/v8/test/unittests/source-position-table-unittest.cc22
-rw-r--r--deps/v8/test/unittests/test-helpers.cc11
-rw-r--r--deps/v8/test/unittests/test-helpers.h1
-rw-r--r--deps/v8/test/unittests/test-utils.cc37
-rw-r--r--deps/v8/test/unittests/test-utils.h58
-rw-r--r--deps/v8/test/unittests/unittests.gyp4
-rw-r--r--deps/v8/test/unittests/unittests.status4
-rw-r--r--deps/v8/test/unittests/utils-unittest.cc113
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc118
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc110
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc244
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc26
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc45
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc11
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-unittest.cc69
-rw-r--r--deps/v8/test/unittests/wasm/wasm-heap-unittest.cc235
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py8
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration-expected.txt6
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration.js6
-rw-r--r--deps/v8/test/webkit/class-syntax-expression-expected.txt6
-rw-r--r--deps/v8/test/webkit/class-syntax-expression.js6
-rw-r--r--deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt18
-rw-r--r--deps/v8/test/webkit/fast/js/array-prototype-properties.js2
-rw-r--r--deps/v8/test/webkit/resources/JSON-stringify.js4
-rw-r--r--deps/v8/test/webkit/run-json-stringify-expected.txt4
-rw-r--r--deps/v8/test/webkit/testcfg.py16
-rw-r--r--deps/v8/test/webkit/webkit.status6
-rw-r--r--deps/v8/third_party/colorama/LICENSE27
-rw-r--r--deps/v8/third_party/colorama/README.v814
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py2
-rw-r--r--deps/v8/third_party/inspector_protocol/CodeGenerator.py13
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template16
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template4
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template10
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_cpp.template46
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_h.template3
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template16
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template13
-rw-r--r--deps/v8/tools/BUILD.gn6
-rwxr-xr-xdeps/v8/tools/android-sync.sh1
-rw-r--r--deps/v8/tools/arguments.js78
-rwxr-xr-xdeps/v8/tools/bigint-tester.py347
-rw-r--r--deps/v8/tools/csvparser.js106
-rwxr-xr-xdeps/v8/tools/dev/gm.py3
-rw-r--r--deps/v8/tools/dump-cpp.py4
-rw-r--r--deps/v8/tools/foozzie/testdata/failure_output.txt4
-rwxr-xr-xdeps/v8/tools/foozzie/v8_foozzie.py10
-rw-r--r--deps/v8/tools/foozzie/v8_suppressions.py4
-rwxr-xr-xdeps/v8/tools/gcmole/download_gcmole_tools.py2
-rwxr-xr-xdeps/v8/tools/gcov.sh2
-rw-r--r--deps/v8/tools/gdb-v8-support.py52
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py22
-rwxr-xr-xdeps/v8/tools/grokdump.py67
-rw-r--r--deps/v8/tools/ic-explorer.html18
-rwxr-xr-xdeps/v8/tools/ic-processor4
-rw-r--r--deps/v8/tools/ic-processor-driver.js2
-rw-r--r--deps/v8/tools/ic-processor.js102
-rwxr-xr-xdeps/v8/tools/js2c.py3
-rw-r--r--deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py2
-rwxr-xr-xdeps/v8/tools/linux-tick-processor4
-rw-r--r--deps/v8/tools/memory/asan/blacklist_win.txt4
-rw-r--r--deps/v8/tools/parser-shell.cc5
-rw-r--r--deps/v8/tools/perf/statistics-for-json.R4
-rwxr-xr-xdeps/v8/tools/plot-timer-events5
-rwxr-xr-xdeps/v8/tools/presubmit.py34
-rw-r--r--deps/v8/tools/profview/profile-utils.js20
-rw-r--r--deps/v8/tools/profview/profview.js34
-rw-r--r--deps/v8/tools/profviz/profviz.js1
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py11
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py3
-rw-r--r--deps/v8/tools/release/git_recipes.py5
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py3
-rw-r--r--deps/v8/tools/release/testdata/node/deps/v8/.gitignore7
-rw-r--r--deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me1
-rw-r--r--deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo1
-rw-r--r--deps/v8/tools/release/testdata/node/deps/v8/delete_me1
-rw-r--r--deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h20
-rw-r--r--deps/v8/tools/release/testdata/node/deps/v8/v8_foo1
-rwxr-xr-xdeps/v8/tools/release/update_node.py7
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py488
-rwxr-xr-xdeps/v8/tools/run-gc-fuzzer.py14
-rw-r--r--deps/v8/tools/run-num-fuzzer.gyp (renamed from deps/v8/tools/run-valgrind.gyp)6
-rw-r--r--deps/v8/tools/run-num-fuzzer.isolate20
-rwxr-xr-xdeps/v8/tools/run-tests.py969
-rw-r--r--deps/v8/tools/run-valgrind.isolate29
-rwxr-xr-xdeps/v8/tools/run-valgrind.py102
-rwxr-xr-xdeps/v8/tools/run_perf.py140
-rwxr-xr-xdeps/v8/tools/test-server.py215
-rw-r--r--deps/v8/tools/testrunner/README168
-rw-r--r--deps/v8/tools/testrunner/base_runner.py438
-rwxr-xr-xdeps/v8/tools/testrunner/deopt_fuzzer.py381
-rwxr-xr-xdeps/v8/tools/testrunner/gc_fuzzer.py341
-rw-r--r--deps/v8/tools/testrunner/local/commands.py20
-rw-r--r--deps/v8/tools/testrunner/local/execution.py36
-rw-r--r--deps/v8/tools/testrunner/local/progress.py15
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py152
-rwxr-xr-xdeps/v8/tools/testrunner/local/statusfile_unittest.py26
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py281
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_unittest.py53
-rw-r--r--deps/v8/tools/testrunner/local/variants.py25
-rw-r--r--deps/v8/tools/testrunner/local/verbose.py24
-rw-r--r--deps/v8/tools/testrunner/network/__init__.py26
-rw-r--r--deps/v8/tools/testrunner/network/distro.py90
-rw-r--r--deps/v8/tools/testrunner/network/endpoint.py125
-rw-r--r--deps/v8/tools/testrunner/network/network_execution.py253
-rw-r--r--deps/v8/tools/testrunner/objects/context.py15
-rw-r--r--deps/v8/tools/testrunner/objects/output.py8
-rw-r--r--deps/v8/tools/testrunner/objects/peer.py80
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py51
-rw-r--r--deps/v8/tools/testrunner/objects/workpacket.py90
-rw-r--r--deps/v8/tools/testrunner/server/__init__.py26
-rw-r--r--deps/v8/tools/testrunner/server/compression.py111
-rw-r--r--deps/v8/tools/testrunner/server/constants.py51
-rw-r--r--deps/v8/tools/testrunner/server/daemon.py147
-rw-r--r--deps/v8/tools/testrunner/server/local_handler.py119
-rw-r--r--deps/v8/tools/testrunner/server/main.py245
-rw-r--r--deps/v8/tools/testrunner/server/presence_handler.py120
-rw-r--r--deps/v8/tools/testrunner/server/signatures.py63
-rw-r--r--deps/v8/tools/testrunner/server/status_handler.py112
-rw-r--r--deps/v8/tools/testrunner/server/work_handler.py150
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py553
-rw-r--r--deps/v8/tools/tick-processor.html14
-rw-r--r--deps/v8/tools/tickprocessor.js242
-rwxr-xr-xdeps/v8/tools/try_perf.py1
-rw-r--r--deps/v8/tools/unittests/run_perf_test.py16
-rw-r--r--deps/v8/tools/v8heapconst.py448
-rw-r--r--deps/v8/tools/whitespace.txt2
-rwxr-xr-xdeps/v8/tools/windows-tick-processor.bat2
2123 files changed, 92921 insertions, 80387 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 6861c70994..85ff179226 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -70,6 +70,8 @@
!/third_party/binutils
!/third_party/eu-strip
!/third_party/inspector_protocol
+!/third_party/colorama
+/third_party/colorama/src
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
@@ -103,5 +105,6 @@ turbo*.cfg
turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json
+/Default/
!/third_party/jinja2
!/third_party/markupsafe
diff --git a/deps/v8/.vpython b/deps/v8/.vpython
new file mode 100644
index 0000000000..9ea0da7145
--- /dev/null
+++ b/deps/v8/.vpython
@@ -0,0 +1,32 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the chromium repo, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+# vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+
+python_version: "2.7"
+
+# Needed by third_party/catapult/devil/devil, which is imported by
+# build/android/test_runner.py when running performance tests.
+wheel: <
+ name: "infra/python/wheels/psutil/${vpython_platform}"
+ version: "version:5.2.2"
+>
diff --git a/deps/v8/.ycm_extra_conf.py b/deps/v8/.ycm_extra_conf.py
index a451d9f31c..74e605431a 100644
--- a/deps/v8/.ycm_extra_conf.py
+++ b/deps/v8/.ycm_extra_conf.py
@@ -42,7 +42,7 @@ import sys
# Flags from YCM's default config.
flags = [
'-DUSE_CLANG_COMPLETER',
-'-std=gnu++11',
+'-std=gnu++14',
'-x',
'c++',
]
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index be50e6e499..08391a5566 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -1,4 +1,4 @@
-# Below is a list of people and organizations that have contributed
+# Below is a list of people and organizations that have contributed
# to the V8 project. Names should be added to the list like so:
#
# Name/Organization <email address>
@@ -31,6 +31,7 @@ StrongLoop, Inc. <*@strongloop.com>
Facebook, Inc. <*@fb.com>
Facebook, Inc. <*@oculus.com>
Vewd Software AS <*@vewd.com>
+Groupon <*@groupon.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
@@ -45,6 +46,7 @@ Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
Anna Henningsen <anna@addaleax.net>
Bangfu Tao <bangfu.tao@samsung.com>
+Ben Coe <ben@npmjs.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
Bert Belder <bertbelder@gmail.com>
@@ -54,6 +56,7 @@ Craig Schlenter <craig.schlenter@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
+Colin Ihrig <cjihrig@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel James <dnljms@gmail.com>
@@ -75,6 +78,7 @@ Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jaime Bernardo <jaime@janeasystems.com>
Jan de Mooij <jandemooij@gmail.com>
+Jan Krems <jan.krems@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
@@ -86,6 +90,7 @@ JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
Kevin Gibbons <bakkot@gmail.com>
+Kris Selden <kris.selden@gmail.com>
Loo Rong Jie <loorongjie@gmail.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
@@ -127,12 +132,14 @@ Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
+Sylvestre Ledru <sledru@mozilla.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
+Yong Wang <ccyongwang@tencent.com>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index daed449c0a..8492cb5f62 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -83,11 +83,11 @@ declare_args() {
# Sets -dV8_TRACE_IGNITION.
v8_enable_trace_ignition = false
- # Sets -dV8_CONCURRENT_MARKING
- v8_enable_concurrent_marking = false
+ # Sets -dV8_TRACE_FEEDBACK_UPDATES.
+ v8_enable_trace_feedback_updates = false
- # Sets -dV8_CSA_WRITE_BARRIER
- v8_enable_csa_write_barrier = true
+ # Sets -dV8_CONCURRENT_MARKING
+ v8_enable_concurrent_marking = true
# Build the snapshot with unwinding information for perf.
# Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
@@ -132,6 +132,11 @@ declare_args() {
# Temporary flag to allow embedders to update their microtasks scopes
# while rolling in a new version of V8.
v8_check_microtasks_scopes_consistency = ""
+
+ v8_monolithic = false
+
+ # Enable mitigations for executing untrusted code.
+ v8_untrusted_code_mitigations = true
}
# Derived defaults.
@@ -270,6 +275,9 @@ config("features") {
if (v8_enable_trace_ignition) {
defines += [ "V8_TRACE_IGNITION" ]
}
+ if (v8_enable_trace_feedback_updates) {
+ defines += [ "V8_TRACE_FEEDBACK_UPDATES" ]
+ }
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ]
}
@@ -300,9 +308,6 @@ config("features") {
if (v8_enable_concurrent_marking) {
defines += [ "V8_CONCURRENT_MARKING" ]
}
- if (v8_enable_csa_write_barrier) {
- defines += [ "V8_CSA_WRITE_BARRIER" ]
- }
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
@@ -488,6 +493,10 @@ config("toolchain") {
defines += [ "ENABLE_VERIFY_CSA" ]
}
+ if (!v8_untrusted_code_mitigations) {
+ defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
+ }
+
if (v8_no_inline) {
cflags += [
"-fno-inline-functions",
@@ -568,9 +577,7 @@ action("js2c") {
"src/js/prologue.js",
"src/js/v8natives.js",
"src/js/array.js",
- "src/js/string.js",
"src/js/typedarray.js",
- "src/js/weak-collection.js",
"src/js/messages.js",
"src/js/spread.js",
"src/js/proxy.js",
@@ -746,6 +753,12 @@ action("postmortem-metadata") {
sources = [
"src/objects.h",
"src/objects-inl.h",
+ "src/objects/code-inl.h",
+ "src/objects/code.h",
+ "src/objects/js-array-inl.h",
+ "src/objects/js-array.h",
+ "src/objects/js-regexp-inl.h",
+ "src/objects/js-regexp.h",
"src/objects/map.h",
"src/objects/map-inl.h",
"src/objects/script.h",
@@ -764,65 +777,68 @@ action("postmortem-metadata") {
rebase_path(sources, root_build_dir)
}
-action("run_mksnapshot") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- deps = [
- ":mksnapshot($v8_snapshot_toolchain)",
- ]
-
- script = "tools/run.py"
+if (v8_use_snapshot) {
+ action("run_mksnapshot") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
- sources = []
+ deps = [
+ ":mksnapshot($v8_snapshot_toolchain)",
+ ]
- outputs = [
- "$target_gen_dir/snapshot.cc",
- ]
+ script = "tools/run.py"
- args = [
- "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
- "root_out_dir") + "/mksnapshot",
- root_build_dir),
- "--startup_src",
- rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
- ]
+ sources = []
- if (v8_random_seed != "0") {
- args += [
- "--random-seed",
- v8_random_seed,
+ outputs = [
+ "$target_gen_dir/snapshot.cc",
]
- }
- if (v8_os_page_size != "0") {
- args += [
- "--v8_os_page_size",
- v8_os_page_size,
+ args = [
+ "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
+ "root_out_dir") + "/mksnapshot",
+ root_build_dir),
+ "--turbo_instruction_scheduling",
+ "--startup_src",
+ rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
- }
- if (v8_perf_prof_unwinding_info) {
- args += [ "--perf-prof-unwinding-info" ]
- }
+ if (v8_random_seed != "0") {
+ args += [
+ "--random-seed",
+ v8_random_seed,
+ ]
+ }
- if (v8_use_external_startup_data) {
- outputs += [ "$root_out_dir/snapshot_blob.bin" ]
- args += [
- "--startup_blob",
- rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir),
- ]
- }
+ if (v8_os_page_size != "0") {
+ args += [
+ "--v8_os_page_size",
+ v8_os_page_size,
+ ]
+ }
- if (v8_embed_script != "") {
- sources += [ v8_embed_script ]
- args += [ rebase_path(v8_embed_script, root_build_dir) ]
- }
+ if (v8_perf_prof_unwinding_info) {
+ args += [ "--perf-prof-unwinding-info" ]
+ }
- if (v8_enable_fast_mksnapshot) {
- args += [
- "--no-turbo-rewrite-far-jumps",
- "--no-turbo-verify-allocation",
- ]
+ if (v8_use_external_startup_data) {
+ outputs += [ "$root_out_dir/snapshot_blob.bin" ]
+ args += [
+ "--startup_blob",
+ rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir),
+ ]
+ }
+
+ if (v8_embed_script != "") {
+ sources += [ v8_embed_script ]
+ args += [ rebase_path(v8_embed_script, root_build_dir) ]
+ }
+
+ if (v8_enable_fast_mksnapshot) {
+ args += [
+ "--no-turbo-rewrite-far-jumps",
+ "--no-turbo-verify-allocation",
+ ]
+ }
}
}
@@ -834,6 +850,7 @@ action("v8_dump_build_config") {
is_gcov_coverage = v8_code_coverage && !is_clang
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
+ "current_cpu=\"$current_cpu\"",
"dcheck_always_on=$dcheck_always_on",
"is_asan=$is_asan",
"is_cfi=$is_cfi",
@@ -844,7 +861,9 @@ action("v8_dump_build_config") {
"is_tsan=$is_tsan",
"is_ubsan_vptr=$is_ubsan_vptr",
"target_cpu=\"$target_cpu\"",
+ "v8_current_cpu=\"$v8_current_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
+ "v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
@@ -901,44 +920,46 @@ v8_source_set("v8_nosnapshot") {
configs = [ ":internal_config" ]
}
-v8_source_set("v8_snapshot") {
- # Only targets in this file and the top-level visibility target can
- # depend on this.
- visibility = [
- ":*",
- "//:gn_visibility",
- ]
-
- deps = [
- ":js2c",
- ":js2c_experimental_extras",
- ":js2c_extras",
- ":v8_base",
- ]
- public_deps = [
- # This should be public so downstream targets can declare the snapshot
- # output file as their inputs.
- ":run_mksnapshot",
- ]
+if (v8_use_snapshot) {
+ v8_source_set("v8_snapshot") {
+ # Only targets in this file and the top-level visibility target can
+ # depend on this.
+ visibility = [
+ ":*",
+ "//:gn_visibility",
+ ]
- sources = [
- "$target_gen_dir/experimental-extras-libraries.cc",
- "$target_gen_dir/extras-libraries.cc",
- "$target_gen_dir/libraries.cc",
- "$target_gen_dir/snapshot.cc",
- "src/setup-isolate-deserialize.cc",
- ]
+ deps = [
+ ":js2c",
+ ":js2c_experimental_extras",
+ ":js2c_extras",
+ ":v8_base",
+ ]
+ public_deps = [
+ # This should be public so downstream targets can declare the snapshot
+ # output file as their inputs.
+ ":run_mksnapshot",
+ ]
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [
- # TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
- # Generated source, contains same variable names as libraries.cc
+ sources = [
"$target_gen_dir/experimental-extras-libraries.cc",
+ "$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/libraries.cc",
+ "$target_gen_dir/snapshot.cc",
+ "src/setup-isolate-deserialize.cc",
]
- }
- configs = [ ":internal_config" ]
+ if (use_jumbo_build == true) {
+ jumbo_excluded_sources = [
+ # TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
+ # Generated source, contains same variable names as libraries.cc
+ "$target_gen_dir/experimental-extras-libraries.cc",
+ "$target_gen_dir/libraries.cc",
+ ]
+ }
+
+ configs = [ ":internal_config" ]
+ }
}
if (v8_use_external_startup_data) {
@@ -1008,12 +1029,14 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-iterator-gen.cc",
"src/builtins/builtins-iterator-gen.h",
"src/builtins/builtins-math-gen.cc",
+ "src/builtins/builtins-math-gen.h",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
"src/builtins/builtins-proxy-gen.h",
+ "src/builtins/builtins-reflect-gen.cc",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
@@ -1195,8 +1218,6 @@ v8_source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.cc",
"src/assert-scope.h",
- "src/ast/ast-expression-rewriter.cc",
- "src/ast/ast-expression-rewriter.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-numbering.cc",
@@ -1219,8 +1240,6 @@ v8_source_set("v8_base") {
"src/ast/scopes.h",
"src/ast/variables.cc",
"src/ast/variables.h",
- "src/background-parsing-task.cc",
- "src/background-parsing-task.h",
"src/bailout-reason.cc",
"src/bailout-reason.h",
"src/basic-block-profiler.cc",
@@ -1315,6 +1334,7 @@ v8_source_set("v8_base") {
"src/compiler/access-info.h",
"src/compiler/all-nodes.cc",
"src/compiler/all-nodes.h",
+ "src/compiler/allocation-builder.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/branch-elimination.cc",
@@ -1607,6 +1627,8 @@ v8_source_set("v8_base") {
"src/handles.cc",
"src/handles.h",
"src/heap-symbols.h",
+ "src/heap/array-buffer-collector.cc",
+ "src/heap/array-buffer-collector.h",
"src/heap/array-buffer-tracker-inl.h",
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
@@ -1658,14 +1680,11 @@ v8_source_set("v8_base") {
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
+ "src/heap/sweeper.cc",
+ "src/heap/sweeper.h",
"src/heap/worklist.h",
- "src/ic/access-compiler-data.h",
- "src/ic/access-compiler.cc",
- "src/ic/access-compiler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
- "src/ic/handler-compiler.cc",
- "src/ic/handler-compiler.h",
"src/ic/handler-configuration-inl.h",
"src/ic/handler-configuration.cc",
"src/ic/handler-configuration.h",
@@ -1773,9 +1792,10 @@ v8_source_set("v8_base") {
"src/objects.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
- "src/objects/bigint-inl.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
+ "src/objects/code-inl.h",
+ "src/objects/code.h",
"src/objects/compilation-cache-inl.h",
"src/objects/compilation-cache.h",
"src/objects/debug-objects-inl.h",
@@ -1789,6 +1809,11 @@ v8_source_set("v8_base") {
"src/objects/hash-table.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
+ "src/objects/js-array-inl.h",
+ "src/objects/js-array.h",
+ "src/objects/js-regexp-inl.h",
+ "src/objects/js-regexp.h",
+ "src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
"src/objects/map-inl.h",
@@ -1816,6 +1841,8 @@ v8_source_set("v8_base") {
"src/objects/template-objects.h",
"src/ostreams.cc",
"src/ostreams.h",
+ "src/parsing/background-parsing-task.cc",
+ "src/parsing/background-parsing-task.h",
"src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/expression-scope-reparenter.cc",
@@ -1948,12 +1975,20 @@ v8_source_set("v8_base") {
"src/setup-isolate.h",
"src/signature.h",
"src/simulator.h",
+ "src/snapshot/builtin-deserializer-allocator.cc",
+ "src/snapshot/builtin-deserializer-allocator.h",
"src/snapshot/builtin-deserializer.cc",
"src/snapshot/builtin-deserializer.h",
+ "src/snapshot/builtin-serializer-allocator.cc",
+ "src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
+ "src/snapshot/builtin-snapshot-utils.cc",
+ "src/snapshot/builtin-snapshot-utils.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
+ "src/snapshot/default-deserializer-allocator.cc",
+ "src/snapshot/default-deserializer-allocator.h",
"src/snapshot/default-serializer-allocator.cc",
"src/snapshot/default-serializer-allocator.h",
"src/snapshot/deserializer.cc",
@@ -2038,6 +2073,9 @@ v8_source_set("v8_base") {
"src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
+ "src/wasm/baseline/liftoff-assembler.cc",
+ "src/wasm/baseline/liftoff-assembler.h",
+ "src/wasm/baseline/liftoff-compiler.cc",
"src/wasm/compilation-manager.cc",
"src/wasm/compilation-manager.h",
"src/wasm/decoder.h",
@@ -2061,6 +2099,8 @@ v8_source_set("v8_base") {
"src/wasm/wasm-api.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
+ "src/wasm/wasm-code-wrapper.cc",
+ "src/wasm/wasm-code-wrapper.h",
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
@@ -2084,6 +2124,8 @@ v8_source_set("v8_base") {
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
+ "src/wasm/wasm-serialization.cc",
+ "src/wasm/wasm-serialization.h",
"src/wasm/wasm-text.cc",
"src/wasm/wasm-text.h",
"src/wasm/wasm-value.h",
@@ -2128,9 +2170,7 @@ v8_source_set("v8_base") {
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
"src/ia32/code-stubs-ia32.cc",
- "src/ia32/code-stubs-ia32.h",
"src/ia32/codegen-ia32.cc",
- "src/ia32/codegen-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
@@ -2142,10 +2182,10 @@ v8_source_set("v8_base") {
"src/ia32/simulator-ia32.cc",
"src/ia32/simulator-ia32.h",
"src/ia32/sse-instr.h",
- "src/ic/ia32/access-compiler-ia32.cc",
- "src/ic/ia32/handler-compiler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
+ "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
+ "src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
@@ -2156,18 +2196,16 @@ v8_source_set("v8_base") {
"src/compiler/x64/unwinding-info-writer-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
- "src/ic/x64/access-compiler-x64.cc",
- "src/ic/x64/handler-compiler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
+ "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
+ "src/wasm/baseline/x64/liftoff-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
"src/x64/code-stubs-x64.cc",
- "src/x64/code-stubs-x64.h",
"src/x64/codegen-x64.cc",
- "src/x64/codegen-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
@@ -2192,7 +2230,6 @@ v8_source_set("v8_base") {
"src/arm/code-stubs-arm.cc",
"src/arm/code-stubs-arm.h",
"src/arm/codegen-arm.cc",
- "src/arm/codegen-arm.h",
"src/arm/constants-arm.cc",
"src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
@@ -2214,10 +2251,10 @@ v8_source_set("v8_base") {
"src/compiler/arm/unwinding-info-writer-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
- "src/ic/arm/access-compiler-arm.cc",
- "src/ic/arm/handler-compiler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
+ "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
+ "src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
@@ -2227,7 +2264,6 @@ v8_source_set("v8_base") {
"src/arm64/code-stubs-arm64.cc",
"src/arm64/code-stubs-arm64.h",
"src/arm64/codegen-arm64.cc",
- "src/arm64/codegen-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/decoder-arm64-inl.h",
@@ -2261,10 +2297,10 @@ v8_source_set("v8_base") {
"src/compiler/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
- "src/ic/arm64/access-compiler-arm64.cc",
- "src/ic/arm64/handler-compiler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
+ "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
+ "src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
if (use_jumbo_build) {
jumbo_excluded_sources += [
@@ -2280,15 +2316,12 @@ v8_source_set("v8_base") {
"src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
- "src/ic/mips/access-compiler-mips.cc",
- "src/ic/mips/handler-compiler-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/code-stubs-mips.cc",
"src/mips/code-stubs-mips.h",
"src/mips/codegen-mips.cc",
- "src/mips/codegen-mips.h",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
@@ -2303,6 +2336,8 @@ v8_source_set("v8_base") {
"src/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
+ "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
+ "src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
@@ -2311,15 +2346,12 @@ v8_source_set("v8_base") {
"src/compiler/mips64/instruction-scheduler-mips64.cc",
"src/compiler/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
- "src/ic/mips64/access-compiler-mips64.cc",
- "src/ic/mips64/handler-compiler-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/code-stubs-mips64.cc",
"src/mips64/code-stubs-mips64.h",
"src/mips64/codegen-mips64.cc",
- "src/mips64/codegen-mips64.h",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
@@ -2334,6 +2366,8 @@ v8_source_set("v8_base") {
"src/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
+ "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
+ "src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
@@ -2342,15 +2376,12 @@ v8_source_set("v8_base") {
"src/compiler/ppc/instruction-scheduler-ppc.cc",
"src/compiler/ppc/instruction-selector-ppc.cc",
"src/debug/ppc/debug-ppc.cc",
- "src/ic/ppc/access-compiler-ppc.cc",
- "src/ic/ppc/handler-compiler-ppc.cc",
"src/ppc/assembler-ppc-inl.h",
"src/ppc/assembler-ppc.cc",
"src/ppc/assembler-ppc.h",
"src/ppc/code-stubs-ppc.cc",
"src/ppc/code-stubs-ppc.h",
"src/ppc/codegen-ppc.cc",
- "src/ppc/codegen-ppc.h",
"src/ppc/constants-ppc.cc",
"src/ppc/constants-ppc.h",
"src/ppc/cpu-ppc.cc",
@@ -2365,6 +2396,8 @@ v8_source_set("v8_base") {
"src/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
+ "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
@@ -2373,8 +2406,6 @@ v8_source_set("v8_base") {
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc",
- "src/ic/s390/access-compiler-s390.cc",
- "src/ic/s390/handler-compiler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
@@ -2383,7 +2414,6 @@ v8_source_set("v8_base") {
"src/s390/code-stubs-s390.cc",
"src/s390/code-stubs-s390.h",
"src/s390/codegen-s390.cc",
- "src/s390/codegen-s390.h",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
@@ -2396,6 +2426,8 @@ v8_source_set("v8_base") {
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
+ "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h",
+ "src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}
@@ -2597,6 +2629,10 @@ v8_component("v8_libplatform") {
"include/libplatform/libplatform-export.h",
"include/libplatform/libplatform.h",
"include/libplatform/v8-tracing.h",
+ "src/libplatform/default-background-task-runner.cc",
+ "src/libplatform/default-background-task-runner.h",
+ "src/libplatform/default-foreground-task-runner.cc",
+ "src/libplatform/default-foreground-task-runner.h",
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/task-queue.cc",
@@ -2661,10 +2697,34 @@ v8_source_set("fuzzer_support") {
}
###############################################################################
+# Produce a single static library for embedders
+#
+
+if (v8_monolithic) {
+ # A component build is not monolithic.
+ assert(!is_component_build)
+
+ # Using external startup data would produce separate files.
+ assert(!v8_use_external_startup_data)
+ v8_static_library("v8_monolith") {
+ deps = [
+ ":v8",
+ ":v8_libbase",
+ ":v8_libplatform",
+ ":v8_libsampler",
+ "//build/config:exe_and_shlib_deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ configs = [ ":internal_config" ]
+ }
+}
+
+###############################################################################
# Executables
#
-if (current_toolchain == v8_snapshot_toolchain) {
+if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
v8_executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -2719,6 +2779,8 @@ group("gn_all") {
}
group("v8_clusterfuzz") {
+ testonly = true
+
deps = [
":d8",
]
@@ -2731,6 +2793,13 @@ group("v8_clusterfuzz") {
":d8(//build/toolchain/linux:clang_x86_v8_arm)",
]
}
+
+ if (v8_test_isolation_mode != "noop") {
+ deps += [
+ "tools:run-deopt-fuzzer_run",
+ "tools:run-num-fuzzer_run",
+ ]
+ }
}
group("v8_archive") {
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index bed8ed9770..248b42b8d0 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1973 @@
+2017-11-29: Version 6.4.388
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.387
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.386
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.385
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.384
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.383
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.382
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.381
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.380
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.379
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.378
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-28: Version 6.4.377
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-27: Version 6.4.376
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-27: Version 6.4.375
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-27: Version 6.4.374
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-27: Version 6.4.373
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-27: Version 6.4.372
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-27: Version 6.4.371
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-24: Version 6.4.370
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-24: Version 6.4.369
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-24: Version 6.4.368
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-24: Version 6.4.367
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-24: Version 6.4.366
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.365
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.364
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.363
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.362
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.361
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.360
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-23: Version 6.4.359
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.358
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.357
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.356
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.355
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.354
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.353
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-22: Version 6.4.352
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.351
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.350
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.349
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.348
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.347
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.346
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-21: Version 6.4.345
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.344
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.343
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.342
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.341
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.340
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.339
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.338
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.337
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.336
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.335
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.334
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-20: Version 6.4.333
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-19: Version 6.4.332
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-18: Version 6.4.331
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.330
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.329
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.328
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.327
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.326
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.325
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.324
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.323
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.322
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.321
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.320
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-17: Version 6.4.319
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.318
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.317
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.316
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.315
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.314
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.313
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.312
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.311
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.310
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.309
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-16: Version 6.4.308
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-15: Version 6.4.307
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-15: Version 6.4.306
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-15: Version 6.4.305
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-15: Version 6.4.304
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-15: Version 6.4.303
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.302
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.301
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.300
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.299
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.298
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.297
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.296
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.295
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.294
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.293
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.292
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.291
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.290
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.289
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-14: Version 6.4.288
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.287
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.286
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.285
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.284
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.283
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.282
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.281
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.280
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.279
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.278
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.277
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-13: Version 6.4.276
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.275
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.274
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.273
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.272
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.271
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.270
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-10: Version 6.4.269
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.268
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.267
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.266
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.265
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.264
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.263
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.262
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.261
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.260
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.259
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.258
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.257
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.256
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.255
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.254
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-09: Version 6.4.253
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.252
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.251
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.250
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.249
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.248
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.247
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.246
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.245
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.244
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.243
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.242
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-08: Version 6.4.241
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.240
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.239
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.238
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.237
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.236
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.235
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.234
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.233
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.232
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.231
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.230
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.229
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.228
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.227
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.226
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.225
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-07: Version 6.4.224
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.223
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.222
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.221
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.220
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.219
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.218
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.217
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-06: Version 6.4.216
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-05: Version 6.4.215
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-04: Version 6.4.214
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-04: Version 6.4.213
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-04: Version 6.4.212
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.211
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.210
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.209
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.208
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.207
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.206
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.205
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.204
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.203
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.202
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.201
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.200
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.199
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.198
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.197
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-03: Version 6.4.196
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.195
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.194
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.193
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.192
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.191
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.190
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.189
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.188
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.187
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-02: Version 6.4.186
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-01: Version 6.4.185
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-01: Version 6.4.184
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-01: Version 6.4.183
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.182
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.181
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.180
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.179
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.178
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.177
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.176
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.175
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.174
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.173
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-31: Version 6.4.172
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.171
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.170
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.169
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.168
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.167
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.166
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.165
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.164
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.163
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.162
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.161
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.160
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-30: Version 6.4.159
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.158
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.157
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.156
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.155
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.154
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.153
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.152
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.151
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.150
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.149
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-27: Version 6.4.148
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.147
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.146
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.145
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.144
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.143
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.142
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.141
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.140
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.139
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.138
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.137
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.136
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.135
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-26: Version 6.4.134
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.133
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.132
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.131
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.130
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.129
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.128
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.127
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.126
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.125
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.124
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.123
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.122
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.121
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.120
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.119
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.118
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.117
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.116
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.115
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.114
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.113
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.112
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-25: Version 6.4.111
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.110
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.109
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.108
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.107
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.106
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.105
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.104
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.103
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.102
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.101
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.100
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.99
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.98
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-24: Version 6.4.97
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.96
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.95
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.94
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.93
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.92
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.91
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.90
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.89
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.88
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.87
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.86
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.85
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.84
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.83
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.82
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.81
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.80
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-23: Version 6.4.79
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-22: Version 6.4.78
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-21: Version 6.4.77
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-21: Version 6.4.76
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.75
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.74
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.73
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.72
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.71
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.70
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.69
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.68
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.67
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.66
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.65
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.64
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.63
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.62
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-20: Version 6.4.61
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.60
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.59
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.58
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.57
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.56
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.55
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.54
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.53
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.52
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.51
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.50
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.49
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.48
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.47
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.46
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-19: Version 6.4.45
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.44
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.43
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.42
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.41
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.40
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.39
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.38
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.37
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.36
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-18: Version 6.4.35
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.34
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.33
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.32
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.31
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.30
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.29
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.28
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.27
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.26
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.25
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.24
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.23
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-17: Version 6.4.22
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.21
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.20
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.19
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.18
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.17
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.16
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.15
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.14
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.13
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.12
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-16: Version 6.4.11
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-15: Version 6.4.10
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-15: Version 6.4.9
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-14: Version 6.4.8
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-14: Version 6.4.7
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-13: Version 6.4.6
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-13: Version 6.4.5
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-13: Version 6.4.4
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-13: Version 6.4.3
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-13: Version 6.4.2
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-13: Version 6.4.1
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-12: Version 6.3.298
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-12: Version 6.3.297
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-11: Version 6.3.296
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-11: Version 6.3.295
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-11: Version 6.3.294
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-11: Version 6.3.293
+
+ Performance and stability improvements on all platforms.
+
+
2017-10-10: Version 6.3.292
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index b675dd830e..0d6b49d3b4 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -3,28 +3,33 @@
# all paths in here must match this assumption.
vars = {
+ 'checkout_instrumented_libraries': False,
'chromium_url': 'https://chromium.googlesource.com',
}
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'adaf9e56105b814105e2d49bc4fa63e2cd4795f5',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '9338ce52d0b9bcef34c38285fbd5023b62739fac',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '21d33b1a09a77f033478ea4ffffb61e6970f83bd',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '741688ebf328da9adc52505248bf4e2ef868722c',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '644afd349826cb68204226a16c38bde13abe9c3c',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '28417458ac4dc79f68915079d0f283f682504cc0',
'v8/buildtools':
- Var('chromium_url') + '/chromium/buildtools.git' + '@' + 'f6d165d9d842ddd29056c127a5f3a3c5d8e0d2e3',
+ Var('chromium_url') + '/chromium/buildtools.git' + '@' + '505de88083136eefd056e5ee4ca0f01fe9b33de8',
'v8/base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'abcc4153b783b5e2c2dafcfbf658017ecb56989a',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '0e9a47d74970bee1bbfc063c47215406f8918699',
'v8/third_party/android_tools': {
- 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'ca9dc7245b888c75307f0619e4a39fb46a82de66',
+ 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'a2e9bc7c1b41d983577907df51d339fb1e0fd02f',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a48a6afde0ff7eeb1c847744192977e412107d6a',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '11d7efb857ae77eff1cea4640e3f3d9ac49cba0a',
+ 'condition': 'checkout_android',
+ },
+ 'v8/third_party/colorama/src': {
+ 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'condition': 'checkout_android',
},
'v8/third_party/jinja2':
@@ -32,7 +37,7 @@ deps = {
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '5e8001d9a710121ce7a68efd0804430a34b4f9e4',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '4bd9152f8a975d57c972c071dfb4ddf668e02200',
'v8/testing/gtest':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6f8a66431cb592dad629028a50b3dd418a408c87',
'v8/testing/gmock':
@@ -42,15 +47,15 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '290799bbeeba86245a355894b6ff2bb33d946d9e',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '5d4c667b271a9b39d0de73aef5ffe6879c6f8811',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b3169f97cc1a9daa1a9fbae15752588079792098',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '8688d267571de76a56746324dcc249bf4232b85a',
'v8/tools/luci-go':
- Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '9f54aa9fe06499b6bac378ae1f045be2158cf2cc',
+ Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '45a8a51fda92e123619a69e7644d9c64a320b0c1',
'v8/test/wasm-js':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '89573ee3eabc690637deeb1b8dadec13a963ec30',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a7e226a92e660a3d5413cfea4269824f513259d2',
}
recursedeps = [
@@ -248,15 +253,26 @@ hooks = [
],
},
{
- # Pull sanitizer-instrumented third-party libraries if requested via
- # GYP_DEFINES.
- 'name': 'instrumented_libraries',
- 'pattern': '\\.sha1',
- # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
- 'action': [
- 'python',
- 'v8/third_party/instrumented_libraries/scripts/download_binaries.py',
- ],
+ 'name': 'msan_chained_origins',
+ 'pattern': '.',
+ 'condition': 'checkout_instrumented_libraries',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-instrumented-libraries',
+ '-s', 'v8/third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1',
+ ],
+ },
+ {
+ 'name': 'msan_no_origins',
+ 'pattern': '.',
+ 'condition': 'checkout_instrumented_libraries',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-instrumented-libraries',
+ '-s', 'v8/third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1',
+ ],
},
{
# Update the Windows toolchain if necessary.
@@ -284,8 +300,29 @@ hooks = [
'action': ['python', 'v8/tools/clang/scripts/update.py'],
},
{
+ 'name': 'fuchsia_sdk',
+ 'pattern': '.',
+ 'condition': 'checkout_fuchsia',
+ 'action': [
+ 'python',
+ 'v8/build/fuchsia/update_sdk.py',
+ '226f6dd0cad1d6be63a353ce2649423470729ae9',
+ ],
+ },
+ {
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
+ 'name': 'regyp_if_needed',
'pattern': '.',
'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
},
+ # Download and initialize "vpython" VirtualEnv environment packages.
+ {
+ 'name': 'vpython_common',
+ 'pattern': '.',
+ 'condition': 'checkout_android',
+ 'action': [ 'vpython',
+ '-vpython-spec', 'v8/.vpython',
+ '-vpython-tool', 'install',
+ ],
+ },
]
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 621f375e33..2583a229b6 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -27,10 +27,11 @@ mstarzinger@chromium.org
mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
-petermarshall@chromium.org
neis@chromium.org
+petermarshall@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
+sergiyb@chromium.org
tebbi@chromium.org
titzer@chromium.org
ulan@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 1ef291f6fa..a595220a09 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -281,6 +281,8 @@ def _CommonChecks(input_api, output_api):
results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
results.extend(_CheckMacroUndefs(input_api, output_api))
+ results.extend(input_api.RunTests(
+ input_api.canned_checks.CheckVPythonSpec(input_api, output_api)))
return results
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 132a4ea66f..51869ee952 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -189,6 +189,8 @@
// trace points would carry a significant performance cost of acquiring a lock
// and resolving the category.
+// Check that nobody includes this file directly. Clients are supposed to
+// include the surrounding "trace_event.h" of their project instead.
#if defined(TRACE_EVENT0)
#error "Another copy of this file has already been included."
#endif
diff --git a/deps/v8/gni/isolate.gni b/deps/v8/gni/isolate.gni
index 4bdf0c0fad..6ad25c2774 100644
--- a/deps/v8/gni/isolate.gni
+++ b/deps/v8/gni/isolate.gni
@@ -106,6 +106,11 @@ template("v8_isolate_run") {
} else {
use_external_startup_data = "0"
}
+ if (is_ubsan_vptr) {
+ ubsan_vptr = "1"
+ } else {
+ ubsan_vptr = "0"
+ }
if (v8_use_snapshot) {
use_snapshot = "true"
} else {
@@ -168,6 +173,8 @@ template("v8_isolate_run") {
"--config-variable",
"target_arch=$target_arch",
"--config-variable",
+ "ubsan_vptr=$ubsan_vptr",
+ "--config-variable",
"v8_use_external_startup_data=$use_external_startup_data",
"--config-variable",
"v8_use_snapshot=$use_snapshot",
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 0467720f45..4b8292a244 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -174,3 +174,13 @@ template("v8_component") {
configs += v8_add_configs
}
}
+
+template("v8_static_library") {
+ static_library(target_name) {
+ complete_static_lib = true
+ forward_variables_from(invoker, "*", [ "configs" ])
+ configs += invoker.configs
+ configs -= v8_remove_configs
+ configs += v8_add_configs
+ }
+}
diff --git a/deps/v8/gypfiles/all.gyp b/deps/v8/gypfiles/all.gyp
index bc9d9650eb..593ba2a795 100644
--- a/deps/v8/gypfiles/all.gyp
+++ b/deps/v8/gypfiles/all.gyp
@@ -46,7 +46,7 @@
'../tools/gcmole/run_gcmole.gyp:*',
'../tools/jsfunfuzz/jsfunfuzz.gyp:*',
'../tools/run-deopt-fuzzer.gyp:*',
- '../tools/run-valgrind.gyp:*',
+ '../tools/run-num-fuzzer.gyp:*',
],
}],
]
diff --git a/deps/v8/gypfiles/features.gypi b/deps/v8/gypfiles/features.gypi
index 1d3f67daee..d285ee21da 100644
--- a/deps/v8/gypfiles/features.gypi
+++ b/deps/v8/gypfiles/features.gypi
@@ -85,7 +85,7 @@
'v8_check_microtasks_scopes_consistency%': 'false',
# Enable concurrent marking.
- 'v8_enable_concurrent_marking%': 0,
+ 'v8_enable_concurrent_marking%': 1,
# Controls the threshold for on-heap/off-heap Typed Arrays.
'v8_typed_array_max_size_in_heap%': 64,
diff --git a/deps/v8/gypfiles/isolate.gypi b/deps/v8/gypfiles/isolate.gypi
index 149818c8d0..3e85b530e2 100644
--- a/deps/v8/gypfiles/isolate.gypi
+++ b/deps/v8/gypfiles/isolate.gypi
@@ -80,6 +80,7 @@
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
+ '--config-variable', 'ubsan_vptr=0',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],
diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi
index 63930d8aef..7a45dc615f 100644
--- a/deps/v8/gypfiles/standalone.gypi
+++ b/deps/v8/gypfiles/standalone.gypi
@@ -439,6 +439,7 @@
'-Wno-undefined-var-template',
# TODO(yangguo): issue 5258
'-Wno-nonportable-include-path',
+ '-Wno-tautological-constant-compare',
],
'conditions':[
['OS=="android"', {
@@ -783,6 +784,11 @@
# over the place.
'-fno-strict-aliasing',
],
+ }, {
+ 'cflags' : [
+ # TODO(hans): https://crbug.com/767059
+ '-Wno-tautological-constant-compare',
+ ],
}],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index b615088300..04b47b8d2e 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -8,6 +8,7 @@
#include "libplatform/libplatform-export.h"
#include "libplatform/v8-tracing.h"
#include "v8-platform.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include)
namespace v8 {
namespace platform {
@@ -33,12 +34,21 @@ enum class MessageLoopBehavior : bool {
* If |tracing_controller| is nullptr, the default platform will create a
* v8::platform::TracingController instance and use it.
*/
-V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
+V8_PLATFORM_EXPORT std::unique_ptr<v8::Platform> NewDefaultPlatform(
int thread_pool_size = 0,
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
InProcessStackDumping in_process_stack_dumping =
InProcessStackDumping::kEnabled,
- v8::TracingController* tracing_controller = nullptr);
+ std::unique_ptr<v8::TracingController> tracing_controller = {});
+
+V8_PLATFORM_EXPORT V8_DEPRECATE_SOON(
+ "Use NewDefaultPlatform instead",
+ v8::Platform* CreateDefaultPlatform(
+ int thread_pool_size = 0,
+ IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
+ InProcessStackDumping in_process_stack_dumping =
+ InProcessStackDumping::kEnabled,
+ v8::TracingController* tracing_controller = nullptr));
/**
* Pumps the message loop for the given isolate.
@@ -46,7 +56,7 @@ V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
* The caller has to make sure that this is called from the right thread.
* Returns true if a task was executed, and false otherwise. Unless requested
* through the |behavior| parameter, this call does not block if no task is
- * pending. The |platform| has to be created using |CreateDefaultPlatform|.
+ * pending. The |platform| has to be created using |NewDefaultPlatform|.
*/
V8_PLATFORM_EXPORT bool PumpMessageLoop(
v8::Platform* platform, v8::Isolate* isolate,
@@ -60,7 +70,7 @@ V8_PLATFORM_EXPORT void EnsureEventLoopInitialized(v8::Platform* platform,
*
* The caller has to make sure that this is called from the right thread.
* This call does not block if no task is pending. The |platform| has to be
- * created using |CreateDefaultPlatform|.
+ * created using |NewDefaultPlatform|.
*/
V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
v8::Isolate* isolate,
@@ -69,13 +79,14 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
/**
* Attempts to set the tracing controller for the given platform.
*
- * The |platform| has to be created using |CreateDefaultPlatform|.
+ * The |platform| has to be created using |NewDefaultPlatform|.
*
- * DEPRECATED: Will be removed soon.
*/
-V8_PLATFORM_EXPORT void SetTracingController(
- v8::Platform* platform,
- v8::platform::tracing::TracingController* tracing_controller);
+V8_PLATFORM_EXPORT V8_DEPRECATE_SOON(
+ "Access the DefaultPlatform directly",
+ void SetTracingController(
+ v8::Platform* platform,
+ v8::platform::tracing::TracingController* tracing_controller));
} // namespace platform
} // namespace v8
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index 9dcf3d7bca..8c1febf762 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -43,8 +43,8 @@ class V8_PLATFORM_EXPORT TraceObject {
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags, int64_t timestamp, int64_t cpu_timestamp);
- void UpdateDuration(int64_t timestamp, int64_t cpu_timestamp);
+ unsigned int flags);
+ void UpdateDuration();
void InitializeForTesting(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
@@ -247,13 +247,6 @@ class V8_PLATFORM_EXPORT TracingController
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) override;
- uint64_t AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_enabled_flag, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags, int64_t timestamp) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) override;
void AddTraceStateObserver(
@@ -266,10 +259,6 @@ class V8_PLATFORM_EXPORT TracingController
static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
- protected:
- virtual int64_t CurrentTimestampMicroseconds();
- virtual int64_t CurrentCpuTimestampMicroseconds();
-
private:
const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
void UpdateCategoryGroupEnabledFlag(size_t category_index);
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index d0bb9b47fe..5478e127f9 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -215,6 +215,20 @@ class V8_EXPORT V8InspectorClient {
virtual void maxAsyncCallStackDepthChanged(int depth) {}
};
+// These stack trace ids are intended to be passed between debuggers and be
+// resolved later. This allows tracking cross-debugger calls and stepping between
+// them if a single client connects to multiple debuggers.
+struct V8_EXPORT V8StackTraceId {
+ uintptr_t id;
+ std::pair<int64_t, int64_t> debugger_id;
+
+ V8StackTraceId();
+ V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id);
+ ~V8StackTraceId() = default;
+
+ bool IsInvalid() const;
+};
+
class V8_EXPORT V8Inspector {
public:
static std::unique_ptr<V8Inspector> create(v8::Isolate*, V8InspectorClient*);
@@ -237,6 +251,11 @@ class V8_EXPORT V8Inspector {
virtual void asyncTaskFinished(void* task) = 0;
virtual void allAsyncTasksCanceled() = 0;
+ virtual V8StackTraceId storeCurrentStackTrace(
+ const StringView& description) = 0;
+ virtual void externalAsyncTaskStarted(const V8StackTraceId& parent) = 0;
+ virtual void externalAsyncTaskFinished(const V8StackTraceId& parent) = 0;
+
// Exceptions instrumentation.
virtual unsigned exceptionThrown(
v8::Local<v8::Context>, const StringView& message,
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 43420a972c..f814543e66 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -119,11 +119,11 @@ class TracingController {
}
/**
- * Adds a trace event to the platform tracing system. These function calls are
+ * Adds a trace event to the platform tracing system. This function call is
* usually the result of a TRACE_* macro from trace_event_common.h when
* tracing and the category of the particular trace are enabled. It is not
- * advisable to call these functions on their own; they are really only meant
- * to be used by the trace macros. The returned handle can be used by
+ * advisable to call this function on its own; it is really only meant to be
+ * used by the trace macros. The returned handle can be used by
* UpdateTraceEventDuration to update the duration of COMPLETE events.
*/
virtual uint64_t AddTraceEvent(
@@ -135,15 +135,6 @@ class TracingController {
unsigned int flags) {
return 0;
}
- virtual uint64_t AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_enabled_flag, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags, int64_t timestamp) {
- return 0;
- }
/**
* Sets the duration field of a COMPLETE trace event. It must be called with
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 621ca8b215..a86402be92 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -287,6 +287,13 @@ class V8_EXPORT CpuProfiler {
static CpuProfiler* New(Isolate* isolate);
/**
+ * Synchronously collect current stack sample in all profilers attached to
+ * the |isolate|. The call does not affect number of ticks recorded for
+ * the current top node.
+ */
+ static void CollectSample(Isolate* isolate);
+
+ /**
* Disposes the CPU profiler object.
*/
void Dispose();
@@ -322,7 +329,8 @@ class V8_EXPORT CpuProfiler {
* Recording the forced sample does not contribute to the aggregated
* profile statistics.
*/
- void CollectSample();
+ V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
+ void CollectSample());
/**
* Tells the profiler whether the embedder is idle.
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index a04a5e84f8..15ea225dc1 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -393,9 +393,14 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
*/
Global<V> SetUnique(const K& key, Global<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
+ WeakCallbackType callback_type =
+ Traits::kCallbackType == kWeakWithInternalFields
+ ? WeakCallbackType::kInternalFields
+ : WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
- Traits::WeakCallbackParameter(this, key, value), WeakCallback);
+ Traits::WeakCallbackParameter(this, key, value), WeakCallback,
+ callback_type);
}
PersistentContainerValue old_value =
Traits::Set(this->impl(), key, this->ClearAndLeak(persistent));
diff --git a/deps/v8/include/v8-version-string.h b/deps/v8/include/v8-version-string.h
index eab0934804..fb84144d54 100644
--- a/deps/v8/include/v8-version-string.h
+++ b/deps/v8/include/v8-version-string.h
@@ -29,9 +29,10 @@
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) "." V8_S( \
V8_PATCH_LEVEL) V8_EMBEDDER_STRING V8_CANDIDATE_STRING
#else
-#define V8_VERSION_STRING \
- V8_S(V8_MAJOR_VERSION) \
- "." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) V8_CANDIDATE_STRING
+#define V8_VERSION_STRING \
+ V8_S(V8_MAJOR_VERSION) \
+ "." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) \
+ V8_EMBEDDER_STRING V8_CANDIDATE_STRING
#endif
#endif // V8_VERSION_STRING_H_
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 86c50fd4f5..3503f3fbb4 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
-#define V8_MINOR_VERSION 3
-#define V8_BUILD_NUMBER 292
-#define V8_PATCH_LEVEL 48
+#define V8_MINOR_VERSION 4
+#define V8_BUILD_NUMBER 388
+#define V8_PATCH_LEVEL 40
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index f100153364..c09f610333 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -433,20 +433,6 @@ class WeakCallbackInfo {
V8_INLINE T* GetParameter() const { return parameter_; }
V8_INLINE void* GetInternalField(int index) const;
- V8_INLINE V8_DEPRECATED("use indexed version",
- void* GetInternalField1() const) {
- return embedder_fields_[0];
- }
- V8_INLINE V8_DEPRECATED("use indexed version",
- void* GetInternalField2() const) {
- return embedder_fields_[1];
- }
-
- V8_DEPRECATED("Not realiable once SetSecondPassCallback() was used.",
- bool IsFirstPass() const) {
- return callback_ != nullptr;
- }
-
// When first called, the embedder MUST Reset() the Global which triggered the
// callback. The Global itself is unusable for anything else. No v8 other api
// calls may be called in the first callback. Should additional work be
@@ -579,16 +565,22 @@ template <class T> class PersistentBase {
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
- V8_INLINE void MarkIndependent();
+ V8_DEPRECATE_SOON(
+ "Objects are always considered independent. "
+ "Use MarkActive to avoid collecting otherwise dead weak handles.",
+ V8_INLINE void MarkIndependent());
/**
* Marks the reference to this object as active. The scavenge garbage
- * collection should not reclaim the objects marked as active.
+ * collection should not reclaim the objects marked as active, even if the
+ * object held by the handle is otherwise unreachable.
+ *
* This bit is cleared after the each garbage collection pass.
*/
V8_INLINE void MarkActive();
- V8_INLINE bool IsIndependent() const;
+ V8_DEPRECATE_SOON("See MarkIndependent.",
+ V8_INLINE bool IsIndependent() const);
/** Checks if the handle holds the only reference to an object. */
V8_INLINE bool IsNearDeath() const;
@@ -984,9 +976,6 @@ class V8_EXPORT Data {
};
/**
- * This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes. DO NOT USE.
- *
* A container type that holds relevant metadata for module loading.
*
* This is passed back to the embedder as part of
@@ -1008,9 +997,6 @@ class V8_EXPORT ScriptOrModule {
};
/**
- * This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes. DO NOT USE.
- *
* An array to hold Primitive values. This is used by the embedder to
* pass host defined options to the ScriptOptions during compilation.
*
@@ -1441,6 +1427,26 @@ class V8_EXPORT ScriptCompiler {
};
/**
+ * The reason for which we are not requesting or providing a code cache.
+ */
+ enum NoCacheReason {
+ kNoCacheNoReason = 0,
+ kNoCacheBecauseCachingDisabled,
+ kNoCacheBecauseNoResource,
+ kNoCacheBecauseInlineScript,
+ kNoCacheBecauseModule,
+ kNoCacheBecauseStreamingSource,
+ kNoCacheBecauseInspector,
+ kNoCacheBecauseScriptTooSmall,
+ kNoCacheBecauseCacheTooCold,
+ kNoCacheBecauseV8Extension,
+ kNoCacheBecauseExtensionModule,
+ kNoCacheBecausePacScript,
+ kNoCacheBecauseInDocumentWrite,
+ kNoCacheBecauseResourceWithNoCacheHandler
+ };
+
+ /**
* Compiles the specified script (context-independent).
* Cached data as part of the source object can be optionally produced to be
* consumed later to speed up compilation of identical source scripts.
@@ -1456,10 +1462,12 @@ class V8_EXPORT ScriptCompiler {
static V8_DEPRECATED("Use maybe version",
Local<UnboundScript> CompileUnbound(
Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions));
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason));
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions);
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
/**
* Compiles the specified script (bound to current context).
@@ -1475,10 +1483,12 @@ class V8_EXPORT ScriptCompiler {
static V8_DEPRECATED(
"Use maybe version",
Local<Script> Compile(Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions));
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, Source* source,
- CompileOptions options = kNoCompileOptions);
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
/**
* Returns a task which streams script data into V8, or NULL if the script
@@ -1568,7 +1578,8 @@ class V8_EXPORT ScriptCompiler {
private:
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
- Isolate* isolate, Source* source, CompileOptions options);
+ Isolate* isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason);
};
@@ -2070,20 +2081,6 @@ class V8_EXPORT ValueDeserializer {
PrivateData* private_;
};
-/**
- * A map whose keys are referenced weakly. It is similar to JavaScript WeakMap
- * but can be created without entering a v8::Context and hence shouldn't
- * escape to JavaScript.
- */
-class V8_EXPORT NativeWeakMap : public Data {
- public:
- static Local<NativeWeakMap> New(Isolate* isolate);
- void Set(Local<Value> key, Local<Value> value);
- Local<Value> Get(Local<Value> key) const;
- bool Has(Local<Value> key);
- bool Delete(Local<Value> key);
-};
-
// --- Value ---
@@ -3709,8 +3706,6 @@ class FunctionCallbackInfo {
V8_INLINE int Length() const;
/** Accessor for the available arguments. */
V8_INLINE Local<Value> operator[](int i) const;
- V8_INLINE V8_DEPRECATED("Use Data() to explicitly pass Callee instead",
- Local<Function> Callee() const);
/** Returns the receiver. This corresponds to the "this" value. */
V8_INLINE Local<Object> This() const;
/**
@@ -3735,7 +3730,7 @@ class FunctionCallbackInfo {
/** The ReturnValue for the call. */
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 8;
+ static const int kArgsLength = 6;
protected:
friend class internal::FunctionCallbackArguments;
@@ -3746,9 +3741,7 @@ class FunctionCallbackInfo {
static const int kReturnValueDefaultValueIndex = 2;
static const int kReturnValueIndex = 3;
static const int kDataIndex = 4;
- static const int kCalleeIndex = 5;
- static const int kContextSaveIndex = 6;
- static const int kNewTargetIndex = 7;
+ static const int kNewTargetIndex = 5;
V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values, int length);
@@ -5262,7 +5255,7 @@ typedef void (*GenericNamedPropertySetterCallback)(
* defineProperty().
*
* Use `info.GetReturnValue().Set(value)` to set the property attributes. The
- * value is an interger encoding a `v8::PropertyAttribute`.
+ * value is an integer encoding a `v8::PropertyAttribute`.
*
* \param property The name of the property for which the request was
* intercepted.
@@ -5986,7 +5979,7 @@ class V8_EXPORT ObjectTemplate : public Template {
bool IsImmutableProto();
/**
- * Makes the ObjectTempate for an immutable prototype exotic object, with an
+ * Makes the ObjectTemplate for an immutable prototype exotic object, with an
* immutable __proto__.
*/
void SetImmutableProto();
@@ -6292,6 +6285,20 @@ typedef MaybeLocal<Promise> (*HostImportModuleDynamicallyCallback)(
Local<String> specifier);
/**
+ * HostInitializeImportMetaObjectCallback is called the first time import.meta
+ * is accessed for a module. Subsequent access will reuse the same value.
+ *
+ * The method combines two implementation-defined abstract operations into one:
+ * HostGetImportMetaProperties and HostFinalizeImportMeta.
+ *
+ * The embedder should use v8::Object::CreateDataProperty to add properties on
+ * the meta object.
+ */
+typedef void (*HostInitializeImportMetaObjectCallback)(Local<Context> context,
+ Local<Module> module,
+ Local<Object> meta);
+
+/**
* PromiseHook with type kInit is called when a new promise is
* created. When a new promise is created as part of the chain in the
* case of Promise.then or in the intermediate promises created by
@@ -6418,6 +6425,9 @@ typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context,
// --- WebAssembly compilation callbacks ---
typedef bool (*ExtensionCallback)(const FunctionCallbackInfo<Value>&);
+typedef bool (*AllowWasmCodeGenerationCallback)(Local<Context> context,
+ Local<String> source);
+
// --- Callback for APIs defined on v8-supported objects, but implemented
// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
typedef void (*ApiImplementationCallback)(const FunctionCallbackInfo<Value>&);
@@ -7049,9 +7059,15 @@ class V8_EXPORT Isolate {
kConstructorNonUndefinedPrimitiveReturn = 39,
kLabeledExpressionStatement = 40,
kLineOrParagraphSeparatorAsLineTerminator = 41,
+ kIndexAccessor = 42,
+ kErrorCaptureStackTrace = 43,
+ kErrorPrepareStackTrace = 44,
+ kErrorStackTraceLimit = 45,
+ kWebAssemblyInstantiation = 46,
// If you add new values here, you'll also need to update Chromium's:
- // UseCounter.h, V8PerIsolateData.cpp, histograms.xml
+ // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
+ // this list need to be landed first, then changes on the Chromium side.
kUseCounterFeatureCount // This enum value must be last.
};
@@ -7102,9 +7118,6 @@ class V8_EXPORT Isolate {
AbortOnUncaughtExceptionCallback callback);
/**
- * This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes. DO NOT USE.
- *
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
@@ -7112,6 +7125,16 @@ class V8_EXPORT Isolate {
HostImportModuleDynamicallyCallback callback);
/**
+ * This is an unfinished experimental feature, and is only exposed
+ * here for internal testing purposes. DO NOT USE.
+ *
+ * This specifies the callback called by the upcoming import.meta
+ * language feature to retrieve host-defined meta data for a module.
+ */
+ void SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallback callback);
+
+ /**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to guide heuristics.
* It is allowed to call this function from another thread while
@@ -7275,8 +7298,8 @@ class V8_EXPORT Isolate {
* is initialized. It is the embedder's responsibility to stop all CPU
* profiling activities if it has started any.
*/
- V8_DEPRECATE_SOON("CpuProfiler should be created with CpuProfiler::New call.",
- CpuProfiler* GetCpuProfiler());
+ V8_DEPRECATED("CpuProfiler should be created with CpuProfiler::New call.",
+ CpuProfiler* GetCpuProfiler());
/** Returns true if this isolate has a current context. */
bool InContext();
@@ -7703,6 +7726,13 @@ class V8_EXPORT Isolate {
AllowCodeGenerationFromStringsCallback callback);
/**
+ * Set the callback to invoke to check if wasm code generation should
+ * be allowed.
+ */
+ void SetAllowWasmCodeGenerationCallback(
+ AllowWasmCodeGenerationCallback callback);
+
+ /**
* Embedder over{ride|load} injection points for wasm APIs. The expectation
* is that the embedder sets them at most once.
*/
@@ -7958,50 +7988,6 @@ class V8_EXPORT V8 {
void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
/**
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects. It is possible
- * to specify the GCType filter for your callback. But it is not possible to
- * register the same callback function two times with different
- * GCType filters.
- */
- static V8_DEPRECATED(
- "Use isolate version",
- void AddGCPrologueCallback(GCCallback callback,
- GCType gc_type_filter = kGCTypeAll));
-
- /**
- * This function removes callback which was installed by
- * AddGCPrologueCallback function.
- */
- static V8_DEPRECATED("Use isolate version",
- void RemoveGCPrologueCallback(GCCallback callback));
-
- /**
- * Enables the host application to receive a notification after a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects. It is possible
- * to specify the GCType filter for your callback. But it is not possible to
- * register the same callback function two times with different
- * GCType filters.
- */
- static V8_DEPRECATED(
- "Use isolate version",
- void AddGCEpilogueCallback(GCCallback callback,
- GCType gc_type_filter = kGCTypeAll));
-
- /**
- * This function removes callback which was installed by
- * AddGCEpilogueCallback function.
- */
- static V8_DEPRECATED("Use isolate version",
- void RemoveGCEpilogueCallback(GCCallback callback));
-
- /**
* Initializes V8. This function needs to be called before the first Isolate
* is created. It always returns true.
*/
@@ -8086,35 +8072,6 @@ class V8_EXPORT V8 {
void VisitExternalResources(ExternalResourceVisitor* visitor));
/**
- * Iterates through all the persistent handles in the current isolate's heap
- * that have class_ids.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor));
-
- /**
- * Iterates through all the persistent handles in isolate's heap that have
- * class_ids.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void VisitHandlesWithClassIds(Isolate* isolate,
- PersistentHandleVisitor* visitor));
-
- /**
- * Iterates through all the persistent handles in the current isolate's heap
- * that have class_ids and are candidates to be marked as partially dependent
- * handles. This will visit handles to young objects created since the last
- * garbage collection but is free to visit an arbitrary superset of these
- * objects.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void VisitHandlesForPartialDependence(Isolate* isolate,
- PersistentHandleVisitor* visitor));
-
- /**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. Returns true on success.
*
@@ -8383,18 +8340,45 @@ class Maybe {
friend Maybe<U> Just(const U& u);
};
-
template <class T>
inline Maybe<T> Nothing() {
return Maybe<T>();
}
-
template <class T>
inline Maybe<T> Just(const T& t) {
return Maybe<T>(t);
}
+// A template specialization of Maybe<T> for the case of T = void.
+template <>
+class Maybe<void> {
+ public:
+ V8_INLINE bool IsNothing() const { return !is_valid_; }
+ V8_INLINE bool IsJust() const { return is_valid_; }
+
+ V8_INLINE bool operator==(const Maybe& other) const {
+ return IsJust() == other.IsJust();
+ }
+
+ V8_INLINE bool operator!=(const Maybe& other) const {
+ return !operator==(other);
+ }
+
+ private:
+ struct JustTag {};
+
+ Maybe() : is_valid_(false) {}
+ explicit Maybe(JustTag) : is_valid_(true) {}
+
+ bool is_valid_;
+
+ template <class U>
+ friend Maybe<U> Nothing();
+ friend Maybe<void> JustVoid();
+};
+
+inline Maybe<void> JustVoid() { return Maybe<void>(Maybe<void>::JustTag()); }
/**
* An external exception handler.
@@ -8803,7 +8787,7 @@ class V8_EXPORT Context {
* stack.
* https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
*/
- class BackupIncumbentScope {
+ class V8_EXPORT BackupIncumbentScope {
public:
/**
* |backup_incumbent_context| is pushed onto the backup incumbent settings
@@ -9047,8 +9031,7 @@ class Internals {
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
- static const int kMapInstanceTypeAndBitFieldOffset =
- 1 * kApiPointerSize + kApiIntSize;
+ static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 4 * kApiPointerSize + sizeof(double);
@@ -9084,14 +9067,14 @@ class Internals {
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
static const int kNodeStateIsNearDeathValue = 4;
- static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
- static const int kJSApiObjectType = 0xbf;
- static const int kJSObjectType = 0xc0;
+ static const int kJSSpecialApiObjectType = 0xbc;
+ static const int kJSApiObjectType = 0xc0;
+ static const int kJSObjectType = 0xc1;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@@ -9125,9 +9108,7 @@ class Internals {
V8_INLINE static int GetInstanceType(const internal::Object* obj) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
- // Map::InstanceType is defined so that it will always be loaded into
- // the LS 8 bits of one 16-bit word, regardless of endianess.
- return ReadField<uint16_t>(map, kMapInstanceTypeAndBitFieldOffset) & 0xff;
+ return ReadField<uint16_t>(map, kMapInstanceTypeOffset);
}
V8_INLINE static int GetOddballKind(const internal::Object* obj) {
@@ -9284,16 +9265,11 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
M::Copy(that, this);
}
-
template <class T>
bool PersistentBase<T>::IsIndependent() const {
- typedef internal::Internals I;
- if (this->IsEmpty()) return false;
- return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
- I::kNodeIsIndependentShift);
+ return true;
}
-
template <class T>
bool PersistentBase<T>::IsNearDeath() const {
typedef internal::Internals I;
@@ -9374,13 +9350,7 @@ void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
}
template <class T>
-void PersistentBase<T>::MarkIndependent() {
- typedef internal::Internals I;
- if (this->IsEmpty()) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
- true,
- I::kNodeIsIndependentShift);
-}
+void PersistentBase<T>::MarkIndependent() {}
template <class T>
void PersistentBase<T>::MarkActive() {
@@ -9551,13 +9521,6 @@ Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
template<typename T>
-Local<Function> FunctionCallbackInfo<T>::Callee() const {
- return Local<Function>(reinterpret_cast<Function*>(
- &implicit_args_[kCalleeIndex]));
-}
-
-
-template<typename T>
Local<Object> FunctionCallbackInfo<T>::This() const {
return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
}
@@ -9697,7 +9660,8 @@ Local<Value> Object::GetInternalField(int index) {
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
if (instance_type == I::kJSObjectType ||
- instance_type == I::kJSApiObjectType) {
+ instance_type == I::kJSApiObjectType ||
+ instance_type == I::kJSSpecialApiObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
@@ -9717,7 +9681,8 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
if (V8_LIKELY(instance_type == I::kJSObjectType ||
- instance_type == I::kJSApiObjectType)) {
+ instance_type == I::kJSApiObjectType ||
+ instance_type == I::kJSSpecialApiObjectType)) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
return I::ReadField<void*>(obj, offset);
}
@@ -10404,24 +10369,6 @@ void V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
isolate->VisitExternalResources(visitor);
}
-
-void V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->VisitHandlesWithClassIds(visitor);
-}
-
-
-void V8::VisitHandlesWithClassIds(Isolate* isolate,
- PersistentHandleVisitor* visitor) {
- isolate->VisitHandlesWithClassIds(visitor);
-}
-
-
-void V8::VisitHandlesForPartialDependence(Isolate* isolate,
- PersistentHandleVisitor* visitor) {
- isolate->VisitHandlesForPartialDependence(visitor);
-}
-
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index dcf8e5c0b7..dbc20d5f80 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -9,16 +9,8 @@ commit_burst_delay: 60
max_commit_burst: 1
gerrit {}
-rietveld {
- url: "https://codereview.chromium.org"
-}
verifiers {
- reviewer_lgtm {
- committer_list: "project-v8-committers"
- dry_run_access_list: "project-v8-tryjob-access"
- }
-
gerrit_cq_ability {
committer_list: "project-v8-committers"
dry_run_access_list: "project-v8-tryjob-access"
@@ -30,20 +22,26 @@ verifiers {
try_job {
buckets {
- name: "master.tryserver.v8"
+ name: "luci.v8.try"
builders { name: "v8_android_arm_compile_rel" }
+ builders { name: "v8_fuchsia_rel_ng" }
+ builders { name: "v8_linux64_gcc_compile_dbg" }
+ builders { name: "v8_linux_gcc_compile_rel" }
+ builders { name: "v8_linux_shared_compile_rel" }
+ builders { name: "v8_presubmit" }
+ builders {
+ name: "v8_win64_msvc_compile_rel"
+ experiment_percentage: 20
+ }
+ }
+ buckets {
+ name: "master.tryserver.v8"
builders { name: "v8_node_linux64_rel" }
builders { name: "v8_linux64_asan_rel_ng" }
builders {
name: "v8_linux64_asan_rel_ng_triggered"
triggered_by: "v8_linux64_asan_rel_ng"
}
- builders { name: "v8_linux64_avx2_rel_ng" }
- builders {
- name: "v8_linux64_avx2_rel_ng_triggered"
- triggered_by: "v8_linux64_avx2_rel_ng"
- }
- builders { name: "v8_linux64_gcc_compile_dbg" }
builders { name: "v8_linux64_gyp_rel_ng" }
builders {
name: "v8_linux64_gyp_rel_ng_triggered"
@@ -75,7 +73,6 @@ verifiers {
name: "v8_linux_dbg_ng_triggered"
triggered_by: "v8_linux_dbg_ng"
}
- builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel_ng" }
@@ -98,7 +95,6 @@ verifiers {
name: "v8_mac_rel_ng_triggered"
triggered_by: "v8_mac_rel_ng"
}
- builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel_ng" }
builders {
name: "v8_win64_rel_ng_triggered"
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 3cbcddc073..afc7225148 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -62,9 +62,9 @@
'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
'V8 Linux - verify csa': 'gn_release_x86_verify_csa',
# Linux64.
- 'V8 Linux64 - builder': 'gn_release_x64_valgrind',
+ 'V8 Linux64 - builder': 'gn_release_x64',
'V8 Linux64 - concurrent marking - builder': 'gn_release_x64_concurrent_marking',
- 'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
+ 'V8 Linux64 - debug builder': 'gn_debug_x64',
'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
'V8 Linux64 - gyp': 'gyp_release_x64',
@@ -74,11 +74,10 @@
'V8 Win32 - debug builder': 'gn_debug_x86_minimal_symbols',
'V8 Win32 - nosnap - shared':
'gn_release_x86_no_snap_shared_minimal_symbols',
+ 'V8 Win32 ASAN': 'gn_release_x86_asan_no_lsan',
'V8 Win64': 'gn_release_x64_minimal_symbols',
'V8 Win64 - debug': 'gn_debug_x64_minimal_symbols',
- # TODO(machenbach): Switch plugins on when errors are fixed.
- 'V8 Win64 - clang': 'gn_release_x64_clang',
- 'V8 Win64 ASAN': 'gn_release_x64_asan_no_lsan',
+ 'V8 Win64 - msvc': 'gn_release_x64_msvc',
# Mac.
'V8 Mac': 'gn_release_x86',
'V8 Mac - debug': 'gn_debug_x86',
@@ -96,6 +95,8 @@
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc',
# FYI.
+ 'V8 Fuchsia': 'gn_release_x64_fuchsia',
+ 'V8 Fuchsia - debug': 'gn_debug_x64_fuchsia',
'V8 Linux - swarming staging': 'gn_release_x64',
'V8 Linux64 - cfi': 'gn_release_x64_cfi',
'V8 Linux64 UBSanVptr': 'gn_release_x64_ubsan_vptr',
@@ -107,6 +108,13 @@
'V8 Random Deopt Fuzzer - debug': 'gn_debug_x64',
},
'client.v8.clusterfuzz': {
+ 'V8 Win32 ASAN - release builder':
+ 'gn_release_x86_asan_no_lsan_verify_heap',
+ # Note this is called a debug builder, but it uses a release build
+ # configuration with dchecks (which enables DEBUG in V8), since win-asan
+ # debug is not supported.
+ 'V8 Win32 ASAN - debug builder':
+ 'gn_release_x86_asan_no_lsan_verify_heap_dchecks',
'V8 Mac64 ASAN - release builder':
'gn_release_x64_asan_no_lsan_edge_verify_heap',
'V8 Mac64 ASAN - debug builder':
@@ -127,7 +135,7 @@
'gn_release_simulate_arm64_msan_no_origins_edge',
'V8 Linux MSAN chained origins':
'gn_release_simulate_arm64_msan_edge',
- 'V8 Linux64 UBSan - release builder': 'gn_release_x64_ubsan_recover',
+ 'V8 Linux64 TSAN - release builder': 'gn_release_x64_tsan',
'V8 Linux64 UBSanVptr - release builder':
'gn_release_x64_ubsan_vptr_recover_edge',
},
@@ -150,11 +158,11 @@
'V8 Linux - mipsel - sim - builder': 'gn_release_simulate_mipsel',
'V8 Linux - mips64el - sim - builder': 'gn_release_simulate_mips64el',
# PPC.
- 'V8 Linux - ppc - sim': 'gyp_release_simulate_ppc',
- 'V8 Linux - ppc64 - sim': 'gyp_release_simulate_ppc64',
+ 'V8 Linux - ppc - sim': 'gn_release_simulate_ppc',
+ 'V8 Linux - ppc64 - sim': 'gn_release_simulate_ppc64',
# S390.
- 'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
- 'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
+ 'V8 Linux - s390 - sim': 'gn_release_simulate_s390',
+ 'V8 Linux - s390x - sim': 'gn_release_simulate_s390x',
},
'client.v8.branches': {
'V8 Linux - beta branch': 'gn_release_x86',
@@ -173,19 +181,19 @@
'V8 mips64el - sim - stable branch': 'gn_release_simulate_mips64el',
'V8 mipsel - sim - beta branch': 'gn_release_simulate_mipsel',
'V8 mipsel - sim - stable branch': 'gn_release_simulate_mipsel',
- 'V8 ppc - sim - beta branch': 'gyp_release_simulate_ppc',
- 'V8 ppc - sim - stable branch': 'gyp_release_simulate_ppc',
- 'V8 ppc64 - sim - beta branch': 'gyp_release_simulate_ppc64',
- 'V8 ppc64 - sim - stable branch': 'gyp_release_simulate_ppc64',
- 'V8 s390 - sim - beta branch': 'gyp_release_simulate_s390',
- 'V8 s390 - sim - stable branch': 'gyp_release_simulate_s390',
- 'V8 s390x - sim - beta branch': 'gyp_release_simulate_s390x',
- 'V8 s390x - sim - stable branch': 'gyp_release_simulate_s390x',
+ 'V8 ppc - sim - beta branch': 'gn_release_simulate_ppc',
+ 'V8 ppc - sim - stable branch': 'gn_release_simulate_ppc',
+ 'V8 ppc64 - sim - beta branch': 'gn_release_simulate_ppc64',
+ 'V8 ppc64 - sim - stable branch': 'gn_release_simulate_ppc64',
+ 'V8 s390 - sim - beta branch': 'gn_release_simulate_s390',
+ 'V8 s390 - sim - stable branch': 'gn_release_simulate_s390',
+ 'V8 s390x - sim - beta branch': 'gn_release_simulate_s390x',
+ 'V8 s390x - sim - stable branch': 'gn_release_simulate_s390x',
},
'tryserver.v8': {
+ 'v8_fuchsia_rel_ng': 'gn_release_x64_fuchsia_trybot',
'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
'v8_linux_verify_csa_rel_ng': 'gn_release_x86_verify_csa',
- 'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
'v8_linux_noi18n_rel_ng': 'gn_release_x86_no_i18n_trybot',
@@ -194,12 +202,11 @@
'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
+ 'v8_linux_shared_compile_rel': 'gn_release_x86_shared_verify_heap',
'v8_linux64_gcc_compile_dbg': 'gn_debug_x64_gcc',
- 'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
+ 'v8_linux64_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
- 'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
- 'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_sanitizer_coverage_rel':
@@ -208,12 +215,14 @@
'v8_linux64_tsan_concurrent_marking_rel_ng':
'gn_release_x64_tsan_concurrent_marking_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'gn_release_x64_ubsan_vptr_minimal_symbols',
+ 'v8_win_asan_rel_ng': 'gn_release_x86_asan_no_lsan',
'v8_win_dbg': 'gn_debug_x86_trybot',
'v8_win_compile_dbg': 'gn_debug_x86_trybot',
'v8_win_rel_ng': 'gn_release_x86_trybot',
'v8_win_nosnap_shared_rel_ng':
'gn_release_x86_no_snap_shared_minimal_symbols',
- 'v8_win64_asan_rel_ng': 'gn_release_x64_asan_no_lsan',
+ # TODO(machenbach): Rename bot to msvc.
+ 'v8_win64_msvc_compile_rel': 'gn_release_x64_msvc',
'v8_win64_dbg': 'gn_debug_x64_minimal_symbols',
'v8_win64_rel_ng': 'gn_release_x64_trybot',
'v8_mac_rel_ng': 'gn_release_x86_trybot',
@@ -355,11 +364,18 @@
'gn', 'release_bot', 'simulate_mipsel', 'swarming'],
'gn_release_simulate_mips64el': [
'gn', 'release_bot', 'simulate_mips64el', 'swarming'],
+ 'gn_release_simulate_ppc': [
+ 'gn', 'release_bot', 'simulate_ppc', 'swarming'],
+ 'gn_release_simulate_ppc64': [
+ 'gn', 'release_bot', 'simulate_ppc64', 'swarming'],
+ 'gn_release_simulate_s390': [
+ 'gn', 'release_bot', 'simulate_s390', 'swarming'],
+ 'gn_release_simulate_s390x': [
+ 'gn', 'release_bot', 'simulate_s390x', 'swarming'],
# GN debug configs for arm.
'gn_debug_arm': [
- 'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming',
- 'no_custom_libcxx'],
+ 'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
# GN release configs for arm.
'gn_release_arm': [
@@ -394,12 +410,16 @@
'gn', 'release_bot', 'x64', 'cfi', 'swarming'],
'gn_release_x64_cfi_clusterfuzz': [
'gn', 'release_bot', 'x64', 'cfi_clusterfuzz'],
- 'gn_release_x64_clang': [
- 'gn', 'release_bot', 'x64', 'clang', 'swarming'],
+ 'gn_release_x64_msvc': [
+ 'gn', 'release_bot', 'x64', 'msvc', 'swarming'],
'gn_release_x64_concurrent_marking': [
'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'swarming'],
'gn_release_x64_correctness_fuzzer' : [
- 'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
+ 'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer', 'swarming'],
+ 'gn_release_x64_fuchsia': [
+ 'gn', 'release_bot', 'x64', 'fuchsia', 'swarming'],
+ 'gn_release_x64_fuchsia_trybot': [
+ 'gn', 'release_trybot', 'x64', 'fuchsia', 'swarming'],
'gn_release_x64_gcc_coverage': [
'gn', 'release_bot', 'x64', 'coverage', 'gcc'],
'gn_release_x64_internal': [
@@ -418,20 +438,12 @@
'minimal_symbols', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
- 'gn_release_x64_ubsan_recover': [
- 'gn', 'release_bot', 'x64', 'ubsan_recover', 'swarming'],
'gn_release_x64_ubsan_vptr': [
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'swarming'],
'gn_release_x64_ubsan_vptr_recover_edge': [
'gn', 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover', 'swarming'],
'gn_release_x64_ubsan_vptr_minimal_symbols': [
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols', 'swarming'],
- 'gn_release_x64_valgrind': [
- 'gn', 'release_bot', 'x64', 'swarming', 'valgrind',
- 'no_custom_libcxx'],
- 'gn_release_x64_valgrind_trybot': [
- 'gn', 'release_trybot', 'x64', 'swarming', 'valgrind',
- 'no_custom_libcxx'],
'gn_release_x64_verify_csa': [
'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
@@ -446,15 +458,12 @@
'v8_optimized_debug', 'x64', 'asan', 'edge', 'swarming'],
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
+ 'gn_debug_x64_fuchsia': [
+ 'gn', 'debug_bot', 'x64', 'fuchsia', 'swarming'],
'gn_debug_x64_gcc': [
'gn', 'debug_bot', 'x64', 'gcc'],
'gn_debug_x64_minimal_symbols': [
'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
- 'gn_debug_x64_trybot': [
- 'gn', 'debug_trybot', 'x64', 'swarming'],
- 'gn_debug_x64_valgrind': [
- 'gn', 'debug_bot', 'x64', 'swarming', 'valgrind',
- 'no_custom_libcxx'],
# GN debug configs for x86.
'gn_debug_x86': [
@@ -478,6 +487,14 @@
# GN release configs for x86.
'gn_release_x86': [
'gn', 'release_bot', 'x86', 'swarming'],
+ 'gn_release_x86_asan_no_lsan': [
+ 'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming'],
+ 'gn_release_x86_asan_no_lsan_verify_heap': [
+ 'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming',
+ 'v8_verify_heap'],
+ 'gn_release_x86_asan_no_lsan_verify_heap_dchecks': [
+ 'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming',
+ 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap'],
'gn_release_x86_disassembler': [
'gn', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gn_release_x86_gcc': [
@@ -513,22 +530,12 @@
# Gyp release configs for mips.
'gyp_release_mips_no_snap_no_i18n': [
- 'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n',
- 'v8_snapshot_none'],
-
- # Gyp release configs for simulators.
- 'gyp_release_simulate_ppc': [
- 'gyp', 'release_bot', 'simulate_ppc', 'swarming'],
- 'gyp_release_simulate_ppc64': [
- 'gyp', 'release_bot', 'simulate_ppc64', 'swarming'],
- 'gyp_release_simulate_s390': [
- 'gyp', 'release_bot', 'simulate_s390', 'swarming'],
- 'gyp_release_simulate_s390x': [
- 'gyp', 'release_bot', 'simulate_s390x', 'swarming'],
+ 'gyp', 'release', 'mips', 'crosscompile', 'no_sysroot', 'static',
+ 'v8_no_i18n', 'v8_snapshot_none'],
# Gyp release configs for x64.
'gyp_release_x64': [
- 'gyp', 'release_bot', 'x64', 'swarming'],
+ 'gyp', 'release_bot', 'x64', 'no_sysroot', 'swarming'],
},
'mixins': {
@@ -608,6 +615,10 @@
'gyp_defines': 'sanitizer_coverage=trace-pc-guard',
},
+ 'fuchsia': {
+ 'gn_args': 'target_os="fuchsia"',
+ },
+
'gcc': {
# TODO(machenbach): Remove cxx11 restriction when updating gcc version.
'gn_args': 'is_clang=false use_cxx11=true',
@@ -660,9 +671,13 @@
'use_prebuilt_instrumented_libraries=true'),
},
- # TODO(machenbach): Remove when http://crbug.com/738814 is resolved.
- 'no_custom_libcxx': {
- 'gn_args': 'use_custom_libcxx=false',
+ 'msvc': {
+ 'gn_args': 'is_clang=false',
+ },
+
+ 'no_sysroot': {
+ 'gn_args': 'use_sysroot=false',
+ 'gyp_defines': 'use_sysroot=0',
},
'release': {
@@ -745,11 +760,6 @@
'gyp_defines': 'clang=1 tsan=1',
},
- 'ubsan_recover': {
- # Ubsan with recovery.
- 'gn_args': 'is_ubsan=true is_ubsan_no_recover=false',
- },
-
'ubsan_vptr': {
# TODO(krasin): Remove is_ubsan_no_recover=true when
# https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
@@ -762,11 +772,6 @@
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
},
- 'valgrind': {
- 'gn_args': 'v8_has_valgrind=true',
- 'gyp_defines': 'has_valgrind=1',
- },
-
'v8_no_i18n': {
'gn_args': 'v8_enable_i18n_support=false icu_use_data_file=false',
'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
diff --git a/deps/v8/infra/testing/OWNERS b/deps/v8/infra/testing/OWNERS
new file mode 100644
index 0000000000..f0129f758e
--- /dev/null
+++ b/deps/v8/infra/testing/OWNERS
@@ -0,0 +1,4 @@
+set noparent
+
+machenbach@chromium.org
+sergiyb@chromium.org \ No newline at end of file
diff --git a/deps/v8/infra/testing/README.md b/deps/v8/infra/testing/README.md
new file mode 100644
index 0000000000..3099062477
--- /dev/null
+++ b/deps/v8/infra/testing/README.md
@@ -0,0 +1,50 @@
+# Src-side test specifications
+
+The infra/testing folder in V8 contains test specifications, consumed and
+executed by the continuous infrastructure. Every master has an optional file
+named `<mastername>.pyl`. E.g. `tryserver.v8.pyl`.
+
+The structure of each file is:
+```
+{
+ <buildername>: [
+ {
+ 'name': <test-spec name>,
+ 'variant': <variant name>,
+ 'shards': <number of shards>,
+ },
+ ...
+ ],
+ ...
+}
+```
+The `<buildername>` is a string name of the builder to execute the tests.
+`<test-spec name>` is a label defining a test specification matching the
+[infra-side](https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipe_modules/v8/testing.py#58).
+The `<variant name>` is a testing variant as specified in
+`v8/tools/testrunner/local/variants.py`. `<number of shards>` is optional
+(default 1), but can be provided to increase the swarming shards for
+long-running tests.
+
+Example:
+```
+{
+ 'v8_linux64_rel_ng_triggered': [
+ {'name': 'v8testing', 'variant': 'nooptimization', 'shards': 2},
+ ],
+}
+```
+
+## Guidelines
+
+Please keep trybots and continuous bots in sync. E.g. add the same configuration
+for the release and debug CI bots and the corresponding trybot (where
+applicable). E.g.
+
+```
+tryserver.v8:
+ v8_linux64_rel_ng_triggered
+client.v8:
+ V8 Linux64
+ V8 Linux64 - debug
+``` \ No newline at end of file
diff --git a/deps/v8/infra/testing/client.v8.pyl b/deps/v8/infra/testing/client.v8.pyl
new file mode 100644
index 0000000000..80d75a920a
--- /dev/null
+++ b/deps/v8/infra/testing/client.v8.pyl
@@ -0,0 +1,13 @@
+# Copyright 2017 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ ### Example configuration for CI bots (please keep as reference).
+ # 'V8 Linux64': [
+ # {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
+ # ],
+ # 'V8 Linux64 - debug': [
+ # {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
+ # ],
+} \ No newline at end of file
diff --git a/deps/v8/infra/testing/tryserver.v8.pyl b/deps/v8/infra/testing/tryserver.v8.pyl
new file mode 100644
index 0000000000..f296779c4e
--- /dev/null
+++ b/deps/v8/infra/testing/tryserver.v8.pyl
@@ -0,0 +1,10 @@
+# Copyright 2017 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ ### Example configuration for trybots (please keep as reference).
+ # 'v8_linux64_rel_ng_triggered': [
+ # {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
+ # ],
+} \ No newline at end of file
diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc
index 9d8058da41..ab6f0dd8bf 100644
--- a/deps/v8/samples/hello-world.cc
+++ b/deps/v8/samples/hello-world.cc
@@ -13,8 +13,8 @@ int main(int argc, char* argv[]) {
// Initialize V8.
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
// Create a new Isolate and make it the current one.
@@ -56,7 +56,6 @@ int main(int argc, char* argv[]) {
isolate->Dispose();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
delete create_params.array_buffer_allocator;
return 0;
}
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 7ee85a84f9..f22407a837 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -701,8 +701,8 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
map<string, string> options;
string file;
@@ -728,7 +728,7 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "Error initializing processor.\n");
return 1;
}
- if (!ProcessEntries(platform, &processor, kSampleSize, kSampleRequests))
+ if (!ProcessEntries(platform.get(), &processor, kSampleSize, kSampleRequests))
return 1;
PrintMap(&output);
}
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 0f8e2a4fdc..81b028720c 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -66,8 +66,8 @@ static bool run_shell;
int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate::CreateParams create_params;
@@ -85,13 +85,12 @@ int main(int argc, char* argv[]) {
return 1;
}
v8::Context::Scope context_scope(context);
- result = RunMain(isolate, platform, argc, argv);
- if (run_shell) RunShell(context, platform);
+ result = RunMain(isolate, platform.get(), argc, argv);
+ if (run_shell) RunShell(context, platform.get());
}
isolate->Dispose();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
delete create_params.array_buffer_allocator;
return result;
}
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni
index 80cd1bd390..756413e5ce 100644
--- a/deps/v8/snapshot_toolchain.gni
+++ b/deps/v8/snapshot_toolchain.gni
@@ -63,6 +63,12 @@ if (v8_snapshot_toolchain == "") {
} else if (current_os == "win" && host_os == "mac" && is_clang) {
# This is a mac -> win cross-compile, which is only supported w/ clang.
v8_snapshot_toolchain = "//build/toolchain/mac:clang_${v8_current_cpu}"
+ } else if (host_cpu == "x64" &&
+ (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
+ # We don't support snapshot generation for big-endian targets,
+ # therefore snapshots will need to be built using native mksnapshot
+ # in combination with qemu
+ v8_snapshot_toolchain = current_toolchain
} else if (host_cpu == "x64") {
# This is a cross-compile from an x64 host to either a non-Intel target
# cpu or a different target OS. Clang will always be used by default on the
@@ -76,11 +82,9 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
- } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
- v8_current_cpu == "mips64") {
+ } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
_cpus = "x64_v8_${v8_current_cpu}"
- } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" ||
- v8_current_cpu == "mips") {
+ } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
_cpus = "x86_v8_${v8_current_cpu}"
} else {
# This branch should not be reached; leave _cpus blank so the assert
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 44e4dc517a..5f6fedcd36 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -3,5 +3,8 @@ per-file intl.*=mnita@google.com
per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org
+per-file objects-body-descriptors*=hpayer@chromium.org
+per-file objects-body-descriptors*=mlippautz@chromium.org
+per-file objects-body-descriptors*=ulan@chromium.org
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index fd991f5167..adaa0be3c6 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -20,10 +20,9 @@ namespace internal {
Handle<AccessorInfo> Accessors::MakeAccessor(
Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
- AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes) {
+ AccessorNameBooleanSetterCallback setter) {
Factory* factory = isolate->factory();
Handle<AccessorInfo> info = factory->NewAccessorInfo();
- info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
@@ -44,13 +43,12 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
return info;
}
-
static V8_INLINE bool CheckForName(Handle<Name> name,
- Handle<String> property_name,
- int offset,
- int* object_offset) {
+ Handle<String> property_name, int offset,
+ FieldIndex::Encoding encoding,
+ FieldIndex* index) {
if (Name::Equals(name, property_name)) {
- *object_offset = offset;
+ *index = FieldIndex::ForInObjectOffset(offset, encoding);
return true;
}
return false;
@@ -60,18 +58,17 @@ static V8_INLINE bool CheckForName(Handle<Name> name,
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
- int* object_offset) {
+ FieldIndex* index) {
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
- return
- CheckForName(name, isolate->factory()->length_string(),
- JSArray::kLengthOffset, object_offset);
+ return CheckForName(name, isolate->factory()->length_string(),
+ JSArray::kLengthOffset, FieldIndex::kTagged, index);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(name, isolate->factory()->length_string(),
- String::kLengthOffset, object_offset);
+ String::kLengthOffset, FieldIndex::kTagged, index);
}
return false;
@@ -135,12 +132,9 @@ void Accessors::ArgumentsIteratorGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeArgumentsIteratorInfo(Isolate* isolate) {
Handle<Name> name = isolate->factory()->iterator_symbol();
- return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr);
}
@@ -220,14 +214,9 @@ void Accessors::ArrayLengthSetter(
}
}
-
-Handle<AccessorInfo> Accessors::ArrayLengthInfo(
- Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->length_string(),
- &ArrayLengthGetter,
- &ArrayLengthSetter,
- attributes);
+Handle<AccessorInfo> Accessors::MakeArrayLengthInfo(Isolate* isolate) {
+ return MakeAccessor(isolate, isolate->factory()->length_string(),
+ &ArrayLengthGetter, &ArrayLengthSetter);
}
//
@@ -268,10 +257,10 @@ void Accessors::ModuleNamespaceEntrySetter(
}
}
-Handle<AccessorInfo> Accessors::ModuleNamespaceEntryInfo(
- Isolate* isolate, Handle<String> name, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeModuleNamespaceEntryInfo(
+ Isolate* isolate, Handle<String> name) {
return MakeAccessor(isolate, name, &ModuleNamespaceEntryGetter,
- &ModuleNamespaceEntrySetter, attributes);
+ &ModuleNamespaceEntrySetter);
}
@@ -302,11 +291,9 @@ void Accessors::StringLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
-
-Handle<AccessorInfo> Accessors::StringLengthInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
- &StringLengthGetter, nullptr, attributes);
+ &StringLengthGetter, nullptr);
}
@@ -327,13 +314,10 @@ void Accessors::ScriptColumnOffsetGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptColumnOffsetInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("column_offset")));
- return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr);
}
@@ -353,12 +337,10 @@ void Accessors::ScriptIdGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptIdInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptIdInfo(Isolate* isolate) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
- return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr, attributes);
+ return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr);
}
@@ -378,11 +360,9 @@ void Accessors::ScriptNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptNameInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
- &ScriptNameGetter, nullptr, attributes);
+ &ScriptNameGetter, nullptr);
}
@@ -402,11 +382,9 @@ void Accessors::ScriptSourceGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptSourceInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptSourceInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->source_string(),
- &ScriptSourceGetter, nullptr, attributes);
+ &ScriptSourceGetter, nullptr);
}
@@ -427,13 +405,10 @@ void Accessors::ScriptLineOffsetGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptLineOffsetInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("line_offset")));
- return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr);
}
@@ -454,12 +429,10 @@ void Accessors::ScriptTypeGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptTypeInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptTypeInfo(Isolate* isolate) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
- return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr, attributes);
+ return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr);
}
@@ -480,13 +453,11 @@ void Accessors::ScriptCompilationTypeGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptCompilationTypeInfo(
+ Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("compilation_type")));
- return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr);
}
@@ -506,11 +477,10 @@ void Accessors::ScriptSourceUrlGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
- Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate, isolate->factory()->source_url_string(),
- &ScriptSourceUrlGetter, nullptr, attributes);
+Handle<AccessorInfo> Accessors::MakeScriptSourceUrlInfo(Isolate* isolate) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("source_url")));
+ return MakeAccessor(isolate, name, &ScriptSourceUrlGetter, nullptr);
}
@@ -531,11 +501,11 @@ void Accessors::ScriptSourceMappingUrlGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
- Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate, isolate->factory()->source_mapping_url_string(),
- &ScriptSourceMappingUrlGetter, nullptr, attributes);
+Handle<AccessorInfo> Accessors::MakeScriptSourceMappingUrlInfo(
+ Isolate* isolate) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("source_mapping_url")));
+ return MakeAccessor(isolate, name, &ScriptSourceMappingUrlGetter, nullptr);
}
@@ -555,13 +525,10 @@ void Accessors::ScriptContextDataGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-
-Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptContextDataInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("context_data")));
- return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr);
}
@@ -591,13 +558,10 @@ void Accessors::ScriptEvalFromScriptGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script")));
- return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr);
}
@@ -621,13 +585,12 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptPositionInfo(
+ Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script_position")));
return MakeAccessor(isolate, name, &ScriptEvalFromScriptPositionGetter,
- nullptr, attributes);
+ nullptr);
}
@@ -654,13 +617,12 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeScriptEvalFromFunctionNameInfo(
+ Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_function_name")));
- return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter, nullptr,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter,
+ nullptr);
}
@@ -704,14 +666,9 @@ void Accessors::FunctionPrototypeSetter(
info.GetReturnValue().Set(true);
}
-
-Handle<AccessorInfo> Accessors::FunctionPrototypeInfo(
- Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->prototype_string(),
- &FunctionPrototypeGetter,
- &FunctionPrototypeSetter,
- attributes);
+Handle<AccessorInfo> Accessors::MakeFunctionPrototypeInfo(Isolate* isolate) {
+ return MakeAccessor(isolate, isolate->factory()->prototype_string(),
+ &FunctionPrototypeGetter, &FunctionPrototypeSetter);
}
@@ -724,6 +681,7 @@ void Accessors::FunctionLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -735,11 +693,9 @@ void Accessors::FunctionLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-Handle<AccessorInfo> Accessors::FunctionLengthInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeFunctionLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
- &FunctionLengthGetter, &ReconfigureToDataProperty,
- attributes);
+ &FunctionLengthGetter, &ReconfigureToDataProperty);
}
@@ -759,11 +715,9 @@ void Accessors::FunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-Handle<AccessorInfo> Accessors::FunctionNameInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeFunctionNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
- &FunctionNameGetter, &ReconfigureToDataProperty,
- attributes);
+ &FunctionNameGetter, &ReconfigureToDataProperty);
}
@@ -771,12 +725,11 @@ Handle<AccessorInfo> Accessors::FunctionNameInfo(
// Accessors::FunctionArguments
//
+namespace {
-static Handle<Object> ArgumentsForInlinedFunction(
- JavaScriptFrame* frame,
- Handle<JSFunction> inlined_function,
- int inlined_frame_index) {
- Isolate* isolate = inlined_function->GetIsolate();
+Handle<JSObject> ArgumentsForInlinedFunction(JavaScriptFrame* frame,
+ int inlined_frame_index) {
+ Isolate* isolate = frame->isolate();
Factory* factory = isolate->factory();
TranslatedState translated_values(frame);
@@ -788,7 +741,9 @@ static Handle<Object> ArgumentsForInlinedFunction(
&argument_count);
TranslatedFrame::iterator iter = translated_frame->begin();
- // Skip the function.
+ // Materialize the function.
+ bool should_deoptimize = iter->IsMaterializedObject();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(iter->GetValue());
iter++;
// Skip the receiver.
@@ -796,9 +751,8 @@ static Handle<Object> ArgumentsForInlinedFunction(
argument_count--;
Handle<JSObject> arguments =
- factory->NewArgumentsObject(inlined_function, argument_count);
+ factory->NewArgumentsObject(function, argument_count);
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
- bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
// If we materialize any object, we should deoptimize the frame because we
// might alias an object that was eliminated by escape analysis.
@@ -817,9 +771,7 @@ static Handle<Object> ArgumentsForInlinedFunction(
return arguments;
}
-
-static int FindFunctionInFrame(JavaScriptFrame* frame,
- Handle<JSFunction> function) {
+int FindFunctionInFrame(JavaScriptFrame* frame, Handle<JSFunction> function) {
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
for (size_t i = frames.size(); i != 0; i--) {
@@ -830,69 +782,66 @@ static int FindFunctionInFrame(JavaScriptFrame* frame,
return -1;
}
+Handle<JSObject> GetFrameArguments(Isolate* isolate,
+ JavaScriptFrameIterator* it,
+ int function_index) {
+ JavaScriptFrame* frame = it->frame();
-namespace {
+ if (function_index > 0) {
+ // The function in question was inlined. Inlined functions have the
+ // correct number of arguments and no allocated arguments object, so
+ // we can construct a fresh one by interpreting the function's
+ // deoptimization input data.
+ return ArgumentsForInlinedFunction(frame, function_index);
+ }
-Handle<Object> GetFunctionArguments(Isolate* isolate,
- Handle<JSFunction> function) {
- // Find the top invocation of the function by traversing frames.
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- int function_index = FindFunctionInFrame(frame, function);
- if (function_index < 0) continue;
-
- if (function_index > 0) {
- // The function in question was inlined. Inlined functions have the
- // correct number of arguments and no allocated arguments object, so
- // we can construct a fresh one by interpreting the function's
- // deoptimization input data.
- return ArgumentsForInlinedFunction(frame, function, function_index);
- }
+ // Find the frame that holds the actual arguments passed to the function.
+ if (it->frame()->has_adapted_arguments()) {
+ it->AdvanceOneFrame();
+ DCHECK(it->frame()->is_arguments_adaptor());
+ }
+ frame = it->frame();
- // Find the frame that holds the actual arguments passed to the function.
- if (it.frame()->has_adapted_arguments()) {
- it.AdvanceOneFrame();
- DCHECK(it.frame()->is_arguments_adaptor());
- }
- frame = it.frame();
-
- // Get the number of arguments and construct an arguments object
- // mirror for the right frame.
- const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
- function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
-
- // Copy the parameters to the arguments object.
- DCHECK(array->length() == length);
- for (int i = 0; i < length; i++) {
- Object* value = frame->GetParameter(i);
- if (value->IsTheHole(isolate)) {
- // Generators currently use holes as dummy arguments when resuming. We
- // must not leak those.
- DCHECK(IsResumableFunction(function->shared()->kind()));
- value = isolate->heap()->undefined_value();
- }
- array->set(i, value);
+ // Get the number of arguments and construct an arguments object
+ // mirror for the right frame and the underlying function.
+ const int length = frame->ComputeParametersCount();
+ Handle<JSFunction> function(frame->function(), isolate);
+ Handle<JSObject> arguments =
+ isolate->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+
+ // Copy the parameters to the arguments object.
+ DCHECK(array->length() == length);
+ for (int i = 0; i < length; i++) {
+ Object* value = frame->GetParameter(i);
+ if (value->IsTheHole(isolate)) {
+ // Generators currently use holes as dummy arguments when resuming. We
+ // must not leak those.
+ DCHECK(IsResumableFunction(function->shared()->kind()));
+ value = isolate->heap()->undefined_value();
}
- arguments->set_elements(*array);
-
- // Return the freshly allocated arguments object.
- return arguments;
+ array->set(i, value);
}
+ arguments->set_elements(*array);
- // No frame corresponding to the given function found. Return null.
- return isolate->factory()->null_value();
+ // Return the freshly allocated arguments object.
+ return arguments;
}
} // namespace
-
-Handle<JSObject> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
- Handle<Object> arguments =
- GetFunctionArguments(function->GetIsolate(), function);
- CHECK(arguments->IsJSObject());
- return Handle<JSObject>::cast(arguments);
+Handle<JSObject> Accessors::FunctionGetArguments(JavaScriptFrame* frame,
+ int inlined_jsframe_index) {
+ Isolate* isolate = frame->isolate();
+ Address requested_frame_fp = frame->fp();
+ // Forward a frame iterator to the requested frame. This is needed because we
+ // potentially need for advance it to the arguments adaptor frame later.
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (it.frame()->fp() != requested_frame_fp) continue;
+ return GetFrameArguments(isolate, &it, inlined_jsframe_index);
+ }
+ UNREACHABLE(); // Requested frame not found.
+ return Handle<JSObject>();
}
@@ -903,18 +852,24 @@ void Accessors::FunctionArgumentsGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Object> result =
- function->shared()->native()
- ? Handle<Object>::cast(isolate->factory()->null_value())
- : GetFunctionArguments(isolate, function);
+ Handle<Object> result = isolate->factory()->null_value();
+ if (!function->shared()->native()) {
+ // Find the top invocation of the function by traversing frames.
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ int function_index = FindFunctionInFrame(frame, function);
+ if (function_index >= 0) {
+ result = GetFrameArguments(isolate, &it, function_index);
+ break;
+ }
+ }
+ }
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-Handle<AccessorInfo> Accessors::FunctionArgumentsInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeFunctionArgumentsInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->arguments_string(),
- &FunctionArgumentsGetter, nullptr, attributes);
+ &FunctionArgumentsGetter, nullptr);
}
@@ -1088,11 +1043,9 @@ void Accessors::FunctionCallerGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-Handle<AccessorInfo> Accessors::FunctionCallerInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeFunctionCallerInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->caller_string(),
- &FunctionCallerGetter, nullptr, attributes);
+ &FunctionCallerGetter, nullptr);
}
@@ -1118,11 +1071,9 @@ void Accessors::BoundFunctionLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeBoundFunctionLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
- &BoundFunctionLengthGetter, &ReconfigureToDataProperty,
- attributes);
+ &BoundFunctionLengthGetter, &ReconfigureToDataProperty);
}
//
@@ -1145,11 +1096,9 @@ void Accessors::BoundFunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-Handle<AccessorInfo> Accessors::BoundFunctionNameInfo(
- Isolate* isolate, PropertyAttributes attributes) {
+Handle<AccessorInfo> Accessors::MakeBoundFunctionNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
- &BoundFunctionNameGetter, &ReconfigureToDataProperty,
- attributes);
+ &BoundFunctionNameGetter, &ReconfigureToDataProperty);
}
//
@@ -1163,7 +1112,8 @@ MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
RETURN_ON_EXCEPTION(
isolate,
JSReceiver::SetProperty(error, isolate->factory()->stack_trace_symbol(),
- isolate->factory()->undefined_value(), STRICT),
+ isolate->factory()->undefined_value(),
+ LanguageMode::kStrict),
JSReceiver);
return error;
}
@@ -1261,12 +1211,9 @@ void Accessors::ErrorStackSetter(
Accessors::ReconfigureToDataProperty(name, val, info);
}
-Handle<AccessorInfo> Accessors::ErrorStackInfo(Isolate* isolate,
- PropertyAttributes attributes) {
- Handle<AccessorInfo> info =
- MakeAccessor(isolate, isolate->factory()->stack_string(),
- &ErrorStackGetter, &ErrorStackSetter, attributes);
- return info;
+Handle<AccessorInfo> Accessors::MakeErrorStackInfo(Isolate* isolate) {
+ return MakeAccessor(isolate, isolate->factory()->stack_string(),
+ &ErrorStackGetter, &ErrorStackSetter);
}
} // namespace internal
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 52420d91de..70e6a9200e 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -17,34 +17,36 @@ namespace internal {
class AccessorInfo;
template <typename T>
class Handle;
+class FieldIndex;
+class JavaScriptFrame;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_INFO_LIST(V) \
- V(ArgumentsIterator) \
- V(ArrayLength) \
- V(BoundFunctionLength) \
- V(BoundFunctionName) \
- V(ErrorStack) \
- V(FunctionArguments) \
- V(FunctionCaller) \
- V(FunctionName) \
- V(FunctionLength) \
- V(FunctionPrototype) \
- V(ScriptColumnOffset) \
- V(ScriptCompilationType) \
- V(ScriptContextData) \
- V(ScriptEvalFromScript) \
- V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName) \
- V(ScriptId) \
- V(ScriptLineOffset) \
- V(ScriptName) \
- V(ScriptSource) \
- V(ScriptType) \
- V(ScriptSourceUrl) \
- V(ScriptSourceMappingUrl) \
- V(StringLength)
+#define ACCESSOR_INFO_LIST(V) \
+ V(arguments_iterator, ArgumentsIterator) \
+ V(array_length, ArrayLength) \
+ V(bound_function_length, BoundFunctionLength) \
+ V(bound_function_name, BoundFunctionName) \
+ V(error_stack, ErrorStack) \
+ V(function_arguments, FunctionArguments) \
+ V(function_caller, FunctionCaller) \
+ V(function_name, FunctionName) \
+ V(function_length, FunctionLength) \
+ V(function_prototype, FunctionPrototype) \
+ V(script_column_offset, ScriptColumnOffset) \
+ V(script_compilation_type, ScriptCompilationType) \
+ V(script_context_data, ScriptContextData) \
+ V(script_eval_from_script, ScriptEvalFromScript) \
+ V(script_eval_from_script_position, ScriptEvalFromScriptPosition) \
+ V(script_eval_from_function_name, ScriptEvalFromFunctionName) \
+ V(script_id, ScriptId) \
+ V(script_line_offset, ScriptLineOffset) \
+ V(script_name, ScriptName) \
+ V(script_source, ScriptSource) \
+ V(script_type, ScriptType) \
+ V(script_source_url, ScriptSourceUrl) \
+ V(script_source_mapping_url, ScriptSourceMappingUrl) \
+ V(string_length, StringLength)
#define ACCESSOR_SETTER_LIST(V) \
V(ArrayLengthSetter) \
@@ -57,45 +59,36 @@ class Handle;
class Accessors : public AllStatic {
public:
- // Accessor descriptors.
-#define ACCESSOR_INFO_DECLARATION(name) \
- static void name##Getter( \
- v8::Local<v8::Name> name, \
- const v8::PropertyCallbackInfo<v8::Value>& info); \
- static Handle<AccessorInfo> name##Info( \
- Isolate* isolate, \
- PropertyAttributes attributes);
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
+#define ACCESSOR_GETTER_DECLARATION(accessor_name, AccessorName) \
+ static void AccessorName##Getter( \
+ v8::Local<v8::Name> name, \
+ const v8::PropertyCallbackInfo<v8::Value>& info);
+ ACCESSOR_INFO_LIST(ACCESSOR_GETTER_DECLARATION)
+#undef ACCESSOR_GETTER_DECLARATION
-#define ACCESSOR_SETTER_DECLARATION(name) \
- static void name(v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
- const v8::PropertyCallbackInfo<v8::Boolean>& info);
+#define ACCESSOR_SETTER_DECLARATION(accessor_name) \
+ static void accessor_name( \
+ v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
+ const v8::PropertyCallbackInfo<v8::Boolean>& info);
ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_SETTER_DECLARATION
static void ModuleNamespaceEntryGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info);
- static Handle<AccessorInfo> ModuleNamespaceEntryInfo(
- Isolate* isolate, Handle<String> name, PropertyAttributes attributes);
-
- enum DescriptorId {
-#define ACCESSOR_INFO_DECLARATION(name) \
- k##name##Getter, \
- k##name##Setter,
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
- descriptorCount
- };
+ static Handle<AccessorInfo> MakeModuleNamespaceEntryInfo(Isolate* isolate,
+ Handle<String> name);
- // Accessor functions called directly from the runtime system.
- static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
+ // Accessor function called directly from the runtime system. Returns the
+ // newly materialized arguments object for the given {frame}. Note that for
+ // optimized frames it is possible to specify an {inlined_jsframe_index}.
+ static Handle<JSObject> FunctionGetArguments(JavaScriptFrame* frame,
+ int inlined_jsframe_index);
// Returns true for properties that are accessors to object fields.
- // If true, *object_offset contains offset of object field.
+ // If true, the matching FieldIndex is returned through |field_index|.
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
- int* object_offset);
+ FieldIndex* field_index);
// Create an AccessorInfo. The setter is optional (can be nullptr).
//
@@ -111,7 +104,15 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
- AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes);
+ AccessorNameBooleanSetterCallback setter);
+
+ private:
+#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \
+ static Handle<AccessorInfo> Make##AccessorName##Info(Isolate* isolate);
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
+
+ friend class Heap;
};
} // namespace internal
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 4b0d029588..2b0bf727e5 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -12,7 +12,7 @@ namespace internal {
RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
- if (map_ != NULL) return;
+ if (map_ != nullptr) return;
map_ = new HeapObjectToIndexHashMap();
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 4753d2d855..ab7b33a085 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -6,11 +6,8 @@
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
-#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
-#include "src/base/utils/random-number-generator.h"
-#include "src/flags.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -106,28 +103,33 @@ void AlignedFree(void *ptr) {
#endif
}
-VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(base::OS::ReserveRegion(size, hint)), size_(size) {
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address_, size_);
-#endif
+byte* AllocateSystemPage(void* address, size_t* allocated) {
+ size_t page_size = base::OS::AllocatePageSize();
+ void* result = base::OS::Allocate(address, page_size, page_size,
+ base::OS::MemoryPermission::kReadWrite);
+ if (result != nullptr) *allocated = page_size;
+ return static_cast<byte*>(result);
}
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
- address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
+ size_t page_size = base::OS::AllocatePageSize();
+ size_t alloc_size = RoundUp(size, page_size);
+ address_ = base::OS::Allocate(hint, alloc_size, alignment,
+ base::OS::MemoryPermission::kNoAccess);
+ if (address_ != nullptr) {
+ size_ = alloc_size;
#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address_, size_);
+ __lsan_register_root_region(address_, size_);
#endif
+ }
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = base::OS::ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
+ Free();
}
}
@@ -136,24 +138,19 @@ void VirtualMemory::Reset() {
size_ = 0;
}
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- CHECK(InVM(address, size));
- return base::OS::CommitRegion(address, size, is_executable);
-}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
+bool VirtualMemory::SetPermissions(void* address, size_t size,
+ base::OS::MemoryPermission access) {
CHECK(InVM(address, size));
- return base::OS::UncommitRegion(address, size);
-}
-
-bool VirtualMemory::Guard(void* address) {
- CHECK(InVM(address, base::OS::CommitPageSize()));
- base::OS::Guard(address, base::OS::CommitPageSize());
- return true;
+ bool result = base::OS::SetPermissions(address, size, access);
+ DCHECK(result);
+ USE(result);
+ return result;
}
-size_t VirtualMemory::ReleasePartial(void* free_start) {
+size_t VirtualMemory::Release(void* free_start) {
DCHECK(IsReserved());
+ DCHECK(IsAddressAligned(static_cast<Address>(free_start),
+ base::OS::CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
@@ -166,14 +163,12 @@ size_t VirtualMemory::ReleasePartial(void* free_start) {
__lsan_unregister_root_region(address_, size_);
__lsan_register_root_region(address_, size_ - free_size);
#endif
- const bool result = base::OS::ReleasePartialRegion(free_start, free_size);
- USE(result);
- DCHECK(result);
+ CHECK(base::OS::Release(free_start, free_size));
size_ -= free_size;
return free_size;
}
-void VirtualMemory::Release() {
+void VirtualMemory::Free() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
@@ -181,9 +176,10 @@ void VirtualMemory::Release() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
- bool result = base::OS::ReleaseRegion(address, size);
- USE(result);
- DCHECK(result);
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(address, size);
+#endif
+ CHECK(base::OS::Free(address, size));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
@@ -208,116 +204,17 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
- VirtualMemory first_try(size, alignment, hint);
+ VirtualMemory first_try(size, hint, alignment);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- VirtualMemory second_try(size, alignment, hint);
+ VirtualMemory second_try(size, hint, alignment);
result->TakeControl(&second_try);
return result->IsReserved();
}
-namespace {
-
-struct RNGInitializer {
- static void Construct(void* mem) {
- auto rng = new (mem) base::RandomNumberGenerator();
- int64_t random_seed = FLAG_random_seed;
- if (random_seed) {
- rng->SetSeed(random_seed);
- }
- }
-};
-
-} // namespace
-
-static base::LazyInstance<base::RandomNumberGenerator, RNGInitializer>::type
- random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-void* GetRandomMmapAddr() {
-#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
- defined(THREAD_SANITIZER)
- // Dynamic tools do not support custom mmap addresses.
- return NULL;
-#endif
- uintptr_t raw_addr;
- random_number_generator.Pointer()->NextBytes(&raw_addr, sizeof(raw_addr));
-#if V8_OS_POSIX
-#if V8_TARGET_ARCH_X64
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#elif V8_TARGET_ARCH_PPC64
-#if V8_OS_AIX
- // AIX: 64 bits of virtual addressing, but we limit address range to:
- // a) minimize Segment Lookaside Buffer (SLB) misses and
- raw_addr &= V8_UINT64_C(0x3ffff000);
- // Use extra address space to isolate the mmap regions.
- raw_addr += V8_UINT64_C(0x400000000000);
-#elif V8_TARGET_BIG_ENDIAN
- // Big-endian Linux: 44 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x03fffffff000);
-#else
- // Little-endian Linux: 48 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#endif
-#elif V8_TARGET_ARCH_S390X
- // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
- // of virtual addressing. Truncate to 40 bits to allow kernel chance to
- // fulfill request.
- raw_addr &= V8_UINT64_C(0xfffffff000);
-#elif V8_TARGET_ARCH_S390
- // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
- // to fulfill request.
- raw_addr &= 0x1ffff000;
-#else
- raw_addr &= 0x3ffff000;
-
-#ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-#elif V8_OS_AIX
- // The range 0x30000000 - 0xD0000000 is available on AIX;
- // choose the upper range.
- raw_addr += 0x90000000;
-#else
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
- // 10.6 and 10.7.
- raw_addr += 0x20000000;
-#endif
-#endif
-#else // V8_OS_WIN
-// The address range used to randomize RWX allocations in OS::Allocate
-// Try not to map pages into the default range that windows loads DLLs
-// Use a multiple of 64k to prevent committing unused memory.
-// Note: This does not guarantee RWX regions will be within the
-// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
- static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
- static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
- static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
- raw_addr <<= kPageSizeBits;
- raw_addr += kAllocationRandomAddressMin;
- raw_addr &= kAllocationRandomAddressMax;
-#endif // V8_OS_WIN
- return reinterpret_cast<void*>(raw_addr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index a78db1a881..668a0e6037 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -76,19 +76,22 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
+// Allocates a single system memory page with read/write permissions. The
+// address parameter is a hint. Returns the base address of the memory, or null
+// on failure. Permissions can be changed on the base address.
+byte* AllocateSystemPage(void* address, size_t* allocated);
+
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
- // Reserves virtual memory with size.
- explicit VirtualMemory(size_t size, void* hint);
-
- // Reserves virtual memory containing an area of the given size that
- // is aligned per alignment. This may not be at the position returned
- // by address().
- VirtualMemory(size_t size, size_t alignment, void* hint);
+ // Reserves virtual memory containing an area of the given size that is
+ // aligned per alignment. This may not be at the position returned by
+ // address().
+ VirtualMemory(size_t size, void* hint,
+ size_t alignment = base::OS::AllocatePageSize());
// Construct a virtual memory by assigning it some already mapped address
// and size.
@@ -125,19 +128,16 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// than the requested size.
size_t size() const { return size_; }
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size, bool is_executable);
-
- // Uncommit real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size);
+ // Sets permissions according to the access argument. address and size must be
+ // multiples of CommitPageSize(). Returns true on success, otherwise false.
+ bool SetPermissions(void* address, size_t size,
+ base::OS::MemoryPermission access);
- // Creates a single guard page at the given address.
- bool Guard(void* address);
+ // Releases memory after |free_start|. Returns the number of bytes released.
+ size_t Release(void* free_start);
- // Releases the memory after |free_start|. Returns the bytes released.
- size_t ReleasePartial(void* free_start);
-
- void Release();
+ // Frees all memory.
+ void Free();
// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
@@ -159,9 +159,6 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result);
-// Generate a random address to be used for hinting mmap().
-V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 50baed4ab7..179d787941 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -80,7 +80,7 @@ class PropertyCallbackArguments
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
- JSObject* holder, Object::ShouldThrow should_throw)
+ JSObject* holder, ShouldThrow should_throw)
: Super(isolate) {
Object** values = this->begin();
values[T::kThisIndex] = self;
@@ -88,7 +88,7 @@ class PropertyCallbackArguments
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
values[T::kShouldThrowOnErrorIndex] =
- Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
+ Smi::FromInt(should_throw == kThrowOnError ? 1 : 0);
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
@@ -158,8 +158,6 @@ class FunctionCallbackArguments
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
- static const int kCalleeIndex = T::kCalleeIndex;
- static const int kContextSaveIndex = T::kContextSaveIndex;
static const int kNewTargetIndex = T::kNewTargetIndex;
FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
@@ -170,18 +168,14 @@ class FunctionCallbackArguments
: Super(isolate), argv_(argv), argc_(argc) {
Object** values = begin();
values[T::kDataIndex] = data;
- values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kNewTargetIndex] = new_target;
- values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's remove in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
- DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
- values[T::kCalleeIndex]->IsFunctionTemplateInfo());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 35759459c6..93698c9f52 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -112,7 +112,7 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
#endif
MAYBE_RETURN_NULL(
- Object::AddDataProperty(&it, value, attributes, Object::THROW_ON_ERROR,
+ Object::AddDataProperty(&it, value, attributes, kThrowOnError,
Object::CERTAINLY_NOT_STORE_FROM_KEYED));
return value;
}
@@ -212,7 +212,10 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
// Install accumulated accessors.
for (int i = 0; i < valid_descriptors; i++) {
Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
- JSObject::SetAccessor(obj, accessor).Assert();
+ Handle<Name> name(Name::cast(accessor->name()), isolate);
+ JSObject::SetAccessor(obj, name, accessor,
+ accessor->initial_property_attributes())
+ .Assert();
}
}
@@ -282,10 +285,10 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<UnseededNumberDictionary> slow_cache =
+ Handle<NumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
- if (entry == UnseededNumberDictionary::kNotFound) {
+ if (entry == NumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
@@ -310,10 +313,9 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<UnseededNumberDictionary> cache =
+ Handle<NumberDictionary> cache =
isolate->slow_template_instantiations_cache();
- auto new_cache =
- UnseededNumberDictionary::Set(cache, serial_number, object);
+ auto new_cache = NumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@@ -332,11 +334,11 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<UnseededNumberDictionary> cache =
+ Handle<NumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
- DCHECK_NE(UnseededNumberDictionary::kNotFound, entry);
- cache = UnseededNumberDictionary::DeleteEntry(cache, entry);
+ DCHECK_NE(NumberDictionary::kNotFound, entry);
+ cache = NumberDictionary::DeleteEntry(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@@ -631,17 +633,16 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
shared, isolate->native_context());
if (obj->remove_prototype()) {
- result->set_map(*isolate->sloppy_function_without_prototype_map());
DCHECK(prototype.is_null());
DCHECK(result->shared()->IsApiFunction());
- DCHECK(!result->has_initial_map());
- DCHECK(!result->has_prototype());
DCHECK(!result->IsConstructor());
+ DCHECK(!result->has_prototype_slot());
return result;
}
// Down from here is only valid for API functions that can be used as a
// constructor (don't set the "remove prototype" flag).
+ DCHECK(result->has_prototype_slot());
if (obj->read_only_prototype()) {
result->set_map(*isolate->sloppy_function_with_readonly_prototype_map());
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index e0938a70bd..d92d7961d2 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -21,7 +21,6 @@
#include "src/accessors.h"
#include "src/api-natives.h"
#include "src/assert-scope.h"
-#include "src/background-parsing-task.h"
#include "src/base/functional.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
@@ -53,6 +52,7 @@
#include "src/json-stringifier.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/parsing/background-parsing-task.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@@ -86,6 +86,7 @@
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-serialization.h"
namespace v8 {
@@ -390,7 +391,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
// HeapIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
char* first_newline = strchr(last_few_messages, '\n');
- if (first_newline == NULL || first_newline[1] == '\0')
+ if (first_newline == nullptr || first_newline[1] == '\0')
first_newline = last_few_messages;
PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
@@ -483,15 +484,21 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
+ return data == nullptr ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
- void* address = base::OS::ReserveRegion(length, i::GetRandomMmapAddr());
+ size_t page_size = base::OS::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ void* address =
+ base::OS::Allocate(base::OS::GetRandomMmapAddr(), allocated, page_size,
+ base::OS::MemoryPermission::kNoAccess);
#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address, length);
+ if (address != nullptr) {
+ __lsan_register_root_region(address, allocated);
+ }
#endif
return address;
}
@@ -503,7 +510,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
- base::OS::ReleaseRegion(data, length);
+ CHECK(base::OS::Free(data, length));
return;
}
}
@@ -512,16 +519,13 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void SetProtection(
void* data, size_t length,
v8::ArrayBuffer::Allocator::Protection protection) {
- switch (protection) {
- case v8::ArrayBuffer::Allocator::Protection::kNoAccess: {
- base::OS::Guard(data, length);
- return;
- }
- case v8::ArrayBuffer::Allocator::Protection::kReadWrite: {
- base::OS::Unprotect(data, length);
- return;
- }
- }
+ DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
+ protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ base::OS::MemoryPermission permission =
+ (protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
+ ? base::OS::MemoryPermission::kReadWrite
+ : base::OS::MemoryPermission::kNoAccess;
+ CHECK(base::OS::SetPermissions(data, length, permission));
}
};
@@ -748,7 +752,7 @@ StartupData SnapshotCreator::CreateBlob(
// cache and thus needs to happen before SerializeWeakReferencesAndDeferred
// is called below.
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
- builtin_serializer.SerializeBuiltins();
+ builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
@@ -778,7 +782,7 @@ StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
{
HandleScope scope(isolate);
Local<Context> context = Context::New(isolate);
- if (embedded_source != NULL &&
+ if (embedded_source != nullptr &&
!RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
return result;
}
@@ -798,8 +802,8 @@ StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
const char* warmup_source) {
- CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != NULL);
- CHECK(warmup_source != NULL);
+ CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != nullptr);
+ CHECK_NOT_NULL(warmup_source);
// Use following steps to create a warmed up snapshot blob from a cold one:
// - Create a new isolate from the cold snapshot.
// - Create a new context to run the warmup script. This will trigger
@@ -851,9 +855,7 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags);
}
-
-RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
-
+RegisteredExtension* RegisteredExtension::first_extension_ = nullptr;
RegisteredExtension::RegisteredExtension(Extension* extension)
: extension_(extension) { }
@@ -867,12 +869,12 @@ void RegisteredExtension::Register(RegisteredExtension* that) {
void RegisteredExtension::UnregisterAll() {
RegisteredExtension* re = first_extension_;
- while (re != NULL) {
+ while (re != nullptr) {
RegisteredExtension* next = re->next();
delete re;
re = next;
}
- first_extension_ = NULL;
+ first_extension_ = nullptr;
}
@@ -895,13 +897,13 @@ Extension::Extension(const char* name,
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) {
- CHECK(source != NULL || source_length_ == 0);
+ CHECK(source != nullptr || source_length_ == 0);
}
ResourceConstraints::ResourceConstraints()
: max_semi_space_size_in_kb_(0),
max_old_space_size_(0),
- stack_limit_(NULL),
+ stack_limit_(nullptr),
code_range_size_(0),
max_zone_pool_size_(0) {}
@@ -934,7 +936,7 @@ void SetResourceConstraints(i::Isolate* isolate,
}
isolate->allocator()->ConfigureSegmentPool(max_pool_size);
- if (constraints.stack_limit() != NULL) {
+ if (constraints.stack_limit() != nullptr) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
@@ -1097,9 +1099,9 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
Utils::ApiCheck((*escape_slot_)->IsTheHole(heap->isolate()),
"EscapableHandleScope::Escape", "Escape value set twice");
- if (escape_value == NULL) {
+ if (escape_value == nullptr) {
*escape_slot_ = heap->undefined_value();
- return NULL;
+ return nullptr;
}
*escape_slot_ = *escape_value;
return escape_slot_;
@@ -1237,7 +1239,7 @@ void Context::SetEmbedderData(int index, v8::Local<Value> value) {
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
- if (data.is_null()) return NULL;
+ if (data.is_null()) return nullptr;
return DecodeSmiToAligned(data->get(index), location);
}
@@ -1373,9 +1375,6 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
- if (data.IsEmpty()) {
- data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- }
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
@@ -1456,10 +1455,11 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TUPLE2_TYPE, i::TENURED);
+ isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::TENURED);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
+ SET_FIELD_WRAPPED(obj, set_js_callback, obj->redirected_callback());
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -1468,29 +1468,14 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
}
-static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
- i::Handle<i::AccessorInfo> obj, v8::Local<Name> name,
- v8::AccessControl settings, v8::PropertyAttribute attributes,
- v8::Local<AccessorSignature> signature) {
- obj->set_name(*Utils::OpenHandle(*name));
- if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
- if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- obj->set_property_attributes(static_cast<i::PropertyAttributes>(attributes));
- if (!signature.IsEmpty()) {
- obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
- }
- return obj;
-}
-
namespace {
template <typename Getter, typename Setter>
i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
- v8::AccessControl settings, v8::PropertyAttribute attributes,
+ i::Isolate* isolate, v8::Local<Name> name, Getter getter, Setter setter,
+ v8::Local<Value> data, v8::AccessControl settings,
v8::Local<AccessorSignature> signature, bool is_special_data_property,
bool replace_on_access) {
- i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::AccessorInfo> obj = isolate->factory()->NewAccessorInfo();
SET_FIELD_WRAPPED(obj, set_getter, getter);
DCHECK_IMPLIES(replace_on_access,
@@ -1507,7 +1492,19 @@ i::Handle<i::AccessorInfo> MakeAccessorInfo(
obj->set_data(*Utils::OpenHandle(*data));
obj->set_is_special_data_property(is_special_data_property);
obj->set_replace_on_access(replace_on_access);
- return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
+ i::Handle<i::Name> accessor_name = Utils::OpenHandle(*name);
+ if (!accessor_name->IsUniqueName()) {
+ accessor_name = isolate->factory()->InternalizeString(
+ i::Handle<i::String>::cast(accessor_name));
+ }
+ obj->set_name(*accessor_name);
+ if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
+ if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
+ obj->set_initial_property_attributes(i::NONE);
+ if (!signature.IsEmpty()) {
+ obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
+ }
+ return obj;
}
} // namespace
@@ -1659,7 +1656,7 @@ static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
}
template <typename Getter, typename Setter, typename Data, typename Template>
-static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
+static void TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
Getter getter, Setter setter, Data data,
AccessControl settings,
PropertyAttribute attribute,
@@ -1670,12 +1667,12 @@ static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
- auto obj =
- MakeAccessorInfo(name, getter, setter, data, settings, attribute,
- signature, is_special_data_property, replace_on_access);
- if (obj.is_null()) return false;
- i::ApiNatives::AddNativeDataProperty(isolate, info, obj);
- return true;
+ i::Handle<i::AccessorInfo> accessor_info =
+ MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
+ is_special_data_property, replace_on_access);
+ accessor_info->set_initial_property_attributes(
+ static_cast<i::PropertyAttributes>(attribute));
+ i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info);
}
@@ -1913,10 +1910,11 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TUPLE2_TYPE, i::TENURED);
+ isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::TENURED);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
+ SET_FIELD_WRAPPED(obj, set_js_callback, obj->redirected_callback());
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -2230,8 +2228,9 @@ Location Module::GetModuleRequestLocation(int i) const {
Local<Value> Module::GetModuleNamespace() {
Utils::ApiCheck(
- GetStatus() >= kInstantiated, "v8::Module::GetModuleNamespace",
- "v8::Module::GetModuleNamespace must be used on an instantiated module");
+ GetStatus() == kEvaluated, "v8::Module::GetModuleNamespace",
+ "v8::Module::GetModuleNamespace can only be used on a module with "
+ "status kEvaluated");
i::Handle<i::Module> self = Utils::OpenHandle(this);
i::Handle<i::JSModuleNamespace> module_namespace =
i::Module::GetModuleNamespace(self);
@@ -2276,7 +2275,8 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
}
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
- Isolate* v8_isolate, Source* source, CompileOptions options) {
+ Isolate* v8_isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason) {
auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
@@ -2291,7 +2291,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
options = kNoCompileOptions;
}
- i::ScriptData* script_data = NULL;
+ i::ScriptData* script_data = nullptr;
if (options == kConsumeParserCache || options == kConsumeCodeCache) {
DCHECK(source->cached_data);
// ScriptData takes care of pointer-aligning the data.
@@ -2301,102 +2301,97 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
i::Handle<i::SharedFunctionInfo> result;
- {
- i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
- i::MaybeHandle<i::Object> name_obj;
- i::MaybeHandle<i::Object> source_map_url;
- i::MaybeHandle<i::FixedArray> host_defined_options =
- isolate->factory()->empty_fixed_array();
- int line_offset = 0;
- int column_offset = 0;
- if (!source->resource_name.IsEmpty()) {
- name_obj = Utils::OpenHandle(*(source->resource_name));
- }
- if (!source->host_defined_options.IsEmpty()) {
- host_defined_options = Utils::OpenHandle(*(source->host_defined_options));
- }
- if (!source->resource_line_offset.IsEmpty()) {
- line_offset = static_cast<int>(source->resource_line_offset->Value());
- }
- if (!source->resource_column_offset.IsEmpty()) {
- column_offset =
- static_cast<int>(source->resource_column_offset->Value());
- }
- if (!source->source_map_url.IsEmpty()) {
- source_map_url = Utils::OpenHandle(*(source->source_map_url));
- }
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- str, name_obj, line_offset, column_offset, source->resource_options,
- source_map_url, isolate->native_context(), NULL, &script_data,
- options, i::NOT_NATIVES_CODE, host_defined_options);
- has_pending_exception = !maybe_function_info.ToHandle(&result);
- if (has_pending_exception && script_data != NULL) {
- // This case won't happen during normal operation; we have compiled
- // successfully and produced cached data, and but the second compilation
- // of the same source code fails.
- delete script_data;
- script_data = NULL;
- }
- RETURN_ON_FAILED_EXECUTION(UnboundScript);
-
- if (produce_cache && script_data != NULL) {
- // script_data now contains the data that was generated. source will
- // take the ownership.
- source->cached_data = new CachedData(
- script_data->data(), script_data->length(), CachedData::BufferOwned);
- script_data->ReleaseDataOwnership();
- } else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
- source->cached_data->rejected = script_data->rejected();
- }
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
+ i::MaybeHandle<i::Object> name_obj;
+ i::MaybeHandle<i::Object> source_map_url;
+ i::MaybeHandle<i::FixedArray> host_defined_options =
+ isolate->factory()->empty_fixed_array();
+ int line_offset = 0;
+ int column_offset = 0;
+ if (!source->resource_name.IsEmpty()) {
+ name_obj = Utils::OpenHandle(*(source->resource_name));
+ }
+ if (!source->host_defined_options.IsEmpty()) {
+ host_defined_options = Utils::OpenHandle(*(source->host_defined_options));
+ }
+ if (!source->resource_line_offset.IsEmpty()) {
+ line_offset = static_cast<int>(source->resource_line_offset->Value());
+ }
+ if (!source->resource_column_offset.IsEmpty()) {
+ column_offset = static_cast<int>(source->resource_column_offset->Value());
+ }
+ if (!source->source_map_url.IsEmpty()) {
+ source_map_url = Utils::OpenHandle(*(source->source_map_url));
+ }
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScript(
+ str, name_obj, line_offset, column_offset, source->resource_options,
+ source_map_url, isolate->native_context(), nullptr, &script_data,
+ options, no_cache_reason, i::NOT_NATIVES_CODE, host_defined_options);
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
+ if (has_pending_exception && script_data != nullptr) {
+ // This case won't happen during normal operation; we have compiled
+ // successfully and produced cached data, and but the second compilation
+ // of the same source code fails.
delete script_data;
+ script_data = nullptr;
}
+ RETURN_ON_FAILED_EXECUTION(UnboundScript);
+
+ if (produce_cache && script_data != nullptr) {
+ // script_data now contains the data that was generated. source will
+ // take the ownership.
+ source->cached_data = new CachedData(
+ script_data->data(), script_data->length(), CachedData::BufferOwned);
+ script_data->ReleaseDataOwnership();
+ } else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+ source->cached_data->rejected = script_data->rejected();
+ }
+ delete script_data;
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
-
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
- Isolate* v8_isolate, Source* source, CompileOptions options) {
+ Isolate* v8_isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason) {
Utils::ApiCheck(
!source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileUnboundScript",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
- return CompileUnboundInternal(v8_isolate, source, options);
+ return CompileUnboundInternal(v8_isolate, source, options, no_cache_reason);
}
-
-Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
- Source* source,
- CompileOptions options) {
+Local<UnboundScript> ScriptCompiler::CompileUnbound(
+ Isolate* v8_isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason) {
Utils::ApiCheck(
!source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileUnbound",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
- RETURN_TO_LOCAL_UNCHECKED(CompileUnboundInternal(v8_isolate, source, options),
- UnboundScript);
+ RETURN_TO_LOCAL_UNCHECKED(
+ CompileUnboundInternal(v8_isolate, source, options, no_cache_reason),
+ UnboundScript);
}
-
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Source* source,
- CompileOptions options) {
+ CompileOptions options,
+ NoCacheReason no_cache_reason) {
Utils::ApiCheck(
!source->GetResourceOptions().IsModule(), "v8::ScriptCompiler::Compile",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
auto isolate = context->GetIsolate();
- auto maybe = CompileUnboundInternal(isolate, source, options);
+ auto maybe =
+ CompileUnboundInternal(isolate, source, options, no_cache_reason);
Local<UnboundScript> result;
if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
v8::Context::Scope scope(context);
return result->BindToCurrentContext();
}
-
-Local<Script> ScriptCompiler::Compile(
- Isolate* v8_isolate,
- Source* source,
- CompileOptions options) {
+Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate, Source* source,
+ CompileOptions options,
+ NoCacheReason no_cache_reason) {
auto context = v8_isolate->GetCurrentContext();
RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, options), Script);
}
@@ -2408,7 +2403,8 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
- auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions);
+ auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions,
+ kNoCacheBecauseModule);
Local<UnboundScript> unbound;
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
@@ -2423,7 +2419,7 @@ class IsIdentifierHelper {
bool Check(i::String* string) {
i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
- if (cons_string == NULL) return is_identifier_;
+ if (cons_string == nullptr) return is_identifier_;
// We don't support cons strings here.
return false;
}
@@ -2462,7 +2458,7 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Local<Object> context_extensions[]) {
PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
Function);
- TRACE_EVENT0("v8", "V8.ScriptCompiler");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
i::Handle<i::String> source_string;
auto factory = isolate->factory();
if (arguments_count) {
@@ -2539,7 +2535,7 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
i::Handle<i::JSFunction> fun;
has_pending_exception =
!i::Compiler::GetFunctionFromEval(
- source_string, outer_info, context, i::SLOPPY,
+ source_string, outer_info, context, i::LanguageMode::kSloppy,
i::ONLY_SINGLE_FUNCTION_LITERAL, i::kNoSourcePosition,
eval_scope_position, eval_position, line_offset,
column_offset - scope_position, name_obj, source->resource_options)
@@ -2587,7 +2583,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Local<String> full_source_string,
const ScriptOrigin& origin) {
PREPARE_FOR_EXECUTION(context, ScriptCompiler, Compile, Script);
- TRACE_EVENT0("v8", "V8.ScriptCompiler");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
i::StreamedSource* source = v8_source->impl();
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
@@ -2613,18 +2609,25 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
}
source->info->set_script(script);
- if (source->info->literal() == nullptr) {
- source->parser->ReportErrors(isolate, script);
- }
source->parser->UpdateStatistics(isolate, script);
- source->info->UpdateStatisticsAfterBackgroundParse(isolate);
+ source->info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
source->parser->HandleSourceURLComments(isolate, script);
i::Handle<i::SharedFunctionInfo> result;
- if (source->info->literal() != nullptr) {
- // Parsing has succeeded.
- result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
- script, source->info.get(), str->length());
+ if (source->info->literal() == nullptr) {
+ // Parsing has failed - report error messages.
+ source->info->pending_error_handler()->ReportErrors(
+ isolate, script, source->info->ast_value_factory());
+ } else {
+ // Parsing has succeeded - finalize compile.
+ if (i::FLAG_background_compile) {
+ result = i::Compiler::GetSharedFunctionInfoForBackgroundCompile(
+ script, source->info.get(), str->length(),
+ source->outer_function_job.get(), &source->inner_function_jobs);
+ } else {
+ result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
+ script, source->info.get(), str->length());
+ }
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
@@ -2798,7 +2801,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate->factory()->stack_string();
Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
- has_pending_exception = !maybe.IsJust();
+ has_pending_exception = maybe.IsNothing();
RETURN_ON_FAILED_EXECUTION(Value);
if (!maybe.FromJust()) return v8::Local<Value>();
Local<Value> result;
@@ -3158,101 +3161,6 @@ bool StackFrame::IsConstructor() const {
bool StackFrame::IsWasm() const { return Utils::OpenHandle(this)->is_wasm(); }
-// --- N a t i v e W e a k M a p ---
-
-Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
- i::JSWeakCollection::Initialize(weakmap, isolate);
- return Utils::NativeWeakMapToLocal(weakmap);
-}
-
-
-void NativeWeakMap::Set(Local<Value> v8_key, Local<Value> v8_value) {
- i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
- i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
- i::Handle<i::Object> value = Utils::OpenHandle(*v8_value);
- if (!key->IsJSReceiver() && !key->IsSymbol()) {
- DCHECK(false);
- return;
- }
- i::Handle<i::ObjectHashTable> table(
- i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(isolate, *key)) {
- DCHECK(false);
- return;
- }
- int32_t hash = key->GetOrCreateHash(isolate)->value();
- i::JSWeakCollection::Set(weak_collection, key, value, hash);
-}
-
-Local<Value> NativeWeakMap::Get(Local<Value> v8_key) const {
- i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
- i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
- if (!key->IsJSReceiver() && !key->IsSymbol()) {
- DCHECK(false);
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- }
- i::Handle<i::ObjectHashTable> table(
- i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(isolate, *key)) {
- DCHECK(false);
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- }
- i::Handle<i::Object> lookup(table->Lookup(key), isolate);
- if (lookup->IsTheHole(isolate))
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- return Utils::ToLocal(lookup);
-}
-
-
-bool NativeWeakMap::Has(Local<Value> v8_key) {
- i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
- i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
- if (!key->IsJSReceiver() && !key->IsSymbol()) {
- DCHECK(false);
- return false;
- }
- i::Handle<i::ObjectHashTable> table(
- i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(isolate, *key)) {
- DCHECK(false);
- return false;
- }
- i::Handle<i::Object> lookup(table->Lookup(key), isolate);
- return !lookup->IsTheHole(isolate);
-}
-
-
-bool NativeWeakMap::Delete(Local<Value> v8_key) {
- i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
- i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
- if (!key->IsJSReceiver() && !key->IsSymbol()) {
- DCHECK(false);
- return false;
- }
- i::Handle<i::ObjectHashTable> table(
- i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(isolate, *key)) {
- DCHECK(false);
- return false;
- }
- int32_t hash = key->GetOrCreateHash(isolate)->value();
- return i::JSWeakCollection::Delete(weak_collection, key, hash);
-}
-
// --- J S O N ---
@@ -3905,7 +3813,7 @@ Local<Uint32> Value::ToUint32(Isolate* isolate) const {
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- Utils::ApiCheck(isolate != NULL && !isolate->IsDead(),
+ Utils::ApiCheck(isolate != nullptr && !isolate->IsDead(),
"v8::internal::Internals::CheckInitialized",
"Isolate is not initialized or V8 has died");
}
@@ -4085,21 +3993,20 @@ void v8::SharedArrayBuffer::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = NULL;
+ i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != NULL &&
- obj->HasSpecificClassOf(isolate->heap()->Date_string()),
- "v8::Date::Cast()",
- "Could not convert to date");
+ Utils::ApiCheck(isolate != nullptr &&
+ obj->HasSpecificClassOf(isolate->heap()->Date_string()),
+ "v8::Date::Cast()", "Could not convert to date");
}
void v8::StringObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = NULL;
+ i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != NULL &&
- obj->HasSpecificClassOf(isolate->heap()->String_string()),
+ Utils::ApiCheck(isolate != nullptr &&
+ obj->HasSpecificClassOf(isolate->heap()->String_string()),
"v8::StringObject::Cast()",
"Could not convert to StringObject");
}
@@ -4107,10 +4014,10 @@ void v8::StringObject::CheckCast(v8::Value* that) {
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = NULL;
+ i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != NULL &&
- obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
+ Utils::ApiCheck(isolate != nullptr &&
+ obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
"v8::SymbolObject::Cast()",
"Could not convert to SymbolObject");
}
@@ -4118,10 +4025,10 @@ void v8::SymbolObject::CheckCast(v8::Value* that) {
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = NULL;
+ i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != NULL &&
- obj->HasSpecificClassOf(isolate->heap()->Number_string()),
+ Utils::ApiCheck(isolate != nullptr &&
+ obj->HasSpecificClassOf(isolate->heap()->Number_string()),
"v8::NumberObject::Cast()",
"Could not convert to NumberObject");
}
@@ -4129,12 +4036,12 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = NULL;
+ i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != NULL &&
- obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
- "v8::BooleanObject::Cast()",
- "Could not convert to BooleanObject");
+ Utils::ApiCheck(
+ isolate != nullptr &&
+ obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
+ "v8::BooleanObject::Cast()", "Could not convert to BooleanObject");
}
@@ -4349,7 +4256,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
- i::SLOPPY).is_null();
+ i::LanguageMode::kSloppy)
+ .is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -4368,7 +4276,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
- i::SLOPPY).is_null();
+ i::LanguageMode::kSloppy)
+ .is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -4393,7 +4302,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
isolate, self, key_obj, self, i::LookupIterator::OWN);
Maybe<bool> result =
- i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
+ i::JSReceiver::CreateDataProperty(&it, value_obj, i::kDontThrow);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4411,7 +4320,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN);
Maybe<bool> result =
- i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
+ i::JSReceiver::CreateDataProperty(&it, value_obj, i::kDontThrow);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4529,8 +4438,8 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
i::HandleScope);
Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &desc, i::Object::DONT_THROW);
- // Even though we said DONT_THROW, there might be accessors that do throw.
+ isolate, self, key_obj, &desc, i::kDontThrow);
+ // Even though we said kDontThrow, there might be accessors that do throw.
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return success;
} else {
@@ -4539,7 +4448,7 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
ENTER_V8_NO_SCRIPT(isolate, context, Object, DefineOwnProperty,
Nothing<bool>(), i::HandleScope);
Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &desc, i::Object::DONT_THROW);
+ isolate, self, key_obj, &desc, i::kDontThrow);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return success;
}
@@ -4555,8 +4464,7 @@ Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &descriptor.get_private()->desc,
- i::Object::DONT_THROW);
+ isolate, self, key_obj, &descriptor.get_private()->desc, i::kDontThrow);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return success;
}
@@ -4610,7 +4518,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
desc.set_value(value_obj);
return i::JSProxy::SetPrivateProperty(
isolate, i::Handle<i::JSProxy>::cast(self),
- i::Handle<i::Symbol>::cast(key_obj), &desc, i::Object::DONT_THROW);
+ i::Handle<i::Symbol>::cast(key_obj), &desc, i::kDontThrow);
}
auto js_object = i::Handle<i::JSObject>::cast(self);
i::LookupIterator it(js_object, key_obj, js_object);
@@ -4734,8 +4642,8 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
// We do not allow exceptions thrown while setting the prototype
// to propagate outside.
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- auto result = i::JSReceiver::SetPrototype(self, value_obj, false,
- i::Object::THROW_ON_ERROR);
+ auto result =
+ i::JSReceiver::SetPrototype(self, value_obj, false, i::kThrowOnError);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
@@ -4845,8 +4753,8 @@ Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
auto self = Utils::OpenHandle(this);
i::JSReceiver::IntegrityLevel i_level =
level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
- Maybe<bool> result = i::JSReceiver::SetIntegrityLevel(
- self, i_level, i::Object::THROW_ON_ERROR);
+ Maybe<bool> result =
+ i::JSReceiver::SetIntegrityLevel(self, i_level, i::kThrowOnError);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4858,8 +4766,8 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
auto key_obj = Utils::OpenHandle(*key);
if (self->IsJSProxy()) {
ENTER_V8(isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope);
- Maybe<bool> result =
- i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ Maybe<bool> result = i::Runtime::DeleteObjectProperty(
+ isolate, self, key_obj, i::LanguageMode::kSloppy);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4868,8 +4776,8 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
// a script.
ENTER_V8_NO_SCRIPT(isolate, context, Object, Delete, Nothing<bool>(),
i::HandleScope);
- Maybe<bool> result =
- i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ Maybe<bool> result = i::Runtime::DeleteObjectProperty(
+ isolate, self, key_obj, i::LanguageMode::kSloppy);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4890,8 +4798,8 @@ Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
- Maybe<bool> result =
- i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ Maybe<bool> result = i::Runtime::DeleteObjectProperty(
+ isolate, self, key_obj, i::LanguageMode::kSloppy);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4978,13 +4886,18 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
v8::Local<AccessorSignature> signature;
- auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
- signature, is_special_data_property, false);
+ i::Handle<i::AccessorInfo> info =
+ MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
+ is_special_data_property, false);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
+
+ i::Handle<i::Name> accessor_name(info->name(), isolate);
+ i::PropertyAttributes attrs = static_cast<i::PropertyAttributes>(attributes);
has_pending_exception =
- !i::JSObject::SetAccessor(obj, info).ToHandle(&result);
+ !i::JSObject::SetAccessor(obj, accessor_name, info, attrs)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
if (result->IsUndefined(obj->GetIsolate())) return Just(false);
if (fast) {
@@ -5388,7 +5301,7 @@ Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
Local<v8::Object> Function::NewInstance() const {
- return NewInstance(Isolate::GetCurrent()->GetCurrentContext(), 0, NULL)
+ return NewInstance(Isolate::GetCurrent()->GetCurrentContext(), 0, nullptr)
.FromMaybe(Local<Object>());
}
@@ -5639,7 +5552,7 @@ class ContainsOnlyOneByteHelper {
ContainsOnlyOneByteHelper() : is_one_byte_(true) {}
bool Check(i::String* string) {
i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
- if (cons_string == NULL) return is_one_byte_;
+ if (cons_string == nullptr) return is_one_byte_;
return CheckCons(cons_string);
}
void VisitOneByteString(const uint8_t* chars, int length) {
@@ -5691,7 +5604,7 @@ class ContainsOnlyOneByteHelper {
i::String::VisitFlat(this, right, 0);
if (!is_one_byte_) return false;
// Standard recurse/iterate trick.
- if (left_as_cons != NULL && right_as_cons != NULL) {
+ if (left_as_cons != nullptr && right_as_cons != nullptr) {
if (left->length() < right->length()) {
CheckCons(left_as_cons);
cons_string = right_as_cons;
@@ -5704,12 +5617,12 @@ class ContainsOnlyOneByteHelper {
continue;
}
// Descend left in place.
- if (left_as_cons != NULL) {
+ if (left_as_cons != nullptr) {
cons_string = left_as_cons;
continue;
}
// Descend right in place.
- if (right_as_cons != NULL) {
+ if (right_as_cons != nullptr) {
cons_string = right_as_cons;
continue;
}
@@ -5863,16 +5776,16 @@ class Utf8LengthHelper : public i::AllStatic {
int leaf_length;
ConsString* left_as_cons =
Visitor::VisitFlat(left, &leaf_length, &left_leaf_state);
- if (left_as_cons == NULL) {
+ if (left_as_cons == nullptr) {
total_length += leaf_length;
MergeLeafLeft(&total_length, &state, left_leaf_state);
}
ConsString* right_as_cons =
Visitor::VisitFlat(right, &leaf_length, &right_leaf_state);
- if (right_as_cons == NULL) {
+ if (right_as_cons == nullptr) {
total_length += leaf_length;
MergeLeafRight(&total_length, &state, right_leaf_state);
- if (left_as_cons != NULL) {
+ if (left_as_cons != nullptr) {
// 1 Leaf node. Descend in place.
current = left_as_cons;
continue;
@@ -5881,7 +5794,7 @@ class Utf8LengthHelper : public i::AllStatic {
MergeTerminal(&total_length, state, state_out);
return total_length;
}
- } else if (left_as_cons == NULL) {
+ } else if (left_as_cons == nullptr) {
// 1 Leaf node. Descend in place.
current = right_as_cons;
continue;
@@ -5917,7 +5830,7 @@ static int Utf8Length(i::String* str, i::Isolate* isolate) {
uint8_t state;
i::ConsString* cons_string =
Utf8LengthHelper::Visitor::VisitFlat(str, &length, &state);
- if (cons_string == NULL) return length;
+ if (cons_string == nullptr) return length;
return Utf8LengthHelper::Calculate(cons_string);
}
@@ -6084,7 +5997,7 @@ class Utf8WriterVisitor {
int CompleteWrite(bool write_null, int* utf16_chars_read_out) {
// Write out number of utf16 characters written to the stream.
- if (utf16_chars_read_out != NULL) {
+ if (utf16_chars_read_out != nullptr) {
*utf16_chars_read_out = utf16_chars_read_;
}
// Only null terminate if all of the string was written and there's space.
@@ -6114,7 +6027,7 @@ static bool RecursivelySerializeToUtf8(i::String* current,
int recursion_budget) {
while (!writer->IsDone()) {
i::ConsString* cons_string = i::String::VisitFlat(writer, current);
- if (cons_string == NULL) return true; // Leaf node.
+ if (cons_string == nullptr) return true; // Leaf node.
if (recursion_budget <= 0) return false;
// Must write the left branch first.
i::String* first = cons_string->first();
@@ -6157,7 +6070,7 @@ int String::WriteUtf8(char* buffer,
// one-byte fast path.
if (utf8_bytes == string_length) {
WriteOneByte(reinterpret_cast<uint8_t*>(buffer), 0, capacity, options);
- if (nchars_ref != NULL) *nchars_ref = string_length;
+ if (nchars_ref != nullptr) *nchars_ref = string_length;
if (write_null && (utf8_bytes+1 <= capacity)) {
return string_length + 1;
}
@@ -6246,7 +6159,7 @@ void v8::String::VerifyExternalStringResource(
i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResource*>(resource);
} else {
- expected = NULL;
+ expected = nullptr;
}
CHECK_EQ(expected, value);
}
@@ -6267,7 +6180,7 @@ void v8::String::VerifyExternalStringResourceBase(
expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
expectedEncoding = TWO_BYTE_ENCODING;
} else {
- expected = NULL;
+ expected = nullptr;
expectedEncoding =
str->IsOneByteRepresentation() ? ONE_BYTE_ENCODING : TWO_BYTE_ENCODING;
}
@@ -6283,7 +6196,7 @@ v8::String::GetExternalOneByteStringResource() const {
i::Handle<i::ExternalOneByteString>::cast(str)->resource();
return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
} else {
- return NULL;
+ return nullptr;
}
}
@@ -6376,7 +6289,7 @@ void v8::Object::SetInternalField(int index, v8::Local<Value> value) {
void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
- if (!InternalFieldOK(obj, index, location)) return NULL;
+ if (!InternalFieldOK(obj, index, location)) return nullptr;
return DecodeSmiToAligned(
i::Handle<i::JSObject>::cast(obj)->GetEmbedderField(index), location);
}
@@ -6413,7 +6326,7 @@ static void* ExternalValue(i::Object* obj) {
// Obscure semantics for undefined, but somehow checked in our unit tests...
if (!obj->IsSmi() &&
obj->IsUndefined(i::HeapObject::cast(obj)->GetIsolate())) {
- return NULL;
+ return nullptr;
}
i::Object* foreign = i::JSObject::cast(obj)->GetEmbedderField(0);
return i::Foreign::cast(foreign)->foreign_address();
@@ -6668,7 +6581,7 @@ Local<Context> NewContext(
LOG_API(isolate, Context, New);
i::HandleScope scope(isolate);
ExtensionConfiguration no_extensions;
- if (extensions == NULL) extensions = &no_extensions;
+ if (extensions == nullptr) extensions = &no_extensions;
i::Handle<i::Context> env = CreateEnvironment<i::Context>(
isolate, extensions, global_template, global_object,
context_snapshot_index, embedder_fields_deserializer);
@@ -7648,7 +7561,7 @@ MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
i::Handle<i::Object> result;
has_pending_exception =
!i::Execution::Call(isolate, isolate->promise_internal_constructor(),
- isolate->factory()->undefined_value(), 0, NULL)
+ isolate->factory()->undefined_value(), 0, nullptr)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
RETURN_ESCAPED(Local<Promise::Resolver>::Cast(Utils::ToLocal(result)));
@@ -7869,26 +7782,40 @@ WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
+ if (i::FLAG_wasm_jit_to_native) {
+ i::Isolate* isolate = obj->GetIsolate();
- std::unique_ptr<i::ScriptData> script_data =
- i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
- compiled_part);
- script_data->ReleaseDataOwnership();
+ return i::wasm::NativeModuleSerializer::SerializeWholeModule(isolate,
+ compiled_part);
+ } else {
+ std::unique_ptr<i::ScriptData> script_data =
+ i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
+ compiled_part);
+ script_data->ReleaseDataOwnership();
- size_t size = static_cast<size_t>(script_data->length());
- return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
+ size_t size = static_cast<size_t>(script_data->length());
+ return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
+ }
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
Isolate* isolate,
const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
- int size = static_cast<int>(serialized_module.second);
- i::ScriptData sc(serialized_module.first, size);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::MaybeHandle<i::FixedArray> maybe_compiled_part =
- i::WasmCompiledModuleSerializer::DeserializeWasmModule(
- i_isolate, &sc, {wire_bytes.first, wire_bytes.second});
+ i::MaybeHandle<i::FixedArray> maybe_compiled_part;
+ if (i::FLAG_wasm_jit_to_native) {
+ maybe_compiled_part =
+ i::wasm::NativeModuleDeserializer::DeserializeFullBuffer(
+ i_isolate, {serialized_module.first, serialized_module.second},
+ {wire_bytes.first, wire_bytes.second});
+ } else {
+ int size = static_cast<int>(serialized_module.second);
+ i::ScriptData sc(serialized_module.first, size);
+ maybe_compiled_part =
+ i::WasmCompiledModuleSerializer::DeserializeWasmModule(
+ i_isolate, &sc, {wire_bytes.first, wire_bytes.second});
+ }
i::Handle<i::FixedArray> compiled_part;
if (!maybe_compiled_part.ToHandle(&compiled_part)) {
return MaybeLocal<WasmCompiledModule>();
@@ -8106,7 +8033,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length,
ArrayBufferCreationMode mode) {
// Embedders must guarantee that the external backing store is valid.
- CHECK(byte_length == 0 || data != NULL);
+ CHECK(byte_length == 0 || data != nullptr);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -8311,7 +8238,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
ArrayBufferCreationMode mode) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
// Embedders must guarantee that the external backing store is valid.
- CHECK(byte_length == 0 || data != NULL);
+ CHECK(byte_length == 0 || data != nullptr);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -8456,16 +8383,16 @@ CpuProfiler* Isolate::GetCpuProfiler() {
bool Isolate::InContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->context() != NULL;
+ return isolate->context() != nullptr;
}
v8::Local<v8::Context> Isolate::GetCurrentContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Context* context = isolate->context();
- if (context == NULL) return Local<Context>();
+ if (context == nullptr) return Local<Context>();
i::Context* native_context = context->native_context();
- if (native_context == NULL) return Local<Context>();
+ if (native_context == nullptr) return Local<Context>();
return Utils::ToLocal(i::Handle<i::Context>(native_context));
}
@@ -8567,35 +8494,6 @@ void Isolate::RemoveGCEpilogueCallback(GCCallback callback) {
RemoveGCEpilogueCallback(CallGCCallbackWithoutData, data);
}
-static void CallGCCallbackWithoutIsolate(Isolate* isolate, GCType type,
- GCCallbackFlags flags, void* data) {
- reinterpret_cast<v8::GCCallback>(data)(type, flags);
-}
-
-void V8::AddGCPrologueCallback(v8::GCCallback callback, GCType gc_type) {
- void* data = reinterpret_cast<void*>(callback);
- Isolate::GetCurrent()->AddGCPrologueCallback(CallGCCallbackWithoutIsolate,
- data, gc_type);
-}
-
-void V8::AddGCEpilogueCallback(v8::GCCallback callback, GCType gc_type) {
- void* data = reinterpret_cast<void*>(callback);
- Isolate::GetCurrent()->AddGCEpilogueCallback(CallGCCallbackWithoutIsolate,
- data, gc_type);
-}
-
-void V8::RemoveGCPrologueCallback(GCCallback callback) {
- void* data = reinterpret_cast<void*>(callback);
- Isolate::GetCurrent()->RemoveGCPrologueCallback(CallGCCallbackWithoutIsolate,
- data);
-}
-
-void V8::RemoveGCEpilogueCallback(GCCallback callback) {
- void* data = reinterpret_cast<void*>(callback);
- Isolate::GetCurrent()->RemoveGCEpilogueCallback(CallGCCallbackWithoutIsolate,
- data);
-}
-
void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->SetEmbedderHeapTracer(tracer);
@@ -8662,9 +8560,9 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
Isolate* IsolateNewImpl(internal::Isolate* isolate,
const v8::Isolate::CreateParams& params) {
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
- CHECK(params.array_buffer_allocator != NULL);
+ CHECK_NOT_NULL(params.array_buffer_allocator);
isolate->set_array_buffer_allocator(params.array_buffer_allocator);
- if (params.snapshot_blob != NULL) {
+ if (params.snapshot_blob != nullptr) {
isolate->set_snapshot_blob(params.snapshot_blob);
} else {
isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob());
@@ -8709,9 +8607,12 @@ Isolate* IsolateNewImpl(internal::Isolate* isolate,
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
+ // If snapshot data was provided and we failed to deserialize it must
+ // have been corrupted.
+ CHECK_NULL(isolate->snapshot_blob());
base::ElapsedTimer timer;
if (i::FLAG_profile_deserialization) timer.Start();
- isolate->Init(NULL);
+ isolate->Init(nullptr);
if (i::FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
i::PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
@@ -8766,6 +8667,12 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
isolate->SetHostImportModuleDynamicallyCallback(callback);
}
+void Isolate::SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetHostInitializeImportMetaObjectCallback(callback);
+}
+
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@@ -8938,7 +8845,7 @@ void Isolate::SetEventLogger(LogEventCallback that) {
void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
- if (callback == NULL) return;
+ if (callback == nullptr) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->AddBeforeCallEnteredCallback(callback);
}
@@ -8952,7 +8859,7 @@ void Isolate::RemoveBeforeCallEnteredCallback(
void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
- if (callback == NULL) return;
+ if (callback == nullptr) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->AddCallCompletedCallback(callback);
}
@@ -8982,14 +8889,14 @@ void Isolate::SetPromiseHook(PromiseHook hook) {
}
void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
- if (callback == NULL) return;
+ if (callback == nullptr) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetPromiseRejectCallback(callback);
}
void Isolate::RunMicrotasks() {
- DCHECK(MicrotasksPolicy::kScoped != GetMicrotasksPolicy());
+ DCHECK_NE(MicrotasksPolicy::kScoped, GetMicrotasksPolicy());
reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
}
@@ -9005,8 +8912,10 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
i::HandleScope scope(isolate);
i::Handle<i::CallHandlerInfo> callback_info =
i::Handle<i::CallHandlerInfo>::cast(
- isolate->factory()->NewStruct(i::TUPLE2_TYPE, i::NOT_TENURED));
+ isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::NOT_TENURED));
SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
+ SET_FIELD_WRAPPED(callback_info, set_js_callback,
+ callback_info->redirected_callback());
SET_FIELD_WRAPPED(callback_info, set_data, data);
isolate->EnqueueMicrotask(callback_info);
}
@@ -9185,34 +9094,25 @@ void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
*length_in_bytes =
isolate->heap()->memory_allocator()->code_range()->size();
} else {
- *start = NULL;
+ *start = nullptr;
*length_in_bytes = 0;
}
}
-void Isolate::SetFatalErrorHandler(FatalErrorCallback that) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->set_exception_behavior(that);
-}
-
-void Isolate::SetOOMErrorHandler(OOMErrorCallback that) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->set_oom_behavior(that);
-}
-
-void Isolate::SetAllowCodeGenerationFromStringsCallback(
- AllowCodeGenerationFromStringsCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->set_allow_code_gen_callback(callback);
-}
-
#define CALLBACK_SETTER(ExternalName, Type, InternalName) \
void Isolate::Set##ExternalName(Type callback) { \
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); \
isolate->set_##InternalName(callback); \
}
+CALLBACK_SETTER(FatalErrorHandler, FatalErrorCallback, exception_behavior)
+CALLBACK_SETTER(OOMErrorHandler, OOMErrorCallback, oom_behavior)
+CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback,
+ AllowCodeGenerationFromStringsCallback, allow_code_gen_callback)
+CALLBACK_SETTER(AllowWasmCodeGenerationCallback,
+ AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback)
+
CALLBACK_SETTER(WasmModuleCallback, ExtensionCallback, wasm_module_callback)
CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback)
@@ -9366,7 +9266,7 @@ bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
}
String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
- : str_(NULL), length_(0) {
+ : str_(nullptr), length_(0) {
if (obj.IsEmpty()) return;
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_DO_NOT_USE(i_isolate);
@@ -9389,7 +9289,7 @@ String::Utf8Value::~Utf8Value() {
}
String::Value::Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
- : str_(NULL), length_(0) {
+ : str_(nullptr), length_(0) {
if (obj.IsEmpty()) return;
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_DO_NOT_USE(i_isolate);
@@ -9442,7 +9342,7 @@ Local<Message> Exception::CreateMessage(Isolate* isolate,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::HandleScope scope(i_isolate);
return Utils::MessageToLocal(
- scope.CloseAndEscape(i_isolate->CreateMessage(obj, NULL)));
+ scope.CloseAndEscape(i_isolate->CreateMessage(obj, nullptr)));
}
@@ -9973,7 +9873,7 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
Local<String> source) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, UnboundScript);
- i::ScriptData* script_data = NULL;
+ i::ScriptData* script_data = nullptr;
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::SharedFunctionInfo> result;
{
@@ -9981,8 +9881,9 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
str, i::MaybeHandle<i::Object>(), 0, 0, origin_options,
- i::MaybeHandle<i::Object>(), isolate->native_context(), NULL,
+ i::MaybeHandle<i::Object>(), isolate->native_context(), nullptr,
&script_data, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
: i::INSPECTOR_CODE,
i::MaybeHandle<i::FixedArray>());
@@ -10064,33 +9965,33 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope handle_scope(isolate);
- i::Builtins::Name name;
+ i::Builtins::Name builtin_id;
switch (builtin) {
case kObjectKeys:
- name = i::Builtins::kObjectKeys;
+ builtin_id = i::Builtins::kObjectKeys;
break;
case kObjectGetPrototypeOf:
- name = i::Builtins::kObjectGetPrototypeOf;
+ builtin_id = i::Builtins::kObjectGetPrototypeOf;
break;
case kObjectGetOwnPropertyDescriptor:
- name = i::Builtins::kObjectGetOwnPropertyDescriptor;
+ builtin_id = i::Builtins::kObjectGetOwnPropertyDescriptor;
break;
case kObjectGetOwnPropertyNames:
- name = i::Builtins::kObjectGetOwnPropertyNames;
+ builtin_id = i::Builtins::kObjectGetOwnPropertyNames;
break;
case kObjectGetOwnPropertySymbols:
- name = i::Builtins::kObjectGetOwnPropertySymbols;
+ builtin_id = i::Builtins::kObjectGetOwnPropertySymbols;
break;
default:
UNREACHABLE();
}
- i::Handle<i::Code> call_code(isolate->builtins()->builtin(name));
- i::Handle<i::JSFunction> fun =
- isolate->factory()->NewFunctionWithoutPrototype(
- isolate->factory()->empty_string(), call_code, i::SLOPPY);
- if (i::Builtins::IsLazy(name)) {
- fun->shared()->set_lazy_deserialization_builtin_id(name);
- }
+
+ i::Handle<i::String> name = isolate->factory()->empty_string();
+ i::Handle<i::Code> code(isolate->builtins()->builtin(builtin_id));
+ i::NewFunctionArgs args = i::NewFunctionArgs::ForBuiltinWithoutPrototype(
+ name, code, builtin_id, i::LanguageMode::kSloppy);
+ i::Handle<i::JSFunction> fun = isolate->factory()->NewFunction(args);
+
fun->shared()->DontAdaptArguments();
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -10190,6 +10091,54 @@ void debug::GlobalLexicalScopeNames(
}
}
+void debug::SetReturnValue(v8::Isolate* v8_isolate,
+ v8::Local<v8::Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ if (!isolate->debug()->break_id()) return;
+ isolate->debug()->set_return_value(*Utils::OpenHandle(*value));
+}
+
+int debug::GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> v8_object,
+ v8::Local<v8::Name> v8_name) {
+ i::Handle<i::JSReceiver> object = Utils::OpenHandle(*v8_object);
+ i::Handle<i::Name> name = Utils::OpenHandle(*v8_name);
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ return static_cast<int>(debug::NativeAccessorType::None);
+ }
+ i::LookupIterator it =
+ i::LookupIterator(object, name, i::LookupIterator::OWN);
+ if (!it.IsFound()) return static_cast<int>(debug::NativeAccessorType::None);
+ if (it.state() != i::LookupIterator::ACCESSOR) {
+ return static_cast<int>(debug::NativeAccessorType::None);
+ }
+ i::Handle<i::Object> structure = it.GetAccessors();
+ if (!structure->IsAccessorInfo()) {
+ return static_cast<int>(debug::NativeAccessorType::None);
+ }
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ int result = 0;
+#define IS_BUILTIN_ACESSOR(name, _) \
+ if (*structure == *isolate->factory()->name##_accessor()) \
+ result |= static_cast<int>(debug::NativeAccessorType::IsBuiltin);
+ ACCESSOR_INFO_LIST(IS_BUILTIN_ACESSOR)
+#undef IS_BUILTIN_ACESSOR
+ i::Handle<i::AccessorInfo> accessor_info =
+ i::Handle<i::AccessorInfo>::cast(structure);
+ if (accessor_info->getter())
+ result |= static_cast<int>(debug::NativeAccessorType::HasGetter);
+ if (accessor_info->setter())
+ result |= static_cast<int>(debug::NativeAccessorType::HasSetter);
+ return result;
+}
+
+int64_t debug::GetNextRandomInt64(v8::Isolate* v8_isolate) {
+ return reinterpret_cast<i::Isolate*>(v8_isolate)
+ ->random_number_generator()
+ ->NextInt64();
+}
+
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -10410,7 +10359,7 @@ const std::vector<CpuProfileDeoptInfo>& CpuProfileNode::GetDeoptInfos() const {
void CpuProfile::Delete() {
i::CpuProfile* profile = reinterpret_cast<i::CpuProfile*>(this);
i::CpuProfiler* profiler = profile->cpu_profiler();
- DCHECK(profiler != nullptr);
+ DCHECK_NOT_NULL(profiler);
profiler->DeleteProfile(profile);
}
@@ -10465,6 +10414,11 @@ CpuProfiler* CpuProfiler::New(Isolate* isolate) {
void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
+// static
+void CpuProfiler::CollectSample(Isolate* isolate) {
+ i::CpuProfiler::CollectSample(reinterpret_cast<i::Isolate*>(isolate));
+}
+
void CpuProfiler::SetSamplingInterval(int us) {
DCHECK_GE(us, 0);
return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
@@ -10494,7 +10448,7 @@ void CpuProfiler::SetIdle(bool is_idle) {
if (!isolate->is_profiling()) return;
v8::StateTag state = isolate->current_vm_state();
DCHECK(state == v8::EXTERNAL || state == v8::IDLE);
- if (isolate->js_entry_sp() != NULL) return;
+ if (isolate->js_entry_sp() != nullptr) return;
if (is_idle) {
isolate->set_current_vm_state(v8::IDLE);
} else if (state == v8::IDLE) {
@@ -10841,7 +10795,7 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
// Iterate over all handles in the blocks except for the last.
for (int i = static_cast<int>(blocks()->size()) - 2; i >= 0; --i) {
Object** block = blocks()->at(i);
- if (last_handle_before_deferred_block_ != NULL &&
+ if (last_handle_before_deferred_block_ != nullptr &&
(last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
v->VisitRootPointers(Root::kHandleScope, block,
@@ -10855,7 +10809,7 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
}
}
- DCHECK(last_handle_before_deferred_block_ == NULL ||
+ DCHECK(last_handle_before_deferred_block_ == nullptr ||
found_block_before_deferred);
// Iterate over live handles in the last block (if any).
@@ -10911,17 +10865,17 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
// HandleScope stack since BeginDeferredScope was called, but in
// reverse order.
- DCHECK(prev_limit == NULL || !blocks_.empty());
+ DCHECK(prev_limit == nullptr || !blocks_.empty());
- DCHECK(!blocks_.empty() && prev_limit != NULL);
- DCHECK(last_handle_before_deferred_block_ != NULL);
- last_handle_before_deferred_block_ = NULL;
+ DCHECK(!blocks_.empty() && prev_limit != nullptr);
+ DCHECK_NOT_NULL(last_handle_before_deferred_block_);
+ last_handle_before_deferred_block_ = nullptr;
return deferred;
}
void HandleScopeImplementer::BeginDeferredScope() {
- DCHECK(last_handle_before_deferred_block_ == NULL);
+ DCHECK_NULL(last_handle_before_deferred_block_);
last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 92025ee0ca..0a70ac83e4 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -108,7 +108,6 @@ class RegisteredExtension {
V(StackTrace, FixedArray) \
V(StackFrame, StackFrameInfo) \
V(Proxy, JSProxy) \
- V(NativeWeakMap, JSWeakMap) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(Promise, JSPromise) \
@@ -208,8 +207,6 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<NativeWeakMap> NativeWeakMapToLocal(
- v8::internal::Handle<v8::internal::JSWeakMap> obj);
static inline Local<Function> CallableToLocal(
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Primitive> ToLocalPrimitive(
@@ -332,7 +329,6 @@ MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
-MAKE_TO_LOCAL(NativeWeakMapToLocal, JSWeakMap, NativeWeakMap)
MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
MAKE_TO_LOCAL(ToLocal, FixedArray, PrimitiveArray)
@@ -347,8 +343,8 @@ MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
- DCHECK(allow_empty_handle || that != NULL); \
- DCHECK(that == NULL || \
+ DCHECK(allow_empty_handle || that != nullptr); \
+ DCHECK(that == nullptr || \
(*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
@@ -370,8 +366,8 @@ class V8_EXPORT_PRIVATE DeferredHandles {
private:
DeferredHandles(Object** first_block_limit, Isolate* isolate)
- : next_(NULL),
- previous_(NULL),
+ : next_(nullptr),
+ previous_(nullptr),
first_block_limit_(first_block_limit),
isolate_(isolate) {
isolate->LinkDeferredHandles(this);
@@ -404,7 +400,7 @@ class HandleScopeImplementer {
explicit HandleScopeImplementer(Isolate* isolate)
: isolate_(isolate),
microtask_context_(nullptr),
- spare_(NULL),
+ spare_(nullptr),
call_depth_(0),
microtasks_depth_(0),
microtasks_suppressions_(0),
@@ -413,7 +409,7 @@ class HandleScopeImplementer {
debug_microtasks_depth_(0),
#endif
microtasks_policy_(v8::MicrotasksPolicy::kAuto),
- last_handle_before_deferred_block_(NULL) {
+ last_handle_before_deferred_block_(nullptr) {
}
~HandleScopeImplementer() {
@@ -487,8 +483,8 @@ class HandleScopeImplementer {
Isolate* isolate() const { return isolate_; }
void ReturnBlock(Object** block) {
- DCHECK(block != NULL);
- if (spare_ != NULL) DeleteArray(spare_);
+ DCHECK_NOT_NULL(block);
+ if (spare_ != nullptr) DeleteArray(spare_);
spare_ = block;
}
@@ -499,8 +495,8 @@ class HandleScopeImplementer {
saved_contexts_.detach();
microtask_context_ = nullptr;
entered_context_count_during_microtasks_ = 0;
- spare_ = NULL;
- last_handle_before_deferred_block_ = NULL;
+ spare_ = nullptr;
+ last_handle_before_deferred_block_ = nullptr;
call_depth_ = 0;
}
@@ -513,11 +509,11 @@ class HandleScopeImplementer {
blocks_.free();
entered_contexts_.free();
saved_contexts_.free();
- if (spare_ != NULL) {
+ if (spare_ != nullptr) {
DeleteArray(spare_);
- spare_ = NULL;
+ spare_ = nullptr;
}
- DCHECK(call_depth_ == 0);
+ DCHECK_EQ(call_depth_, 0);
}
void BeginDeferredScope();
@@ -620,10 +616,10 @@ Handle<Context> HandleScopeImplementer::MicrotaskContext() {
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare_ != NULL) ?
- spare_ :
- NewArray<internal::Object*>(kHandleBlockSize);
- spare_ = NULL;
+ internal::Object** block =
+ (spare_ != nullptr) ? spare_
+ : NewArray<internal::Object*>(kHandleBlockSize);
+ spare_ = nullptr;
return block;
}
@@ -645,13 +641,13 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(block_start, block_limit);
#endif
- if (spare_ != NULL) {
+ if (spare_ != nullptr) {
DeleteArray(spare_);
}
spare_ = block_start;
}
- DCHECK((blocks_.empty() && prev_limit == NULL) ||
- (!blocks_.empty() && prev_limit != NULL));
+ DCHECK((blocks_.empty() && prev_limit == nullptr) ||
+ (!blocks_.empty() && prev_limit != nullptr));
}
// Interceptor functions called from generated inline caches to notify
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4261943325..ce6b759d30 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -67,14 +67,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
if (Assembler::IsMovW(Memory::int32_at(pc_))) {
return reinterpret_cast<Address>(pc_);
} else {
@@ -113,7 +112,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -157,9 +156,9 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@@ -384,14 +383,14 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index c9aa9ef015..8c22974ca3 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -250,12 +250,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
void CpuFeatures::PrintTarget() {
- const char* arm_arch = NULL;
+ const char* arm_arch = nullptr;
const char* arm_target_type = "";
const char* arm_no_probe = "";
const char* arm_fpu = "";
const char* arm_thumb = "";
- const char* arm_float_abi = NULL;
+ const char* arm_float_abi = nullptr;
#if !defined __arm__
arm_target_type = " simulator";
@@ -357,6 +357,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors
@@ -382,7 +393,7 @@ Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
shift_op = LSL;
} else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
- DCHECK(shift_imm == 0);
+ DCHECK_EQ(shift_imm, 0);
shift_op_ = ROR;
shift_imm_ = 0;
}
@@ -587,7 +598,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
- DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
+ DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -694,7 +705,7 @@ bool Assembler::IsAddRegisterImmediate(Instr instr) {
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
DCHECK(IsAddRegisterImmediate(instr));
- DCHECK(offset >= 0);
+ DCHECK_GE(offset, 0);
DCHECK(is_uint12(offset));
// Set the offset.
return (instr & ~kOff12Mask) | offset;
@@ -930,14 +941,14 @@ void Assembler::target_at_put(int pos, int target_pos) {
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
-
-void Assembler::print(Label* L) {
+void Assembler::print(const Label* L) {
if (L->is_unused()) {
PrintF("unused label\n");
} else if (L->is_bound()) {
PrintF("bound label to %d\n", L->pos());
} else if (L->is_linked()) {
- Label l = *L;
+ Label l;
+ l.link_to(L->pos());
PrintF("unbound label");
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
@@ -945,7 +956,7 @@ void Assembler::print(Label* L) {
if ((instr & ~kImm24Mask) == 0) {
PrintF("value\n");
} else {
- DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx
+ DCHECK_EQ(instr & 7 * B25, 5 * B25); // b, bl, or blx
Condition cond = Instruction::ConditionField(instr);
const char* b;
const char* c;
@@ -1019,7 +1030,7 @@ void Assembler::next(Label* L) {
// chain.
L->Unuse();
} else {
- DCHECK(link >= 0);
+ DCHECK_GE(link, 0);
L->link_to(link);
}
}
@@ -1043,9 +1054,9 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
}
// If the opcode is one with a complementary version and the complementary
// immediate fits, change the opcode.
- if (instr != NULL) {
+ if (instr != nullptr) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
- if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
*instr ^= kMovMvnFlip;
return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
@@ -1059,7 +1070,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, nullptr)) {
*instr ^= kCmpCmnFlip;
return true;
}
@@ -1067,13 +1078,14 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
- if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
+ nullptr)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == AND ||
alu_insn == BIC) {
- if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
*instr ^= kAndBicFlip;
return true;
}
@@ -1089,7 +1101,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// encoded.
bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- if (assembler != NULL && assembler->predictable_code_size()) return true;
+ if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled();
} else if (RelocInfo::IsNone(rmode)) {
return false;
@@ -1098,7 +1110,7 @@ bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
}
bool UseMovImmediateLoad(const Operand& x, const Assembler* assembler) {
- DCHECK(assembler != nullptr);
+ DCHECK_NOT_NULL(assembler);
if (x.MustOutputRelocInfo(assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
@@ -1116,7 +1128,7 @@ bool Operand::MustOutputRelocInfo(const Assembler* assembler) const {
int Operand::InstructionsRequired(const Assembler* assembler,
Instr instr) const {
- DCHECK(assembler != nullptr);
+ DCHECK_NOT_NULL(assembler);
if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2;
if (MustOutputRelocInfo(assembler) ||
@@ -1196,7 +1208,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// For move instructions, rn is not defined.
DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN));
DCHECK(rd.is_valid() || rn.is_valid());
- DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
+ DCHECK_EQ(instr & ~(kCondMask | kOpCodeMask | S), 0);
if (!AddrMode1TryEncodeOperand(&instr, x)) {
DCHECK(x.IsImmediate());
// Upon failure to encode, the opcode should not have changed.
@@ -1207,6 +1219,26 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// pool only for a MOV instruction which does not set the flags.
DCHECK(!rn.is_valid());
Move32BitImmediate(rd, x, cond);
+ } else if ((opcode == ADD) && !set_flags && (rd == rn) &&
+ (scratch_register_list_ == 0)) {
+ // Split the operation into a sequence of additions if we cannot use a
+ // scratch register. In this case, we cannot re-use rn and the assembler
+ // does not have any scratch registers to spare.
+ uint32_t imm = x.immediate();
+ do {
+ // The immediate encoding format is composed of 8 bits of data and 4
+ // bits encoding a rotation. Each of the 16 possible rotations accounts
+ // for a rotation by an even number.
+ // 4 bits -> 16 rotations possible
+ // -> 16 rotations of 2 bits each fits in a 32-bit value.
+ // This means that finding the even number of trailing zeroes of the
+ // immediate allows us to more efficiently split it:
+ int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
+ uint32_t mask = (0xff << trailing_zeroes);
+ add(rd, rd, Operand(imm & mask), LeaveCC, cond);
+ imm = imm & ~mask;
+ } while (!ImmediateFitsAddrMode1Instruction(imm));
+ add(rd, rd, Operand(imm), LeaveCC, cond);
} else {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to a scratch register and change the original instruction to
@@ -1283,7 +1315,7 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
- DCHECK(offset_12 >= 0); // no masking needed
+ DCHECK_GE(offset_12, 0); // no masking needed
instr |= offset_12;
} else {
// Register offset (shift_imm_ and shift_op_ are 0) or scaled
@@ -1320,7 +1352,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
- DCHECK(offset_8 >= 0); // no masking needed
+ DCHECK_GE(offset_8, 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset separately
@@ -1344,7 +1376,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
- DCHECK(rl != 0);
+ DCHECK_NE(rl, 0);
DCHECK(rn != pc);
emit(instr | rn.code()*B16 | rl);
}
@@ -1356,7 +1388,7 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
- DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset
+ DCHECK_EQ(offset_8 & 3, 0); // offset must be an aligned word offset
offset_8 >>= 2;
if (offset_8 < 0) {
offset_8 = -offset_8;
@@ -1369,7 +1401,7 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
if ((am & P) == 0)
am |= W;
- DCHECK(offset_8 >= 0); // no masking needed
+ DCHECK_GE(offset_8, 0); // no masking needed
emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
@@ -1399,7 +1431,7 @@ int Assembler::branch_offset(Label* L) {
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
- DCHECK((branch_offset & 3) == 0);
+ DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
@@ -1412,14 +1444,14 @@ void Assembler::b(int branch_offset, Condition cond) {
void Assembler::bl(int branch_offset, Condition cond) {
- DCHECK((branch_offset & 3) == 0);
+ DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
void Assembler::blx(int branch_offset) {
- DCHECK((branch_offset & 1) == 0);
+ DCHECK_EQ(branch_offset & 1, 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
@@ -2041,7 +2073,7 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
- DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
+ DCHECK_NE(fields & 0x000f0000, 0); // At least one field must be set.
DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr;
if (src.IsImmediate()) {
@@ -2049,7 +2081,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
uint32_t rotate_imm;
uint32_t immed_8;
if (src.MustOutputRelocInfo(this) ||
- !FitsShifter(src.immediate(), &rotate_imm, &immed_8, NULL)) {
+ !FitsShifter(src.immediate(), &rotate_imm, &immed_8, nullptr)) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Immediate operand cannot be encoded, load it first to a scratch
@@ -2209,7 +2241,7 @@ void Assembler::pld(const MemOperand& address) {
offset = -offset;
U = 0;
}
- DCHECK(offset < 4096);
+ DCHECK_LT(offset, 4096);
emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
0xf*B12 | offset);
}
@@ -2250,7 +2282,7 @@ void Assembler::stm(BlockAddrMode am,
// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
- DCHECK(code >= kDefaultStopCode);
+ DCHECK_GE(code, kDefaultStopCode);
{
BlockConstPoolScope block_const_pool(this);
if (code >= 0) {
@@ -2419,14 +2451,14 @@ void Assembler::vldr(const DwVfpRegister dst,
DCHECK(VfpRegisterIsAvailable(dst));
int u = 1;
if (offset < 0) {
- CHECK(offset != kMinInt);
+ CHECK_NE(offset, kMinInt);
offset = -offset;
u = 0;
}
int vd, d;
dst.split_code(&vd, &d);
- DCHECK(offset >= 0);
+ DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
@@ -2479,7 +2511,7 @@ void Assembler::vldr(const SwVfpRegister dst,
}
int sd, d;
dst.split_code(&sd, &d);
- DCHECK(offset >= 0);
+ DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
@@ -2528,11 +2560,11 @@ void Assembler::vstr(const DwVfpRegister src,
DCHECK(VfpRegisterIsAvailable(src));
int u = 1;
if (offset < 0) {
- CHECK(offset != kMinInt);
+ CHECK_NE(offset, kMinInt);
offset = -offset;
u = 0;
}
- DCHECK(offset >= 0);
+ DCHECK_GE(offset, 0);
int vd, d;
src.split_code(&vd, &d);
@@ -2583,13 +2615,13 @@ void Assembler::vstr(const SwVfpRegister src,
// Vdst(15-12) | 1010(11-8) | (offset/4)
int u = 1;
if (offset < 0) {
- CHECK(offset != kMinInt);
+ CHECK_NE(offset, kMinInt);
offset = -offset;
u = 0;
}
int sd, d;
src.split_code(&sd, &d);
- DCHECK(offset >= 0);
+ DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
@@ -2638,7 +2670,7 @@ void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
- DCHECK(count <= 16);
+ DCHECK_LE(count, 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -2656,7 +2688,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
- DCHECK(count <= 16);
+ DCHECK_LE(count, 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -3510,7 +3542,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
DCHECK(VfpRegisterIsAvailable(src1));
- DCHECK(src2 == 0.0);
+ DCHECK_EQ(src2, 0.0);
int vd, d;
src1.split_code(&vd, &d);
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
@@ -3523,7 +3555,7 @@ void Assembler::vcmp(const SwVfpRegister src1, const float src2,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
- DCHECK(src2 == 0.0);
+ DCHECK_EQ(src2, 0.0);
int vd, d;
src1.split_code(&vd, &d);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
@@ -4940,7 +4972,7 @@ Instr Assembler::GetMovWPattern() { return kMovwPattern; }
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
- DCHECK(immediate < 0x10000);
+ DCHECK_LT(immediate, 0x10000);
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
@@ -4961,7 +4993,7 @@ int Assembler::DecodeShiftImm(Instr instr) {
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0;
uint32_t immed_8 = 0;
- bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, NULL);
+ bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, nullptr);
DCHECK(immed_fits);
USE(immed_fits);
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
@@ -4989,7 +5021,7 @@ bool Assembler::IsOrrImmed(Instr instr) {
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
- return FitsShifter(imm32, &dummy1, &dummy2, NULL);
+ return FitsShifter(imm32, &dummy1, &dummy2, nullptr);
}
@@ -5098,8 +5130,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!emit_debug_code())) {
return;
}
- DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
+ RelocInfo rinfo(pc_, rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
}
@@ -5109,7 +5141,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
(rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
- DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
+ DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
@@ -5163,7 +5195,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
void Assembler::ConstantPoolAddEntry(int position, Double value) {
- DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
+ DCHECK_LT(pending_64_bit_constants_.size(), kMaxNumPending64Constants);
if (pending_64_bit_constants_.empty()) {
first_const_pool_64_use_ = position;
}
@@ -5439,8 +5471,8 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
}
Register UseScratchRegisterScope::Acquire() {
- DCHECK(available_ != nullptr);
- DCHECK(*available_ != 0);
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
*available_ &= ~(1UL << index);
return Register::from_code(index);
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 536731978b..9d8cb4c05c 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -596,14 +596,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -1713,7 +1714,7 @@ class Assembler : public AssemblerBase {
void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);
// Labels
- void print(Label* L);
+ void print(const Label* L);
void bind_to(Label* L, int pos);
void next(Label* L);
@@ -1724,7 +1725,6 @@ class Assembler : public AssemblerBase {
void ConstantPoolAddEntry(int position, Double value);
friend class RelocInfo;
- friend class CodePatcher;
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index c2aa0d4bed..2add525abd 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -9,13 +9,11 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/counters.h"
#include "src/double.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -42,50 +40,30 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
- Label out_of_range, only_low, negate, done;
- Register input_reg = source();
+ Label negate, done;
Register result_reg = destination();
- DCHECK(is_truncating());
- int double_offset = offset();
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += 3 * kPointerSize;
-
- Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
- Register scratch_low =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
- Register scratch_high =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ UseScratchRegisterScope temps(masm);
+ Register double_low = GetRegisterThatIsNotOneOf(result_reg);
+ Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
LowDwVfpRegister double_scratch = kScratchDoubleReg;
- __ Push(scratch_high, scratch_low, scratch);
-
- if (!skip_fastpath()) {
- // Load double input.
- __ vldr(double_scratch, MemOperand(input_reg, double_offset));
- __ vmov(scratch_low, scratch_high, double_scratch);
+ // Save the old values from these temporary registers on the stack.
+ __ Push(double_high, double_low);
- // Do fast-path convert from double to int.
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(result_reg, double_scratch.low());
+ // Account for saved regs.
+ const int kArgumentOffset = 2 * kPointerSize;
- // If result is not saturated (0x7fffffff or 0x80000000), we are done.
- __ sub(scratch, result_reg, Operand(1));
- __ cmp(scratch, Operand(0x7ffffffe));
- __ b(lt, &done);
- } else {
- // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
- // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
- if (double_offset == 0) {
- __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
- } else {
- __ ldr(scratch_low, MemOperand(input_reg, double_offset));
- __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
- }
- }
+ // Load double input.
+ __ vldr(double_scratch, MemOperand(sp, kArgumentOffset));
+ __ vmov(double_low, double_high, double_scratch);
+ // Try to convert with a FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
- __ Ubfx(scratch, scratch_high,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ Register scratch = temps.Acquire();
+ __ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load scratch with exponent - 1. This is faster than loading
// with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
@@ -93,85 +71,64 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// If exponent is greater than or equal to 84, the 32 less significant
// bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
// the result is 0.
- // Compare exponent with 84 (compare exponent - 1 with 83).
+ // Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is
+ // greater than this, the conversion is out of range, so return zero.
__ cmp(scratch, Operand(83));
- __ b(ge, &out_of_range);
+ __ mov(result_reg, Operand::Zero(), LeaveCC, ge);
+ __ b(ge, &done);
+
+ // If we reach this code, 30 <= exponent <= 83.
+ // `TryInlineTruncateDoubleToI` above will have truncated any double with an
+ // exponent lower than 30.
+ if (masm->emit_debug_code()) {
+ // Scratch is exponent - 1.
+ __ cmp(scratch, Operand(30 - 1));
+ __ Check(ge, kUnexpectedValue);
+ }
- // If we reach this code, 31 <= exponent <= 83.
- // So, we don't have to handle cases where 0 <= exponent <= 20 for
- // which we would need to shift right the high part of the mantissa.
+ // We don't have to handle cases where 0 <= exponent <= 20 for which we would
+ // need to shift right the high part of the mantissa.
// Scratch contains exponent - 1.
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
__ rsb(scratch, scratch, Operand(51), SetCC);
- __ b(ls, &only_low);
- // 21 <= exponent <= 51, shift scratch_low and scratch_high
+
+ // 52 <= exponent <= 83, shift only double_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
+ __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
+ __ b(ls, &negate);
+
+ // 21 <= exponent <= 51, shift double_low and double_high
// to generate the result.
- __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+ __ mov(double_low, Operand(double_low, LSR, scratch));
// Scratch contains: 52 - exponent.
// We needs: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
__ rsb(scratch, scratch, Operand(32));
- __ Ubfx(result_reg, scratch_high,
- 0, HeapNumber::kMantissaBitsInTopWord);
- // Set the implicit 1 before the mantissa part in scratch_high.
+ __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
+ // Set the implicit 1 before the mantissa part in double_high.
__ orr(result_reg, result_reg,
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
- __ b(&negate);
-
- __ bind(&out_of_range);
- __ mov(result_reg, Operand::Zero());
- __ b(&done);
-
- __ bind(&only_low);
- // 52 <= exponent <= 83, shift only scratch_low.
- // On entry, scratch contains: 52 - exponent.
- __ rsb(scratch, scratch, Operand::Zero());
- __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+ __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));
__ bind(&negate);
- // If input was positive, scratch_high ASR 31 equals 0 and
- // scratch_high LSR 31 equals zero.
+ // If input was positive, double_high ASR 31 equals 0 and
+ // double_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+ // Input_high ASR 31 equals 0xffffffff and double_high LSR 31 equals 1.
// New result = (result eor 0xffffffff) + 1 = 0 - result.
- __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
- __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
+ __ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
+ __ add(result_reg, result_reg, Operand(double_high, LSR, 31));
__ bind(&done);
- __ Pop(scratch_high, scratch_low, scratch);
+ // Restore registers corrupted in this routine and return.
+ __ Pop(double_high, double_low);
__ Ret();
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ stm(db_w, sp, kCallerSaved | lr.bit());
-
- const Register scratch = r1;
-
- if (save_doubles()) {
- __ SaveFPRegs(sp, scratch);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count);
- __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- __ RestoreFPRegs(sp, scratch);
- }
- __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
-}
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == r2);
@@ -263,14 +220,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
+Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -280,7 +233,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
- StoreBufferOverflowStub(isolate, mode).GetCode();
}
@@ -392,7 +344,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// Callee-saved register r4 still holds argc.
: r4;
- __ LeaveExitFrame(save_doubles(), argc, true);
+ __ LeaveExitFrame(save_doubles(), argc);
__ mov(pc, lr);
// Handling of exception.
@@ -400,10 +352,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -437,12 +387,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ mov(r1, Operand(pending_handler_code_address));
+ __ mov(r1, Operand(pending_handler_entrypoint_address));
__ ldr(r1, MemOperand(r1));
- __ mov(r2, Operand(pending_handler_offset_address));
- __ ldr(r2, MemOperand(r2));
- __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
- __ add(pc, r1, r2);
+ __ Jump(r1);
}
@@ -605,100 +552,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
-void StringHelper::GenerateFlatOneByteStringEquals(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ cmp(length, scratch2);
- __ b(eq, &check_zero_length);
- __ bind(&strings_not_equal);
- __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand::Zero());
- __ b(ne, &compare_chars);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
- &strings_not_equal);
-
- // Characters are equal.
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand::Zero());
- __ b(eq, &compare_lengths);
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- scratch4, &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mov(r0, Operand(length_delta), SetCC);
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ add(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch1));
- __ add(right, right, Operand(scratch1));
- __ rsb(length, length, Operand::Zero());
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ ldrb(scratch1, MemOperand(left, index));
- __ ldrb(scratch2, MemOperand(right, index));
- __ cmp(scratch1, scratch2);
- __ b(ne, chars_not_equal);
- __ add(index, index, Operand(1), SetCC);
- __ b(ne, &loop);
-}
-
-
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
@@ -718,397 +571,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
- __ sub(index, index, Operand(1));
- __ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- Register tmp = properties;
- __ add(tmp, properties, Operand(index, LSL, 1));
- __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- DCHECK(tmp != entity_name);
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- __ b(eq, done);
-
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<Name>(name)));
- __ b(eq, miss);
-
- Label good;
- __ cmp(entity_name, tmp);
- __ b(eq, &good);
-
- // Check if the entry name is not a unique name.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ bind(&good);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
- r2.bit() | r1.bit() | r0.bit());
-
- __ stm(db_w, sp, spill_mask);
- __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ mov(r1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(eq, done);
- __ b(ne, miss);
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: NameDictionary to probe
- // r1: key
- // dictionary: NameDictionary to probe.
- // index: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = r0;
- Register dictionary = r0;
- Register key = r1;
- Register index = r2;
- Register mask = r3;
- Register hash = r4;
- Register undefined = r5;
- Register entry_key = r6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(mask, mask, Operand(1));
-
- __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ add(index, hash, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- } else {
- __ mov(index, Operand(hash));
- }
- __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- STATIC_ASSERT(kSmiTagSize == 1);
- __ add(index, dictionary, Operand(index, LSL, 2));
- __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ cmp(entry_key, Operand(undefined));
- __ b(eq, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(entry_key, Operand(key));
- __ b(eq, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ ldrb(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ mov(result, Operand::Zero());
- __ Ret();
- }
-
- __ bind(&in_dictionary);
- __ mov(result, Operand(1));
- __ Ret();
-
- __ bind(&not_in_dictionary);
- __ mov(result, Operand::Zero());
- __ Ret();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- // Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction =
- Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
-
- if (Assembler::IsBranch(first_instruction)) {
- return INCREMENTAL;
- }
-
- DCHECK(Assembler::IsTstImmediate(first_instruction));
-
- if (Assembler::IsBranch(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(Assembler::IsTstImmediate(second_instruction));
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- 2 * Assembler::kInstrSize);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- {
- // Block literal pool emission, as the position of these two instructions
- // is assumed by the patching code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
- }
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count);
- Register address = r0 == regs_.address() ? regs_.scratch0() : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != r0);
- __ Move(address, regs_.address());
- __ Move(r0, regs_.object());
- __ Move(r1, address);
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(tasm);
predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
@@ -1119,7 +584,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
@@ -1147,7 +612,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// We also save lr, so the count here is one higher than the mask indicates.
const int32_t kNumSavedRegs = 7;
- DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
+ DCHECK_EQ(kCallerSaved & kSavedRegs, kCallerSaved);
// Save all caller-save registers as this may be called from anywhere.
__ stm(db_w, sp, kSavedRegs | lr.bit());
@@ -1357,7 +822,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE);
@@ -1437,7 +902,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
@@ -1485,8 +950,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
int stack_space,
MemOperand* stack_space_operand,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
+ MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@@ -1571,17 +1035,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ ldr(cp, *context_restore_operand);
- }
// LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != NULL) {
+ if (stack_space_operand != nullptr) {
__ ldr(r4, *stack_space_operand);
} else {
__ mov(r4, Operand(stack_space));
}
- __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
+ __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
@@ -1610,7 +1070,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : callee
// -- r4 : call_data
// -- r2 : holder
// -- r1 : api_function_address
@@ -1620,21 +1079,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 4] : first argument
// -- sp[argc * 4] : receiver
- // -- sp[(argc + 1) * 4] : accessor_holder
// -----------------------------------
- Register callee = r0;
Register call_data = r4;
Register holder = r2;
Register api_function_address = r1;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1644,12 +1098,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // context save
- __ push(context);
-
- // callee
- __ push(callee);
-
// call data
__ push(call_data);
@@ -1667,37 +1115,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ push(holder);
- // enter a new context
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 4] : new_target
- // -- sp[FCA::kArgsLength * 4] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 4] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
- // -----------------------------------------------------------
-
- // load context from accessor_holder
- Register accessor_holder = context;
- __ ldr(accessor_holder,
- MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ ldr(scratch0, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
- __ tst(scratch1, Operand(1 << Map::kIsConstructor));
- __ b(ne, &skip_looking_for_constructor);
- __ GetMapConstructor(context, scratch0, scratch0, scratch1);
- __ bind(&skip_looking_for_constructor);
- __ ldr(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // load context from callee
- __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
__ mov(scratch0, sp);
@@ -1726,22 +1143,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
MemOperand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand,
- &context_restore_operand);
+ stack_space_operand, return_value_operand);
}
@@ -1803,7 +1212,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL, return_value_operand, NULL);
+ kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 3407ff9573..a9b82210e0 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -8,184 +8,6 @@
namespace v8 {
namespace internal {
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in r0.
- static void GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4);
-
- // Compares two flat one-byte strings for equality and returns result in r0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Label* chars_not_equal);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
- DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
- DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- masm->SaveFPRegs(sp, scratch0_);
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- masm->RestoreFPRegs(sp, scratch0_);
- }
- masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
@@ -197,52 +19,12 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 6c0c3aca1e..f7e29ace49 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/codegen-arm.h"
-
#if V8_TARGET_ARCH_ARM
#include <memory>
@@ -16,21 +14,21 @@
namespace v8 {
namespace internal {
-
#define __ masm.
#if defined(V8_HOST_ARCH_ARM)
+
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
Register dest = r0;
@@ -171,8 +169,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -184,12 +183,12 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
Register dest = r0;
@@ -261,9 +260,9 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@@ -273,12 +272,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@@ -290,114 +289,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ tst(result, Operand(kIsIndirectStringMask));
- __ b(eq, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ and_(result, result, Operand(kStringRepresentationMask));
- __ cmp(result, Operand(kConsStringTag));
- __ b(eq, &cons_string);
- __ cmp(result, Operand(kThinStringTag));
- __ b(eq, &thin_string);
-
- // Handle slices.
- __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ add(index, index, Operand::SmiUntag(result));
- __ jmp(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kempty_stringRootIndex);
- __ b(ne, call_runtime);
- // Get the first of the two strings and load its instance type.
- __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ jmp(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(string,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ tst(result, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(result, Operand(kShortExternalStringMask));
- __ b(ne, call_runtime);
- __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &one_byte);
- // Two-byte string.
- __ ldrh(result, MemOperand(string, index, LSL, 1));
- __ jmp(&done);
- __ bind(&one_byte);
- // One-byte string.
- __ ldrb(result, MemOperand(string, index));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
deleted file mode 100644
index 6d328bd117..0000000000
--- a/deps/v8/src/arm/codegen-arm.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM_CODEGEN_ARM_H_
-#define V8_ARM_CODEGEN_ARM_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 915d9030e8..c788d33ef2 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-double Instruction::DoubleImmedVmov() const {
+Float64 Instruction::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction.
//
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@@ -25,9 +25,7 @@ double Instruction::DoubleImmedVmov() const {
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
uint64_t imm = high16 << 48;
- double d;
- memcpy(&d, &imm, 8);
- return d;
+ return Float64::FromBits(imm);
}
@@ -41,15 +39,8 @@ const char* Registers::names_[kNumRegisters] = {
// List of alias names which can be used when referring to ARM registers.
const Registers::RegisterAlias Registers::aliases_[] = {
- {10, "sl"},
- {11, "r11"},
- {12, "r12"},
- {13, "r13"},
- {14, "r14"},
- {15, "r15"},
- {kNoRegister, NULL}
-};
-
+ {10, "sl"}, {11, "r11"}, {12, "r12"}, {13, "r13"},
+ {14, "r14"}, {15, "r15"}, {kNoRegister, nullptr}};
// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
// Note that "sN:sM" is the same as "dN/2" up to d15.
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 21794a5a5c..20cf8e4d5e 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/boxed-float.h"
#include "src/globals.h"
// ARM EABI is required.
@@ -29,7 +30,7 @@ inline int EncodeConstantPoolLength(int length) {
return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeConstantPoolLength(int instr) {
- DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+ DCHECK_EQ(instr & kConstantPoolMarkerMask, kConstantPoolMarker);
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
@@ -662,7 +663,7 @@ class Instruction {
inline bool HasLink() const { return LinkValue() == 1; }
// Decode the double immediate from a vmov instruction.
- double DoubleImmedVmov() const;
+ Float64 DoubleImmedVmov() const;
// Instructions are read of out a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index d633d910fb..81224c5fcb 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
@@ -31,8 +30,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
- DCHECK(kDoubleRegZero.code() == 13);
- DCHECK(kScratchDoubleReg.code() == 14);
+ DCHECK_EQ(kDoubleRegZero.code(), 13);
+ DCHECK_EQ(kScratchDoubleReg.code(), 14);
{
// We use a run-time check for VFP32DREGS.
@@ -107,7 +106,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 83081f1b66..05adc37f61 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -301,7 +301,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'r');
+ DCHECK_EQ(format[0], 'r');
if (format[1] == 'n') { // 'rn: Rn register
int reg = instr->RnValue();
PrintRegister(reg);
@@ -468,7 +468,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 4;
}
case 'd': { // 'd: vmov double immediate.
- double d = instr->DoubleImmedVmov();
+ double d = instr->DoubleImmedVmov().get_scalar();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
return 1;
}
@@ -479,9 +479,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// BFC/BFI:
// Bits 20-16 represent most-significant bit. Covert to width.
width -= lsbit;
- DCHECK(width > 0);
+ DCHECK_GT(width, 0);
}
- DCHECK((width + lsbit) <= 32);
+ DCHECK_LE(width + lsbit, 32);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d, #%d", lsbit, width);
return 1;
@@ -501,7 +501,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK((width >= 1) && (width <= 32));
DCHECK((lsb >= 0) && (lsb <= 31));
- DCHECK((width + lsb) <= 32);
+ DCHECK_LE(width + lsb, 32);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
@@ -583,7 +583,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
- DCHECK(instr->TypeValue() == 1);
+ DCHECK_EQ(instr->TypeValue(), 1);
PrintShiftImm(instr);
}
return 8;
diff --git a/deps/v8/src/arm/frame-constants-arm.cc b/deps/v8/src/arm/frame-constants-arm.cc
index b83dd38a9a..bb4cb5dd76 100644
--- a/deps/v8/src/arm/frame-constants-arm.cc
+++ b/deps/v8/src/arm/frame-constants-arm.cc
@@ -24,6 +24,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index fb7076d33f..20ef0e37bc 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -58,9 +58,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r5; }
-const Register StringCompareDescriptor::LeftRegister() { return r1; }
-const Register StringCompareDescriptor::RightRegister() { return r0; }
-
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
@@ -217,7 +214,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -227,7 +224,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -237,7 +234,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -287,10 +284,10 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- r0, // callee
- r4, // call_data
- r2, // holder
- r1, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ r4, // call_data
+ r2, // holder
+ r1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
@@ -340,8 +337,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // the value to pass to the generator
- r1, // the JSGeneratorObject to resume
- r2 // the resume mode (tagged)
+ r1 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 2950de0a0c..8575b0336c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -12,7 +12,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/double.h"
@@ -352,8 +352,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
- DCHECK(srcdst0 != kScratchDoubleReg);
- DCHECK(srcdst1 != kScratchDoubleReg);
+ DCHECK_NE(srcdst0, kScratchDoubleReg);
+ DCHECK_NE(srcdst1, kScratchDoubleReg);
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
@@ -401,7 +401,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
- DCHECK(lsb < 32);
+ DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
@@ -417,7 +417,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
- DCHECK(lsb < 32);
+ DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
@@ -438,7 +438,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
- DCHECK(lsb < 32);
+ DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, src, Operand(mask));
@@ -490,14 +490,6 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
}
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cond,
- Label* branch) {
- DCHECK(cond == eq || cond == ne);
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@@ -540,7 +532,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -552,7 +544,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -645,13 +637,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
@@ -674,39 +660,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch,
- SaveFPRegsMode fp_mode) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- {
- UseScratchRegisterScope temps(this);
- Register store_buffer = temps.Acquire();
- mov(store_buffer, Operand(ExternalReference::store_buffer_top(isolate())));
- ldr(scratch, MemOperand(store_buffer));
- // Store pointer to buffer and increment buffer top.
- str(address, MemOperand(scratch, kPointerSize, PostIndex));
- // Write back new top of buffer.
- str(scratch, MemOperand(store_buffer));
- }
- // Call stub on end of buffer.
- // Check for end of buffer.
- tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
- Ret(ne);
- push(lr);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
- CallStub(&store_buffer_overflow);
- pop(lr);
- bind(&done);
- Ret();
-}
-
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
if (marker_reg.code() > fp.code()) {
@@ -736,11 +689,11 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0.
- DCHECK(kSafepointSavedRegisters == (1 << kNumSafepointSavedRegisters) - 1);
+ DCHECK_EQ(kSafepointSavedRegisters, (1 << kNumSafepointSavedRegisters) - 1);
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
sub(sp, sp, Operand(num_unsaved * kPointerSize));
stm(db_w, sp, kSafepointSavedRegisters);
}
@@ -1267,7 +1220,6 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
UseScratchRegisterScope temps(this);
@@ -1288,11 +1240,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
str(r3, MemOperand(scratch));
// Restore current context from top and clear it in debug mode.
- if (restore_context) {
- mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- ldr(cp, MemOperand(scratch));
- }
+ mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ ldr(cp, MemOperand(scratch));
#ifdef DEBUG
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@@ -1630,7 +1580,7 @@ void MacroAssembler::CompareObjectType(Register object,
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
- ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
@@ -1644,31 +1594,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, scratch);
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- mov(value, Operand(cell));
- ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done);
- CompareObjectType(result, temp, temp2, MAP_TYPE);
- b(ne, &done);
- ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- b(&loop);
- bind(&done);
-}
-
void MacroAssembler::CallStub(CodeStub* stub,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
@@ -1715,20 +1640,6 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
-void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
- if (CpuFeatures::IsSupported(VFPv3)) {
- CpuFeatureScope scope(this, VFPv3);
- vmov(value.low(), smi);
- vcvt_f64_s32(value, 1);
- } else {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- SmiUntag(scratch, smi);
- vmov(value.low(), scratch);
- vcvt_f64_s32(value, value.low());
- }
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
@@ -1766,7 +1677,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
- CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
add(sp, sp, Operand(kDoubleSize));
pop(lr);
@@ -1823,7 +1734,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
#if defined(__thumb__)
// Thumb mode builtin.
- DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(builtin.address()) & 1, 1);
#endif
mov(r1, Operand(builtin));
CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
@@ -1833,7 +1744,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
ldr(scratch1, MemOperand(scratch2));
@@ -1845,7 +1756,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
ldr(scratch1, MemOperand(scratch2));
@@ -1872,7 +1783,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -1901,7 +1812,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
// of the Abort macro constant.
static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
- DCHECK(abort_instructions <= kExpectedAbortInstructions);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
@@ -2052,18 +1963,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- b(eq, &succeed);
- cmp(reg, Operand(SYMBOL_TYPE));
- b(ne, not_unique_name);
-
- bind(&succeed);
-}
-
void TurboAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
@@ -2362,100 +2261,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
b(cc, condition_met);
}
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- ldr(scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(scratch, Operand(mask_scratch));
- b(first_bit == 1 ? eq : ne, &other_color);
- // Shift left 1 by adding.
- add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
- b(eq, &word_boundary);
- tst(scratch, Operand(mask_scratch));
- b(second_bit == 1 ? ne : eq, has_color);
- jmp(&other_color);
-
- bind(&word_boundary);
- ldr(scratch,
- MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- tst(scratch, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Ubfx(scratch, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- add(bitmap_reg, bitmap_reg, Operand(scratch, LSL, kPointerSizeLog2));
- mov(scratch, Operand(1));
- mov(mask_reg, Operand(scratch, LSL, mask_reg));
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Register load_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(mask_scratch, load_scratch);
- b(eq, value_is_white);
-}
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- ldr(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- ldr(dst, FieldMemOperand(dst, offset));
-}
-
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
@@ -2508,51 +2313,6 @@ bool AreAliased(Register reg1,
}
#endif
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
- flush_cache_(flush_cache) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- if (flush_cache_ == FLUSH) {
- Assembler::FlushICache(masm_.isolate(), address_, size_);
- }
-
- // Check that we don't have any pending constant pools.
- DCHECK(masm_.pending_32_bit_constants_.empty());
- DCHECK(masm_.pending_64_bit_constants_.empty());
-
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::EmitCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- instr = (instr & ~kCondMask) | cond;
- masm_.emit(instr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index d8dded8cc1..2f97869621 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -583,18 +583,6 @@ class MacroAssembler : public TurboAssembler {
void CallDeoptimizer(Address target);
static int CallDeoptimizerSize();
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
@@ -680,7 +668,6 @@ class MacroAssembler : public TurboAssembler {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length = false);
// Load the global proxy from the current context.
@@ -730,11 +717,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -755,12 +737,6 @@ class MacroAssembler : public TurboAssembler {
Register type_reg,
InstanceType type);
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
void CompareRoot(Register obj, Heap::RootListIndex index);
@@ -784,10 +760,6 @@ class MacroAssembler : public TurboAssembler {
b(ne, if_not_equal);
}
- // Load the value of a smi object into a double register.
- // The register value must be between d0 and d15.
- void SmiToDouble(LowDwVfpRegister value, Register smi);
-
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
@@ -876,15 +848,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // ---------------------------------------------------------------------------
- // String utilities
-
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template<typename Field>
void DecodeField(Register dst, Register src) {
Ubfx(dst, src, Field::kShift, Field::kSize);
@@ -907,13 +870,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond, // eq for new space, ne otherwise.
Label* branch);
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -922,43 +878,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- enum FlushICache {
- FLUSH,
- DONT_FLUSH
- };
-
- CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache = FLUSH);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Emit the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void EmitCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
- FlushICache flush_cache_; // Whether to flush the I cache after patching.
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index f83d6f2a2a..8ab6cb6b5c 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -14,6 +14,7 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -92,7 +93,7 @@ double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
- return sim_->get_double_from_d_register(regnum);
+ return sim_->get_double_from_d_register(regnum).get_scalar();
}
@@ -116,7 +117,7 @@ bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && !is_double) {
- *value = sim_->get_float_from_s_register(regnum);
+ *value = sim_->get_float_from_s_register(regnum).get_scalar();
return true;
}
return false;
@@ -127,7 +128,7 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && is_double) {
- *value = sim_->get_double_from_d_register(regnum);
+ *value = sim_->get_double_from_d_register(regnum).get_scalar();
return true;
}
return false;
@@ -136,7 +137,7 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
return false;
}
@@ -150,25 +151,25 @@ bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
- sim_->break_pc_ = NULL;
+ sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void ArmDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void ArmDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@@ -210,11 +211,11 @@ void ArmDebugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@@ -305,8 +306,8 @@ void ArmDebugger::Debug() {
PrintF("printobject <value>\n");
}
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -356,9 +357,9 @@ void ArmDebugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* prev = NULL;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* prev = nullptr;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -415,7 +416,7 @@ void ArmDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
+ if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "flags") == 0) {
@@ -552,8 +553,8 @@ void ArmDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
- DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@@ -598,7 +599,7 @@ void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@@ -609,10 +610,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
- DCHECK(size <= CachePage::kPageSize);
+ DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
- DCHECK((start & CachePage::kLineMask) == 0);
- DCHECK((size & CachePage::kLineMask) == 0);
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -653,7 +654,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
+ if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@@ -664,7 +665,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
- break_pc_ = NULL;
+ break_pc_ = nullptr;
break_instr_ = 0;
// Set up architecture state.
@@ -706,7 +707,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
- last_debugger_input_ = NULL;
+ last_debugger_input_ = nullptr;
}
Simulator::~Simulator() {
@@ -728,7 +729,7 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
type_(type),
- next_(NULL) {
+ next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -747,9 +748,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) {
- DCHECK_EQ(current->type(), type);
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
return current;
}
}
@@ -813,10 +814,10 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@@ -946,28 +947,26 @@ unsigned int Simulator::get_s_register(int sreg) const {
template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
- DCHECK(reg_index >= 0);
+ unsigned bytes = register_size * sizeof(vfp_registers_[0]);
+ DCHECK_EQ(sizeof(InputType), bytes);
+ DCHECK_GE(reg_index, 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
- char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
- memcpy(&vfp_registers_[reg_index * register_size], buffer,
- register_size * sizeof(vfp_registers_[0]));
+ memcpy(&vfp_registers_[reg_index * register_size], &value, bytes);
}
template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
- DCHECK(reg_index >= 0);
+ unsigned bytes = register_size * sizeof(vfp_registers_[0]);
+ DCHECK_EQ(sizeof(ReturnType), bytes);
+ DCHECK_GE(reg_index, 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
- ReturnType value = 0;
- char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &vfp_registers_[register_size * reg_index],
- register_size * sizeof(vfp_registers_[0]));
- memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
+ ReturnType value;
+ memcpy(&value, &vfp_registers_[register_size * reg_index], bytes);
return value;
}
@@ -1004,8 +1003,8 @@ uint32_t Simulator::GetFromSpecialRegister(SRegister reg) {
// All are consructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
- *x = get_double_from_d_register(0);
- *y = get_double_from_d_register(1);
+ *x = get_double_from_d_register(0).get_scalar();
+ *y = get_double_from_d_register(1).get_scalar();
*z = get_register(0);
} else {
// Registers 0 and 1 -> x.
@@ -1479,7 +1478,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result >>= 1;
} else {
- DCHECK(shift_amount >= 32);
+ DCHECK_GE(shift_amount, 32);
if (result < 0) {
*carry_out = true;
result = 0xffffffff;
@@ -1502,7 +1501,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result = 0;
} else {
- DCHECK(shift_amount > 32);
+ DCHECK_GT(shift_amount, 32);
*carry_out = false;
result = 0;
}
@@ -1966,24 +1965,37 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
float Simulator::canonicalizeNaN(float value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
- const uint32_t kDefaultNaN = 0x7FC00000u;
+ constexpr uint32_t kDefaultNaN = 0x7FC00000u;
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<float>(kDefaultNaN);
}
return value;
}
+Float32 Simulator::canonicalizeNaN(Float32 value) {
+ // Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
+ // choices" of the ARM Reference Manual.
+ constexpr Float32 kDefaultNaN = Float32::FromBits(0x7FC00000u);
+ return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
+}
double Simulator::canonicalizeNaN(double value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
- const uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
+ constexpr uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<double>(kDefaultNaN);
}
return value;
}
+Float64 Simulator::canonicalizeNaN(Float64 value) {
+ // Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
+ // choices" of the ARM Reference Manual.
+ constexpr Float64 kDefaultNaN =
+ Float64::FromBits(V8_UINT64_C(0x7FF8000000000000));
+ return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
+}
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
@@ -1992,13 +2004,13 @@ bool Simulator::isStopInstruction(Instruction* instr) {
bool Simulator::isWatchedStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@@ -2022,7 +2034,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
@@ -2037,7 +2049,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@@ -2305,7 +2317,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
- DCHECK((rd % 2) == 0);
+ DCHECK_EQ(rd % 2, 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
@@ -2416,7 +2428,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
- DCHECK(instr->TypeValue() == 1);
+ DCHECK_EQ(instr->TypeValue(), 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@@ -3119,7 +3131,7 @@ void Simulator::DecodeType3(Instruction* instr) {
void Simulator::DecodeType4(Instruction* instr) {
- DCHECK(instr->Bit(22) == 0); // only allowed to be set in privileged mode
+ DCHECK_EQ(instr->Bit(22), 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
HandleRList(instr, true);
@@ -3193,7 +3205,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// vdup.size Qd, Rt.
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- DCHECK(instr->Bits(11, 9) == 0x5);
+ DCHECK_EQ(instr->Bits(11, 9), 0x5);
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
@@ -3218,28 +3230,32 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
if (instr->SzValue() == 0x1) {
- double dm_value = get_double_from_d_register(vm);
- double dd_value = std::fabs(dm_value);
- dd_value = canonicalizeNaN(dd_value);
- set_d_register_from_double(vd, dd_value);
+ Float64 dm = get_double_from_d_register(vm);
+ constexpr uint64_t kSignBit64 = uint64_t{1} << 63;
+ Float64 dd = Float64::FromBits(dm.get_bits() & ~kSignBit64);
+ dd = canonicalizeNaN(dd);
+ set_d_register_from_double(vd, dd);
} else {
- float sm_value = get_float_from_s_register(m);
- float sd_value = std::fabs(sm_value);
- sd_value = canonicalizeNaN(sd_value);
- set_s_register_from_float(d, sd_value);
+ Float32 sm = get_float_from_s_register(m);
+ constexpr uint32_t kSignBit32 = uint32_t{1} << 31;
+ Float32 sd = Float32::FromBits(sm.get_bits() & ~kSignBit32);
+ sd = canonicalizeNaN(sd);
+ set_s_register_from_float(d, sd);
}
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
if (instr->SzValue() == 0x1) {
- double dm_value = get_double_from_d_register(vm);
- double dd_value = -dm_value;
- dd_value = canonicalizeNaN(dd_value);
- set_d_register_from_double(vd, dd_value);
+ Float64 dm = get_double_from_d_register(vm);
+ constexpr uint64_t kSignBit64 = uint64_t{1} << 63;
+ Float64 dd = Float64::FromBits(dm.get_bits() ^ kSignBit64);
+ dd = canonicalizeNaN(dd);
+ set_d_register_from_double(vd, dd);
} else {
- float sm_value = get_float_from_s_register(m);
- float sd_value = -sm_value;
- sd_value = canonicalizeNaN(sd_value);
- set_s_register_from_float(d, sd_value);
+ Float32 sm = get_float_from_s_register(m);
+ constexpr uint32_t kSignBit32 = uint32_t{1} << 31;
+ Float32 sd = Float32::FromBits(sm.get_bits() ^ kSignBit32);
+ sd = canonicalizeNaN(sd);
+ set_s_register_from_float(d, sd);
}
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -3262,12 +3278,12 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vsqrt
lazily_initialize_fast_sqrt(isolate_);
if (instr->SzValue() == 0x1) {
- double dm_value = get_double_from_d_register(vm);
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = fast_sqrt(dm_value, isolate_);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
- float sm_value = get_float_from_s_register(m);
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = fast_sqrt(sm_value, isolate_);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3277,17 +3293,19 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
- set_s_register_from_float(d, instr->DoubleImmedVmov());
+ // Cast double to float.
+ float value = instr->DoubleImmedVmov().get_scalar();
+ set_s_register_from_float(d, value);
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
if (instr->SzValue() == 0x1) {
- double dm_value = get_double_from_d_register(vm);
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
- float sm_value = get_float_from_s_register(m);
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = truncf(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3299,14 +3317,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->Opc3Value() & 0x1) {
// vsub
if (instr->SzValue() == 0x1) {
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
+ double dn_value = get_double_from_d_register(vn).get_scalar();
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value - dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
- float sn_value = get_float_from_s_register(n);
- float sm_value = get_float_from_s_register(m);
+ float sn_value = get_float_from_s_register(n).get_scalar();
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value - sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3314,14 +3332,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else {
// vadd
if (instr->SzValue() == 0x1) {
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
+ double dn_value = get_double_from_d_register(vn).get_scalar();
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value + dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
- float sn_value = get_float_from_s_register(n);
- float sm_value = get_float_from_s_register(m);
+ float sn_value = get_float_from_s_register(n).get_scalar();
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value + sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3330,14 +3348,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
// vmul
if (instr->SzValue() == 0x1) {
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
+ double dn_value = get_double_from_d_register(vn).get_scalar();
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value * dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
- float sn_value = get_float_from_s_register(n);
- float sm_value = get_float_from_s_register(m);
+ float sn_value = get_float_from_s_register(n).get_scalar();
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value * sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3346,48 +3364,46 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vmla, vmls
const bool is_vmls = (instr->Opc3Value() & 0x1);
if (instr->SzValue() == 0x1) {
- const double dd_val = get_double_from_d_register(vd);
- const double dn_val = get_double_from_d_register(vn);
- const double dm_val = get_double_from_d_register(vm);
+ const double dd_val = get_double_from_d_register(vd).get_scalar();
+ const double dn_val = get_double_from_d_register(vn).get_scalar();
+ const double dm_val = get_double_from_d_register(vm).get_scalar();
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
- set_d_register_from_double(vd, dn_val * dm_val);
+ const double res = dn_val * dm_val;
+ set_d_register_from_double(vd, res);
if (is_vmls) {
- set_d_register_from_double(
- vd, canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
+ set_d_register_from_double(vd, canonicalizeNaN(dd_val - res));
} else {
- set_d_register_from_double(
- vd, canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
+ set_d_register_from_double(vd, canonicalizeNaN(dd_val + res));
}
} else {
- const float sd_val = get_float_from_s_register(d);
- const float sn_val = get_float_from_s_register(n);
- const float sm_val = get_float_from_s_register(m);
+ const float sd_val = get_float_from_s_register(d).get_scalar();
+ const float sn_val = get_float_from_s_register(n).get_scalar();
+ const float sm_val = get_float_from_s_register(m).get_scalar();
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
- set_s_register_from_float(d, sn_val * sm_val);
+ const float res = sn_val * sm_val;
+ set_s_register_from_float(d, res);
if (is_vmls) {
- set_s_register_from_float(
- d, canonicalizeNaN(sd_val - get_float_from_s_register(d)));
+ set_s_register_from_float(d, canonicalizeNaN(sd_val - res));
} else {
- set_s_register_from_float(
- d, canonicalizeNaN(sd_val + get_float_from_s_register(d)));
+ set_s_register_from_float(d, canonicalizeNaN(sd_val + res));
}
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() == 0x1) {
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
+ double dn_value = get_double_from_d_register(vn).get_scalar();
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
- float sn_value = get_float_from_s_register(n);
- float sm_value = get_float_from_s_register(m);
+ float sn_value = get_float_from_s_register(n).get_scalar();
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value / sm_value;
div_zero_vfp_flag_ = (sm_value == 0);
sd_value = canonicalizeNaN(sd_value);
@@ -3565,7 +3581,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
void Simulator::DecodeTypeCP15(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
- DCHECK(instr->CoprocessorValue() == 15);
+ DCHECK_EQ(instr->CoprocessorValue(), 15);
if (instr->Bit(4) == 1) {
// mcr
@@ -3626,10 +3642,10 @@ void Simulator::DecodeVCMP(Instruction* instr) {
}
if (precision == kDoublePrecision) {
- double dd_value = get_double_from_d_register(d);
+ double dd_value = get_double_from_d_register(d).get_scalar();
double dm_value = 0.0;
if (instr->Opc2Value() == 0x4) {
- dm_value = get_double_from_d_register(m);
+ dm_value = get_double_from_d_register(m).get_scalar();
}
// Raise exceptions for quiet NaNs if necessary.
@@ -3641,10 +3657,10 @@ void Simulator::DecodeVCMP(Instruction* instr) {
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
- float sd_value = get_float_from_s_register(d);
+ float sd_value = get_float_from_s_register(d).get_scalar();
float sm_value = 0.0;
if (instr->Opc2Value() == 0x4) {
- sm_value = get_float_from_s_register(m);
+ sm_value = get_float_from_s_register(m).get_scalar();
}
// Raise exceptions for quiet NaNs if necessary.
@@ -3674,10 +3690,10 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
int src = instr->VFPMRegValue(src_precision);
if (dst_precision == kSinglePrecision) {
- double val = get_double_from_d_register(src);
+ double val = get_double_from_d_register(src).get_scalar();
set_s_register_from_float(dst, static_cast<float>(val));
} else {
- float val = get_float_from_s_register(src);
+ float val = get_float_from_s_register(src).get_scalar();
set_d_register_from_double(dst, static_cast<double>(val));
}
}
@@ -3810,8 +3826,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
bool unsigned_integer = (instr->Bit(16) == 0);
bool double_precision = (src_precision == kDoublePrecision);
- double val = double_precision ? get_double_from_d_register(src)
- : get_float_from_s_register(src);
+ double val = double_precision ? get_double_from_d_register(src).get_scalar()
+ : get_float_from_s_register(src).get_scalar();
int32_t temp = ConvertDoubleToInt(val, unsigned_integer, mode);
@@ -3852,7 +3868,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
- DCHECK((instr->TypeValue() == 6));
+ DCHECK_EQ(instr->TypeValue(), 6);
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@@ -3870,7 +3886,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
// Load and store address for singles must be at least four-byte
// aligned.
- DCHECK((address % 4) == 0);
+ DCHECK_EQ(address % 4, 0);
if (instr->HasL()) {
// Load single from memory: vldr.
set_s_register_from_sinteger(vd, ReadW(address, instr));
@@ -3926,7 +3942,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
// Load and store address for doubles must be at least four-byte
// aligned.
- DCHECK((address % 4) == 0);
+ DCHECK_EQ(address % 4, 0);
if (instr->HasL()) {
// Load double from memory: vldr.
int32_t data[] = {
@@ -5561,7 +5577,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if (instr->SzValue() == 0x1) {
int vm = instr->VFPMRegValue(kDoublePrecision);
int vd = instr->VFPDRegValue(kDoublePrecision);
- double dm_value = get_double_from_d_register(vm);
+ double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
@@ -5587,7 +5603,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
} else {
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
- float sm_value = get_float_from_s_register(m);
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
@@ -5617,8 +5633,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int m = instr->VFPMRegValue(kDoublePrecision);
int n = instr->VFPNRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
- double dn_value = get_double_from_d_register(n);
- double dm_value = get_double_from_d_register(m);
+ double dn_value = get_double_from_d_register(n).get_scalar();
+ double dm_value = get_double_from_d_register(m).get_scalar();
double dd_value;
if (instr->Bit(6) == 0x1) { // vminnm
if ((dn_value < dm_value) || std::isnan(dm_value)) {
@@ -5647,8 +5663,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int m = instr->VFPMRegValue(kSinglePrecision);
int n = instr->VFPNRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
- float sn_value = get_float_from_s_register(n);
- float sm_value = get_float_from_s_register(m);
+ float sn_value = get_float_from_s_register(n).get_scalar();
+ float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value;
if (instr->Bit(6) == 0x1) { // vminnm
if ((sn_value < sm_value) || std::isnan(sm_value)) {
@@ -5704,13 +5720,13 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int n = instr->VFPNRegValue(kDoublePrecision);
int m = instr->VFPMRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
- double result = get_double_from_d_register(condition_holds ? n : m);
+ Float64 result = get_double_from_d_register(condition_holds ? n : m);
set_d_register_from_double(d, result);
} else {
int n = instr->VFPNRegValue(kSinglePrecision);
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
- float result = get_float_from_s_register(condition_holds ? n : m);
+ Float32 result = get_float_from_s_register(condition_holds ? n : m);
set_s_register_from_float(d, result);
}
} else {
@@ -5884,7 +5900,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up arguments
// First four arguments passed in registers.
- DCHECK(argument_count >= 4);
+ DCHECK_GE(argument_count, 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
@@ -5935,16 +5951,6 @@ int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
}
-double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
- CallFP(entry, d0, d1);
- if (use_eabi_hardfloat()) {
- return get_double_from_d_register(0);
- } else {
- return get_double_from_register_pair(0);
- }
-}
-
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 478b1fef25..26889018b5 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -16,6 +16,7 @@
#include "src/allocation.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
+#include "src/boxed-float.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@@ -158,20 +159,26 @@ class Simulator {
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
- void set_d_register_from_double(int dreg, const double& dbl) {
+ void set_d_register_from_double(int dreg, const Float64 dbl) {
+ SetVFPRegister<Float64, 2>(dreg, dbl);
+ }
+ void set_d_register_from_double(int dreg, const double dbl) {
SetVFPRegister<double, 2>(dreg, dbl);
}
- double get_double_from_d_register(int dreg) {
- return GetFromVFPRegister<double, 2>(dreg);
+ Float64 get_double_from_d_register(int dreg) {
+ return GetFromVFPRegister<Float64, 2>(dreg);
}
+ void set_s_register_from_float(int sreg, const Float32 flt) {
+ SetVFPRegister<Float32, 1>(sreg, flt);
+ }
void set_s_register_from_float(int sreg, const float flt) {
SetVFPRegister<float, 1>(sreg, flt);
}
- float get_float_from_s_register(int sreg) {
- return GetFromVFPRegister<float, 1>(sreg);
+ Float32 get_float_from_s_register(int sreg) {
+ return GetFromVFPRegister<Float32, 1>(sreg);
}
void set_s_register_from_sinteger(int sreg, const int sint) {
@@ -208,7 +215,6 @@ class Simulator {
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
- double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -277,6 +283,8 @@ class Simulator {
void Copy_FPSCR_to_APSR();
inline float canonicalizeNaN(float value);
inline double canonicalizeNaN(double value);
+ inline Float32 canonicalizeNaN(Float32 value);
+ inline Float64 canonicalizeNaN(Float64 value);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index ea2e5b1571..70d50eb330 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -284,7 +284,7 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
extend_(extend),
shift_amount_(shift_amount) {
DCHECK(reg.IsValid());
- DCHECK(shift_amount <= 4);
+ DCHECK_LE(shift_amount, 4);
DCHECK(!reg.IsSP());
// Extend modes SXTX and UXTX require a 64-bit register.
@@ -533,7 +533,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
@@ -618,7 +618,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -629,14 +629,13 @@ int RelocInfo::target_address_size() {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_pointer_address_at(pc_);
}
@@ -665,7 +664,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -710,9 +709,9 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@@ -816,7 +815,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
- DCHECK(kStartOfLabelLinkChain == 0);
+ DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
DCHECK(IsAligned(offset, kInstructionSize));
return offset >> kInstructionSizeLog2;
@@ -965,7 +964,7 @@ Instr Assembler::ExtendMode(Extend extend) {
Instr Assembler::ImmExtendShift(unsigned left_shift) {
- DCHECK(left_shift <= 4);
+ DCHECK_LE(left_shift, 4);
return left_shift << ImmExtendShift_offset;
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index e851fa5d78..2093a89df6 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -92,7 +92,7 @@ void CPURegList::RemoveCalleeSaved() {
} else if (type() == CPURegister::kVRegister) {
Remove(GetCalleeSavedV(RegisterSizeInBits()));
} else {
- DCHECK(type() == CPURegister::kNoRegister);
+ DCHECK_EQ(type(), CPURegister::kNoRegister);
DCHECK(IsEmpty());
// The list must already be empty, so do nothing.
}
@@ -195,19 +195,16 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
// No icache flushing needed, see comment in set_target_address_at.
}
-Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
- Register reg3, Register reg4) {
- CPURegList regs(reg1, reg2, reg3, reg4);
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- Register candidate = Register::from_code(code);
- if (regs.IncludesAliasOf(candidate)) continue;
- return candidate;
- }
- UNREACHABLE();
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
}
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister& reg3, const CPURegister& reg4,
@@ -361,7 +358,7 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
int ConstPool::DistanceToFirstUse() {
- DCHECK(first_use_ >= 0);
+ DCHECK_GE(first_use_, 0);
return assm_->pc_offset() - first_use_;
}
@@ -497,8 +494,8 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
const MemOperand& operandA,
const MemOperand& operandB,
int access_size_log2) {
- DCHECK(access_size_log2 >= 0);
- DCHECK(access_size_log2 <= 3);
+ DCHECK_GE(access_size_log2, 0);
+ DCHECK_LE(access_size_log2, 3);
// Step one: check that they share the same base, that the mode is Offset
// and that the offset is a multiple of access size.
if (!operandA.base().Is(operandB.base()) ||
@@ -699,7 +696,7 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
- } else if (label_veneer != NULL) {
+ } else if (label_veneer != nullptr) {
// Use the veneer for all previous links in the chain.
prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
@@ -768,11 +765,11 @@ void Assembler::bind(Label* label) {
CheckLabelLinkChain(label);
- DCHECK(linkoffset >= 0);
+ DCHECK_GE(linkoffset, 0);
DCHECK(linkoffset < pc_offset());
DCHECK((linkoffset > prevlinkoffset) ||
(linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
- DCHECK(prevlinkoffset >= 0);
+ DCHECK_GE(prevlinkoffset, 0);
// Update the link to point to the label.
if (link->IsUnresolvedInternalReference()) {
@@ -804,7 +801,7 @@ void Assembler::bind(Label* label) {
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
- DCHECK(sizeof(*pc_) == 1);
+ DCHECK_EQ(sizeof(*pc_), 1);
CheckLabelLinkChain(label);
int offset;
@@ -819,7 +816,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
// Note that offset can be zero for self-referential instructions. (This
// could be useful for ADR, for example.)
offset = label->pos() - pc_offset();
- DCHECK(offset <= 0);
+ DCHECK_LE(offset, 0);
} else {
if (label->is_linked()) {
// The label is linked, so the referring instruction should be added onto
@@ -828,7 +825,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
// In this case, label->pos() returns the offset of the last linked
// instruction from the start of the buffer.
offset = label->pos() - pc_offset();
- DCHECK(offset != kStartOfLabelLinkChain);
+ DCHECK_NE(offset, kStartOfLabelLinkChain);
// Note that the offset here needs to be PC-relative only so that the
// first instruction in a buffer can link to an unbound label. Otherwise,
// the offset would be 0 for this case, and 0 is reserved for
@@ -883,7 +880,7 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
if (unresolved_branches_.empty()) {
- DCHECK(next_veneer_pool_check_ == kMaxInt);
+ DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
return;
}
@@ -1635,7 +1632,7 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
DCHECK(!rt2.Is(addr.base()));
- DCHECK(addr.offset() != 0);
+ DCHECK_NE(addr.offset(), 0);
if (addr.IsPreIndex()) {
addrmodeop = LoadStorePairPreIndexFixed;
} else {
@@ -1761,6 +1758,7 @@ void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
+ DCHECK(!rs.Is(rt) && !rs.Is(rn));
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
@@ -1788,6 +1786,7 @@ void Assembler::stlxrb(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
+ DCHECK(!rs.Is(rt) && !rs.Is(rn));
Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
@@ -1814,6 +1813,7 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
+ DCHECK(!rs.Is(rt) && !rs.Is(rn));
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
@@ -3917,7 +3917,7 @@ void Assembler::dcptr(Label* label) {
// In this case, label->pos() returns the offset of the last linked
// instruction from the start of the buffer.
offset = label->pos() - pc_offset();
- DCHECK(offset != kStartOfLabelLinkChain);
+ DCHECK_NE(offset, kStartOfLabelLinkChain);
} else {
// The label is unused, so it now becomes linked and the internal
// reference is at the start of the new link chain.
@@ -4064,7 +4064,7 @@ void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
EmitData(string, static_cast<int>(len));
- // Pad with NULL characters until pc_ is aligned.
+ // Pad with NUL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
static_assert(sizeof(pad) == kInstructionSize,
"Size of padding must match instruction size.");
@@ -4087,11 +4087,11 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
// Refer to instructions-arm64.h for a description of the marker and its
// arguments.
hlt(kImmExceptionIsDebug);
- DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugCodeOffset);
dc32(code);
- DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugParamsOffset);
dc32(params);
- DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugMessageOffset);
EmitStringData(message);
hlt(kImmExceptionIsUnreachable);
@@ -4116,8 +4116,8 @@ void Assembler::Logical(const Register& rd,
int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
- DCHECK(immediate != 0);
- DCHECK(immediate != -1);
+ DCHECK_NE(immediate, 0);
+ DCHECK_NE(immediate, -1);
DCHECK(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
@@ -4300,7 +4300,7 @@ void Assembler::EmitExtendShift(const Register& rd,
case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
case UXTX:
case SXTX: {
- DCHECK(rn.SizeInBits() == kXRegSizeInBits);
+ DCHECK_EQ(rn.SizeInBits(), kXRegSizeInBits);
// Nothing to extend. Just shift.
lsl(rd, rn_, left_shift);
break;
@@ -4438,7 +4438,7 @@ bool Assembler::IsImmLogical(uint64_t value,
unsigned* n,
unsigned* imm_s,
unsigned* imm_r) {
- DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ DCHECK((n != nullptr) && (imm_s != nullptr) && (imm_r != nullptr));
DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
bool negate = false;
@@ -4748,7 +4748,7 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;
if ((rmode == RelocInfo::COMMENT) ||
@@ -4776,7 +4776,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
}
}
@@ -4862,7 +4862,7 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL,
- static_cast<intptr_t>(size), NULL);
+ static_cast<intptr_t>(size), nullptr);
reloc_info_writer.Write(&rinfo);
}
@@ -4940,7 +4940,7 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
- DCHECK(next_veneer_pool_check_ == kMaxInt);
+ DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
return;
}
@@ -5008,7 +5008,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
adr(rd, target_offset & 0xFFFF);
movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
- DCHECK((target_offset >> 48) == 0);
+ DCHECK_EQ(target_offset >> 48, 0);
add(rd, rd, scratch);
}
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 47ce6667c8..bfdab599a3 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -132,7 +132,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
int SizeInBytes() const {
DCHECK(IsValid());
- DCHECK(SizeInBits() % 8 == 0);
+ DCHECK_EQ(SizeInBits() % 8, 0);
return reg_size_ / 8;
}
bool Is8Bits() const {
@@ -280,6 +280,12 @@ class Register : public CPURegister {
return Register::Create(code, kXRegSizeInBits);
}
+ template <int code>
+ static Register from_code() {
+ // Always return an X register.
+ return Register::Create<code, kXRegSizeInBits>();
+ }
+
// End of V8 compatibility section -----------------------
//
private:
@@ -503,13 +509,6 @@ ALIAS_REGISTER(VRegister, fp_scratch2, d31);
#undef ALIAS_REGISTER
-
-Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
- Register reg2 = NoReg,
- Register reg3 = NoReg,
- Register reg4 = NoReg);
-
-
// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
@@ -671,7 +670,7 @@ class CPURegList {
int RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
- DCHECK((size_in_bits % kBitsPerByte) == 0);
+ DCHECK_EQ(size_in_bits % kBitsPerByte, 0);
return size_in_bits / kBitsPerByte;
}
@@ -935,14 +934,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -965,8 +965,8 @@ class Assembler : public AssemblerBase {
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
//
- // The descriptor (desc) can be NULL. In that case, the code is finalized as
- // usual, but the descriptor is not populated.
+ // The descriptor (desc) can be nullptr. In that case, the code is finalized
+ // as usual, but the descriptor is not populated.
void GetCode(Isolate* isolate, CodeDesc* desc);
// Insert the smallest number of nop instructions
@@ -1064,7 +1064,7 @@ class Assembler : public AssemblerBase {
// TODO(jbramley): Work out what sign to use for these things and if possible,
// change things to be consistent.
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
- DCHECK(size >= 0);
+ DCHECK_GE(size, 0);
DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
}
@@ -1408,14 +1408,14 @@ class Assembler : public AssemblerBase {
// Bfm aliases.
// Bitfield insert.
void bfi(const Register& rd, const Register& rn, int lsb, int width) {
- DCHECK(width >= 1);
+ DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Bitfield extract and insert low.
void bfxil(const Register& rd, const Register& rn, int lsb, int width) {
- DCHECK(width >= 1);
+ DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
}
@@ -1429,14 +1429,14 @@ class Assembler : public AssemblerBase {
// Signed bitfield insert in zero.
void sbfiz(const Register& rd, const Register& rn, int lsb, int width) {
- DCHECK(width >= 1);
+ DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Signed bitfield extract.
void sbfx(const Register& rd, const Register& rn, int lsb, int width) {
- DCHECK(width >= 1);
+ DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
}
@@ -1472,14 +1472,14 @@ class Assembler : public AssemblerBase {
// Unsigned bitfield insert in zero.
void ubfiz(const Register& rd, const Register& rn, int lsb, int width) {
- DCHECK(width >= 1);
+ DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Unsigned bitfield extract.
void ubfx(const Register& rd, const Register& rn, int lsb, int width) {
- DCHECK(width >= 1);
+ DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
}
@@ -2872,9 +2872,9 @@ class Assembler : public AssemblerBase {
// Emit an address in the instruction stream.
void dcptr(Label* label);
- // Copy a string into the instruction stream, including the terminating NULL
- // character. The instruction pointer (pc_) is then aligned correctly for
- // subsequent instructions.
+ // Copy a string into the instruction stream, including the terminating
+ // nullptr character. The instruction pointer (pc_) is then aligned correctly
+ // for subsequent instructions.
void EmitStringData(const char* string);
// Pseudo-instructions ------------------------------------------------------
@@ -3353,9 +3353,8 @@ class Assembler : public AssemblerBase {
// Remove the specified branch from the unbound label link chain.
// If available, a veneer for this label can be used for other branches in the
// chain if the link chain cannot be fixed up without this branch.
- void RemoveBranchFromLabelLinkChain(Instruction* branch,
- Label* label,
- Instruction* label_veneer = NULL);
+ void RemoveBranchFromLabelLinkChain(Instruction* branch, Label* label,
+ Instruction* label_veneer = nullptr);
// Prevent sharing of code target constant pool entries until
// EndBlockCodeTargetSharing is called. Calls to this function can be nested
@@ -3497,7 +3496,7 @@ class Assembler : public AssemblerBase {
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
- DCHECK(sizeof(*pc_) == 1);
+ DCHECK_EQ(sizeof(*pc_), 1);
DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
// TODO(all): Somehow register we have some data here. Then we can
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 2a994fca01..1ad50e5112 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -9,12 +9,10 @@
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/counters.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -41,35 +39,21 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
- Register input = source();
Register result = destination();
- DCHECK(is_truncating());
DCHECK(result.Is64Bits());
DCHECK(jssp.Is(masm->StackPointer()));
- int double_offset = offset();
-
- DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
- Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
- Register scratch2 =
- GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
-
- __ Push(scratch1, scratch2);
- // Account for saved regs if input is jssp.
- if (input.is(jssp)) double_offset += 2 * kPointerSize;
-
- if (!skip_fastpath()) {
- __ Push(double_scratch);
- if (input.is(jssp)) double_offset += 1 * kDoubleSize;
- __ Ldr(double_scratch, MemOperand(input, double_offset));
- // Try to convert with a FPU convert instruction. This handles all
- // non-saturating cases.
- __ TryConvertDoubleToInt64(result, double_scratch, &done);
- __ Fmov(result, double_scratch);
- } else {
- __ Ldr(result, MemOperand(input, double_offset));
- }
+ UseScratchRegisterScope temps(masm);
+ Register scratch1 = temps.AcquireX();
+ Register scratch2 = temps.AcquireX();
+ DoubleRegister double_scratch = temps.AcquireD();
+
+ __ Peek(double_scratch, 0);
+ // Try to convert with a FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryConvertDoubleToInt64(result, double_scratch, &done);
+ __ Fmov(result, double_scratch);
// If we reach here we need to manually convert the input to an int32.
@@ -110,55 +94,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Lsl(result, mantissa, exponent);
__ Bind(&done);
- if (!skip_fastpath()) {
- __ Pop(double_scratch);
- }
- __ Pop(scratch2, scratch1);
- __ Ret();
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- CPURegList saved_regs = kCallerSaved;
- CPURegList saved_fp_regs = kCallerSavedV;
-
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
-
- // We don't care if MacroAssembler scratch registers are corrupted.
- saved_regs.Remove(*(masm->TmpList()));
- saved_fp_regs.Remove(*(masm->FPTmpList()));
- DCHECK_EQ(saved_regs.Count() % 2, 0);
- DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
-
- __ PushCPURegList(saved_regs);
- if (save_doubles()) {
- __ PushCPURegList(saved_fp_regs);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(x0, ExternalReference::isolate_address(isolate()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
-
- if (save_doubles()) {
- __ PopCPURegList(saved_fp_regs);
- }
- __ PopCPURegList(saved_regs);
__ Ret();
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
// Stack on entry:
// jssp[0]: Exponent (as a tagged value).
@@ -284,10 +223,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the following stubs are generated in this order
// because pregenerated stubs can only call other pregenerated stubs.
- // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
- // CEntryStub.
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -299,8 +235,7 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
USE(isolate);
}
-
-bool CEntryStub::NeedsImmovableCode() {
+Movability CEntryStub::NeedsImmovableCode() {
// CEntryStub stores the return address on the stack before calling into
// C++ code. In some cases, the VM accesses this address, but it is not used
// when the C++ code returns to the stub because LR holds the return address
@@ -309,7 +244,7 @@ bool CEntryStub::NeedsImmovableCode() {
// TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
// find any comment to confirm this, and I don't hit any crashes whatever
// this function returns. The anaylsis should be properly confirmed.
- return true;
+ return kImmovable;
}
@@ -369,7 +304,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// - Adjust for the arg[] array.
Register temp_argv = x11;
if (!argv_in_register()) {
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ __ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
__ Sub(temp_argv, temp_argv, 1 * kPointerSize);
}
@@ -480,11 +415,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles(), x10, true);
+ __ LeaveExitFrame(save_doubles(), x10);
DCHECK(jssp.Is(__ StackPointer()));
if (!argv_in_register()) {
// Drop the remaining stack slots and return from the stub.
- __ Drop(x11);
+ __ DropArguments(x11);
}
__ AssertFPCRState();
__ Ret();
@@ -498,10 +433,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -543,12 +476,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&not_js_frame);
// Compute the handler entry address and jump to it.
- __ Mov(x10, Operand(pending_handler_code_address));
+ __ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
- __ Mov(x11, Operand(pending_handler_offset_address));
- __ Ldr(x11, MemOperand(x11));
- __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, x11);
__ Br(x10);
}
@@ -610,7 +539,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Select between the inner and outermost frame marker, based on the JS entry
// sp. We assert that the inner marker is zero, so we can use xzr to save a
// move instruction.
- DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
+ DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
__ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
__ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
__ B(ne, &done);
@@ -738,371 +667,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void StringHelper::GenerateFlatOneByteStringEquals(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
- Register result = x0;
- Register left_length = scratch1;
- Register right_length = scratch2;
-
- // Compare lengths. If lengths differ, strings can't be equal. Lengths are
- // smis, and don't need to be untagged.
- Label strings_not_equal, check_zero_length;
- __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
- __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
- __ Cmp(left_length, right_length);
- __ B(eq, &check_zero_length);
-
- __ Bind(&strings_not_equal);
- __ Mov(result, Smi::FromInt(NOT_EQUAL));
- __ Ret();
-
- // Check if the length is zero. If so, the strings must be equal (and empty.)
- Label compare_chars;
- __ Bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbnz(left_length, &compare_chars);
- __ Mov(result, Smi::FromInt(EQUAL));
- __ Ret();
-
- // Compare characters. Falls through if all characters are equal.
- __ Bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
- scratch3, &strings_not_equal);
-
- // Characters in strings are equal.
- __ Mov(result, Smi::FromInt(EQUAL));
- __ Ret();
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4) {
- DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
- Label result_not_equal, compare_lengths;
-
- // Find minimum length and length difference.
- Register length_delta = scratch3;
- __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Subs(length_delta, scratch1, scratch2);
-
- Register min_length = scratch1;
- __ Csel(min_length, scratch2, scratch1, gt);
- __ Cbz(min_length, &compare_lengths);
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- scratch4, &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ Bind(&compare_lengths);
-
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
-
- // Use length_delta as result if it's zero.
- Register result = x0;
- __ Subs(result, length_delta, 0);
-
- __ Bind(&result_not_equal);
- Register greater = x10;
- Register less = x11;
- __ Mov(greater, Smi::FromInt(GREATER));
- __ Mov(less, Smi::FromInt(LESS));
- __ CmovX(result, greater, gt);
- __ CmovX(result, less, lt);
- __ Ret();
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Label* chars_not_equal) {
- DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
-
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ Add(left, left, scratch1);
- __ Add(right, right, scratch1);
-
- Register index = length;
- __ Neg(index, length); // index = -length;
-
- // Compare loop
- Label loop;
- __ Bind(&loop);
- __ Ldrb(scratch1, MemOperand(left, index));
- __ Ldrb(scratch2, MemOperand(right, index));
- __ Cmp(scratch1, scratch2);
- __ B(ne, chars_not_equal);
- __ Add(index, index, 1);
- __ Cbnz(index, &loop);
-}
-
-
-RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
- Register address,
- Register scratch)
- : object_(object),
- address_(address),
- scratch0_(scratch),
- saved_regs_(kCallerSaved),
- saved_fp_regs_(kCallerSavedV) {
- DCHECK(!AreAliased(scratch, object, address));
-
- // The SaveCallerSaveRegisters method needs to save caller-saved
- // registers, but we don't bother saving MacroAssembler scratch registers.
- saved_regs_.Remove(MacroAssembler::DefaultTmpList());
- saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
-
- // We would like to require more scratch registers for this stub,
- // but the number of registers comes down to the ones used in
- // FullCodeGen::SetVar(), which is architecture independent.
- // We allocate 2 extra scratch registers that we'll save on the stack.
- CPURegList pool_available = GetValidRegistersForAllocation();
- CPURegList used_regs(object, address, scratch);
- pool_available.Remove(used_regs);
- scratch1_ = pool_available.PopLowestIndex().Reg();
- scratch2_ = pool_available.PopLowestIndex().Reg();
-
- // The scratch registers will be restored by other means so we don't need
- // to save them with the other caller saved registers.
- saved_regs_.Remove(scratch0_);
- saved_regs_.Remove(scratch1_);
- saved_regs_.Remove(scratch2_);
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- // Find the mode depending on the first two instructions.
- Instruction* instr1 =
- reinterpret_cast<Instruction*>(stub->instruction_start());
- Instruction* instr2 = instr1->following();
-
- if (instr1->IsUncondBranchImm()) {
- DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
- return INCREMENTAL;
- }
-
- DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
-
- if (instr2->IsUncondBranchImm()) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(instr2->IsPCRelAddressing());
-
- return STORE_BUFFER_ONLY;
-}
-
-// We patch the two first instructions of the stub back and forth between an
-// adr and branch when we start and stop incremental heap marking.
-// The branch is
-// b label
-// The adr is
-// adr xzr label
-// so effectively a nop.
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- // We are going to patch the two first instructions of the stub.
- PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
- Instruction* instr1 = patcher.InstructionAt(0);
- Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
- // Instructions must be either 'adr' or 'b'.
- DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
- DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
- // Retrieve the offsets to the labels.
- auto offset_to_incremental_noncompacting =
- static_cast<int32_t>(instr1->ImmPCOffset());
- auto offset_to_incremental_compacting =
- static_cast<int32_t>(instr2->ImmPCOffset());
-
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
- break;
- }
- DCHECK(GetMode(stub) == mode);
-}
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- // We need some extra registers for this stub, they have been allocated
- // but we need to save them before using them.
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- Register val = regs_.scratch0();
- __ Ldr(val, MemOperand(regs_.address()));
- __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm); // Restore the extra scratch registers we used.
-
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
-
- __ Bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- Register address =
- x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.Is(regs_.object()));
- DCHECK(!address.Is(x0));
- __ Mov(address, regs_.address());
- __ Mov(x0, regs_.object());
- __ Mov(x1, address);
- __ Mov(x2, ExternalReference::isolate_address(isolate()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- ExternalReference function =
- ExternalReference::incremental_marking_record_write_function(
- isolate());
- __ CallCFunction(function, 3, 0);
-
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // If the object is not black we don't have to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ Bind(&on_black);
-#endif
-
- // Get the value from the slot.
- Register val = regs_.scratch0();
- __ Ldr(val, MemOperand(regs_.address()));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlagClear(val, regs_.scratch1(),
- MemoryChunk::kEvacuationCandidateMask,
- &ensure_not_white);
-
- __ CheckPageFlagClear(regs_.object(),
- regs_.scratch1(),
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- &need_incremental);
-
- __ Bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.address(), regs_.object());
- __ JumpIfWhite(val,
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- regs_.scratch2(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ Bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ Bind(&need_incremental);
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // We patch these two first instructions back and forth between a nop and
- // real branch when we start and stop incremental heap marking.
- // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
- // are generated.
- // See RecordWriteStub::Patch for details.
- {
- InstructionAccurateScope scope(masm, 2);
- __ adr(xzr, &skip_to_incremental_noncompacting);
- __ adr(xzr, &skip_to_incremental_compacting);
- }
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
- }
- __ Ret();
-
- __ Bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ Bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-}
-
-
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -1110,21 +674,21 @@ static const unsigned int kProfileEntryHookCallSize =
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
Assembler::BlockConstPoolScope no_const_pools(tasm);
DontEmitDebugCodeScope no_debug_code(tasm);
Label entry_hook_call_start;
tasm->Bind(&entry_hook_call_start);
tasm->Push(padreg, lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- kProfileEntryHookCallSize);
+ DCHECK_EQ(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
+ kProfileEntryHookCallSize);
tasm->Pop(lr, padreg);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
Assembler::BlockConstPoolScope no_const_pools(masm);
DontEmitDebugCodeScope no_debug_code(masm);
@@ -1132,8 +696,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ Bind(&entry_hook_call_start);
__ Push(padreg, lr);
__ CallStub(&stub);
- DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- kProfileEntryHookCallSize);
+ DCHECK_EQ(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
+ kProfileEntryHookCallSize);
__ Pop(lr, padreg);
}
}
@@ -1170,7 +734,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The caller's return address is above the saved temporaries.
// Grab its location for the second argument to the hook.
- __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+ __ SlotAddress(x1, kNumSavedRegs);
{
// Create a dummy frame, as CallCFunction requires this.
@@ -1218,164 +782,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0) {
- DCHECK(!AreAliased(receiver, properties, scratch0));
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
- __ Sub(index, index, 1);
- __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- Register tmp = index;
- __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Operand(name));
- __ B(eq, miss);
-
- Label good;
- __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
-
- // Check if the entry name is not a unique name.
- __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ Ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ Bind(&good);
- }
-
- CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
- spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
- spill_list.Combine(lr);
- spill_list.Combine(padreg); // Add padreg to make the list of even length.
- DCHECK_EQ(spill_list.Count() % 2, 0);
-
- __ PushCPURegList(spill_list);
-
- __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ Mov(x1, Operand(name));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- // Move stub return value to scratch0. Note that scratch0 is not included in
- // spill_list and won't be clobbered by PopCPURegList.
- __ Mov(scratch0, x0);
- __ PopCPURegList(spill_list);
-
- __ Cbz(scratch0, done);
- __ B(miss);
-}
-
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- //
- // Arguments are in x0 and x1:
- // x0: property dictionary.
- // x1: the name of the property we are looking for.
- //
- // Return value is in x0 and is zero if lookup failed, non zero otherwise.
- // If the lookup is successful, x2 will contains the index of the entry.
-
- Register result = x0;
- Register dictionary = x0;
- Register key = x1;
- Register index = x2;
- Register mask = x3;
- Register hash = x4;
- Register undefined = x5;
- Register entry_key = x6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
- __ Sub(mask, mask, 1);
-
- __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Add(index, hash,
- NameDictionary::GetProbeOffset(i) << Name::kHashShift);
- } else {
- __ Mov(index, hash);
- }
- __ And(index, mask, Operand(index, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ Cmp(entry_key, undefined);
- __ B(eq, &not_in_dictionary);
-
- // Stop if found the property.
- __ Cmp(entry_key, key);
- __ B(eq, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ Bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup, probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ Mov(result, 0);
- __ Ret();
- }
-
- __ Bind(&in_dictionary);
- __ Mov(result, 1);
- __ Ret();
-
- __ Bind(&not_in_dictionary);
- __ Mov(result, 0);
- __ Ret();
-}
-
-
template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
@@ -1562,7 +968,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
@@ -1571,7 +977,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// We should either have undefined in the allocation_site register or a
// valid AllocationSite.
- __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ __ AssertUndefinedOrAllocationSite(allocation_site);
}
// Enter the context of the Array function.
@@ -1659,7 +1065,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
@@ -1710,8 +1116,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Register function_address,
ExternalReference thunk_ref,
int stack_space, int spill_offset,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
+ MemOperand return_value_operand) {
ASM_LOCATION("CallApiFunctionAndReturn");
Isolate* isolate = masm->isolate();
ExternalReference next_address =
@@ -1813,12 +1218,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ Ldr(cp, *context_restore_operand);
- }
-
- __ LeaveExitFrame(false, x1, !restore_context);
+ __ LeaveExitFrame(false, x1);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
@@ -1848,7 +1248,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
@@ -1858,21 +1257,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 8] : first argument
// -- sp[argc * 8] : receiver
- // -- sp[(argc + 1) * 8] : accessor_holder
// -----------------------------------
- Register callee = x0;
Register call_data = x4;
Register holder = x2;
Register api_function_address = x1;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1882,8 +1276,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
Register undef = x7;
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
- // Push new target, context, callee and call data.
- __ Push(undef, context, callee, call_data);
+ // Push new target, call data.
+ __ Push(undef, call_data);
Register isolate_reg = x5;
__ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
@@ -1892,40 +1286,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value, return value default, isolate, holder.
__ Push(undef, undef, isolate_reg, holder);
- // Enter a new context.
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 8] : new_target
- // -- sp[FCA::kArgsLength * 8] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 8] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 8] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 8] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder.
- Register accessor_holder = context;
- Register scratch = undef;
- Register scratch2 = callee;
- __ Ldr(accessor_holder,
- MemOperand(__ StackPointer(),
- (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ Ldr(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ Ldrb(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ Tst(scratch2, Operand(1 << Map::kIsConstructor));
- __ B(ne, &skip_looking_for_constructor);
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ Bind(&skip_looking_for_constructor);
- __ Ldr(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee.
- __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
Register args = x6;
__ Mov(args, masm->StackPointer());
@@ -1944,7 +1304,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
DCHECK(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
- __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ __ SlotAddress(x0, 1);
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
@@ -1956,25 +1316,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
// The number of arguments might be odd, but will be padded when calling the
- // stub. We do not round up stack_space here, this will be done in
- // CallApiFunctionAndReturn.
- const int stack_space = argc() + FCA::kArgsLength + 2;
- DCHECK_EQ((stack_space - argc()) % 2, 0);
+ // stub. We do not round up stack_space to account for odd argc here, this
+ // will be done in CallApiFunctionAndReturn.
+ const int stack_space = (argc() + 1) + FCA::kArgsLength;
+
+ // The current frame needs to be aligned.
+ DCHECK_EQ((stack_space - (argc() + 1)) % 2, 0);
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- spill_offset, return_value_operand,
- &context_restore_operand);
+ spill_offset, return_value_operand);
}
@@ -2033,7 +1387,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
// it's args_ field.
__ Poke(x1, 1 * kPointerSize);
- __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
+ __ SlotAddress(x1, 1);
// x1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
@@ -2051,7 +1405,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, spill_offset,
- return_value_operand, NULL);
+ return_value_operand);
}
#undef __
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 0713d3a319..14c4a988ac 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -8,196 +8,6 @@
namespace v8 {
namespace internal {
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in x0.
- static void GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4);
-
- // Compare two flat one-byte strings for equality and returns result in x0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Label* chars_not_equal);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- // Stub to record the write of 'value' at 'address' in 'object'.
- // Typically 'address' = 'object' + <some offset>.
- // See MacroAssembler::RecordWriteField() for example.
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- DCHECK(object.Is64Bits());
- DCHECK(value.Is64Bits());
- DCHECK(address.Is64Bits());
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class to manage the registers associated with the stub.
- // The 'object' and 'address' registers must be preserved.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch);
-
- void Save(MacroAssembler* masm) {
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->Push(scratch1_, scratch2_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->Pop(scratch2_, scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- // TODO(all): This can be very expensive, and it is likely that not every
- // register will need to be preserved. Can we improve this?
- masm->PushCPURegList(saved_regs_);
- if (mode == kSaveFPRegs) {
- masm->PushCPURegList(saved_fp_regs_);
- }
- }
-
- void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
- // TODO(all): This can be very expensive, and it is likely that not every
- // register will need to be preserved. Can we improve this?
- if (mode == kSaveFPRegs) {
- masm->PopCPURegList(saved_fp_regs_);
- }
- masm->PopCPURegList(saved_regs_);
- }
-
- Register object() { return object_; }
- Register address() { return address_; }
- Register scratch0() { return scratch0_; }
- Register scratch1() { return scratch1_; }
- Register scratch2() { return scratch2_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_ = NoReg;
- Register scratch2_ = NoReg;
- CPURegList saved_regs_;
- CPURegList saved_fp_regs_;
-
- // TODO(all): We should consider moving this somewhere else.
- static CPURegList GetValidRegistersForAllocation() {
- // The list of valid registers for allocation is defined as all the
- // registers without those with a special meaning.
- //
- // The default list excludes registers x26 to x31 because they are
- // reserved for the following purpose:
- // - x26 root register
- // - x27 context pointer register
- // - x28 jssp
- // - x29 frame pointer
- // - x30 link register(lr)
- // - x31 xzr/stack pointer
- CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
-
- // We also remove MacroAssembler's scratch registers.
- list.Remove(MacroAssembler::DefaultTmpList());
-
- return list;
- }
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 5> {};
- class ValueBits: public BitField<int, 5, 5> {};
- class AddressBits: public BitField<int, 10, 5> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
@@ -206,52 +16,12 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 3d988418ee..1016e3707a 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/codegen-arm64.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
@@ -21,100 +19,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
-// -------------------------------------------------------------------------
-// Code generators
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
- Label indirect_string_loaded;
- __ Bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ And(result, result, kStringRepresentationMask);
- __ Cmp(result, kConsStringTag);
- __ B(eq, &cons_string);
- __ Cmp(result, kThinStringTag);
- __ B(eq, &thin_string);
-
- // Handle slices.
- __ Ldr(result.W(),
- UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
- __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ Add(index, index, result.W());
- __ B(&indirect_string_loaded);
-
- // Handle thin strings.
- __ Bind(&thin_string);
- __ Ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ B(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ Bind(&cons_string);
- __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
- // Get the first of the two strings and load its instance type.
- __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ B(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ Bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ B(&check_encoding);
-
- // Handle external strings.
- __ Bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ Tst(result, kIsIndirectStringMask);
- __ Assert(eq, kExternalStringExpectedButNotFound);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
- // can be bound far away in deferred code.
- __ Tst(result, kShortExternalStringMask);
- __ B(ne, call_runtime);
- __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ Bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
- // Two-byte string.
- __ Ldrh(result, MemOperand(string, index, SXTW, 1));
- __ B(&done);
- __ Bind(&one_byte);
- // One-byte string.
- __ Ldrb(result, MemOperand(string, index, SXTW));
- __ Bind(&done);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
deleted file mode 100644
index 7ccd5ac444..0000000000
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM64_CODEGEN_ARM64_H_
-#define V8_ARM64_CODEGEN_ARM64_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output. Register index is asserted to be a 32-bit W
- // register.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ARM64_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 7c1084f62d..d4cb200de6 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -49,8 +49,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
- DCHECK(CountSetBits(dsize, 64) == 1);
- DCHECK(CountSetBits(isize, 64) == 1);
+ DCHECK_EQ(CountSetBits(dsize, 64), 1);
+ DCHECK_EQ(CountSetBits(isize, 64), 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 6718bd3d68..55a09dc1c5 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -96,10 +96,10 @@ void Decoder<V>::Decode(Instruction *instr) {
template<typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
- DCHECK(instr->Bits(27, 24) == 0x0);
+ DCHECK_EQ(0x0, instr->Bits(27, 24));
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
- DCHECK(instr->Bit(28) == 0x1);
+ DCHECK_EQ(0x1, instr->Bit(28));
V::VisitPCRelAddressing(instr);
}
@@ -339,7 +339,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
- DCHECK(instr->Bits(27, 24) == 0x2);
+ DCHECK_EQ(0x2, instr->Bits(27, 24));
if (instr->Mask(0x80400000) == 0x00400000) {
V::VisitUnallocated(instr);
@@ -359,7 +359,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
- DCHECK(instr->Bits(27, 24) == 0x3);
+ DCHECK_EQ(0x3, instr->Bits(27, 24));
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
@@ -385,7 +385,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
- DCHECK(instr->Bits(27, 24) == 0x1);
+ DCHECK_EQ(0x1, instr->Bits(27, 24));
if (instr->Bit(23) == 1) {
V::VisitUnallocated(instr);
} else {
@@ -623,7 +623,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
}
} else {
// Bit 30 == 1 has been handled earlier.
- DCHECK(instr->Bit(30) == 0);
+ DCHECK_EQ(0, instr->Bit(30));
if (instr->Mask(0xA0800000) != 0) {
V::VisitUnallocated(instr);
} else {
@@ -639,7 +639,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
template <typename V>
void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
- DCHECK(instr->Bits(29, 25) == 0x6);
+ DCHECK_EQ(0x6, instr->Bits(29, 25));
if (instr->Bit(31) == 0) {
if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
V::VisitUnallocated(instr);
@@ -670,7 +670,7 @@ void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
template <typename V>
void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
- DCHECK(instr->Bits(28, 25) == 0x7);
+ DCHECK_EQ(0x7, instr->Bits(28, 25));
if (instr->Bit(31) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
@@ -748,7 +748,7 @@ void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
template <typename V>
void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
- DCHECK(instr->Bits(28, 25) == 0xF);
+ DCHECK_EQ(0xF, instr->Bits(28, 25));
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 0180797215..5f372eadd2 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -5,7 +5,6 @@
#include "src/api.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
@@ -17,6 +16,77 @@ namespace internal {
#define __ masm()->
+namespace {
+
+void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
+ int dst_offset, const CPURegList& reg_list,
+ const Register& temp0, const Register& temp1,
+ int src_offset = 0) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList copy_to_input = reg_list;
+ int reg_size = reg_list.RegisterSizeInBytes();
+ DCHECK_EQ(temp0.SizeInBytes(), reg_size);
+ DCHECK_EQ(temp1.SizeInBytes(), reg_size);
+
+ // Compute some temporary addresses to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, masm->StackPointer(), src_offset);
+ masm->Add(dst, dst, dst_offset);
+
+ // Write reg_list into the frame pointed to by dst.
+ for (int i = 0; i < reg_list.Count(); i += 2) {
+ masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
+
+ CPURegister reg0 = copy_to_input.PopLowestIndex();
+ CPURegister reg1 = copy_to_input.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent stores, otherwise write them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Stp(temp0, temp1, MemOperand(dst, offset0));
+ } else {
+ masm->Str(temp0, MemOperand(dst, offset0));
+ masm->Str(temp1, MemOperand(dst, offset1));
+ }
+ }
+ masm->Sub(dst, dst, dst_offset);
+}
+
+void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
+ const Register& src_base, int src_offset) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList restore_list = reg_list;
+ int reg_size = restore_list.RegisterSizeInBytes();
+
+ // Compute a temporary addresses to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, src_base, src_offset);
+
+ // Restore every register in restore_list from src.
+ while (!restore_list.IsEmpty()) {
+ CPURegister reg0 = restore_list.PopLowestIndex();
+ CPURegister reg1 = restore_list.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent loads, otherwise read them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Ldp(reg0, reg1, MemOperand(src, offset0));
+ } else {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ masm->Ldr(reg1, MemOperand(src, offset1));
+ }
+ }
+}
+} // namespace
+
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
@@ -28,17 +98,23 @@ void Deoptimizer::TableEntryGenerator::Generate() {
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Default()->allocatable_double_codes_mask());
+ DCHECK_EQ(saved_double_registers.Count() % 2, 0);
__ PushCPURegList(saved_double_registers);
- // Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Default()->allocatable_float_codes_mask());
+ DCHECK_EQ(saved_float_registers.Count() % 4, 0);
__ PushCPURegList(saved_float_registers);
- // We save all the registers expcept jssp, sp and lr.
+ // We save all the registers except sp, lr and the masm scratches.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ saved_registers.Remove(ip0);
+ saved_registers.Remove(ip1);
+ // TODO(arm): padding here can be replaced with jssp/x28 when allocatable.
+ saved_registers.Combine(padreg);
saved_registers.Combine(fp);
+ DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@@ -64,18 +140,24 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
- // Compute the fp-to-sp delta, and correct one word for bailout id.
+ // Compute the fp-to-sp delta, adding two words for alignment padding and
+ // bailout id.
__ Add(fp_to_sp, __ StackPointer(),
- kSavedRegistersAreaSize + (1 * kPointerSize));
+ kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
- __ Mov(x0, 0);
- Label context_check;
__ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(x1, &context_check);
+
+ // Ensure we can safely load from below fp.
+ DCHECK_GT(kSavedRegistersAreaSize,
+ -JavaScriptFrameConstants::kFunctionOffset);
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ bind(&context_check);
+
+ // If x1 is a smi, zero x0.
+ __ Tst(x1, kSmiTagMask);
+ __ CzeroX(x0, eq);
+
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
@@ -96,70 +178,47 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
- CPURegList copy_to_input = saved_registers;
- for (int i = 0; i < saved_registers.Count(); i++) {
- __ Peek(x2, i * kPointerSize);
- CPURegister current_reg = copy_to_input.PopLowestIndex();
- int offset = (current_reg.code() * kPointerSize) +
- FrameDescription::registers_offset();
- __ Str(x2, MemOperand(x1, offset));
- }
+ CopyRegListToFrame(masm(), x1, FrameDescription::registers_offset(),
+ saved_registers, x2, x3);
// Copy double registers to the input frame.
- CPURegList copy_double_to_input = saved_double_registers;
- for (int i = 0; i < saved_double_registers.Count(); i++) {
- int src_offset = kDoubleRegistersOffset + (i * kDoubleSize);
- __ Peek(x2, src_offset);
- CPURegister reg = copy_double_to_input.PopLowestIndex();
- int dst_offset = FrameDescription::double_registers_offset() +
- (reg.code() * kDoubleSize);
- __ Str(x2, MemOperand(x1, dst_offset));
- }
+ CopyRegListToFrame(masm(), x1, FrameDescription::double_registers_offset(),
+ saved_double_registers, x2, x3, kDoubleRegistersOffset);
// Copy float registers to the input frame.
- CPURegList copy_float_to_input = saved_float_registers;
- for (int i = 0; i < saved_float_registers.Count(); i++) {
- int src_offset = kFloatRegistersOffset + (i * kFloatSize);
- __ Peek(w2, src_offset);
- CPURegister reg = copy_float_to_input.PopLowestIndex();
- int dst_offset =
- FrameDescription::float_registers_offset() + (reg.code() * kFloatSize);
- __ Str(w2, MemOperand(x1, dst_offset));
- }
+ // TODO(arm): these are the lower 32-bits of the double registers stored
+ // above, so we shouldn't need to store them again.
+ CopyRegListToFrame(masm(), x1, FrameDescription::float_registers_offset(),
+ saved_float_registers, w2, w3, kFloatRegistersOffset);
- // Remove the bailout id and the saved registers from the stack.
- __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
+ // Remove the padding, bailout id and the saved registers from the stack.
+ DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
+ __ Drop(2 + (kSavedRegistersAreaSize / kXRegSize));
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
- __ Add(unwind_limit, unwind_limit, __ StackPointer());
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
- Label pop_loop;
- Label pop_loop_header;
- __ B(&pop_loop_header);
- __ Bind(&pop_loop);
- __ Pop(x4);
- __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
- __ Bind(&pop_loop_header);
- __ Cmp(unwind_limit, __ StackPointer());
- __ B(ne, &pop_loop);
+ __ SlotAddress(x1, 0);
+ __ Lsr(unwind_limit, unwind_limit, kPointerSizeLog2);
+ __ Mov(x5, unwind_limit);
+ __ CopyDoubleWords(x3, x1, x5);
+ __ Drop(unwind_limit);
// Compute the output frame in the deoptimizer.
- __ Push(x0); // Preserve deoptimizer object across call.
-
+ __ Push(padreg, x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
- __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+ __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
__ Ldr(__ StackPointer(),
MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
@@ -174,43 +233,29 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Bind(&outer_push_loop);
Register current_frame = x2;
- __ Ldr(current_frame, MemOperand(x0, 0));
+ Register frame_size = x3;
+ __ Ldr(current_frame, MemOperand(x0, kPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
- __ B(&inner_loop_header);
+ __ Lsr(frame_size, x3, kPointerSizeLog2);
+ __ Claim(frame_size);
- __ Bind(&inner_push_loop);
- __ Sub(x3, x3, kPointerSize);
- __ Add(x6, current_frame, x3);
- __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
- __ Push(x7);
- __ Bind(&inner_loop_header);
- __ Cbnz(x3, &inner_push_loop);
+ __ Add(x7, current_frame, FrameDescription::frame_content_offset());
+ __ SlotAddress(x6, 0);
+ __ CopyDoubleWords(x6, x7, frame_size);
- __ Add(x0, x0, kPointerSize);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- DCHECK(!saved_double_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
- !saved_double_registers.IncludesAliasOf(fp_zero) &&
- !saved_double_registers.IncludesAliasOf(fp_scratch));
- while (!saved_double_registers.IsEmpty()) {
- const CPURegister reg = saved_double_registers.PopLowestIndex();
- int src_offset = FrameDescription::double_registers_offset() +
- (reg.code() * kDoubleSize);
- __ Ldr(reg, MemOperand(x1, src_offset));
- }
+ RestoreRegList(masm(), saved_double_registers, x1,
+ FrameDescription::double_registers_offset());
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
- // TODO(all): This code needs to be revisited, We probably don't need to
- // restore all the registers as fullcodegen does not keep live values in
- // registers (note that at least fp must be restored though).
-
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
@@ -219,19 +264,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
- // We don't need to restore x7 as it will be clobbered later to hold the
- // continuation address.
- Register continuation = x7;
- saved_registers.Remove(continuation);
-
- while (!saved_registers.IsEmpty()) {
- // TODO(all): Look for opportunities to optimize this by using ldp.
- CPURegister current_reg = saved_registers.PopLowestIndex();
- int offset = (current_reg.code() * kPointerSize) +
- FrameDescription::registers_offset();
- __ Ldr(current_reg, MemOperand(last_output_frame, offset));
- }
+ RestoreRegList(masm(), saved_registers, last_output_frame,
+ FrameDescription::registers_offset());
+ Register continuation = x7;
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
@@ -239,37 +275,57 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Br(continuation);
}
-
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
-const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
-
+const int Deoptimizer::table_entry_size_ = kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
+ // The address at which the deopt table is entered should be in x16, the first
+ // temp register allocated. We can't assert that the address is in there, but
+ // we can check that it's the first allocated temp. Later, we'll also check
+ // the computed entry_id is in the expected range.
+ Register entry_addr = temps.AcquireX();
Register entry_id = temps.AcquireX();
+ DCHECK(entry_addr.Is(x16));
+ DCHECK(entry_id.Is(x17));
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label done;
{
InstructionAccurateScope scope(masm());
- // The number of entry will never exceed kMaxNumberOfEntries.
- // As long as kMaxNumberOfEntries is a valid 16 bits immediate you can use
- // a movz instruction to load the entry id.
- DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));
-
+ Label start_of_table, end_of_table;
+ __ bind(&start_of_table);
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ movz(entry_id, i);
- __ b(&done);
+ __ b(&end_of_table);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
+ __ bind(&end_of_table);
+
+ // Get the address of the start of the table.
+ DCHECK(is_int21(table_entry_size_ * count()));
+ __ adr(entry_id, &start_of_table);
+
+ // Compute the gap in bytes between the entry address, which should have
+ // been left in entry_addr (x16) by CallForDeoptimization, and the start of
+ // the table.
+ __ sub(entry_id, entry_addr, entry_id);
+
+ // Shift down to obtain the entry_id.
+ DCHECK_EQ(table_entry_size_, kInstructionSize);
+ __ lsr(entry_id, entry_id, kInstructionSizeLog2);
+ }
+
+ __ Push(padreg, entry_id);
+
+ if (__ emit_debug_code()) {
+ // Ensure the entry_id looks sensible, ie. 0 <= entry_id < count().
+ __ Cmp(entry_id, count());
+ __ Check(lo, kOffsetOutOfRange);
}
- __ Bind(&done);
- __ Push(entry_id);
}
bool Deoptimizer::PadTopOfStackRegister() { return true; }
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 13eb77e06d..c9b2c9a4aa 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -547,7 +547,7 @@ void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
case RET: {
mnemonic = "ret";
if (instr->Rn() == kLinkRegCode) {
- form = NULL;
+ form = nullptr;
}
break;
}
@@ -1244,7 +1244,7 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
- form = NULL;
+ form = nullptr;
break;
}
}
@@ -1262,7 +1262,7 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
}
case ISB: {
mnemonic = "isb";
- form = NULL;
+ form = nullptr;
break;
}
}
@@ -1334,8 +1334,8 @@ void DisassemblingDecoder::VisitNEON3Same(Instruction* instr) {
"shadd", "uhadd", "shadd", "uhadd",
"sqadd", "uqadd", "sqadd", "uqadd",
"srhadd", "urhadd", "srhadd", "urhadd",
- NULL, NULL, NULL,
- NULL, // Handled by logical cases above.
+ nullptr, nullptr, nullptr,
+ nullptr, // Handled by logical cases above.
"shsub", "uhsub", "shsub", "uhsub",
"sqsub", "uqsub", "sqsub", "uqsub",
"cmgt", "cmhi", "cmgt", "cmhi",
@@ -1976,8 +1976,8 @@ void DisassemblingDecoder::VisitNEONExtract(Instruction* instr) {
}
void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
- const char* mnemonic = NULL;
- const char* form = NULL;
+ const char* mnemonic = nullptr;
+ const char* form = nullptr;
const char* form_1v = "{'Vt.%1$s}, ['Xns]";
const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
@@ -2046,7 +2046,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
}
// Work out unallocated encodings.
- bool allocated = (mnemonic != NULL);
+ bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
case NEON_LD2:
case NEON_LD3:
@@ -2073,8 +2073,8 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
Instruction* instr) {
- const char* mnemonic = NULL;
- const char* form = NULL;
+ const char* mnemonic = nullptr;
+ const char* form = nullptr;
const char* form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
@@ -2144,7 +2144,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
}
// Work out unallocated encodings.
- bool allocated = (mnemonic != NULL);
+ bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
case NEON_LD2_post:
case NEON_LD3_post:
@@ -2170,8 +2170,8 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
}
void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
- const char* mnemonic = NULL;
- const char* form = NULL;
+ const char* mnemonic = nullptr;
+ const char* form = nullptr;
const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
@@ -2294,7 +2294,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
}
// Work out unallocated encodings.
- bool allocated = (mnemonic != NULL);
+ bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
case NEON_LD1_h:
case NEON_LD2_h:
@@ -2342,8 +2342,8 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex(
Instruction* instr) {
- const char* mnemonic = NULL;
- const char* form = NULL;
+ const char* mnemonic = nullptr;
+ const char* form = nullptr;
const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
@@ -2455,7 +2455,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex(
}
// Work out unallocated encodings.
- bool allocated = (mnemonic != NULL);
+ bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
case NEON_LD1_h_post:
case NEON_LD2_h_post:
@@ -3355,10 +3355,10 @@ void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic,
const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
- DCHECK(mnemonic != NULL);
+ DCHECK_NOT_NULL(mnemonic);
ResetOutput();
Substitute(instr, mnemonic);
- if (format != NULL) {
+ if (format != nullptr) {
buffer_[buffer_pos_++] = ' ';
Substitute(instr, format);
}
@@ -3561,7 +3561,7 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
const char* format) {
- DCHECK(format[0] == 'I');
+ DCHECK_EQ(format[0], 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@@ -3572,7 +3572,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff);
AppendToOutput("#0x%" PRIx64, imm);
} else {
- DCHECK(format[5] == 'L');
+ DCHECK_EQ(format[5], 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@@ -3617,7 +3617,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
- DCHECK(instr->ShiftAddSub() <= 1);
+ DCHECK_LE(instr->ShiftAddSub(), 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
@@ -3795,7 +3795,7 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
- DCHECK(format[3] == '-');
+ DCHECK_EQ(format[3], '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
@@ -3816,7 +3816,7 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
const char* format) {
- DCHECK(strncmp(format, "LValue", 6) == 0);
+ DCHECK_EQ(strncmp(format, "LValue", 6), 0);
USE(format);
switch (instr->Mask(LoadLiteralMask)) {
@@ -3858,7 +3858,7 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
const char* format) {
- DCHECK(format[0] == 'C');
+ DCHECK_EQ(format[0], 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@@ -3880,12 +3880,12 @@ int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
- DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
+ DCHECK_EQ(strncmp(format, "AddrPCRel", 9), 0);
int offset = instr->ImmPCRel();
// Only ADR (AddrPCRelByte) is supported.
- DCHECK(strcmp(format, "AddrPCRelByte") == 0);
+ DCHECK_EQ(strcmp(format, "AddrPCRelByte"), 0);
char sign = '+';
if (offset < 0) {
@@ -3927,8 +3927,8 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
const char* format) {
- DCHECK(strncmp(format, "Ext", 3) == 0);
- DCHECK(instr->ExtendMode() <= 7);
+ DCHECK_EQ(strncmp(format, "Ext", 3), 0);
+ DCHECK_LE(instr->ExtendMode(), 7);
USE(format);
const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@@ -3954,7 +3954,7 @@ int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
- DCHECK(strncmp(format, "Offsetreg", 9) == 0);
+ DCHECK_EQ(strncmp(format, "Offsetreg", 9), 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@@ -3983,7 +3983,7 @@ int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
const char* format) {
- DCHECK(format[0] == 'P');
+ DCHECK_EQ(format[0], 'P');
USE(format);
int prefetch_mode = instr->PrefetchMode();
@@ -3998,7 +3998,7 @@ int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
int DisassemblingDecoder::SubstituteBarrierField(Instruction* instr,
const char* format) {
- DCHECK(format[0] == 'M');
+ DCHECK_EQ(format[0], 'M');
USE(format);
static const char* const options[4][4] = {
diff --git a/deps/v8/src/arm64/frame-constants-arm64.cc b/deps/v8/src/arm64/frame-constants-arm64.cc
index 327c0ed188..a37b665d28 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/arm64/frame-constants-arm64.cc
@@ -24,6 +24,14 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return RoundUp(register_count, 2);
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ // Round the total slot count up to a multiple of two, to make the frame a
+ // multiple of 16 bytes.
+ int slot_count = kFixedSlotCount + register_count;
+ int rounded_slot_count = RoundUp(slot_count, 2);
+ return rounded_slot_count - slot_count;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index d6f106b800..272948a819 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -70,7 +70,7 @@ bool Instruction::IsStore() const {
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
- DCHECK(width <= 64);
+ DCHECK_LE(width, 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
@@ -657,8 +657,8 @@ void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format2) {
DCHECK_NOT_NULL(format0);
formats_[0] = format0;
- formats_[1] = (format1 == NULL) ? formats_[0] : format1;
- formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+ formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
+ formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
}
void NEONFormatDecoder::SetFormatMap(unsigned index,
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 0c59a425cc..0cc3e803d0 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -551,7 +551,7 @@ const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
-// - Debug message string. This is a NULL-terminated ASCII string, padded to
+// - Debug message string. This is a nullptr-terminated ASCII string, padded to
// kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
@@ -642,8 +642,8 @@ class NEONFormatDecoder {
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
- const NEONFormatMap* format1 = NULL,
- const NEONFormatMap* format2 = NULL);
+ const NEONFormatMap* format1 = nullptr,
+ const NEONFormatMap* format2 = nullptr);
void SetFormatMap(unsigned index, const NEONFormatMap* format);
// Substitute %s in the input string with the placeholder string for each
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 2ed67ba57c..9fc2adb6f7 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -9,7 +9,7 @@ namespace internal {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
- DCHECK(name != NULL);
+ DCHECK_NOT_NULL(name);
strncpy(name_, name, kCounterNameMaxLength);
}
@@ -96,12 +96,11 @@ static const CounterDescriptor kCounterList[] = {
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {
-
- // Set up the output stream. If datafile is non-NULL, use that file. If it
- // can't be opened, or datafile is NULL, use stderr.
- if (datafile != NULL) {
+ // Set up the output stream. If datafile is non-nullptr, use that file. If it
+ // can't be opened, or datafile is nullptr, use stderr.
+ if (datafile != nullptr) {
output_stream_ = fopen(datafile, "w");
- if (output_stream_ == NULL) {
+ if (output_stream_ == nullptr) {
fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
output_stream_ = stderr;
}
diff --git a/deps/v8/src/arm64/instrument-arm64.h b/deps/v8/src/arm64/instrument-arm64.h
index 02816e943e..8b3d7e6023 100644
--- a/deps/v8/src/arm64/instrument-arm64.h
+++ b/deps/v8/src/arm64/instrument-arm64.h
@@ -52,8 +52,9 @@ class Counter {
class Instrument: public DecoderVisitor {
public:
- explicit Instrument(const char* datafile = NULL,
- uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ explicit Instrument(
+ const char* datafile = nullptr,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
// Declare all Visitor functions.
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 6f0a600aa2..300d42d565 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -58,9 +58,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x5; }
-const Register StringCompareDescriptor::LeftRegister() { return x1; }
-const Register StringCompareDescriptor::RightRegister() { return x0; }
-
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
@@ -222,7 +219,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -232,7 +229,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
Register registers[] = {x1, x2, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -242,7 +239,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// x1: function
// x2: allocation site with elements kind
Register registers[] = {x1, x2, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -298,10 +295,10 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
@@ -351,8 +348,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // the value to pass to the generator
- x1, // the JSGeneratorObject to resume
- x2 // the resume mode (tagged)
+ x1 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index c9da9d12d0..9bef2b378b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1045,7 +1045,7 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
- DCHECK(sp_alignment >= 16);
+ DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
@@ -1173,7 +1173,7 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
- JumpIfSmi(value, NULL, not_smi_label);
+ JumpIfSmi(value, nullptr, not_smi_label);
}
@@ -1206,14 +1206,14 @@ void MacroAssembler::JumpIfEitherSmi(Register value1,
void MacroAssembler::JumpIfEitherNotSmi(Register value1,
Register value2,
Label* not_smi_label) {
- JumpIfBothSmi(value1, value2, NULL, not_smi_label);
+ JumpIfBothSmi(value1, value2, nullptr, not_smi_label);
}
void MacroAssembler::JumpIfBothNotSmi(Register value1,
Register value2,
Label* not_smi_label) {
- JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
+ JumpIfEitherSmi(value1, value2, nullptr, not_smi_label);
}
@@ -1257,7 +1257,7 @@ void TurboAssembler::Push(Smi* smi) {
}
void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
- DCHECK(count >= 0);
+ DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
if (size == 0) {
@@ -1265,7 +1265,7 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
}
if (csp.Is(StackPointer())) {
- DCHECK(size % 16 == 0);
+ DCHECK_EQ(size % 16, 0);
} else {
BumpSystemStackPointer(size);
}
@@ -1312,7 +1312,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
- DCHECK(count >= 0);
+ DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
if (size == 0) {
@@ -1322,7 +1322,7 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
Add(StackPointer(), StackPointer(), size);
if (csp.Is(StackPointer())) {
- DCHECK(size % 16 == 0);
+ DCHECK_EQ(size % 16, 0);
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
@@ -1353,14 +1353,24 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
}
-void TurboAssembler::DropArguments(const Register& count, uint64_t unit_size) {
- Drop(count, unit_size);
+void TurboAssembler::DropArguments(const Register& count,
+ ArgumentsCountMode mode) {
+ if (mode == kCountExcludesReceiver) {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Add(tmp, count, 1);
+ Drop(tmp);
+ } else {
+ Drop(count);
+ }
}
void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
Drop(count, unit_size);
}
+void TurboAssembler::PushArgument(const Register& arg) { Push(arg); }
+
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
@@ -1404,7 +1414,7 @@ void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
- DCHECK(CountSetBits(bit_pattern, bits) > 0);
+ DCHECK_GT(CountSetBits(bit_pattern, bits), 0);
if (CountSetBits(bit_pattern, bits) == 1) {
Tbnz(reg, MaskToBit(bit_pattern), label);
} else {
@@ -1417,7 +1427,7 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
- DCHECK(CountSetBits(bit_pattern, bits) > 0);
+ DCHECK_GT(CountSetBits(bit_pattern, bits), 0);
if (CountSetBits(bit_pattern, bits) == 1) {
Tbz(reg, MaskToBit(bit_pattern), label);
} else {
@@ -1447,7 +1457,7 @@ void MacroAssembler::DisableInstrumentation() {
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
- DCHECK(strlen(marker_name) == 2);
+ DCHECK_EQ(strlen(marker_name), 2);
// We allow only printable characters in the marker names. Unprintable
// characters are reserved for controlling features of the instrumentation.
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index f10ddceab5..5f69f0e1e2 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -9,7 +9,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
@@ -53,45 +53,61 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) const {
+ Register exclusion) const {
int bytes = 0;
auto list = kCallerSaved;
- list.Remove(exclusion1, exclusion2, exclusion3);
+ DCHECK_EQ(list.Count() % 2, 0);
+ // We only allow one exclusion register, so if the list is of even length
+ // before exclusions, it must still be afterwards, to maintain alignment.
+ // Therefore, we can ignore the exclusion register in the computation.
+ // However, we leave it in the argument list to mirror the prototype for
+ // Push/PopCallerSaved().
+ USE(exclusion);
bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
+ DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion) {
int bytes = 0;
auto list = kCallerSaved;
- list.Remove(exclusion1, exclusion2, exclusion3);
+ DCHECK_EQ(list.Count() % 2, 0);
+ if (!exclusion.Is(no_reg)) {
+ // Replace the excluded register with padding to maintain alignment.
+ list.Remove(exclusion);
+ list.Combine(padreg);
+ }
PushCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
+ DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
int bytes = 0;
if (fp_mode == kSaveFPRegs) {
+ DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PopCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
auto list = kCallerSaved;
- list.Remove(exclusion1, exclusion2, exclusion3);
+ DCHECK_EQ(list.Count() % 2, 0);
+ if (!exclusion.Is(no_reg)) {
+ // Replace the excluded register with padding to maintain alignment.
+ list.Remove(exclusion);
+ list.Combine(padreg);
+ }
PopCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
@@ -191,7 +207,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports shift <= 4. We want to support exactly the
// same modes here.
- DCHECK(operand.shift_amount() <= 4);
+ DCHECK_LE(operand.shift_amount(), 4);
DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
@@ -255,7 +271,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// Iterate through the halfwords. Use movn/movz for the first non-ignored
// halfword, and movk for subsequent halfwords.
- DCHECK((reg_size % 16) == 0);
+ DCHECK_EQ(reg_size % 16, 0);
bool first_mov_done = false;
for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
@@ -528,7 +544,7 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
}
unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- DCHECK((reg_size % 8) == 0);
+ DCHECK_EQ(reg_size % 8, 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
@@ -765,7 +781,7 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports a shift <= 4. We want to support exactly the
// same modes.
- DCHECK(operand.shift_amount() <= 4);
+ DCHECK_LE(operand.shift_amount(), 4);
DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
@@ -876,13 +892,13 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
return;
}
- DCHECK(hint == kAdrFar);
+ DCHECK_EQ(hint, kAdrFar);
if (label->is_bound()) {
int label_offset = label->pos() - pc_offset();
if (Instruction::IsValidPCRelOffset(label_offset)) {
adr(rd, label);
} else {
- DCHECK(label_offset <= 0);
+ DCHECK_LE(label_offset, 0);
int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
adr(rd, min_adr_offset);
Add(rd, rd, label_offset - min_adr_offset);
@@ -1015,12 +1031,12 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm,
// If the comparison sets the v flag, the input was the smallest value
// representable by rm, and the mathematical result of abs(rm) is not
// representable using two's complement.
- if ((is_not_representable != NULL) && (is_representable != NULL)) {
+ if ((is_not_representable != nullptr) && (is_representable != nullptr)) {
B(is_not_representable, vs);
B(is_representable);
- } else if (is_not_representable != NULL) {
+ } else if (is_not_representable != nullptr) {
B(is_not_representable, vs);
- } else if (is_representable != NULL) {
+ } else if (is_representable != nullptr) {
B(is_representable, vc);
}
}
@@ -1313,7 +1329,7 @@ void TurboAssembler::PushPreamble(Operand total_size) {
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- DCHECK((total_size.ImmediateValue() % 16) == 0);
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
@@ -1334,7 +1350,7 @@ void TurboAssembler::PopPostamble(Operand total_size) {
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- DCHECK((total_size.ImmediateValue() % 16) == 0);
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
@@ -1356,7 +1372,7 @@ void TurboAssembler::PopPostamble(int count, int size) {
void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
- DCHECK(offset.ImmediateValue() >= 0);
+ DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
@@ -1368,7 +1384,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
- DCHECK(offset.ImmediateValue() >= 0);
+ DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
@@ -1482,6 +1498,85 @@ void TurboAssembler::AssertCspAligned() {
}
}
+void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) {
+ DCHECK(!src.IsZero());
+ UseScratchRegisterScope scope(this);
+ Register dst_reg = scope.AcquireX();
+ SlotAddress(dst_reg, dst);
+ SlotAddress(src, src);
+ CopyDoubleWords(dst_reg, src, slot_count);
+}
+
+void TurboAssembler::CopySlots(Register dst, Register src,
+ Register slot_count) {
+ DCHECK(!dst.IsZero() && !src.IsZero());
+ SlotAddress(dst, dst);
+ SlotAddress(src, src);
+ CopyDoubleWords(dst, src, slot_count);
+}
+
+void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
+ CopyDoubleWordsMode mode) {
+ DCHECK(!AreAliased(dst, src, count));
+
+ if (emit_debug_code()) {
+ Register pointer1 = dst;
+ Register pointer2 = src;
+ if (mode == kSrcLessThanDst) {
+ pointer1 = src;
+ pointer2 = dst;
+ }
+ // Copy requires pointer1 < pointer2 || (pointer1 - pointer2) >= count.
+ Label pointer1_below_pointer2;
+ Subs(pointer1, pointer1, pointer2);
+ B(lt, &pointer1_below_pointer2);
+ Cmp(pointer1, count);
+ Check(ge, kOffsetOutOfRange);
+ Bind(&pointer1_below_pointer2);
+ Add(pointer1, pointer1, pointer2);
+ }
+ static_assert(kPointerSize == kDRegSize,
+ "pointers must be the same size as doubles");
+
+ int direction = (mode == kDstLessThanSrc) ? 1 : -1;
+ UseScratchRegisterScope scope(this);
+ VRegister temp0 = scope.AcquireD();
+ VRegister temp1 = scope.AcquireD();
+
+ Label pairs, loop, done;
+
+ Tbz(count, 0, &pairs);
+ Ldr(temp0, MemOperand(src, direction * kPointerSize, PostIndex));
+ Sub(count, count, 1);
+ Str(temp0, MemOperand(dst, direction * kPointerSize, PostIndex));
+
+ Bind(&pairs);
+ if (mode == kSrcLessThanDst) {
+ // Adjust pointers for post-index ldp/stp with negative offset:
+ Sub(dst, dst, kPointerSize);
+ Sub(src, src, kPointerSize);
+ }
+ Bind(&loop);
+ Cbz(count, &done);
+ Ldp(temp0, temp1, MemOperand(src, 2 * direction * kPointerSize, PostIndex));
+ Sub(count, count, 2);
+ Stp(temp0, temp1, MemOperand(dst, 2 * direction * kPointerSize, PostIndex));
+ B(&loop);
+
+ // TODO(all): large copies may benefit from using temporary Q registers
+ // to copy four double words per iteration.
+
+ Bind(&done);
+}
+
+void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
+ Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
+}
+
+void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
+ Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
+}
+
void TurboAssembler::AssertFPCRState(Register fpcr) {
if (emit_debug_code()) {
Label unexpected_mode, done;
@@ -1537,32 +1632,6 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- Ldr(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- Ldr(dst, FieldMemOperand(dst, offset));
-}
-
-void MacroAssembler::InNewSpace(Register object,
- Condition cond,
- Label* branch) {
- DCHECK(cond == eq || cond == ne);
- UseScratchRegisterScope temps(this);
- CheckPageFlag(object, temps.AcquireSameSizeAs(object),
- MemoryChunk::kIsInNewSpaceMask, cond, branch);
-}
-
void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1571,7 +1640,6 @@ void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
}
}
-
void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1626,12 +1694,9 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- // Load instance type
- Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
-
Label do_check;
- // Check if JSGeneratorObject
- Cmp(temp, JS_GENERATOR_OBJECT_TYPE);
+ // Load instance type and check if JSGeneratorObject
+ CompareInstanceType(temp, temp, JS_GENERATOR_OBJECT_TYPE);
B(eq, &do_check);
// Check if JSAsyncGeneratorObject
@@ -1642,9 +1707,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Check(eq, kOperandIsNotAGeneratorObject);
}
-void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
- Register scratch) {
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
Label done_checking;
AssertNotSmi(object);
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
@@ -1785,14 +1851,14 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
// BUILTIN_FP_CALL: double f(double)
// BUILTIN_FP_INT_CALL: double f(double, int)
if (num_of_double_args > 0) {
- DCHECK(num_of_reg_args <= 1);
- DCHECK((num_of_double_args + num_of_reg_args) <= 2);
+ DCHECK_LE(num_of_reg_args, 1);
+ DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}
// We rely on the frame alignment being 16 bytes, which means we never need
// to align the CSP by an unknown number of bytes and we always know the delta
// between the stack pointer and the frame pointer.
- DCHECK(ActivationFrameAlignment() == 16);
+ DCHECK_EQ(ActivationFrameAlignment(), 16);
// If the stack pointer is not csp, we need to derive an aligned csp from the
// current stack pointer.
@@ -1931,7 +1997,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
- DCHECK(((imm >> 48) & 0xffff) == 0);
+ DCHECK_EQ((imm >> 48) & 0xffff, 0);
movz(temp, (imm >> 0) & 0xffff, 0);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
@@ -1958,6 +2024,38 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
#endif
}
+void TurboAssembler::Call(ExternalReference target) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ // Immediate is in charge of setting the relocation mode to
+ // EXTERNAL_REFERENCE.
+ Ldr(temp, Immediate(target));
+ Call(temp);
+}
+
+void TurboAssembler::CallForDeoptimization(Address target,
+ RelocInfo::Mode rmode) {
+ DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
+
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ // Deoptimisation table entries require the call address to be in x16, in
+ // order to compute the entry id.
+ DCHECK(temp.Is(x16));
+ Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
+ Blr(temp);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
+#endif
+}
+
int TurboAssembler::CallSize(Register target) {
USE(target);
return kInstructionSize;
@@ -2046,19 +2144,6 @@ void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
}
}
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
- Label* not_unique_name) {
- STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
- // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
- // continue
- // } else {
- // goto not_unique_name
- // }
- Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
- Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
- B(ne, not_unique_name);
-}
-
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
@@ -2196,38 +2281,31 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- CompareAndBranch(x4, Operand(0), eq, &skip_hook);
+ Cbz(x4, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg());
- Push(actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
+
+ Register expected_reg = padreg;
+ Register actual_reg = padreg;
+ if (expected.is_reg()) expected_reg = expected.reg();
+ if (actual.is_reg()) actual_reg = actual.reg();
+ if (!new_target.is_valid()) new_target = padreg;
+
+ // Save values on stack.
+ SmiTag(expected_reg);
+ SmiTag(actual_reg);
+ Push(expected_reg, actual_reg, new_target, fun);
+
+ PushArgument(fun);
CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg());
- }
+
+ // Restore values from stack.
+ Pop(fun, new_target, actual_reg, expected_reg);
+ SmiUntag(actual_reg);
+ SmiUntag(expected_reg);
}
- bind(&skip_hook);
+ Bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -2373,9 +2451,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
- auto stub = new (zone) DoubleToIStub(nullptr, jssp, result, 0,
- true, // is_truncating
- true); // skip_fastpath
+ auto stub = new (zone) DoubleToIStub(nullptr, result);
// DoubleToIStub preserves any registers it needs to clobber.
CallStubDelayed(stub);
@@ -2425,13 +2501,21 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// csp[1] : type
// csp[0] : for alignment
} else {
+ DCHECK_EQ(type, StackFrame::CONSTRUCT);
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
- Push(lr, fp, type_reg);
- Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
- // jssp[2] : lr
- // jssp[1] : fp
- // jssp[0] : type
+
+ // Users of this frame type push a context pointer after the type field,
+ // so do it here to keep the stack pointer aligned.
+ Push(lr, fp, type_reg, cp);
+
+ // The context pointer isn't part of the fixed frame, so add an extra slot
+ // to account for it.
+ Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ // jssp[3] : lr
+ // jssp[2] : fp
+ // jssp[1] : type
+ // jssp[0] : cp
}
}
@@ -2462,7 +2546,7 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
CPURegList saved_fp_regs = kCallerSavedV;
- DCHECK(saved_fp_regs.Count() % 2 == 0);
+ DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
int offset = ExitFrameConstants::kLastExitFrameField;
while (!saved_fp_regs.IsEmpty()) {
@@ -2554,8 +2638,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
- const Register& scratch,
- bool restore_context) {
+ const Register& scratch) {
DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
@@ -2563,11 +2646,9 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
}
// Restore the context pointer from the top frame.
- if (restore_context) {
- Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- Ldr(cp, MemOperand(scratch));
- }
+ Mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ Ldr(cp, MemOperand(scratch));
if (emit_debug_code()) {
// Also emit debug code to clear the cp in the top frame.
@@ -2593,7 +2674,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value != 0);
+ DCHECK_NE(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch2, ExternalReference(counter));
Ldr(scratch1.W(), MemOperand(scratch2));
@@ -2644,23 +2725,11 @@ void MacroAssembler::CompareObjectType(Register object,
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
- Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Cmp(type_reg, type);
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- Mov(value, Operand(cell));
- Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
@@ -2668,19 +2737,6 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
DecodeField<Map::ElementsKindBits>(result);
}
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- Bind(&loop);
- JumpIfSmi(result, &done);
- CompareObjectType(result, temp, temp2, MAP_TYPE);
- B(ne, &done);
- Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- B(&loop);
- Bind(&done);
-}
-
void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
@@ -2751,44 +2807,6 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch1,
- SaveFPRegsMode fp_mode) {
- DCHECK(!AreAliased(object, address, scratch1));
- Label done, store_buffer_overflow;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, &ok);
- Abort(kRememberedSetPointerInNewSpace);
- bind(&ok);
- }
- UseScratchRegisterScope temps(this);
- Register scratch2 = temps.AcquireX();
-
- // Load store buffer top.
- Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
- Ldr(scratch1, MemOperand(scratch2));
- // Store pointer to buffer and increment buffer top.
- Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
- // Write back new top of buffer.
- Str(scratch1, MemOperand(scratch2));
- // Call stub on end of buffer.
- // Check for end of buffer.
- Tst(scratch1, StoreBuffer::kStoreBufferMask);
- B(eq, &store_buffer_overflow);
- Ret();
-
- Bind(&store_buffer_overflow);
- Push(lr);
- StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
- CallStub(&store_buffer_overflow_stub);
- Pop(lr);
-
- Bind(&done);
- Ret();
-}
-
-
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
DCHECK_GE(num_unsaved, 0);
@@ -2812,7 +2830,7 @@ void MacroAssembler::PushSafepointRegisters() {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
- DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+ DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6ffcffff);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
@@ -2909,7 +2927,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
CPURegList regs(lr);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -2921,7 +2939,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
CPURegList regs(lr);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -3014,13 +3032,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (lr_status == kLRHasNotBeenSaved) {
Push(padreg, lr);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (lr_status == kLRHasNotBeenSaved) {
Pop(lr, padreg);
}
@@ -3040,120 +3052,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-
-void MacroAssembler::AssertHasValidColor(const Register& reg) {
- if (emit_debug_code()) {
- // The bit sequence is backward. The first character in the string
- // represents the least significant bit.
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label color_is_valid;
- Tbnz(reg, 0, &color_is_valid);
- Tbz(reg, 1, &color_is_valid);
- Abort(kUnexpectedColorFound);
- Bind(&color_is_valid);
- }
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register shift_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
- DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
- // addr_reg is divided into fields:
- // |63 page base 20|19 high 8|7 shift 3|2 0|
- // 'high' gives the index of the cell holding color bits for the object.
- // 'shift' gives the offset in the cell for this object's color.
- const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
- Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
- Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
- // bitmap_reg:
- // |63 page base 20|19 zeros 15|14 high 3|2 0|
- Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register shift_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- // See mark-compact.h for color definitions.
- DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
-
- GetMarkBits(object, bitmap_scratch, shift_scratch);
- Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- // Shift the bitmap down to get the color of the object in bits [1:0].
- Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
-
- AssertHasValidColor(bitmap_scratch);
-
- // These bit sequences are backwards. The first character in the string
- // represents the least significant bit.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
-
- // Check for the color.
- if (first_bit == 0) {
- // Checking for white.
- DCHECK(second_bit == 0);
- // We only need to test the first bit.
- Tbz(bitmap_scratch, 0, has_color);
- } else {
- Label other_color;
- // Checking for grey or black.
- Tbz(bitmap_scratch, 0, &other_color);
- if (second_bit == 0) {
- Tbz(bitmap_scratch, 1, has_color);
- } else {
- Tbnz(bitmap_scratch, 1, has_color);
- }
- Bind(&other_color);
- }
-
- // Fall through if it does not have the right color.
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
-}
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register shift_scratch, Register load_scratch,
- Register length_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(
- value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
-
- // These bit sequences are backwards. The first character in the string
- // represents the least significant bit.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
-
- GetMarkBits(value, bitmap_scratch, shift_scratch);
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Lsr(load_scratch, load_scratch, shift_scratch);
-
- AssertHasValidColor(load_scratch);
-
- // If the value is black or grey we don't need to do anything.
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- Tbz(load_scratch, 0, value_is_white);
-}
-
void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code()) {
Check(cond, reason);
@@ -3399,7 +3297,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
dc32(arg_pattern_list); // kPrintfArgPatternListOffset
}
#else
- Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
+ Call(ExternalReference::printf_function(isolate()));
#endif
}
@@ -3541,7 +3439,7 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
}
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
- : reg_(NoReg), smi_check_delta_(0), smi_check_(NULL) {
+ : reg_(NoReg), smi_check_delta_(0), smi_check_(nullptr) {
InstructionSequence* inline_data = InstructionSequence::At(info);
DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 170266ca9d..035558fd81 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -659,6 +659,37 @@ class TurboAssembler : public Assembler {
// Emits a runtime assert that the CSP is aligned.
void AssertCspAligned();
+ // Copy slot_count stack slots from the stack offset specified by src to
+ // the stack offset specified by dst. The offsets and count are expressed in
+ // slot-sized units. Offset dst must be less than src, or the gap between
+ // them must be greater than or equal to slot_count, otherwise the result is
+ // unpredictable. The function may corrupt its register arguments. The
+ // registers must not alias each other.
+ void CopySlots(int dst, Register src, Register slot_count);
+ void CopySlots(Register dst, Register src, Register slot_count);
+
+ // Copy count double words from the address in register src to the address
+ // in register dst. There are two modes for this function:
+ // 1) Address dst must be less than src, or the gap between them must be
+ // greater than or equal to count double words, otherwise the result is
+ // unpredictable. This is the default mode.
+ // 2) Address src must be less than dst, or the gap between them must be
+ // greater than or equal to count double words, otherwise the result is
+ // undpredictable. In this mode, src and dst specify the last (highest)
+ // address of the regions to copy from and to.
+ // The case where src == dst is not supported.
+ // The function may corrupt its register arguments. The registers must not
+ // alias each other.
+ enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
+ void CopyDoubleWords(Register dst, Register src, Register count,
+ CopyDoubleWordsMode mode = kDstLessThanSrc);
+
+ // Calculate the address of a double word-sized slot at slot_offset from the
+ // stack pointer, and write it to dst. Positive slot_offsets are at addresses
+ // greater than sp, with slot zero at sp.
+ void SlotAddress(Register dst, int slot_offset);
+ void SlotAddress(Register dst, Register slot_offset);
+
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
// Helper function for double immediate.
@@ -681,12 +712,15 @@ class TurboAssembler : public Assembler {
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
// Drop arguments from stack without actually accessing memory.
- // This will currently drop 'count' arguments of the given size from the
- // stack.
+ // This will currently drop 'count' arguments from the stack.
+ // We assume the size of the arguments is the pointer size.
+ // An optional mode argument is passed, which can indicate we need to
+ // explicitly add the receiver to the count.
// TODO(arm64): Update this to round up the number of bytes dropped to
// a multiple of 16, so that we can remove jssp.
+ enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
inline void DropArguments(const Register& count,
- uint64_t unit_size = kXRegSize);
+ ArgumentsCountMode mode = kCountIncludesReceiver);
// Drop slots from stack without actually accessing memory.
// This will currently drop 'count' slots of the given size from the stack.
@@ -694,6 +728,10 @@ class TurboAssembler : public Assembler {
// a multiple of 16, so that we can remove jssp.
inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
+ // Push a single argument to the stack.
+ // TODO(arm64): Update this to push a padding slot above the argument.
+ inline void PushArgument(const Register& arg);
+
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()).
//
@@ -802,20 +840,15 @@ class TurboAssembler : public Assembler {
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg) const;
+ Register exclusion) const;
// Push caller saved registers on the stack, and return the number of bytes
// stack pointer is adjusted.
- int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
+
// Restore caller saved registers from the stack, and return the number of
// bytes stack pointer is adjusted.
- int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
// Move an immediate into register dst, and return an Operand object for use
// with a subsequent instruction that accepts a shift. The value moved into
@@ -853,7 +886,7 @@ class TurboAssembler : public Assembler {
inline void Brk(int code);
inline void JumpIfSmi(Register value, Label* smi_label,
- Label* not_smi_label = NULL);
+ Label* not_smi_label = nullptr);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
@@ -884,10 +917,9 @@ class TurboAssembler : public Assembler {
void Call(Label* target);
void Call(Address target, RelocInfo::Mode rmode);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
+ void Call(ExternalReference target);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
- Call(target, rmode);
- }
+ void CallForDeoptimization(Address target, RelocInfo::Mode rmode);
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
@@ -1197,7 +1229,8 @@ class TurboAssembler : public Assembler {
// If rm is the minimum representable value, the result is not representable.
// Handlers for each case can be specified using the relevant labels.
void Abs(const Register& rd, const Register& rm,
- Label* is_not_representable = NULL, Label* is_representable = NULL);
+ Label* is_not_representable = nullptr,
+ Label* is_representable = nullptr);
inline void Cls(const Register& rd, const Register& rn);
inline void Cneg(const Register& rd, const Register& rn, Condition cond);
@@ -1240,7 +1273,7 @@ class TurboAssembler : public Assembler {
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
// have mixed types. The format string (x0) should not be included.
- void CallPrintf(int arg_count = 0, const CPURegister* args = NULL);
+ void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
private:
bool has_frame_ = false;
@@ -1634,18 +1667,6 @@ class MacroAssembler : public TurboAssembler {
// csp must be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
// register.
inline void ClaimBySMI(const Register& count_smi,
@@ -1712,11 +1733,6 @@ class MacroAssembler : public TurboAssembler {
static int SafepointRegisterStackIndex(int reg_code);
- void LoadInstanceDescriptors(Register map,
- Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template<typename Field>
void DecodeField(Register dst, Register src) {
static const int shift = Field::kShift;
@@ -1741,14 +1757,12 @@ class MacroAssembler : public TurboAssembler {
inline void SmiTagAndPush(Register src1, Register src2);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
- inline void JumpIfBothSmi(Register value1,
- Register value2,
+ inline void JumpIfBothSmi(Register value1, Register value2,
Label* both_smi_label,
- Label* not_smi_label = NULL);
- inline void JumpIfEitherSmi(Register value1,
- Register value2,
+ Label* not_smi_label = nullptr);
+ inline void JumpIfEitherSmi(Register value1, Register value2,
Label* either_smi_label,
- Label* not_smi_label = NULL);
+ Label* not_smi_label = nullptr);
inline void JumpIfEitherNotSmi(Register value1,
Register value2,
Label* not_smi_label);
@@ -1778,7 +1792,7 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
- void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+ void AssertUndefinedOrAllocationSite(Register object);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
@@ -1792,17 +1806,13 @@ class MacroAssembler : public TurboAssembler {
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
VRegister scratch_d,
- Label* on_successful_conversion = NULL,
- Label* on_failed_conversion = NULL) {
+ Label* on_successful_conversion = nullptr,
+ Label* on_failed_conversion = nullptr) {
DCHECK(as_int.Is64Bits());
TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
on_failed_conversion);
}
- // ---- String Utilities ----
-
- void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
-
// ---- Calling / Jumping helpers ----
void CallStub(CodeStub* stub);
@@ -1867,11 +1877,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1906,12 +1911,6 @@ class MacroAssembler : public TurboAssembler {
Register type_reg,
InstanceType type);
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// Load the elements kind field from a map, and return it in the result
// register.
void LoadElementsKindFromMap(Register result, Register map);
@@ -1993,9 +1992,7 @@ class MacroAssembler : public TurboAssembler {
// * The stack pointer is reset to jssp.
//
// The stack pointer must be csp on entry.
- void LeaveExitFrame(bool save_doubles,
- const Register& scratch,
- bool restore_context);
+ void LeaveExitFrame(bool save_doubles, const Register& scratch);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -2013,13 +2010,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Garbage collector support (GC).
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch1,
- SaveFPRegsMode save_fp);
-
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -2028,18 +2018,6 @@ class MacroAssembler : public TurboAssembler {
void CheckPageFlag(const Register& object, const Register& scratch, int mask,
Condition cc, Label* condition_met);
- // Check if object is in new space and jump accordingly.
- // Register 'object' is preserved.
- void JumpIfNotInNewSpace(Register object,
- Label* branch) {
- InNewSpace(object, ne, branch);
- }
-
- void JumpIfInNewSpace(Register object,
- Label* branch) {
- InNewSpace(object, eq, branch);
- }
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -2060,36 +2038,6 @@ class MacroAssembler : public TurboAssembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Register scratch4, Label* value_is_white);
-
- // Helper for finding the mark bits for an address.
- // Note that the behaviour slightly differs from other architectures.
- // On exit:
- // - addr_reg is unchanged.
- // - The bitmap register points at the word with the mark bits.
- // - The shift register contains the index of the first color bit for this
- // object in the bitmap.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register shift_reg);
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
-
// ---------------------------------------------------------------------------
// Debugging.
@@ -2158,8 +2106,8 @@ class MacroAssembler : public TurboAssembler {
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt(Register as_int, VRegister value,
VRegister scratch_d,
- Label* on_successful_conversion = NULL,
- Label* on_failed_conversion = NULL);
+ Label* on_successful_conversion = nullptr,
+ Label* on_failed_conversion = nullptr);
public:
// Far branches resolving.
@@ -2278,9 +2226,7 @@ class InlineSmiCheckInfo {
public:
explicit InlineSmiCheckInfo(Address info);
- bool HasSmiCheck() const {
- return smi_check_ != NULL;
- }
+ bool HasSmiCheck() const { return smi_check_ != nullptr; }
const Register& SmiRegister() const {
return reg_;
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 9881bae26b..c01741c31e 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -72,9 +72,7 @@ void Simulator::TraceSim(const char* format, ...) {
}
}
-
-const Instruction* Simulator::kEndOfSimAddress = NULL;
-
+const Instruction* Simulator::kEndOfSimAddress = nullptr;
void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
int width = msb - lsb + 1;
@@ -82,7 +80,7 @@ void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
bits <<= lsb;
uint32_t mask = ((1 << width) - 1) << lsb;
- DCHECK((mask & write_ignore_mask_) == 0);
+ DCHECK_EQ(mask & write_ignore_mask_, 0);
value_ = (value_ & ~mask) | (bits & mask);
}
@@ -111,10 +109,10 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
} else {
@@ -333,7 +331,7 @@ uintptr_t Simulator::PopAddress() {
intptr_t current_sp = sp();
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
uintptr_t address = *stack_slot;
- DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
+ DCHECK_LT(sizeof(uintptr_t), 2 * kXRegSize);
set_sp(current_sp + 2 * kXRegSize);
return address;
}
@@ -352,11 +350,10 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return stack_limit_ + 1024;
}
-
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate, FILE* stream)
: decoder_(decoder),
- last_debugger_input_(NULL),
+ last_debugger_input_(nullptr),
log_parameters_(NO_PARAM),
isolate_(isolate) {
// Setup the decoder.
@@ -376,12 +373,11 @@ Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
}
}
-
Simulator::Simulator()
- : decoder_(NULL),
- last_debugger_input_(NULL),
+ : decoder_(nullptr),
+ last_debugger_input_(nullptr),
log_parameters_(NO_PARAM),
- isolate_(NULL) {
+ isolate_(nullptr) {
Init(stdout);
CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
}
@@ -414,7 +410,7 @@ void Simulator::ResetState() {
fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
// Reset registers to 0.
- pc_ = NULL;
+ pc_ = nullptr;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
set_xreg(i, 0xbadbeef);
}
@@ -473,7 +469,7 @@ class Redirection {
public:
Redirection(Isolate* isolate, void* external_function,
ExternalReference::Type type)
- : external_function_(external_function), type_(type), next_(NULL) {
+ : external_function_(external_function), type_(type), next_(nullptr) {
redirect_call_.SetInstructionBits(
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
next_ = isolate->simulator_redirection();
@@ -493,9 +489,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) {
- DCHECK_EQ(current->type(), type);
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
return current;
}
}
@@ -2219,7 +2215,7 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode) {
if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
- DCHECK(offset != 0);
+ DCHECK_NE(offset, 0);
uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
set_reg(addr_reg, address + offset, Reg31IsStackPointer);
}
@@ -2286,6 +2282,8 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
} else {
if (is_exclusive) {
unsigned rs = instr->Rs();
+ DCHECK_NE(rs, rt);
+ DCHECK_NE(rs, rn);
if (local_monitor_.NotifyStoreExcl(address,
get_transaction_size(access_size)) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
@@ -2570,7 +2568,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
case SMULH_x:
- DCHECK(instr->Ra() == kZeroRegCode);
+ DCHECK_EQ(instr->Ra(), kZeroRegCode);
result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
break;
default: UNIMPLEMENTED();
@@ -3216,12 +3214,12 @@ void Simulator::Debug() {
PrintInstructionsAt(pc_, 1);
// Read the command line.
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
// Repeat last command by default.
char* last_input = last_debugger_input();
- if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
+ if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
DeleteArray(line);
line = last_input;
} else {
@@ -3341,8 +3339,8 @@ void Simulator::Debug() {
// stack / mem ----------------------------------------------------------
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int64_t* cur = NULL;
- int64_t* end = NULL;
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -3504,7 +3502,7 @@ void Simulator::VisitException(Instruction* instr) {
// We are going to break, so printing something is not an issue in
// terms of speed.
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
- if (message != NULL) {
+ if (message != nullptr) {
PrintF(stream_,
"# %sDebugger hit %d: %s%s%s\n",
clr_debug_number,
@@ -3539,7 +3537,7 @@ void Simulator::VisitException(Instruction* instr) {
break;
default:
// We don't support a one-shot LOG_DISASM.
- DCHECK((parameters & LOG_DISASM) == 0);
+ DCHECK_EQ(parameters & LOG_DISASM, 0);
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
@@ -3554,7 +3552,7 @@ void Simulator::VisitException(Instruction* instr) {
pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
// - Verify that the unreachable marker is present.
DCHECK(pc_->Mask(ExceptionMask) == HLT);
- DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable);
+ DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);
// - Skip past the unreachable marker.
set_pc(pc_->following());
@@ -4341,7 +4339,7 @@ void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
SimVRegister& rd = vreg(instr->Rd());
SimVRegister& rn = vreg(instr->Rn());
- ByElementOp Op = NULL;
+ ByElementOp Op = nullptr;
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
@@ -5275,7 +5273,7 @@ void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
SimVRegister& rd = vreg(instr->Rd());
SimVRegister& rn = vreg(instr->Rn());
- ByElementOp Op = NULL;
+ ByElementOp Op = nullptr;
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
@@ -5717,8 +5715,8 @@ void Simulator::DoPrintf(Instruction* instr) {
instr + kPrintfArgPatternListOffset,
sizeof(arg_pattern_list));
- DCHECK(arg_count <= kPrintfMaxArgCount);
- DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+ DCHECK_LE(arg_count, kPrintfMaxArgCount);
+ DCHECK_EQ(arg_pattern_list >> (kPrintfArgPatternBits * arg_count), 0);
// We need to call the host printf function with a set of arguments defined by
// arg_pattern_list. Because we don't know the types and sizes of the
@@ -5730,7 +5728,7 @@ void Simulator::DoPrintf(Instruction* instr) {
// Leave enough space for one extra character per expected argument (plus the
// '\0' termination).
const char * format_base = reg<const char *>(0);
- DCHECK(format_base != NULL);
+ DCHECK_NOT_NULL(format_base);
size_t length = strlen(format_base) + 1;
char * const format = new char[length + arg_count];
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index c82bdd8c7a..0411c0bc96 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -690,8 +690,7 @@ class Simulator : public DecoderVisitor {
}
explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
- Isolate* isolate = NULL,
- FILE* stream = stderr);
+ Isolate* isolate = nullptr, FILE* stream = stderr);
Simulator();
~Simulator();
@@ -1700,9 +1699,9 @@ class Simulator : public DecoderVisitor {
LogicVRegister Table(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& ind, bool zero_out_of_bounds,
const LogicVRegister* tab1,
- const LogicVRegister* tab2 = NULL,
- const LogicVRegister* tab3 = NULL,
- const LogicVRegister* tab4 = NULL);
+ const LogicVRegister* tab2 = nullptr,
+ const LogicVRegister* tab3 = nullptr,
+ const LogicVRegister* tab4 = nullptr);
LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& tab, const LogicVRegister& ind);
LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
@@ -2206,7 +2205,7 @@ class Simulator : public DecoderVisitor {
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
- DCHECK(fpcr().FZ() == 0); // No flush-to-zero support.
+ DCHECK_EQ(fpcr().FZ(), 0); // No flush-to-zero support.
DCHECK(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
// The simulator does not support half-precision operations so fpcr().AHP()
diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/arm64/simulator-logic-arm64.cc
index 44a31c4097..03d1d37df9 100644
--- a/deps/v8/src/arm64/simulator-logic-arm64.cc
+++ b/deps/v8/src/arm64/simulator-logic-arm64.cc
@@ -2159,7 +2159,7 @@ LogicVRegister Simulator::Table(VectorFormat vform, LogicVRegister dst,
uint64_t j = ind.Uint(vform, i);
int tab_idx = static_cast<int>(j >> 4);
int j_idx = static_cast<int>(j & 15);
- if ((tab_idx < 4) && (tab[tab_idx] != NULL)) {
+ if ((tab_idx < 4) && (tab[tab_idx] != nullptr)) {
result[i] = tab[tab_idx]->Uint(kFormat16B, j_idx);
}
}
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index 26369d9875..8ef8420001 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -105,10 +105,10 @@ int CountTrailingZeros(uint64_t value, int width) {
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
- return static_cast<int>(base::bits::CountPopulation64(value));
+ return static_cast<int>(base::bits::CountPopulation(value));
}
- return static_cast<int>(base::bits::CountPopulation32(
- static_cast<uint32_t>(value & 0xfffffffff)));
+ return static_cast<int>(
+ base::bits::CountPopulation(static_cast<uint32_t>(value & 0xfffffffff)));
}
int LowestSetBitPosition(uint64_t value) {
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 97da2c2af2..5e3d0d0c2a 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -145,12 +145,11 @@ void ReportCompilationSuccess(Handle<Script> script, int position,
}
// Hook to report failed execution of {AsmJs::CompileAsmViaWasm} phase.
-void ReportCompilationFailure(Handle<Script> script, int position,
+void ReportCompilationFailure(ParseInfo* parse_info, int position,
const char* reason) {
if (FLAG_suppress_asm_messages) return;
- Vector<const char> text = CStrVector(reason);
- Report(script, position, text, MessageTemplate::kAsmJsInvalid,
- v8::Isolate::kMessageWarning);
+ parse_info->pending_error_handler()->ReportWarningAt(
+ position, position, MessageTemplate::kAsmJsInvalid, reason);
}
// Hook to report successful execution of {AsmJs::InstantiateAsmWasm} phase.
@@ -187,69 +186,70 @@ void ReportInstantiationFailure(Handle<Script> script, int position,
class AsmJsCompilationJob final : public CompilationJob {
public:
explicit AsmJsCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
- Isolate* isolate)
- : CompilationJob(isolate, parse_info, &compilation_info_, "AsmJs"),
- zone_(isolate->allocator(), ZONE_NAME),
- compilation_info_(&zone_, isolate, parse_info, literal),
+ AccountingAllocator* allocator)
+ : CompilationJob(parse_info->stack_limit(), parse_info,
+ &compilation_info_, "AsmJs", State::kReadyToExecute),
+ allocator_(allocator),
+ zone_(allocator, ZONE_NAME),
+ compilation_info_(&zone_, parse_info, literal),
module_(nullptr),
asm_offsets_(nullptr),
translate_time_(0),
- compile_time_(0) {}
+ compile_time_(0),
+ module_source_size_(0),
+ translate_time_micro_(0),
+ translate_zone_size_(0) {}
protected:
- Status PrepareJobImpl() final;
+ Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
- Status FinalizeJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
private:
+ void RecordHistograms(Isolate* isolate);
+
+ AccountingAllocator* allocator_;
Zone zone_;
CompilationInfo compilation_info_;
wasm::ZoneBuffer* module_;
wasm::ZoneBuffer* asm_offsets_;
wasm::AsmJsParser::StdlibSet stdlib_uses_;
- double translate_time_; // Time (milliseconds) taken to execute step [1].
- double compile_time_; // Time (milliseconds) taken to execute step [2].
+ double translate_time_; // Time (milliseconds) taken to execute step [1].
+ double compile_time_; // Time (milliseconds) taken to execute step [2].
+ int module_source_size_; // Module source size in bytes.
+ int64_t translate_time_micro_; // Time (microseconds) taken to translate.
+ size_t translate_zone_size_;
DISALLOW_COPY_AND_ASSIGN(AsmJsCompilationJob);
};
-CompilationJob::Status AsmJsCompilationJob::PrepareJobImpl() {
+CompilationJob::Status AsmJsCompilationJob::PrepareJobImpl(Isolate* isolate) {
+ UNREACHABLE(); // Prepare should always be skipped.
return SUCCEEDED;
}
CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
// Step 1: Translate asm.js module to WebAssembly module.
- HistogramTimerScope translate_time_scope(
- compilation_info()->isolate()->counters()->asm_wasm_translation_time());
size_t compile_zone_start = compilation_info()->zone()->allocation_size();
base::ElapsedTimer translate_timer;
translate_timer.Start();
Zone* compile_zone = compilation_info()->zone();
- Zone translate_zone(compilation_info()->isolate()->allocator(), ZONE_NAME);
+ Zone translate_zone(allocator_, ZONE_NAME);
Utf16CharacterStream* stream = parse_info()->character_stream();
base::Optional<AllowHandleDereference> allow_deref;
if (stream->can_access_heap()) {
- DCHECK(
- ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
allow_deref.emplace();
}
stream->Seek(compilation_info()->literal()->start_position());
wasm::AsmJsParser parser(&translate_zone, stack_limit(), stream);
if (!parser.Run()) {
- // TODO(rmcilroy): Temporarily allow heap access here until we have a
- // mechanism for delaying pending messages.
- DCHECK(
- ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
- AllowHeapAllocation allow_allocation;
- AllowHandleAllocation allow_handles;
- allow_deref.emplace();
-
- DCHECK(!compilation_info()->isolate()->has_pending_exception());
- ReportCompilationFailure(parse_info()->script(), parser.failure_location(),
- parser.failure_message());
+ if (!FLAG_suppress_asm_messages) {
+ ReportCompilationFailure(parse_info(), parser.failure_location(),
+ parser.failure_message());
+ }
return FAILED;
}
module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
@@ -260,50 +260,32 @@ CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
size_t compile_zone_size =
compilation_info()->zone()->allocation_size() - compile_zone_start;
- size_t translate_zone_size = translate_zone.allocation_size();
- compilation_info()
- ->isolate()
- ->counters()
- ->asm_wasm_translation_peak_memory_bytes()
- ->AddSample(static_cast<int>(translate_zone_size));
+ translate_zone_size_ = translate_zone.allocation_size();
translate_time_ = translate_timer.Elapsed().InMillisecondsF();
- int module_size = compilation_info()->literal()->end_position() -
- compilation_info()->literal()->start_position();
- compilation_info()->isolate()->counters()->asm_module_size_bytes()->AddSample(
- module_size);
- int64_t translate_time_micro = translate_timer.Elapsed().InMicroseconds();
- int translation_throughput =
- translate_time_micro != 0
- ? static_cast<int>(static_cast<int64_t>(module_size) /
- translate_time_micro)
- : 0;
- compilation_info()
- ->isolate()
- ->counters()
- ->asm_wasm_translation_throughput()
- ->AddSample(translation_throughput);
+ translate_time_micro_ = translate_timer.Elapsed().InMicroseconds();
+ module_source_size_ = compilation_info()->literal()->end_position() -
+ compilation_info()->literal()->start_position();
if (FLAG_trace_asm_parser) {
PrintF(
"[asm.js translation successful: time=%0.3fms, "
"translate_zone=%" PRIuS "KB, compile_zone+=%" PRIuS "KB]\n",
- translate_time_, translate_zone_size / KB, compile_zone_size / KB);
+ translate_time_, translate_zone_size_ / KB, compile_zone_size / KB);
}
return SUCCEEDED;
}
-CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
+CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
// Step 2: Compile and decode the WebAssembly module.
base::ElapsedTimer compile_timer;
compile_timer.Start();
Handle<HeapNumber> uses_bitset =
- compilation_info()->isolate()->factory()->NewHeapNumberFromBits(
- stdlib_uses_.ToIntegral());
+ isolate->factory()->NewHeapNumberFromBits(stdlib_uses_.ToIntegral());
- wasm::ErrorThrower thrower(compilation_info()->isolate(), "AsmJs::Compile");
+ wasm::ErrorThrower thrower(isolate, "AsmJs::Compile");
Handle<WasmModuleObject> compiled =
SyncCompileTranslatedAsmJs(
- compilation_info()->isolate(), &thrower,
+ isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
parse_info()->script(),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
@@ -313,24 +295,41 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
// The result is a compiled module and serialized standard library uses.
Handle<FixedArray> result =
- compilation_info()->isolate()->factory()->NewFixedArray(
- kWasmDataEntryCount);
+ isolate->factory()->NewFixedArray(kWasmDataEntryCount);
result->set(kWasmDataCompiledModule, *compiled);
result->set(kWasmDataUsesBitSet, *uses_bitset);
compilation_info()->SetAsmWasmData(result);
- compilation_info()->SetCode(
- BUILTIN_CODE(compilation_info()->isolate(), InstantiateAsmJs));
+ compilation_info()->SetCode(BUILTIN_CODE(isolate, InstantiateAsmJs));
+ RecordHistograms(isolate);
ReportCompilationSuccess(parse_info()->script(),
compilation_info()->literal()->position(),
translate_time_, compile_time_, module_->size());
return SUCCEEDED;
}
+void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
+ Counters* counters = isolate->counters();
+ counters->asm_wasm_translation_time()->AddSample(
+ static_cast<int>(translate_time_micro_));
+ counters->asm_wasm_translation_peak_memory_bytes()->AddSample(
+ static_cast<int>(translate_zone_size_));
+ counters->asm_module_size_bytes()->AddSample(module_source_size_);
+ // translation_throughput is not exact (assumes MB == 1000000). But that is ok
+ // since the metric is stored in buckets that lose some precision anyways.
+ int translation_throughput =
+ translate_time_micro_ != 0
+ ? static_cast<int>(static_cast<int64_t>(module_source_size_) /
+ translate_time_micro_)
+ : 0;
+ counters->asm_wasm_translation_throughput()->AddSample(
+ translation_throughput);
+}
+
CompilationJob* AsmJs::NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
- Isolate* isolate) {
- return new AsmJsCompilationJob(parse_info, literal, isolate);
+ AccountingAllocator* allocator) {
+ return new AsmJsCompilationJob(parse_info, literal, allocator);
}
MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index a0f2ba30c5..9d2ab752ed 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -12,6 +12,7 @@
namespace v8 {
namespace internal {
+class AccountingAllocator;
class CompilationInfo;
class CompilationJob;
class FunctionLiteral;
@@ -24,7 +25,7 @@ class AsmJs {
public:
static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
- Isolate* isolate);
+ AccountingAllocator* allocator);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo>,
Handle<FixedArray> wasm_data,
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index d0eb1050f6..c38c52220d 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -230,7 +230,7 @@ wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo(
}
uint32_t AsmJsParser::VarIndex(VarInfo* info) {
- DCHECK(info->kind == VarKind::kGlobal);
+ DCHECK_EQ(info->kind, VarKind::kGlobal);
return info->index + static_cast<uint32_t>(global_imports_.size());
}
@@ -292,6 +292,9 @@ void AsmJsParser::Begin(AsmJsScanner::token_t label) {
void AsmJsParser::Loop(AsmJsScanner::token_t label) {
BareBegin(BlockKind::kLoop, label);
+ int position = static_cast<int>(scanner_.Position());
+ DCHECK_EQ(position, scanner_.Position());
+ current_function_builder_->AddAsmWasmOffset(position, position);
current_function_builder_->EmitWithU8(kExprLoop, kLocalVoid);
}
@@ -308,7 +311,7 @@ void AsmJsParser::BareBegin(BlockKind kind, AsmJsScanner::token_t label) {
}
void AsmJsParser::BareEnd() {
- DCHECK(block_stack_.size() > 0);
+ DCHECK_GT(block_stack_.size(), 0);
block_stack_.pop_back();
}
@@ -797,7 +800,7 @@ void AsmJsParser::ValidateFunction() {
}
function_info = GetVarInfo(function_name);
if (function_info->type->IsA(AsmType::None())) {
- DCHECK(function_info->kind == VarKind::kFunction);
+ DCHECK_EQ(function_info->kind, VarKind::kFunction);
function_info->type = function_type;
} else if (!function_type->IsA(function_info->type)) {
// TODO(bradnelson): Should IsExactly be used here?
@@ -1164,18 +1167,18 @@ void AsmJsParser::DoStatement() {
RECURSE(ValidateStatement());
EXPECT_TOKEN(TOK(while));
End();
- // }
+ // } // end c
EXPECT_TOKEN('(');
RECURSE(Expression(AsmType::Int()));
- // if (CONDITION) break a;
+ // if (!CONDITION) break a;
current_function_builder_->Emit(kExprI32Eqz);
current_function_builder_->EmitWithU8(kExprBrIf, 1);
// continue b;
current_function_builder_->EmitWithU8(kExprBr, 0);
EXPECT_TOKEN(')');
- // }
+ // } // end b
End();
- // }
+ // } // end a
End();
SkipSemicolon();
}
@@ -1195,13 +1198,16 @@ void AsmJsParser::ForStatement() {
// a: block {
Begin(pending_label_);
// b: loop {
- Loop(pending_label_);
+ Loop();
+ // c: block { // but treated like loop so continue works
+ BareBegin(BlockKind::kLoop, pending_label_);
+ current_function_builder_->EmitWithU8(kExprBlock, kLocalVoid);
pending_label_ = 0;
if (!Peek(';')) {
- // if (CONDITION) break a;
+ // if (!CONDITION) break a;
RECURSE(Expression(AsmType::Int()));
current_function_builder_->Emit(kExprI32Eqz);
- current_function_builder_->EmitWithU8(kExprBrIf, 1);
+ current_function_builder_->EmitWithU8(kExprBrIf, 2);
}
EXPECT_TOKEN(';');
// Race past INCREMENT
@@ -1210,18 +1216,21 @@ void AsmJsParser::ForStatement() {
EXPECT_TOKEN(')');
// BODY
RECURSE(ValidateStatement());
- // INCREMENT
+ // } // end c
+ End();
+ // INCREMENT
size_t end_position = scanner_.Position();
scanner_.Seek(increment_position);
if (!Peek(')')) {
RECURSE(Expression(nullptr));
// NOTE: No explicit drop because below break is an implicit drop.
}
+ // continue b;
current_function_builder_->EmitWithU8(kExprBr, 0);
scanner_.Seek(end_position);
- // }
+ // } // end b
End();
- // }
+ // } // end a
End();
}
@@ -1392,11 +1401,10 @@ AsmType* AsmJsParser::NumericLiteral() {
if (uvalue <= 0x7fffffff) {
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::FixNum();
- } else if (uvalue <= 0xffffffff) {
+ } else {
+ DCHECK_LE(uvalue, 0xffffffff);
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::Unsigned();
- } else {
- FAILn("Integer numeric literal out of range.");
}
} else {
FAILn("Expected numeric literal.");
@@ -2195,7 +2203,7 @@ AsmType* AsmJsParser::ValidateCall() {
if (return_type->IsA(AsmType::Float())) {
FAILn("Imported function can't be called as float");
}
- DCHECK(function_info->import != nullptr);
+ DCHECK_NOT_NULL(function_info->import);
// TODO(bradnelson): Factor out.
uint32_t index;
auto it = function_info->import->cache.find(sig);
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index fe9cabf9d6..910fe37546 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -255,15 +255,15 @@ void AsmJsScanner::ConsumeIdentifier(uc32 ch) {
}
}
if (preceding_token_ == '.') {
- CHECK(global_count_ < kMaxIdentifierCount);
+ CHECK_LT(global_count_, kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
property_names_[identifier_string_] = token_;
} else if (in_local_scope_) {
- CHECK(local_names_.size() < kMaxIdentifierCount);
+ CHECK_LT(local_names_.size(), kMaxIdentifierCount);
token_ = kLocalsStart - static_cast<token_t>(local_names_.size());
local_names_[identifier_string_] = token_;
} else {
- CHECK(global_count_ < kMaxIdentifierCount);
+ CHECK_LT(global_count_, kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
global_names_[identifier_string_] = token_;
}
diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc
index d864324b76..5ec242769b 100644
--- a/deps/v8/src/asmjs/asm-types.cc
+++ b/deps/v8/src/asmjs/asm-types.cc
@@ -228,8 +228,8 @@ class AsmMinMaxType final : public AsmCallableType {
} // namespace
AsmType* AsmType::MinMaxType(Zone* zone, AsmType* dest, AsmType* src) {
- DCHECK(dest->AsValueType() != nullptr);
- DCHECK(src->AsValueType() != nullptr);
+ DCHECK_NOT_NULL(dest->AsValueType());
+ DCHECK_NOT_NULL(src->AsValueType());
auto* MinMax = new (zone) AsmMinMaxType(dest, src);
return reinterpret_cast<AsmType*>(MinMax);
}
@@ -300,7 +300,7 @@ bool AsmOverloadedFunctionType::CanBeInvokedWith(
}
void AsmOverloadedFunctionType::AddOverload(AsmType* overload) {
- DCHECK(overload->AsCallableType() != nullptr);
+ DCHECK_NOT_NULL(overload->AsCallableType());
overloads_.push_back(overload);
}
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index e1275c6e91..061d465def 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -83,8 +83,8 @@ class AsmValueType {
}
bitset_t Bitset() const {
- DCHECK((reinterpret_cast<uintptr_t>(this) & kAsmValueTypeTag) ==
- kAsmValueTypeTag);
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & kAsmValueTypeTag,
+ kAsmValueTypeTag);
return static_cast<bitset_t>(reinterpret_cast<uintptr_t>(this) &
~kAsmValueTypeTag);
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index b36c494129..90d7ac3ff8 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -163,9 +163,9 @@ AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
predictable_code_size_(false),
constant_pool_available_(false),
jump_optimization_info_(nullptr) {
- own_buffer_ = buffer == NULL;
+ own_buffer_ = buffer == nullptr;
if (buffer_size == 0) buffer_size = kMinimalBufferSize;
- DCHECK(buffer_size > 0);
+ DCHECK_GT(buffer_size, 0);
if (own_buffer_) buffer = NewArray<byte>(buffer_size);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
@@ -313,24 +313,20 @@ void RelocInfo::set_global_handle(Isolate* isolate, Address address,
set_embedded_address(isolate, address, icache_flush_mode);
}
-Address RelocInfo::global_handle() const {
- DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
- return embedded_address();
+Address RelocInfo::wasm_call_address() const {
+ DCHECK_EQ(rmode_, WASM_CALL);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
-void RelocInfo::update_wasm_global_reference(
- Isolate* isolate, Address old_base, Address new_base,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmGlobalReference(rmode_));
- Address updated_reference;
- DCHECK_LE(old_base, wasm_global_reference());
- updated_reference = new_base + (wasm_global_reference() - old_base);
- DCHECK_LE(new_base, updated_reference);
- set_embedded_address(isolate, updated_reference, icache_flush_mode);
+void RelocInfo::set_wasm_call_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, WASM_CALL);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ icache_flush_mode);
}
-Address RelocInfo::wasm_global_reference() const {
- DCHECK(IsWasmGlobalReference(rmode_));
+Address RelocInfo::global_handle() const {
+ DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
return embedded_address();
}
@@ -354,10 +350,10 @@ void RelocInfo::update_wasm_function_table_size_reference(
void RelocInfo::set_target_address(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
Assembler::set_target_address_at(isolate, pc_, host_, target,
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
@@ -372,7 +368,7 @@ uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
WriteMode(RelocInfo::PC_JUMP);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
- DCHECK(pc_jump > 0);
+ DCHECK_GT(pc_jump, 0);
// Write kChunkBits size chunks of the pc_jump.
for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
byte b = pc_jump & kChunkMask;
@@ -428,7 +424,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
byte* begin_pos = pos_;
#endif
DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
- DCHECK(rinfo->pc() - last_pc_ >= 0);
+ DCHECK_GE(rinfo->pc() - last_pc_, 0);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
@@ -437,7 +433,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
- DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
+ DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::DEOPT_REASON) {
DCHECK(rinfo->data() < (1 << kBitsPerByte));
WriteShortTaggedPC(pc_delta, kLocatableTag);
@@ -448,15 +444,14 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode) ||
- RelocInfo::IsWasmProtectedLanding(rmode)) {
+ RelocInfo::IsDeoptPosition(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
last_pc_ = rinfo->pc();
last_mode_ = rmode;
#ifdef DEBUG
- DCHECK(begin_pos - pos_ <= kMaxSize);
+ DCHECK_LE(begin_pos - pos_, kMaxSize);
#endif
}
@@ -536,7 +531,7 @@ void RelocIterator::next() {
return;
}
} else {
- DCHECK(tag == kDefaultTag);
+ DCHECK_EQ(tag, kDefaultTag);
RelocInfo::Mode rmode = GetMode();
if (rmode == RelocInfo::PC_JUMP) {
AdvanceReadLongPCJump();
@@ -551,8 +546,7 @@ void RelocIterator::next() {
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode) ||
- RelocInfo::IsWasmProtectedLanding(rmode)) {
+ RelocInfo::IsDeoptPosition(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -571,6 +565,7 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
+ rinfo_.constant_pool_ = code->constant_pool();
// Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
@@ -592,6 +587,21 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
next();
}
+RelocIterator::RelocIterator(Vector<byte> instructions,
+ Vector<const byte> reloc_info, Address const_pool,
+ int mode_mask) {
+ rinfo_.pc_ = instructions.start();
+ rinfo_.data_ = 0;
+ rinfo_.constant_pool_ = const_pool;
+ // Relocation info is read backwards.
+ pos_ = reloc_info.start() + reloc_info.size();
+ end_ = reloc_info.start();
+ done_ = false;
+ mode_mask_ = mode_mask;
+ if (mode_mask_ == 0) pos_ = end_;
+ next();
+}
+
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -643,14 +653,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "veneer pool";
case WASM_CONTEXT_REFERENCE:
return "wasm context reference";
- case WASM_GLOBAL_REFERENCE:
- return "wasm global value reference";
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
return "wasm function table size reference";
- case WASM_PROTECTED_INSTRUCTION_LANDING:
- return "wasm protected instruction landing";
case WASM_GLOBAL_HANDLE:
return "global handle";
+ case WASM_CALL:
+ return "internal wasm call";
+ case JS_TO_WASM_CALL:
+ return "js to wasm call";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -679,8 +689,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
<< static_cast<const void*>(target_address()) << ")";
- } else if (IsRuntimeEntry(rmode_) &&
- isolate->deoptimizer_data() != NULL) {
+ } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
// Depotimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
isolate, target_address(), Deoptimizer::EAGER);
@@ -704,7 +713,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
- CHECK(addr != NULL);
+ CHECK_NOT_NULL(addr);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = isolate->FindCodeObject(addr);
@@ -731,11 +740,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case CONST_POOL:
case VENEER_POOL:
case WASM_CONTEXT_REFERENCE:
- case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_GLOBAL_HANDLE:
- case WASM_PROTECTED_INSTRUCTION_LANDING:
- // TODO(eholk): make sure the protected instruction is in range.
+ case WASM_CALL:
+ case JS_TO_WASM_CALL:
case NONE32:
case NONE64:
break;
@@ -773,10 +781,9 @@ ExternalReference::ExternalReference(Address address, Isolate* isolate)
: address_(Redirect(isolate, address)) {}
ExternalReference::ExternalReference(
- ApiFunction* fun,
- Type type = ExternalReference::BUILTIN_CALL,
- Isolate* isolate = NULL)
- : address_(Redirect(isolate, fun->address(), type)) {}
+ ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL,
+ Isolate* isolate = nullptr)
+ : address_(Redirect(isolate, fun->address(), type)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
: ExternalReference(Runtime::FunctionForId(id), isolate) {}
@@ -853,7 +860,7 @@ ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
void ExternalReference::set_redirector(
Isolate* isolate, ExternalReferenceRedirector* redirector) {
// We can't stack them.
- DCHECK(isolate->external_reference_redirector() == NULL);
+ DCHECK_NULL(isolate->external_reference_redirector());
isolate->set_external_reference_redirector(
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
@@ -862,6 +869,10 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
}
+ExternalReference ExternalReference::force_slow_path(Isolate* isolate) {
+ return ExternalReference(isolate->force_slow_path_address());
+}
+
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
@@ -1028,7 +1039,7 @@ ExternalReference ExternalReference::wasm_clear_thread_in_wasm_flag(
static void f64_mod_wrapper(double* param0, double* param1) {
WriteDoubleValue(param0,
- modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
+ Modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
}
ExternalReference ExternalReference::f64_mod_wrapper_function(
@@ -1077,11 +1088,6 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit(
return ExternalReference(isolate->regexp_stack()->limit_address());
}
-ExternalReference ExternalReference::address_of_regexp_dotall_flag(
- Isolate* isolate) {
- return ExternalReference(&FLAG_harmony_regexp_dotall);
-}
-
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
@@ -1397,6 +1403,10 @@ ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
}
+ExternalReference ExternalReference::printf_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(std::printf)));
+}
+
template <typename SubjectChar, typename PatternChar>
ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
auto f = SearchStringRaw<SubjectChar, PatternChar>;
@@ -1415,6 +1425,13 @@ ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
+ExternalReference ExternalReference::jsreceiver_create_identity_hash(
+ Isolate* isolate) {
+ typedef Smi* (*CreateIdentityHash)(Isolate * isolate, JSReceiver * key);
+ CreateIdentityHash f = JSReceiver::CreateIdentityHash;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
ExternalReference
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
Isolate* isolate) {
@@ -1542,6 +1559,8 @@ double power_double_double(double x, double y) {
return Pow(x, y);
}
+double modulo_double_double(double x, double y) { return Modulo(x, y); }
+
ExternalReference ExternalReference::power_double_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@@ -1551,9 +1570,8 @@ ExternalReference ExternalReference::power_double_double_function(
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(modulo),
- BUILTIN_FP_FP_CALL));
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(modulo_double_double), BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::debug_last_step_action_address(
@@ -1685,7 +1703,7 @@ void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
const int entry_size = ConstantPoolEntry::size(type);
int base = emitted_label_.pos();
- DCHECK(base > 0);
+ DCHECK_GT(base, 0);
int shared_end = static_cast<int>(shared_entries.size());
std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
for (int i = 0; i < shared_end; i++, shared_it++) {
@@ -1713,7 +1731,7 @@ void ConstantPoolBuilder::EmitGroup(Assembler* assm,
std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
const int entry_size = ConstantPoolEntry::size(type);
int base = emitted_label_.pos();
- DCHECK(base > 0);
+ DCHECK_GT(base, 0);
int begin;
int end;
@@ -1842,7 +1860,7 @@ void SetUpJSCallerSavedCodeData() {
for (int r = 0; r < kNumRegs; r++)
if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_codes[i++] = r;
- DCHECK(i == kNumJSCallerSaved);
+ DCHECK_EQ(i, kNumJSCallerSaved);
}
int JSCallerSavedCode(int n) {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 2ebe88d534..1e8365dcee 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -54,6 +54,9 @@ namespace v8 {
class ApiFunction;
namespace internal {
+namespace wasm {
+class WasmCode;
+}
// Forward declarations.
class Isolate;
@@ -287,12 +290,12 @@ class CpuFeatures : public AllStatic {
static inline bool SupportsWasmSimd128();
static inline unsigned icache_line_size() {
- DCHECK(icache_line_size_ != 0);
+ DCHECK_NE(icache_line_size_, 0);
return icache_line_size_;
}
static inline unsigned dcache_line_size() {
- DCHECK(dcache_line_size_ != 0);
+ DCHECK_NE(dcache_line_size_, 0);
return dcache_line_size_;
}
@@ -364,10 +367,10 @@ class RelocInfo {
// wasm code. Everything after WASM_CONTEXT_REFERENCE (inclusive) is not
// GC'ed.
WASM_CONTEXT_REFERENCE,
- WASM_GLOBAL_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
- WASM_PROTECTED_INSTRUCTION_LANDING,
WASM_GLOBAL_HANDLE,
+ WASM_CALL,
+ JS_TO_WASM_CALL,
RUNTIME_ENTRY,
COMMENT,
@@ -423,6 +426,7 @@ class RelocInfo {
static inline bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
+ static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@@ -460,9 +464,6 @@ class RelocInfo {
static inline bool IsWasmContextReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE;
}
- static inline bool IsWasmGlobalReference(Mode mode) {
- return mode == WASM_GLOBAL_REFERENCE;
- }
static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
@@ -473,11 +474,8 @@ class RelocInfo {
return IsWasmFunctionTableSizeReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
- return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
- mode == WASM_GLOBAL_HANDLE;
- }
- static inline bool IsWasmProtectedLanding(Mode mode) {
- return mode == WASM_PROTECTED_INSTRUCTION_LANDING;
+ return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_HANDLE ||
+ mode == WASM_CALL || mode == JS_TO_WASM_CALL;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
@@ -488,7 +486,6 @@ class RelocInfo {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code* host() const { return host_; }
- void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@@ -506,17 +503,14 @@ class RelocInfo {
bool IsInConstantPool();
Address wasm_context_reference() const;
- Address wasm_global_reference() const;
uint32_t wasm_function_table_size_reference() const;
- uint32_t wasm_memory_size_reference() const;
Address global_handle() const;
+ Address js_to_wasm_address() const;
+ Address wasm_call_address() const;
void set_wasm_context_reference(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void update_wasm_global_reference(
- Isolate* isolate, Address old_base, Address new_base,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
Isolate* isolate, uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
@@ -528,6 +522,12 @@ class RelocInfo {
void set_global_handle(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_wasm_call_address(
+ Isolate*, Address,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_js_to_wasm_address(
+ Isolate*, Address,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -625,7 +625,11 @@ class RelocInfo {
byte* pc_;
Mode rmode_;
intptr_t data_;
+ // TODO(mtrofin): try remove host_, if all we need is the constant_pool_ or
+ // other few attributes, like start address, etc. This is so that we can reuse
+ // RelocInfo for WasmCode without having a modal design.
Code* host_;
+ Address constant_pool_ = nullptr;
friend class RelocIterator;
};
@@ -634,7 +638,7 @@ class RelocInfo {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
- RelocInfoWriter() : pos_(NULL), last_pc_(NULL) {}
+ RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
@@ -691,6 +695,11 @@ class RelocIterator: public Malloced {
// iteration iff bit k of mode_mask is set.
explicit RelocIterator(Code* code, int mode_mask = -1);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
+ explicit RelocIterator(Vector<byte> instructions,
+ Vector<const byte> reloc_info, Address const_pool,
+ int mode_mask = -1);
+ RelocIterator(RelocIterator&&) = default;
+ RelocIterator& operator=(RelocIterator&&) = default;
// Iteration
bool done() const { return done_; }
@@ -725,8 +734,8 @@ class RelocIterator: public Malloced {
return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
}
- byte* pos_;
- byte* end_;
+ const byte* pos_;
+ const byte* end_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@@ -799,7 +808,7 @@ class ExternalReference BASE_EMBEDDED {
typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
Type type);
- ExternalReference() : address_(NULL) {}
+ ExternalReference() : address_(nullptr) {}
ExternalReference(Address address, Isolate* isolate);
@@ -898,9 +907,6 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
- // Direct access to FLAG_harmony_regexp_dotall.
- static ExternalReference address_of_regexp_dotall_flag(Isolate* isolate);
-
// Static variables for RegExp.
static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
static ExternalReference address_of_regexp_stack_memory_address(
@@ -969,6 +975,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference libc_memmove_function(Isolate* isolate);
static ExternalReference libc_memset_function(Isolate* isolate);
+ static ExternalReference printf_function(Isolate* isolate);
+
static ExternalReference try_internalize_string_function(Isolate* isolate);
static ExternalReference check_object_type(Isolate* isolate);
@@ -984,6 +992,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference orderedhashmap_gethash_raw(Isolate* isolate);
static ExternalReference get_or_create_hash_raw(Isolate* isolate);
+ static ExternalReference jsreceiver_create_identity_hash(Isolate* isolate);
static ExternalReference copy_fast_number_jsarray_elements_to_typed_array(
Isolate* isolate);
@@ -1045,6 +1054,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference stress_deopt_count(Isolate* isolate);
+ static ExternalReference force_slow_path(Isolate* isolate);
+
static ExternalReference fixed_typed_array_base_data_offset();
private:
@@ -1058,8 +1069,9 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer =
- (redirector == NULL) ? address : (*redirector)(isolate, address, type);
+ void* answer = (redirector == nullptr)
+ ? address
+ : (*redirector)(isolate, address, type);
return answer;
}
@@ -1110,11 +1122,11 @@ class ConstantPoolEntry {
DCHECK(is_merged());
}
int offset(void) const {
- DCHECK(merged_index_ >= 0);
+ DCHECK_GE(merged_index_, 0);
return merged_index_;
}
void set_offset(int offset) {
- DCHECK(offset >= 0);
+ DCHECK_GE(offset, 0);
merged_index_ = offset;
}
intptr_t value() const { return value_; }
@@ -1263,6 +1275,10 @@ class HeapObjectRequest {
// and best performance in optimized code.
template <typename SubType, int kAfterLastRegister>
class RegisterBase {
+ // Internal enum class; used for calling constexpr methods, where we need to
+ // pass an integral type as template parameter.
+ enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };
+
public:
static constexpr int kCode_no_reg = -1;
static constexpr int kNumRegisters = kAfterLastRegister;
@@ -1275,12 +1291,34 @@ class RegisterBase {
return SubType{code};
}
+ constexpr operator RegisterCode() const {
+ return static_cast<RegisterCode>(reg_code_);
+ }
+
+ template <RegisterCode reg_code>
+ static constexpr int code() {
+ static_assert(
+ reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
+ "must be valid reg");
+ return static_cast<int>(reg_code);
+ }
+
+ template <RegisterCode reg_code>
+ static constexpr int bit() {
+ return 1 << code<reg_code>();
+ }
+
static SubType from_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kNumRegisters, code);
return SubType{code};
}
+ template <RegisterCode... reg_codes>
+ static constexpr RegList ListOf() {
+ return CombineRegLists(RegisterBase::bit<reg_codes>()...);
+ }
+
bool is_valid() const { return reg_code_ != kCode_no_reg; }
int code() const {
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index 82ecdc81ee..643967411f 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -71,7 +71,7 @@ class PerThreadAssertData final {
template <PerThreadAssertType kType, bool kAllow>
PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
: data_(PerThreadAssertData::GetCurrent()) {
- if (data_ == NULL) {
+ if (data_ == nullptr) {
data_ = new PerThreadAssertData();
PerThreadAssertData::SetCurrent(data_);
}
@@ -92,7 +92,7 @@ void PerThreadAssertScope<kType, kAllow>::Release() {
DCHECK_NOT_NULL(data_);
data_->Set(kType, old_state_);
if (data_->DecrementLevel()) {
- PerThreadAssertData::SetCurrent(NULL);
+ PerThreadAssertData::SetCurrent(nullptr);
delete data_;
}
data_ = nullptr;
@@ -102,7 +102,7 @@ void PerThreadAssertScope<kType, kAllow>::Release() {
template <PerThreadAssertType kType, bool kAllow>
bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
PerThreadAssertData* data = PerThreadAssertData::GetCurrent();
- return data == NULL || data->Get(kType);
+ return data == nullptr || data->Get(kType);
}
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
deleted file mode 100644
index 02a4408a60..0000000000
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/ast-expression-rewriter.h"
-#include "src/ast/ast.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Implementation of AstExpressionRewriter
-// The AST is traversed but no actual rewriting takes place, unless the
-// Visit methods are overriden in subclasses.
-
-#define REWRITE_THIS(node) \
- do { \
- if (!RewriteExpression(node)) return; \
- } while (false)
-#define NOTHING() DCHECK_NULL(replacement_)
-
-void AstExpressionRewriter::VisitDeclarations(Declaration::List* declarations) {
- for (Declaration::List::Iterator it = declarations->begin();
- it != declarations->end(); ++it) {
- AST_REWRITE(Declaration, *it, it = replacement);
- }
-}
-
-
-void AstExpressionRewriter::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- AST_REWRITE_LIST_ELEMENT(Statement, statements, i);
- if (statements->at(i)->IsJump()) break;
- }
-}
-
-
-void AstExpressionRewriter::VisitExpressions(
- ZoneList<Expression*>* expressions) {
- for (int i = 0; i < expressions->length(); i++) {
- // The variable statement visiting code may pass NULL expressions
- // to this code. Maybe this should be handled by introducing an
- // undefined expression or literal? Revisit this code if this
- // changes
- if (expressions->at(i) != nullptr) {
- AST_REWRITE_LIST_ELEMENT(Expression, expressions, i);
- }
- }
-}
-
-
-void AstExpressionRewriter::VisitVariableDeclaration(
- VariableDeclaration* node) {
- // Not visiting `proxy_`.
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitFunctionDeclaration(
- FunctionDeclaration* node) {
- // Not visiting `proxy_`.
- AST_REWRITE_PROPERTY(FunctionLiteral, node, fun);
-}
-
-
-void AstExpressionRewriter::VisitBlock(Block* node) {
- VisitStatements(node->statements());
-}
-
-
-void AstExpressionRewriter::VisitExpressionStatement(
- ExpressionStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-
-void AstExpressionRewriter::VisitEmptyStatement(EmptyStatement* node) {
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* node) {
- AST_REWRITE_PROPERTY(Statement, node, statement);
-}
-
-
-void AstExpressionRewriter::VisitIfStatement(IfStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, condition);
- AST_REWRITE_PROPERTY(Statement, node, then_statement);
- AST_REWRITE_PROPERTY(Statement, node, else_statement);
-}
-
-
-void AstExpressionRewriter::VisitContinueStatement(ContinueStatement* node) {
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitBreakStatement(BreakStatement* node) {
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitReturnStatement(ReturnStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-
-void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, expression);
- AST_REWRITE_PROPERTY(Statement, node, statement);
-}
-
-
-void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, tag);
- for (CaseClause* clause : *node->cases()) {
- if (!clause->is_default()) {
- AST_REWRITE_PROPERTY(Expression, clause, label);
- }
- VisitStatements(clause->statements());
- }
-}
-
-
-void AstExpressionRewriter::VisitDoWhileStatement(DoWhileStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, cond);
- AST_REWRITE_PROPERTY(Statement, node, body);
-}
-
-
-void AstExpressionRewriter::VisitWhileStatement(WhileStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, cond);
- AST_REWRITE_PROPERTY(Statement, node, body);
-}
-
-
-void AstExpressionRewriter::VisitForStatement(ForStatement* node) {
- if (node->init() != nullptr) {
- AST_REWRITE_PROPERTY(Statement, node, init);
- }
- if (node->cond() != nullptr) {
- AST_REWRITE_PROPERTY(Expression, node, cond);
- }
- if (node->next() != nullptr) {
- AST_REWRITE_PROPERTY(Statement, node, next);
- }
- AST_REWRITE_PROPERTY(Statement, node, body);
-}
-
-
-void AstExpressionRewriter::VisitForInStatement(ForInStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, each);
- AST_REWRITE_PROPERTY(Expression, node, subject);
- AST_REWRITE_PROPERTY(Statement, node, body);
-}
-
-
-void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
- AST_REWRITE_PROPERTY(Expression, node, next_result);
- AST_REWRITE_PROPERTY(Expression, node, result_done);
- AST_REWRITE_PROPERTY(Expression, node, assign_each);
- AST_REWRITE_PROPERTY(Statement, node, body);
-}
-
-
-void AstExpressionRewriter::VisitTryCatchStatement(TryCatchStatement* node) {
- AST_REWRITE_PROPERTY(Block, node, try_block);
- // Not visiting the variable.
- AST_REWRITE_PROPERTY(Block, node, catch_block);
-}
-
-
-void AstExpressionRewriter::VisitTryFinallyStatement(
- TryFinallyStatement* node) {
- AST_REWRITE_PROPERTY(Block, node, try_block);
- AST_REWRITE_PROPERTY(Block, node, finally_block);
-}
-
-
-void AstExpressionRewriter::VisitDebuggerStatement(DebuggerStatement* node) {
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitFunctionLiteral(FunctionLiteral* node) {
- REWRITE_THIS(node);
- VisitDeclarations(node->scope()->declarations());
- ZoneList<Statement*>* body = node->body();
- if (body != nullptr) VisitStatements(body);
-}
-
-
-void AstExpressionRewriter::VisitClassLiteral(ClassLiteral* node) {
- REWRITE_THIS(node);
- // Not visiting `class_variable_proxy_`.
- if (node->extends() != nullptr) {
- AST_REWRITE_PROPERTY(Expression, node, extends);
- }
- AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
- ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
- for (int i = 0; i < properties->length(); i++) {
- VisitLiteralProperty(properties->at(i));
- }
-}
-
-void AstExpressionRewriter::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* node) {
- REWRITE_THIS(node);
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitConditional(Conditional* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, condition);
- AST_REWRITE_PROPERTY(Expression, node, then_expression);
- AST_REWRITE_PROPERTY(Expression, node, else_expression);
-}
-
-
-void AstExpressionRewriter::VisitVariableProxy(VariableProxy* node) {
- REWRITE_THIS(node);
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitLiteral(Literal* node) {
- REWRITE_THIS(node);
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitRegExpLiteral(RegExpLiteral* node) {
- REWRITE_THIS(node);
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitObjectLiteral(ObjectLiteral* node) {
- REWRITE_THIS(node);
- ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
- for (int i = 0; i < properties->length(); i++) {
- VisitLiteralProperty(properties->at(i));
- }
-}
-
-void AstExpressionRewriter::VisitLiteralProperty(LiteralProperty* property) {
- if (property == nullptr) return;
- AST_REWRITE_PROPERTY(Expression, property, key);
- AST_REWRITE_PROPERTY(Expression, property, value);
-}
-
-
-void AstExpressionRewriter::VisitArrayLiteral(ArrayLiteral* node) {
- REWRITE_THIS(node);
- VisitExpressions(node->values());
-}
-
-
-void AstExpressionRewriter::VisitAssignment(Assignment* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, target);
- AST_REWRITE_PROPERTY(Expression, node, value);
-}
-
-void AstExpressionRewriter::VisitCompoundAssignment(CompoundAssignment* node) {
- VisitAssignment(node);
-}
-
-void AstExpressionRewriter::VisitYield(Yield* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-void AstExpressionRewriter::VisitYieldStar(YieldStar* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-void AstExpressionRewriter::VisitAwait(Await* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-void AstExpressionRewriter::VisitThrow(Throw* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, exception);
-}
-
-
-void AstExpressionRewriter::VisitProperty(Property* node) {
- REWRITE_THIS(node);
- if (node == nullptr) return;
- AST_REWRITE_PROPERTY(Expression, node, obj);
- AST_REWRITE_PROPERTY(Expression, node, key);
-}
-
-
-void AstExpressionRewriter::VisitCall(Call* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
- VisitExpressions(node->arguments());
-}
-
-
-void AstExpressionRewriter::VisitCallNew(CallNew* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
- VisitExpressions(node->arguments());
-}
-
-
-void AstExpressionRewriter::VisitCallRuntime(CallRuntime* node) {
- REWRITE_THIS(node);
- VisitExpressions(node->arguments());
-}
-
-
-void AstExpressionRewriter::VisitUnaryOperation(UnaryOperation* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-
-void AstExpressionRewriter::VisitCountOperation(CountOperation* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-
-void AstExpressionRewriter::VisitBinaryOperation(BinaryOperation* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, left);
- AST_REWRITE_PROPERTY(Expression, node, right);
-}
-
-
-void AstExpressionRewriter::VisitCompareOperation(CompareOperation* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, left);
- AST_REWRITE_PROPERTY(Expression, node, right);
-}
-
-
-void AstExpressionRewriter::VisitSpread(Spread* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
-}
-
-
-void AstExpressionRewriter::VisitThisFunction(ThisFunction* node) {
- REWRITE_THIS(node);
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitSuperPropertyReference(
- SuperPropertyReference* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
- AST_REWRITE_PROPERTY(Expression, node, home_object);
-}
-
-
-void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
- AST_REWRITE_PROPERTY(VariableProxy, node, new_target_var);
- AST_REWRITE_PROPERTY(VariableProxy, node, this_function_var);
-}
-
-
-void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
- NOTHING();
-}
-
-void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
- AST_REWRITE_PROPERTY(Expression, node, iterable);
-}
-
-void AstExpressionRewriter::VisitGetTemplateObject(GetTemplateObject* node) {
- NOTHING();
-}
-
-void AstExpressionRewriter::VisitImportCallExpression(
- ImportCallExpression* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, argument);
-}
-
-void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
- REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Block, node, block);
- AST_REWRITE_PROPERTY(VariableProxy, node, result);
-}
-
-
-void AstExpressionRewriter::VisitRewritableExpression(
- RewritableExpression* node) {
- REWRITE_THIS(node);
- AST_REWRITE(Expression, node->expression(), node->Rewrite(replacement));
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
deleted file mode 100644
index c246fcd37d..0000000000
--- a/deps/v8/src/ast/ast-expression-rewriter.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_EXPRESSION_REWRITER_H_
-#define V8_AST_AST_EXPRESSION_REWRITER_H_
-
-#include "src/allocation.h"
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// A rewriting Visitor over a CompilationInfo's AST that invokes
-// VisitExpression on each expression node.
-
-// This AstVistor is not final, and provides the AstVisitor methods as virtual
-// methods so they can be specialized by subclasses.
-class AstExpressionRewriter : public AstVisitor<AstExpressionRewriter> {
- public:
- explicit AstExpressionRewriter(Isolate* isolate) {
- InitializeAstRewriter(isolate);
- }
- explicit AstExpressionRewriter(uintptr_t stack_limit) {
- InitializeAstRewriter(stack_limit);
- }
- virtual ~AstExpressionRewriter() {}
-
- virtual void VisitDeclarations(Declaration::List* declarations);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
- virtual void VisitExpressions(ZoneList<Expression*>* expressions);
-
- virtual void VisitLiteralProperty(LiteralProperty* property);
-
- protected:
- virtual bool RewriteExpression(Expression* expr) = 0;
-
- private:
- DEFINE_AST_REWRITER_SUBCLASS_MEMBERS();
-
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(AstExpressionRewriter);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_EXPRESSION_REWRITER_H_
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 3df7aae861..0736e543e2 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -15,16 +15,11 @@ namespace internal {
class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
- Compiler::EagerInnerFunctionLiterals* eager_literals,
- bool collect_type_profile = false)
+ Compiler::EagerInnerFunctionLiterals* eager_literals)
: zone_(zone),
eager_literals_(eager_literals),
suspend_count_(0),
- properties_(zone),
- language_mode_(SLOPPY),
- slot_cache_(zone),
- dont_optimize_reason_(kNoReason),
- collect_type_profile_(collect_type_profile) {
+ dont_optimize_reason_(kNoReason) {
InitializeAstVisitor(stack_limit);
}
@@ -36,10 +31,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
- void VisitVariableProxy(VariableProxy* node, TypeofMode typeof_mode);
- void VisitVariableProxyReference(VariableProxy* node);
- void VisitPropertyReference(Property* node);
- void VisitReference(Expression* expr);
void VisitSuspend(Suspend* node);
void VisitStatementsAndDeclarations(Block* node);
@@ -52,25 +43,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
dont_optimize_reason_ = reason;
}
- template <typename Node>
- void ReserveFeedbackSlots(Node* node) {
- node->AssignFeedbackSlots(properties_.get_spec(), language_mode_,
- function_kind_, &slot_cache_);
- }
-
- class LanguageModeScope {
- public:
- LanguageModeScope(AstNumberingVisitor* visitor, LanguageMode language_mode)
- : visitor_(visitor), outer_language_mode_(visitor->language_mode_) {
- visitor_->language_mode_ = language_mode;
- }
- ~LanguageModeScope() { visitor_->language_mode_ = outer_language_mode_; }
-
- private:
- AstNumberingVisitor* visitor_;
- LanguageMode outer_language_mode_;
- };
-
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
Zone* zone() const { return zone_; }
@@ -78,105 +50,72 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int suspend_count_;
- AstProperties properties_;
- LanguageMode language_mode_;
FunctionKind function_kind_;
- // The slot cache allows us to reuse certain feedback slots.
- FeedbackSlotCache slot_cache_;
BailoutReason dont_optimize_reason_;
- bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
};
-
void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
VisitVariableProxy(node->proxy());
}
-
void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
}
-
void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
Visit(node->statement());
}
-
void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
}
-
void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
}
-
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
}
-
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
DisableOptimization(kNativeFunctionLiteral);
- ReserveFeedbackSlots(node);
}
-
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
Visit(node->block());
Visit(node->result());
}
-
void AstNumberingVisitor::VisitLiteral(Literal* node) {
}
-
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
- ReserveFeedbackSlots(node);
-}
-
-
-void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
-}
-
-void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
- TypeofMode typeof_mode) {
- VisitVariableProxyReference(node);
- node->AssignFeedbackSlots(properties_.get_spec(), typeof_mode, &slot_cache_);
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
- VisitVariableProxy(node, NOT_INSIDE_TYPEOF);
}
-
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
}
-
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
Visit(node->this_var());
Visit(node->home_object());
}
-
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
}
-
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
-
void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
Visit(node->expression());
}
@@ -196,7 +135,6 @@ void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
node->set_await_delegated_iterator_output_suspend_id(suspend_count_++);
}
Visit(node->expression());
- ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
@@ -205,32 +143,16 @@ void AstNumberingVisitor::VisitThrow(Throw* node) {
Visit(node->exception());
}
-
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
- if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
- VariableProxy* proxy = node->expression()->AsVariableProxy();
- VisitVariableProxy(proxy, INSIDE_TYPEOF);
- } else {
- Visit(node->expression());
- }
- ReserveFeedbackSlots(node);
+ Visit(node->expression());
}
-
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
Visit(node->expression());
- ReserveFeedbackSlots(node);
}
-
void AstNumberingVisitor::VisitBlock(Block* node) {
- Scope* scope = node->scope();
- if (scope != nullptr) {
- LanguageModeScope language_mode_scope(this, scope->language_mode());
- VisitStatementsAndDeclarations(node);
- } else {
- VisitStatementsAndDeclarations(node);
- }
+ VisitStatementsAndDeclarations(node);
}
void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
@@ -245,18 +167,15 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
VisitFunctionLiteral(node->fun());
}
-
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
VisitArguments(node->arguments());
}
-
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
Visit(node->expression());
Visit(node->statement());
}
-
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
@@ -264,7 +183,6 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
-
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
@@ -272,46 +190,25 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
-
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
Visit(node->try_block());
Visit(node->catch_block());
}
-
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
Visit(node->try_block());
Visit(node->finally_block());
}
-
-void AstNumberingVisitor::VisitPropertyReference(Property* node) {
+void AstNumberingVisitor::VisitProperty(Property* node) {
Visit(node->key());
Visit(node->obj());
}
-
-void AstNumberingVisitor::VisitReference(Expression* expr) {
- DCHECK(expr->IsProperty() || expr->IsVariableProxy());
- if (expr->IsProperty()) {
- VisitPropertyReference(expr->AsProperty());
- } else {
- VisitVariableProxyReference(expr->AsVariableProxy());
- }
-}
-
-
-void AstNumberingVisitor::VisitProperty(Property* node) {
- VisitPropertyReference(node);
- ReserveFeedbackSlots(node);
-}
-
-
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
- VisitReference(node->target());
+ Visit(node->target());
Visit(node->value());
- ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
@@ -322,14 +219,18 @@ void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
Visit(node->left());
Visit(node->right());
- ReserveFeedbackSlots(node);
}
+void AstNumberingVisitor::VisitNaryOperation(NaryOperation* node) {
+ Visit(node->first());
+ for (size_t i = 0; i < node->subsequent_length(); ++i) {
+ Visit(node->subsequent(i));
+ }
+}
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
Visit(node->left());
Visit(node->right());
- ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitSpread(Spread* node) {
@@ -342,7 +243,6 @@ void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
- ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
@@ -358,10 +258,8 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
Visit(node->each());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
- ReserveFeedbackSlots(node);
}
-
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
@@ -372,14 +270,12 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
-
void AstNumberingVisitor::VisitConditional(Conditional* node) {
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
}
-
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
Visit(node->condition());
Visit(node->then_statement());
@@ -388,37 +284,43 @@ void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
}
}
-
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
Visit(node->tag());
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) Visit(clause->label());
VisitStatements(clause->statements());
- ReserveFeedbackSlots(clause);
}
}
-
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
- if (node->init() != NULL) Visit(node->init()); // Not part of loop.
+ if (node->init() != nullptr) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
- if (node->cond() != NULL) Visit(node->cond());
- if (node->next() != NULL) Visit(node->next());
+ if (node->cond() != nullptr) Visit(node->cond());
+ if (node->next() != nullptr) Visit(node->next());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
-
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
- LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
+ if (node->static_fields_initializer() != nullptr) {
+ Visit(node->static_fields_initializer());
+ }
+ if (node->instance_fields_initializer_function() != nullptr) {
+ Visit(node->instance_fields_initializer_function());
+ }
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
- ReserveFeedbackSlots(node);
}
+void AstNumberingVisitor::VisitInitializeClassFieldsStatement(
+ InitializeClassFieldsStatement* node) {
+ for (int i = 0; i < node->fields()->length(); i++) {
+ VisitLiteralProperty(node->fields()->at(i));
+ }
+}
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
@@ -429,7 +331,6 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code will be is emitted.
node->CalculateEmitStore(zone_);
- ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
@@ -442,26 +343,20 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
Visit(node->values()->at(i));
}
node->InitDepthAndFlags();
- ReserveFeedbackSlots(node);
}
-
void AstNumberingVisitor::VisitCall(Call* node) {
- ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
}
-
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
- ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
}
-
void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
- if (statements == NULL) return;
+ if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
if (statements->at(i)->IsJump()) break;
@@ -472,14 +367,12 @@ void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
for (Declaration* decl : *decls) Visit(decl);
}
-
void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
}
-
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
@@ -494,30 +387,21 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
return;
}
}
- ReserveFeedbackSlots(node);
}
-
void AstNumberingVisitor::VisitRewritableExpression(
RewritableExpression* node) {
Visit(node->expression());
}
-
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
DCHECK(!scope->HasBeenRemoved());
function_kind_ = node->kind();
- LanguageModeScope language_mode_scope(this, node->language_mode());
-
- if (collect_type_profile_) {
- properties_.get_spec()->AddTypeProfileSlot();
- }
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
- node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
@@ -526,14 +410,12 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
bool AstNumbering::Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
- Compiler::EagerInnerFunctionLiterals* eager_literals,
- bool collect_type_profile) {
+ Compiler::EagerInnerFunctionLiterals* eager_literals) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- AstNumberingVisitor visitor(stack_limit, zone, eager_literals,
- collect_type_profile);
+ AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
return visitor.Renumber(function);
}
} // namespace internal
diff --git a/deps/v8/src/ast/ast-numbering.h b/deps/v8/src/ast/ast-numbering.h
index 82987ef593..11122803b8 100644
--- a/deps/v8/src/ast/ast-numbering.h
+++ b/deps/v8/src/ast/ast-numbering.h
@@ -22,13 +22,12 @@ template <typename T>
class ZoneVector;
namespace AstNumbering {
-// Assign type feedback IDs, bailout IDs, and generator suspend IDs to an AST
-// node tree; perform catch prediction for TryStatements. If |eager_literals| is
-// non-null, adds any eager inner literal functions into it.
+// Assign bailout IDs, and generator suspend IDs to an AST node tree; perform
+// catch prediction for TryStatements. If |eager_literals| is non-null, adds any
+// eager inner literal functions into it.
bool Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
- ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals,
- bool collect_type_profile = false);
+ ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
}
// Some details on suspend IDs
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
index 55554b1043..cf7bab53da 100644
--- a/deps/v8/src/ast/ast-source-ranges.h
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -30,12 +30,14 @@ struct SourceRange {
// The list of ast node kinds that have associated source ranges. Note that this
// macro is not undefined at the end of this file.
#define AST_SOURCE_RANGE_LIST(V) \
+ V(BinaryOperation) \
V(Block) \
V(CaseClause) \
V(Conditional) \
V(IfStatement) \
V(IterationStatement) \
V(JumpStatement) \
+ V(NaryOperation) \
V(Suspend) \
V(SwitchStatement) \
V(Throw) \
@@ -48,6 +50,7 @@ enum class SourceRangeKind {
kContinuation,
kElse,
kFinally,
+ kRight,
kThen,
};
@@ -57,13 +60,27 @@ class AstNodeSourceRanges : public ZoneObject {
virtual SourceRange GetRange(SourceRangeKind kind) = 0;
};
+class BinaryOperationSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit BinaryOperationSourceRanges(const SourceRange& right_range)
+ : right_range_(right_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ DCHECK_EQ(kind, SourceRangeKind::kRight);
+ return right_range_;
+ }
+
+ private:
+ SourceRange right_range_;
+};
+
class ContinuationSourceRanges : public AstNodeSourceRanges {
public:
explicit ContinuationSourceRanges(int32_t continuation_position)
: continuation_position_(continuation_position) {}
SourceRange GetRange(SourceRangeKind kind) {
- DCHECK(kind == SourceRangeKind::kContinuation);
+ DCHECK_EQ(kind, SourceRangeKind::kContinuation);
return SourceRange::OpenEnded(continuation_position_);
}
@@ -83,7 +100,7 @@ class CaseClauseSourceRanges final : public AstNodeSourceRanges {
: body_range_(body_range) {}
SourceRange GetRange(SourceRangeKind kind) {
- DCHECK(kind == SourceRangeKind::kBody);
+ DCHECK_EQ(kind, SourceRangeKind::kBody);
return body_range_;
}
@@ -166,6 +183,27 @@ class JumpStatementSourceRanges final : public ContinuationSourceRanges {
: ContinuationSourceRanges(continuation_position) {}
};
+class NaryOperationSourceRanges final : public AstNodeSourceRanges {
+ public:
+ NaryOperationSourceRanges(Zone* zone, const SourceRange& range)
+ : ranges_(zone) {
+ AddRange(range);
+ }
+
+ SourceRange GetRangeAtIndex(size_t index) {
+ DCHECK(index < ranges_.size());
+ return ranges_[index];
+ }
+
+ void AddRange(const SourceRange& range) { ranges_.push_back(range); }
+ size_t RangeCount() const { return ranges_.size(); }
+
+ SourceRange GetRange(SourceRangeKind kind) { UNREACHABLE(); }
+
+ private:
+ ZoneVector<SourceRange> ranges_;
+};
+
class SuspendSourceRanges final : public ContinuationSourceRanges {
public:
explicit SuspendSourceRanges(int32_t continuation_position)
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 0fec89a58c..6ad4df357c 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -228,13 +228,13 @@ void AstTraversalVisitor<Subclass>::VisitWhileStatement(WhileStatement* stmt) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitForStatement(ForStatement* stmt) {
PROCESS_NODE(stmt);
- if (stmt->init() != NULL) {
+ if (stmt->init() != nullptr) {
RECURSE(Visit(stmt->init()));
}
- if (stmt->cond() != NULL) {
+ if (stmt->cond() != nullptr) {
RECURSE(Visit(stmt->cond()));
}
- if (stmt->next() != NULL) {
+ if (stmt->next() != nullptr) {
RECURSE(Visit(stmt->next()));
}
RECURSE(Visit(stmt->body()));
@@ -444,6 +444,15 @@ void AstTraversalVisitor<Subclass>::VisitBinaryOperation(
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitNaryOperation(NaryOperation* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->first()));
+ for (size_t i = 0; i < expr->subsequent_length(); ++i) {
+ RECURSE_EXPRESSION(Visit(expr->subsequent(i)));
+ }
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCompareOperation(
CompareOperation* expr) {
PROCESS_EXPRESSION(expr);
@@ -463,6 +472,12 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
RECURSE_EXPRESSION(Visit(expr->extends()));
}
RECURSE_EXPRESSION(Visit(expr->constructor()));
+ if (expr->static_fields_initializer() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->static_fields_initializer()));
+ }
+ if (expr->instance_fields_initializer_function() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->instance_fields_initializer_function()));
+ }
ZoneList<ClassLiteralProperty*>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
@@ -474,6 +489,20 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitInitializeClassFieldsStatement(
+ InitializeClassFieldsStatement* stmt) {
+ PROCESS_NODE(stmt);
+ ZoneList<ClassLiteralProperty*>* props = stmt->fields();
+ for (int i = 0; i < props->length(); ++i) {
+ ClassLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ RECURSE(Visit(prop->key()));
+ }
+ RECURSE(Visit(prop->value()));
+ }
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSpread(Spread* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index b83ed4547e..458afb8bc1 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -168,102 +168,18 @@ void AstConsString::Internalize(Isolate* isolate) {
set_string(tmp);
}
-AstValue::AstValue(double n) : next_(nullptr) {
- int int_value;
- if (DoubleToSmiInteger(n, &int_value)) {
- type_ = SMI;
- smi_ = int_value;
- } else {
- type_ = NUMBER;
- number_ = n;
- }
-}
-
-bool AstValue::ToUint32(uint32_t* value) const {
- if (IsSmi()) {
- int num = smi_;
- if (num < 0) return false;
- *value = static_cast<uint32_t>(num);
- return true;
- }
- if (IsHeapNumber()) {
- return DoubleToUint32IfEqualToSelf(number_, value);
- }
- return false;
-}
-
-bool AstValue::IsPropertyName() const {
- if (type_ == STRING) {
- uint32_t index;
- return !string_->AsArrayIndex(&index);
- }
- return false;
-}
-
-
-bool AstValue::BooleanValue() const {
- switch (type_) {
- case STRING:
- DCHECK(string_ != NULL);
- return !string_->IsEmpty();
- case SYMBOL:
- UNREACHABLE();
- break;
- case NUMBER:
- return DoubleToBoolean(number_);
- case SMI:
- return smi_ != 0;
- case BOOLEAN:
- return bool_;
- case NULL_TYPE:
- return false;
- case THE_HOLE:
- UNREACHABLE();
- break;
- case UNDEFINED:
- return false;
+std::forward_list<const AstRawString*> AstConsString::ToRawStrings() const {
+ std::forward_list<const AstRawString*> result;
+ if (IsEmpty()) {
+ return result;
}
- UNREACHABLE();
-}
-
-void AstValue::Internalize(Isolate* isolate) {
- switch (type_) {
- case STRING:
- DCHECK_NOT_NULL(string_);
- // Strings are already internalized.
- DCHECK(!string_->string().is_null());
- break;
- case SYMBOL:
- switch (symbol_) {
- case AstSymbol::kHomeObjectSymbol:
- set_value(isolate->factory()->home_object_symbol());
- break;
- }
- break;
- case NUMBER:
- set_value(isolate->factory()->NewNumber(number_, TENURED));
- break;
- case SMI:
- set_value(handle(Smi::FromInt(smi_), isolate));
- break;
- case BOOLEAN:
- if (bool_) {
- set_value(isolate->factory()->true_value());
- } else {
- set_value(isolate->factory()->false_value());
- }
- break;
- case NULL_TYPE:
- set_value(isolate->factory()->null_value());
- break;
- case THE_HOLE:
- set_value(isolate->factory()->the_hole_value());
- break;
- case UNDEFINED:
- set_value(isolate->factory()->undefined_value());
- break;
+ result.emplace_front(segment_.string);
+ for (AstConsString::Segment* current = segment_.next; current != nullptr;
+ current = current->next) {
+ result.emplace_front(current->string);
}
+ return result;
}
AstStringConstants::AstStringConstants(Isolate* isolate, uint32_t hash_seed)
@@ -317,7 +233,7 @@ AstRawString* AstValueFactory::GetTwoByteStringInternal(
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
- AstRawString* result = NULL;
+ AstRawString* result = nullptr;
DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent();
if (content.IsOneByte()) {
@@ -361,72 +277,9 @@ void AstValueFactory::Internalize(Isolate* isolate) {
current = next;
}
- for (AstValue* current = values_; current != nullptr;) {
- AstValue* next = current->next();
- current->Internalize(isolate);
- current = next;
- }
ResetStrings();
- values_ = nullptr;
-}
-
-
-const AstValue* AstValueFactory::NewString(const AstRawString* string) {
- AstValue* value = new (zone_) AstValue(string);
- CHECK_NOT_NULL(string);
- return AddValue(value);
-}
-
-const AstValue* AstValueFactory::NewSymbol(AstSymbol symbol) {
- AstValue* value = new (zone_) AstValue(symbol);
- return AddValue(value);
-}
-
-const AstValue* AstValueFactory::NewNumber(double number) {
- AstValue* value = new (zone_) AstValue(number);
- return AddValue(value);
-}
-
-const AstValue* AstValueFactory::NewSmi(uint32_t number) {
- bool cacheable_smi = number <= kMaxCachedSmi;
- if (cacheable_smi && smis_[number] != nullptr) return smis_[number];
-
- AstValue* value = new (zone_) AstValue(AstValue::SMI, number);
- if (cacheable_smi) smis_[number] = value;
- return AddValue(value);
}
-#define GENERATE_VALUE_GETTER(value, initializer) \
- if (!value) { \
- value = AddValue(new (zone_) AstValue(initializer)); \
- } \
- return value;
-
-const AstValue* AstValueFactory::NewBoolean(bool b) {
- if (b) {
- GENERATE_VALUE_GETTER(true_value_, true);
- } else {
- GENERATE_VALUE_GETTER(false_value_, false);
- }
-}
-
-
-const AstValue* AstValueFactory::NewNull() {
- GENERATE_VALUE_GETTER(null_value_, AstValue::NULL_TYPE);
-}
-
-
-const AstValue* AstValueFactory::NewUndefined() {
- GENERATE_VALUE_GETTER(undefined_value_, AstValue::UNDEFINED);
-}
-
-
-const AstValue* AstValueFactory::NewTheHole() {
- GENERATE_VALUE_GETTER(the_hole_value_, AstValue::THE_HOLE);
-}
-
-
-#undef GENERATE_VALUE_GETTER
AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
Vector<const byte> literal_bytes) {
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index e67c87b4c0..6a3aea5fa0 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -28,6 +28,8 @@
#ifndef V8_AST_AST_VALUE_FACTORY_H_
#define V8_AST_AST_VALUE_FACTORY_H_
+#include <forward_list>
+
#include "src/base/hashmap.h"
#include "src/conversions.h"
#include "src/factory.h"
@@ -35,7 +37,7 @@
#include "src/isolate.h"
#include "src/utils.h"
-// Ast(Raw|Cons)String, AstValue and AstValueFactory are for storing strings and
+// Ast(Raw|Cons)String and AstValueFactory are for storing strings and
// values independent of the V8 heap and internalizing them later. During
// parsing, they are created and stored outside the heap, in AstValueFactory.
// After parsing, the strings and values are internalized (moved into the V8
@@ -151,6 +153,8 @@ class AstConsString final : public ZoneObject {
return Handle<String>(string_);
}
+ std::forward_list<const AstRawString*> ToRawStrings() const;
+
private:
friend class AstValueFactory;
@@ -176,167 +180,62 @@ class AstConsString final : public ZoneObject {
enum class AstSymbol : uint8_t { kHomeObjectSymbol };
-// AstValue is either a string, a symbol, a number, a string array, a boolean,
-// or a special value (null, undefined, the hole).
-class AstValue : public ZoneObject {
+class AstBigInt {
public:
- bool IsString() const {
- return type_ == STRING;
- }
-
- bool IsSymbol() const { return type_ == SYMBOL; }
-
- bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
-
- const AstRawString* AsString() const {
- CHECK_EQ(STRING, type_);
- return string_;
- }
+ // |bigint| must be a NUL-terminated string of ASCII characters
+ // representing a BigInt (suitable for passing to BigIntLiteral()
+ // from conversions.h).
+ explicit AstBigInt(const char* bigint) : bigint_(bigint) {}
- AstSymbol AsSymbol() const {
- CHECK_EQ(SYMBOL, type_);
- return symbol_;
- }
-
- double AsNumber() const {
- if (IsHeapNumber()) return number_;
- if (IsSmi()) return smi_;
- UNREACHABLE();
- }
-
- Smi* AsSmi() const {
- CHECK(IsSmi());
- return Smi::FromInt(smi_);
- }
-
- bool ToUint32(uint32_t* value) const;
-
- bool EqualsString(const AstRawString* string) const {
- return type_ == STRING && string_ == string;
- }
-
- bool IsPropertyName() const;
-
- bool BooleanValue() const;
-
- bool IsSmi() const { return type_ == SMI; }
- bool IsHeapNumber() const { return type_ == NUMBER; }
- bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
- bool IsTrue() const { return type_ == BOOLEAN && bool_; }
- bool IsUndefined() const { return type_ == UNDEFINED; }
- bool IsTheHole() const { return type_ == THE_HOLE; }
- bool IsNull() const { return type_ == NULL_TYPE; }
-
- void Internalize(Isolate* isolate);
-
- // Can be called after Internalize has been called.
- V8_INLINE Handle<Object> value() const {
- if (type_ == STRING) {
- return string_->string();
- }
- DCHECK_NOT_NULL(value_);
- return Handle<Object>(value_);
- }
- AstValue* next() const { return next_; }
- void set_next(AstValue* next) { next_ = next; }
+ const char* c_str() const { return bigint_; }
private:
- void set_value(Handle<Object> object) { value_ = object.location(); }
- friend class AstValueFactory;
-
- enum Type {
- STRING,
- SYMBOL,
- NUMBER,
- SMI,
- BOOLEAN,
- NULL_TYPE,
- UNDEFINED,
- THE_HOLE
- };
-
- explicit AstValue(const AstRawString* s) : type_(STRING), next_(nullptr) {
- string_ = s;
- }
-
- explicit AstValue(AstSymbol symbol) : type_(SYMBOL), next_(nullptr) {
- symbol_ = symbol;
- }
-
- explicit AstValue(double n);
-
- AstValue(Type t, int i) : type_(t), next_(nullptr) {
- DCHECK(type_ == SMI);
- smi_ = i;
- }
-
- explicit AstValue(bool b) : type_(BOOLEAN), next_(nullptr) { bool_ = b; }
-
- explicit AstValue(Type t) : type_(t), next_(nullptr) {
- DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
- }
-
- Type type_;
-
- // {value_} is stored as Object** instead of a Handle<Object> so it can be
- // stored in a union with {next_}.
- union {
- Object** value_; // if internalized
- AstValue* next_; // if !internalized
- };
-
- // Uninternalized value.
- union {
- const AstRawString* string_;
- double number_;
- int smi_;
- bool bool_;
- AstSymbol symbol_;
- };
+ const char* bigint_;
};
// For generating constants.
-#define AST_STRING_CONSTANTS(F) \
- F(anonymous_function, "(anonymous function)") \
- F(arguments, "arguments") \
- F(async, "async") \
- F(await, "await") \
- F(boolean, "boolean") \
- F(constructor, "constructor") \
- F(default, "default") \
- F(done, "done") \
- F(dot, ".") \
- F(dot_for, ".for") \
- F(dot_generator_object, ".generator_object") \
- F(dot_iterator, ".iterator") \
- F(dot_result, ".result") \
- F(dot_switch_tag, ".switch_tag") \
- F(dot_catch, ".catch") \
- F(empty, "") \
- F(eval, "eval") \
- F(function, "function") \
- F(get_space, "get ") \
- F(length, "length") \
- F(let, "let") \
- F(name, "name") \
- F(native, "native") \
- F(new_target, ".new.target") \
- F(next, "next") \
- F(number, "number") \
- F(object, "object") \
- F(proto, "__proto__") \
- F(prototype, "prototype") \
- F(return, "return") \
- F(set_space, "set ") \
- F(star_default_star, "*default*") \
- F(string, "string") \
- F(symbol, "symbol") \
- F(this, "this") \
- F(this_function, ".this_function") \
- F(throw, "throw") \
- F(undefined, "undefined") \
- F(use_asm, "use asm") \
- F(use_strict, "use strict") \
+#define AST_STRING_CONSTANTS(F) \
+ F(anonymous_function, "(anonymous function)") \
+ F(arguments, "arguments") \
+ F(async, "async") \
+ F(await, "await") \
+ F(bigint, "bigint") \
+ F(boolean, "boolean") \
+ F(constructor, "constructor") \
+ F(default, "default") \
+ F(done, "done") \
+ F(dot, ".") \
+ F(dot_for, ".for") \
+ F(dot_generator_object, ".generator_object") \
+ F(dot_iterator, ".iterator") \
+ F(dot_result, ".result") \
+ F(dot_switch_tag, ".switch_tag") \
+ F(dot_catch, ".catch") \
+ F(empty, "") \
+ F(eval, "eval") \
+ F(function, "function") \
+ F(get_space, "get ") \
+ F(length, "length") \
+ F(let, "let") \
+ F(name, "name") \
+ F(native, "native") \
+ F(new_target, ".new.target") \
+ F(next, "next") \
+ F(number, "number") \
+ F(object, "object") \
+ F(proto, "__proto__") \
+ F(prototype, "prototype") \
+ F(return, "return") \
+ F(set_space, "set ") \
+ F(star_default_star, "*default*") \
+ F(string, "string") \
+ F(symbol, "symbol") \
+ F(this, "this") \
+ F(this_function, ".this_function") \
+ F(throw, "throw") \
+ F(undefined, "undefined") \
+ F(use_asm, "use asm") \
+ F(use_strict, "use strict") \
F(value, "value")
class AstStringConstants final {
@@ -365,19 +264,11 @@ class AstStringConstants final {
DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
};
-#define OTHER_CONSTANTS(F) \
- F(true_value) \
- F(false_value) \
- F(null_value) \
- F(undefined_value) \
- F(the_hole_value)
-
class AstValueFactory {
public:
AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
uint32_t hash_seed)
: string_table_(string_constants->string_table()),
- values_(nullptr),
strings_(nullptr),
strings_end_(&strings_),
cons_strings_(nullptr),
@@ -386,11 +277,7 @@ class AstValueFactory {
empty_cons_string_(nullptr),
zone_(zone),
hash_seed_(hash_seed) {
-#define F(name) name##_ = nullptr;
- OTHER_CONSTANTS(F)
-#undef F
DCHECK_EQ(hash_seed, string_constants->hash_seed());
- std::fill(smis_, smis_ + arraysize(smis_), nullptr);
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
nullptr);
@@ -425,27 +312,7 @@ class AstValueFactory {
#undef F
const AstConsString* empty_cons_string() const { return empty_cons_string_; }
- V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
- // A JavaScript symbol (ECMA-262 edition 6).
- const AstValue* NewSymbol(AstSymbol symbol);
- V8_EXPORT_PRIVATE const AstValue* NewNumber(double number);
- const AstValue* NewSmi(uint32_t number);
- const AstValue* NewBoolean(bool b);
- const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
- const AstValue* NewNull();
- const AstValue* NewUndefined();
- const AstValue* NewTheHole();
-
private:
- static const uint32_t kMaxCachedSmi = 1 << 10;
-
- STATIC_ASSERT(kMaxCachedSmi <= Smi::kMaxValue);
-
- AstValue* AddValue(AstValue* value) {
- value->set_next(values_);
- values_ = value;
- return value;
- }
AstRawString* AddString(AstRawString* string) {
*strings_end_ = string;
strings_end_ = string->next_location();
@@ -468,11 +335,8 @@ class AstValueFactory {
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
- // All strings are copied here, one after another (no NULLs inbetween).
+ // All strings are copied here, one after another (no zeroes inbetween).
base::CustomMatcherHashMap string_table_;
- // For keeping track of all AstValues and AstRawStrings we've created (so that
- // they can be internalized later).
- AstValue* values_;
// We need to keep track of strings_ in order since cons strings require their
// members to be internalized first.
@@ -485,22 +349,14 @@ class AstValueFactory {
const AstStringConstants* string_constants_;
const AstConsString* empty_cons_string_;
- // Caches for faster access: small numbers, one character lowercase strings
- // (for minified code).
- AstValue* smis_[kMaxCachedSmi + 1];
+ // Caches one character lowercase strings (for minified code).
AstRawString* one_character_strings_[26];
Zone* zone_;
uint32_t hash_seed_;
-
-#define F(name) AstValue* name##_;
- OTHER_CONSTANTS(F)
-#undef F
};
} // namespace internal
} // namespace v8
-#undef OTHER_CONSTANTS
-
#endif // V8_AST_AST_VALUE_FACTORY_H_
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 94abe81bda..710cbb40a5 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -5,6 +5,7 @@
#include "src/ast/ast.h"
#include <cmath> // For isfinite.
+#include <vector>
#include "src/ast/compile-time-value.h"
#include "src/ast/prettyprinter.h"
@@ -14,7 +15,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
#include "src/contexts.h"
-#include "src/conversions.h"
+#include "src/conversions-inl.h"
#include "src/double.h"
#include "src/elements.h"
#include "src/objects-inl.h"
@@ -90,15 +91,15 @@ MaterializedLiteral* AstNode::AsMaterializedLiteral() {
#undef RETURN_NODE
bool Expression::IsSmiLiteral() const {
- return IsLiteral() && AsLiteral()->raw_value()->IsSmi();
+ return IsLiteral() && AsLiteral()->type() == Literal::kSmi;
}
bool Expression::IsNumberLiteral() const {
- return IsLiteral() && AsLiteral()->raw_value()->IsNumber();
+ return IsLiteral() && AsLiteral()->IsNumber();
}
bool Expression::IsStringLiteral() const {
- return IsLiteral() && AsLiteral()->raw_value()->IsString();
+ return IsLiteral() && AsLiteral()->type() == Literal::kString;
}
bool Expression::IsPropertyName() const {
@@ -106,19 +107,22 @@ bool Expression::IsPropertyName() const {
}
bool Expression::IsNullLiteral() const {
- if (!IsLiteral()) return false;
- return AsLiteral()->raw_value()->IsNull();
+ return IsLiteral() && AsLiteral()->type() == Literal::kNull;
+}
+
+bool Expression::IsTheHoleLiteral() const {
+ return IsLiteral() && AsLiteral()->type() == Literal::kTheHole;
}
bool Expression::IsUndefinedLiteral() const {
- if (IsLiteral() && AsLiteral()->raw_value()->IsUndefined()) return true;
+ if (IsLiteral() && AsLiteral()->type() == Literal::kUndefined) return true;
const VariableProxy* var_proxy = AsVariableProxy();
if (var_proxy == nullptr) return false;
Variable* var = var_proxy->var();
// The global identifier "undefined" is immutable. Everything
// else could be reassigned.
- return var != NULL && var->IsUnallocated() &&
+ return var != nullptr && var->IsUnallocated() &&
var_proxy->raw_name()->IsOneByteEqualTo("undefined");
}
@@ -201,76 +205,12 @@ void VariableProxy::BindTo(Variable* var) {
if (is_assigned()) var->set_maybe_assigned();
}
-void VariableProxy::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- TypeofMode typeof_mode,
- FeedbackSlotCache* cache) {
- if (UsesVariableFeedbackSlot()) {
- // VariableProxies that point to the same Variable within a function can
- // make their loads from the same IC slot.
- if (var()->IsUnallocated() || var()->mode() == DYNAMIC_GLOBAL) {
- FeedbackSlot slot = cache->Get(typeof_mode, var());
- if (!slot.IsInvalid()) {
- variable_feedback_slot_ = slot;
- return;
- }
- variable_feedback_slot_ = spec->AddLoadGlobalICSlot(typeof_mode);
- cache->Put(typeof_mode, var(), variable_feedback_slot_);
- } else {
- variable_feedback_slot_ = spec->AddLoadICSlot();
- }
- }
-}
-
-static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FeedbackSlot* out_slot) {
- Property* property = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
- // TODO(ishell): consider using ICSlotCache for variables here.
- if (assign_type == VARIABLE &&
- expr->AsVariableProxy()->var()->IsUnallocated()) {
- *out_slot = spec->AddStoreGlobalICSlot(language_mode);
-
- } else if (assign_type == NAMED_PROPERTY) {
- *out_slot = spec->AddStoreICSlot(language_mode);
-
- } else if (assign_type == KEYED_PROPERTY) {
- *out_slot = spec->AddKeyedStoreICSlot(language_mode);
- }
-}
-
-void ForInStatement::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- AssignVectorSlots(each(), spec, language_mode, &each_slot_);
- for_in_feedback_slot_ = spec->AddForInSlot();
-}
-
Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target,
Expression* value, int pos)
: Expression(pos, node_type), target_(target), value_(value) {
bit_field_ |= TokenField::encode(op);
}
-void Assignment::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- AssignVectorSlots(target(), spec, language_mode, &slot_);
-}
-
-void CountOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- AssignVectorSlots(expression(), spec, language_mode, &slot_);
- // Assign a slot to collect feedback about binary operations. Used only in
- // ignition. Fullcodegen uses AstId to record type feedback.
- binary_operation_slot_ = spec->AddInterpreterBinaryOpICSlot();
-}
-
-
bool FunctionLiteral::ShouldEagerCompile() const {
return scope()->ShouldEagerCompile();
}
@@ -309,6 +249,34 @@ bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
}
+std::unique_ptr<char[]> FunctionLiteral::GetDebugName() const {
+ const AstConsString* cons_string;
+ if (raw_name_ != nullptr && !raw_name_->IsEmpty()) {
+ cons_string = raw_name_;
+ } else if (raw_inferred_name_ != nullptr && !raw_inferred_name_->IsEmpty()) {
+ cons_string = raw_inferred_name_;
+ } else if (!inferred_name_.is_null()) {
+ AllowHandleDereference allow_deref;
+ return inferred_name_->ToCString();
+ } else {
+ return std::unique_ptr<char[]>(new char{'\0'});
+ }
+
+ // TODO(rmcilroy): Deal with two-character strings.
+ std::vector<char> result_vec;
+ std::forward_list<const AstRawString*> strings = cons_string->ToRawStrings();
+ for (const AstRawString* string : strings) {
+ if (!string->is_one_byte()) break;
+ for (int i = 0; i < string->length(); i++) {
+ result_vec.push_back(string->raw_data()[i]);
+ }
+ }
+ std::unique_ptr<char[]> result(new char[result_vec.size() + 1]);
+ memcpy(result.get(), result_vec.data(), result_vec.size());
+ result[result_vec.size()] = '\0';
+ return result;
+}
+
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
@@ -319,11 +287,10 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
Expression* key, Expression* value,
bool is_computed_name)
: LiteralProperty(key, value, is_computed_name), emit_store_(true) {
- if (!is_computed_name &&
- key->AsLiteral()->raw_value()->EqualsString(
- ast_value_factory->proto_string())) {
+ if (!is_computed_name && key->AsLiteral()->IsString() &&
+ key->AsLiteral()->AsRawString() == ast_value_factory->proto_string()) {
kind_ = PROTOTYPE;
- } else if (value_->AsMaterializedLiteral() != NULL) {
+ } else if (value_->AsMaterializedLiteral() != nullptr) {
kind_ = MATERIALIZED_LITERAL;
} else if (value_->IsLiteral()) {
kind_ = CONSTANT;
@@ -332,16 +299,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
-FeedbackSlot LiteralProperty::GetStoreDataPropertySlot() const {
- int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
- return GetSlot(offset);
-}
-
-void LiteralProperty::SetStoreDataPropertySlot(FeedbackSlot slot) {
- int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
- return SetSlot(slot, offset);
-}
-
bool LiteralProperty::NeedsSetFunctionName() const {
return is_computed_name_ && (value_->IsAnonymousFunctionDefinition() ||
value_->IsConciseMethodDefinition() ||
@@ -353,28 +310,8 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
kind_(kind),
- is_static_(is_static) {}
-
-void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- // This logic that computes the number of slots needed for vector store
- // ICs must mirror BytecodeGenerator::VisitClassLiteral.
- if (FunctionLiteral::NeedsHomeObject(constructor())) {
- home_object_slot_ = spec->AddStoreICSlot(language_mode);
- }
-
- for (int i = 0; i < properties()->length(); i++) {
- ClassLiteral::Property* property = properties()->at(i);
- Expression* value = property->value();
- if (FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(language_mode));
- }
- property->SetStoreDataPropertySlot(
- spec->AddStoreDataPropertyInLiteralICSlot());
- }
-}
+ is_static_(is_static),
+ computed_name_var_(nullptr) {}
bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
@@ -389,77 +326,6 @@ void ObjectLiteral::Property::set_emit_store(bool emit_store) {
bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
-void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- // The empty object literal doesn't need any feedback vector slot.
- if (this->IsEmptyObjectLiteral()) return;
-
- MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, kind, cache);
-
- // This logic that computes the number of slots needed for vector store
- // ics must mirror FullCodeGenerator::VisitObjectLiteral.
- int property_index = 0;
- for (; property_index < properties()->length(); property_index++) {
- ObjectLiteral::Property* property = properties()->at(property_index);
- if (property->is_computed_name()) break;
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key()->AsLiteral();
- Expression* value = property->value();
- switch (property->kind()) {
- case ObjectLiteral::Property::SPREAD:
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- // It is safe to use [[Put]] here because the boilerplate already
- // contains computed properties with an uninitialized value.
- if (key->IsStringLiteral()) {
- if (property->emit_store()) {
- property->SetSlot(spec->AddStoreOwnICSlot());
- if (FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(language_mode), 1);
- }
- }
- break;
- }
- if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(language_mode));
- }
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- break;
- case ObjectLiteral::Property::GETTER:
- if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(language_mode));
- }
- break;
- case ObjectLiteral::Property::SETTER:
- if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(language_mode));
- }
- break;
- }
- }
-
- for (; property_index < properties()->length(); property_index++) {
- ObjectLiteral::Property* property = properties()->at(property_index);
-
- Expression* value = property->value();
- if (!property->IsPrototype()) {
- if (FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(language_mode));
- }
- }
- property->SetStoreDataPropertySlot(
- spec->AddStoreDataPropertyInLiteralICSlot());
- }
-}
-
-
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
const auto GETTER = ObjectLiteral::Property::GETTER;
const auto SETTER = ObjectLiteral::Property::SETTER;
@@ -479,7 +345,7 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
// entry was also an accessor.
uint32_t hash = literal->Hash();
ZoneHashMap::Entry* entry = table.LookupOrInsert(literal, hash, allocator);
- if (entry->value != NULL) {
+ if (entry->value != nullptr) {
auto previous_kind =
static_cast<ObjectLiteral::Property*>(entry->value)->kind();
if (!((property->kind() == GETTER && previous_kind == SETTER) ||
@@ -539,7 +405,7 @@ int ObjectLiteral::InitDepthAndFlags() {
needs_initial_allocation_site |= literal->NeedsInitialAllocationSite();
}
- const AstValue* key = property->key()->AsLiteral()->raw_value();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
@@ -550,12 +416,11 @@ int ObjectLiteral::InitDepthAndFlags() {
// much larger than the number of elements, creating an object
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
- if (key->IsString() && key->AsString()->AsArrayIndex(&element_index)) {
- max_element_index = Max(element_index, max_element_index);
- elements++;
- } else if (key->ToUint32(&element_index) && element_index != kMaxUInt32) {
+ if (key->AsArrayIndex(&element_index)) {
max_element_index = Max(element_index, max_element_index);
elements++;
+ } else {
+ DCHECK(key->IsPropertyName());
}
nof_properties++;
@@ -585,11 +450,9 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
continue;
}
- Handle<Object> key = property->key()->AsLiteral()->value();
+ Literal* key = property->key()->AsLiteral();
- uint32_t element_index = 0;
- if (key->ToArrayIndex(&element_index) ||
- (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index))) {
+ if (!key->IsPropertyName()) {
index_keys++;
}
}
@@ -611,22 +474,21 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
DCHECK(!property->is_computed_name());
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL) {
+ if (m_literal != nullptr) {
m_literal->BuildConstants(isolate);
}
// Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
// value for COMPUTED properties, the real value is filled in at
// runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->AsLiteral()->value();
- Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
-
+ Literal* key_literal = property->key()->AsLiteral();
uint32_t element_index = 0;
- if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
- key = isolate->factory()->NewNumberFromUint(element_index);
- } else if (key->IsNumber() && !key->ToArrayIndex(&element_index)) {
- key = isolate->factory()->NumberToString(key);
- }
+ Handle<Object> key =
+ key_literal->AsArrayIndex(&element_index)
+ ? isolate->factory()->NewNumberFromUint(element_index)
+ : Handle<Object>::cast(key_literal->AsRawPropertyName()->string());
+
+ Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
// Add name, value pair to the fixed array.
constant_properties->set(position++, *key);
@@ -665,7 +527,7 @@ int ArrayLiteral::InitDepthAndFlags() {
Expression* element = values()->at(array_index);
DCHECK(!element->IsSpread());
MaterializedLiteral* literal = element->AsMaterializedLiteral();
- if (literal != NULL) {
+ if (literal != nullptr) {
int subliteral_depth = literal->InitDepthAndFlags() + 1;
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
@@ -700,7 +562,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
Expression* element = values()->at(array_index);
DCHECK(!element->IsSpread());
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
- if (m_literal != NULL) {
+ if (m_literal != nullptr) {
m_literal->BuildConstants(isolate);
}
@@ -757,26 +619,6 @@ void ArrayLiteral::RewindSpreads() {
first_spread_index_ = -1;
}
-void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, kind, cache);
-
- // This logic that computes the number of slots needed for vector store
- // ics must mirror FullCodeGenerator::VisitArrayLiteral.
- for (int array_index = 0; array_index < values()->length(); array_index++) {
- Expression* subexpr = values()->at(array_index);
- DCHECK(!subexpr->IsSpread());
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- // We'll reuse the same literal slot for all of the non-constant
- // subexpressions that use a keyed store IC.
- literal_slot_ = spec->AddKeyedStoreICSlot(language_mode);
- return;
- }
-}
-
bool MaterializedLiteral::IsSimple() const {
if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
@@ -787,7 +629,7 @@ bool MaterializedLiteral::IsSimple() const {
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
Isolate* isolate) {
if (expression->IsLiteral()) {
- return expression->AsLiteral()->value();
+ return expression->AsLiteral()->BuildValue(isolate);
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(isolate, expression);
@@ -829,60 +671,29 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
isolate->factory()->NewFixedArray(this->raw_strings()->length(), TENURED);
bool raw_and_cooked_match = true;
for (int i = 0; i < raw_strings->length(); ++i) {
- if (*this->raw_strings()->at(i)->value() !=
- *this->cooked_strings()->at(i)->value()) {
+ if (this->cooked_strings()->at(i) == nullptr ||
+ *this->raw_strings()->at(i)->string() !=
+ *this->cooked_strings()->at(i)->string()) {
raw_and_cooked_match = false;
}
- raw_strings->set(i, *this->raw_strings()->at(i)->value());
+ raw_strings->set(i, *this->raw_strings()->at(i)->string());
}
Handle<FixedArray> cooked_strings = raw_strings;
if (!raw_and_cooked_match) {
cooked_strings = isolate->factory()->NewFixedArray(
this->cooked_strings()->length(), TENURED);
for (int i = 0; i < cooked_strings->length(); ++i) {
- cooked_strings->set(i, *this->cooked_strings()->at(i)->value());
+ if (this->cooked_strings()->at(i) != nullptr) {
+ cooked_strings->set(i, *this->cooked_strings()->at(i)->string());
+ } else {
+ cooked_strings->set(i, isolate->heap()->undefined_value());
+ }
}
}
return isolate->factory()->NewTemplateObjectDescription(
this->hash(), raw_strings, cooked_strings);
}
-void UnaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- switch (op()) {
- // Only unary plus, minus, and bitwise-not currently collect feedback.
- case Token::ADD:
- case Token::SUB:
- case Token::BIT_NOT:
- // Note that the slot kind remains "BinaryOp", as the operation
- // is transformed into a binary operation in the BytecodeGenerator.
- feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
- return;
- default:
- return;
- }
-}
-
-void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- // Feedback vector slot is only used by interpreter for binary operations.
- // Full-codegen uses AstId to record type feedback.
- switch (op()) {
- // Comma, logical_or and logical_and do not collect type feedback.
- case Token::COMMA:
- case Token::AND:
- case Token::OR:
- return;
- default:
- feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
- return;
- }
-}
-
static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
// Add is not commutative due to potential for string addition.
return op == Token::MUL || op == Token::BIT_AND || op == Token::BIT_OR ||
@@ -909,23 +720,7 @@ bool BinaryOperation::IsSmiLiteralOperation(Expression** subexpr,
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
- return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
-}
-
-void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache_) {
- // Feedback vector slot is only used by interpreter for binary operations.
- // Full-codegen uses AstId to record type feedback.
- switch (op()) {
- // instanceof and in do not collect type feedback.
- case Token::INSTANCEOF:
- case Token::IN:
- return;
- default:
- feedback_slot_ = spec->AddInterpreterCompareICSlot();
- }
+ return maybe_unary != nullptr && maybe_unary->op() == Token::TYPEOF;
}
// Check for the pattern: typeof <expression> equals <string literal>.
@@ -949,9 +744,8 @@ bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
static bool IsVoidOfLiteral(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
- return maybe_unary != NULL &&
- maybe_unary->op() == Token::VOID &&
- maybe_unary->expression()->IsLiteral();
+ return maybe_unary != nullptr && maybe_unary->op() == Token::VOID &&
+ maybe_unary->expression()->IsLiteral();
}
@@ -977,7 +771,6 @@ bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
MatchLiteralCompareUndefined(right_, op(), left_, expr);
}
-
// Check for the pattern: null equals <expression>
static bool MatchLiteralCompareNull(Expression* left,
Token::Value op,
@@ -990,25 +783,14 @@ static bool MatchLiteralCompareNull(Expression* left,
return false;
}
-
bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
return MatchLiteralCompareNull(left_, op(), right_, expr) ||
MatchLiteralCompareNull(right_, op(), left_, expr);
}
-
-// ----------------------------------------------------------------------------
-// Recording of type feedback
-
-void Call::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode, FunctionKind kind,
- FeedbackSlotCache* cache) {
- ic_slot_ = spec->AddCallICSlot();
-}
-
Call::CallType Call::GetCallType() const {
VariableProxy* proxy = expression()->AsVariableProxy();
- if (proxy != NULL) {
+ if (proxy != nullptr) {
if (proxy->var()->IsUnallocated()) {
return GLOBAL_CALL;
} else if (proxy->var()->IsLookupSlot()) {
@@ -1036,28 +818,113 @@ Call::CallType Call::GetCallType() const {
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) {}
-void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
- LanguageMode language_mode,
- FunctionKind kind,
- FeedbackSlotCache* cache) {
- feedback_slot_ = spec->AddInterpreterCompareICSlot();
+bool Literal::IsPropertyName() const {
+ if (type() != kString) return false;
+ uint32_t index;
+ return !string_->AsArrayIndex(&index);
+}
+
+bool Literal::ToUint32(uint32_t* value) const {
+ switch (type()) {
+ case kString:
+ return string_->AsArrayIndex(value);
+ case kSmi:
+ if (smi_ < 0) return false;
+ *value = static_cast<uint32_t>(smi_);
+ return true;
+ case kHeapNumber:
+ return DoubleToUint32IfEqualToSelf(AsNumber(), value);
+ default:
+ return false;
+ }
+}
+
+bool Literal::AsArrayIndex(uint32_t* value) const {
+ return ToUint32(value) && *value != kMaxUInt32;
+}
+
+Handle<Object> Literal::BuildValue(Isolate* isolate) const {
+ switch (type()) {
+ case kSmi:
+ return handle(Smi::FromInt(smi_), isolate);
+ case kHeapNumber:
+ return isolate->factory()->NewNumber(number_, TENURED);
+ case kString:
+ return string_->string();
+ case kSymbol:
+ return isolate->factory()->home_object_symbol();
+ case kBoolean:
+ return isolate->factory()->ToBoolean(boolean_);
+ case kNull:
+ return isolate->factory()->null_value();
+ case kUndefined:
+ return isolate->factory()->undefined_value();
+ case kTheHole:
+ return isolate->factory()->the_hole_value();
+ case kBigInt:
+ // This should never fail: the parser will never create a BigInt
+ // literal that cannot be allocated.
+ return BigIntLiteral(isolate, bigint_.c_str()).ToHandleChecked();
+ }
+ UNREACHABLE();
+}
+
+bool Literal::ToBooleanIsTrue() const {
+ switch (type()) {
+ case kSmi:
+ return smi_ != 0;
+ case kHeapNumber:
+ return DoubleToBoolean(number_);
+ case kString:
+ return !string_->IsEmpty();
+ case kNull:
+ case kUndefined:
+ return false;
+ case kBoolean:
+ return boolean_;
+ case kBigInt: {
+ const char* bigint_str = bigint_.c_str();
+ size_t length = strlen(bigint_str);
+ DCHECK_GT(length, 0);
+ if (length == 1 && bigint_str[0] == '0') return false;
+ // Skip over any radix prefix; BigInts with length > 1 only
+ // begin with zero if they include a radix.
+ for (size_t i = (bigint_str[0] == '0') ? 2 : 0; i < length; ++i) {
+ if (bigint_str[i] != '0') return true;
+ }
+ return false;
+ }
+ case kSymbol:
+ return true;
+ case kTheHole:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
}
uint32_t Literal::Hash() {
- return raw_value()->IsString()
- ? raw_value()->AsString()->Hash()
- : ComputeLongHash(double_to_uint64(raw_value()->AsNumber()));
+ return IsString() ? AsRawString()->Hash()
+ : ComputeLongHash(double_to_uint64(AsNumber()));
}
// static
-bool Literal::Match(void* literal1, void* literal2) {
- const AstValue* x = static_cast<Literal*>(literal1)->raw_value();
- const AstValue* y = static_cast<Literal*>(literal2)->raw_value();
- return (x->IsString() && y->IsString() && x->AsString() == y->AsString()) ||
+bool Literal::Match(void* a, void* b) {
+ Literal* x = static_cast<Literal*>(a);
+ Literal* y = static_cast<Literal*>(b);
+ return (x->IsString() && y->IsString() &&
+ x->AsRawString() == y->AsRawString()) ||
(x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
}
+Literal* AstNodeFactory::NewNumberLiteral(double number, int pos) {
+ int int_value;
+ if (DoubleToSmiInteger(number, &int_value)) {
+ return NewSmiLiteral(int_value, pos);
+ }
+ return new (zone_) Literal(number, pos);
+}
+
const char* CallRuntime::debug_name() {
#ifdef DEBUG
return is_jsruntime() ? NameForNativeContextIntrinsicIndex(context_index_)
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 0253e6651e..1ca192a462 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -5,6 +5,8 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
+#include <memory>
+
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
#include "src/ast/variables.h"
@@ -61,7 +63,8 @@ namespace internal {
V(WithStatement) \
V(TryCatchStatement) \
V(TryFinallyStatement) \
- V(DebuggerStatement)
+ V(DebuggerStatement) \
+ V(InitializeClassFieldsStatement)
#define LITERAL_NODE_LIST(V) \
V(RegExpLiteral) \
@@ -73,6 +76,7 @@ namespace internal {
V(Assignment) \
V(Await) \
V(BinaryOperation) \
+ V(NaryOperation) \
V(Call) \
V(CallNew) \
V(CallRuntime) \
@@ -107,6 +111,7 @@ namespace internal {
EXPRESSION_NODE_LIST(V)
// Forward declarations
+class AstNode;
class AstNodeFactory;
class Declaration;
class BreakableStatement;
@@ -121,44 +126,6 @@ class Statement;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
-class FeedbackSlotCache {
- public:
- typedef std::pair<TypeofMode, Variable*> Key;
-
- explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}
-
- void Put(TypeofMode typeof_mode, Variable* variable, FeedbackSlot slot) {
- Key key = std::make_pair(typeof_mode, variable);
- auto entry = std::make_pair(key, slot);
- map_.insert(entry);
- }
-
- FeedbackSlot Get(TypeofMode typeof_mode, Variable* variable) const {
- Key key = std::make_pair(typeof_mode, variable);
- auto iter = map_.find(key);
- if (iter != map_.end()) {
- return iter->second;
- }
- return FeedbackSlot();
- }
-
- private:
- ZoneMap<Key, FeedbackSlot> map_;
-};
-
-
-class AstProperties final BASE_EMBEDDED {
- public:
- explicit AstProperties(Zone* zone) : spec_(zone) {}
-
- const FeedbackVectorSpec* get_spec() const { return &spec_; }
- FeedbackVectorSpec* get_spec() { return &spec_; }
-
- private:
- FeedbackVectorSpec spec_;
-};
-
-
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
@@ -206,7 +173,7 @@ class AstNode: public ZoneObject {
class Statement : public AstNode {
public:
- bool IsEmpty() { return AsEmptyStatement() != NULL; }
+ bool IsEmpty() { return AsEmptyStatement() != nullptr; }
bool IsJump() const;
protected:
@@ -264,6 +231,9 @@ class Expression : public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral() const;
+ // True iff the expression is the hole literal.
+ bool IsTheHoleLiteral() const;
+
// True if we can prove that the expression is the undefined literal. Note
// that this also checks for loads of the global "undefined" variable.
bool IsUndefinedLiteral() const;
@@ -312,8 +282,8 @@ class Block : public BreakableStatement {
inline ZoneList<const AstRawString*>* labels() const;
bool IsJump() const {
- return !statements_.is_empty() && statements_.last()->IsJump()
- && labels() == NULL; // Good enough as an approximation...
+ return !statements_.is_empty() && statements_.last()->IsJump() &&
+ labels() == nullptr; // Good enough as an approximation...
}
Scope* scope() const { return scope_; }
@@ -335,7 +305,7 @@ class Block : public BreakableStatement {
bool ignore_completion_value)
: BreakableStatement(TARGET_FOR_NAMED_ONLY, kNoSourcePosition, kBlock),
statements_(capacity, zone),
- scope_(NULL) {
+ scope_(nullptr) {
bit_field_ |= IgnoreCompletionField::encode(ignore_completion_value) |
IsLabeledField::encode(labels != nullptr);
}
@@ -367,9 +337,7 @@ inline ZoneList<const AstRawString*>* Block::labels() const {
class DoExpression final : public Expression {
public:
Block* block() { return block_; }
- void set_block(Block* b) { block_ = b; }
VariableProxy* result() { return result_; }
- void set_result(VariableProxy* v) { result_ = v; }
private:
friend class AstNodeFactory;
@@ -448,14 +416,13 @@ inline NestedVariableDeclaration* VariableDeclaration::AsNested() {
class FunctionDeclaration final : public Declaration {
public:
FunctionLiteral* fun() const { return fun_; }
- void set_fun(FunctionLiteral* f) { fun_ = f; }
private:
friend class AstNodeFactory;
FunctionDeclaration(VariableProxy* proxy, FunctionLiteral* fun, int pos)
: Declaration(proxy, pos, kFunctionDeclaration), fun_(fun) {
- DCHECK(fun != NULL);
+ DCHECK_NOT_NULL(fun);
}
FunctionLiteral* fun_;
@@ -481,7 +448,7 @@ class IterationStatement : public BreakableStatement {
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
- body_(NULL),
+ body_(nullptr),
suspend_count_(0),
first_suspend_id_(0) {}
void Initialize(Statement* body) { body_ = body; }
@@ -505,13 +472,12 @@ class DoWhileStatement final : public IterationStatement {
}
Expression* cond() const { return cond_; }
- void set_cond(Expression* e) { cond_ = e; }
private:
friend class AstNodeFactory;
DoWhileStatement(ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(labels, pos, kDoWhileStatement), cond_(NULL) {}
+ : IterationStatement(labels, pos, kDoWhileStatement), cond_(nullptr) {}
Expression* cond_;
};
@@ -525,13 +491,12 @@ class WhileStatement final : public IterationStatement {
}
Expression* cond() const { return cond_; }
- void set_cond(Expression* e) { cond_ = e; }
private:
friend class AstNodeFactory;
WhileStatement(ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(labels, pos, kWhileStatement), cond_(NULL) {}
+ : IterationStatement(labels, pos, kWhileStatement), cond_(nullptr) {}
Expression* cond_;
};
@@ -551,18 +516,14 @@ class ForStatement final : public IterationStatement {
Expression* cond() const { return cond_; }
Statement* next() const { return next_; }
- void set_init(Statement* s) { init_ = s; }
- void set_cond(Expression* e) { cond_ = e; }
- void set_next(Statement* s) { next_ = s; }
-
private:
friend class AstNodeFactory;
ForStatement(ZoneList<const AstRawString*>* labels, int pos)
: IterationStatement(labels, pos, kForStatement),
- init_(NULL),
- cond_(NULL),
- next_(NULL) {}
+ init_(nullptr),
+ cond_(nullptr),
+ next_(nullptr) {}
Statement* init_;
Expression* cond_;
@@ -605,23 +566,8 @@ class ForInStatement final : public ForEachStatement {
Expression* each() const { return each_; }
Expression* subject() const { return subject_; }
- void set_each(Expression* e) { each_ = e; }
- void set_subject(Expression* e) { subject_ = e; }
-
- // Type feedback information.
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
- FeedbackSlot EachFeedbackSlot() const { return each_slot_; }
- FeedbackSlot ForInFeedbackSlot() {
- DCHECK(!for_in_feedback_slot_.IsInvalid());
- return for_in_feedback_slot_;
- }
-
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return ForInTypeField::decode(bit_field_); }
- void set_for_in_type(ForInType type) {
- bit_field_ = ForInTypeField::update(bit_field_, type);
- }
private:
friend class AstNodeFactory;
@@ -635,8 +581,6 @@ class ForInStatement final : public ForEachStatement {
Expression* each_;
Expression* subject_;
- FeedbackSlot each_slot_;
- FeedbackSlot for_in_feedback_slot_;
class ForInTypeField
: public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
@@ -680,21 +624,16 @@ class ForOfStatement final : public ForEachStatement {
return assign_each_;
}
- void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
- void set_next_result(Expression* e) { next_result_ = e; }
- void set_result_done(Expression* e) { result_done_ = e; }
- void set_assign_each(Expression* e) { assign_each_ = e; }
-
private:
friend class AstNodeFactory;
ForOfStatement(ZoneList<const AstRawString*>* labels, int pos)
: ForEachStatement(labels, pos, kForOfStatement),
- iterator_(NULL),
- assign_iterator_(NULL),
- next_result_(NULL),
- result_done_(NULL),
- assign_each_(NULL) {}
+ iterator_(nullptr),
+ assign_iterator_(nullptr),
+ next_result_(nullptr),
+ result_done_(nullptr),
+ assign_each_(nullptr) {}
Variable* iterator_;
Expression* assign_iterator_;
@@ -762,7 +701,6 @@ class ReturnStatement final : public JumpStatement {
enum Type { kNormal, kAsyncReturn };
Expression* expression() const { return expression_; }
- void set_expression(Expression* e) { expression_ = e; }
Type type() const { return TypeField::decode(bit_field_); }
bool is_async_return() const { return type() == kAsyncReturn; }
@@ -790,7 +728,6 @@ class WithStatement final : public Statement {
public:
Scope* scope() { return scope_; }
Expression* expression() const { return expression_; }
- void set_expression(Expression* e) { expression_ = e; }
Statement* statement() const { return statement_; }
void set_statement(Statement* s) { statement_ = s; }
@@ -811,25 +748,18 @@ class WithStatement final : public Statement {
class CaseClause final : public ZoneObject {
public:
- bool is_default() const { return label_ == NULL; }
+ bool is_default() const { return label_ == nullptr; }
Expression* label() const {
DCHECK(!is_default());
return label_;
}
- void set_label(Expression* e) { label_ = e; }
ZoneList<Statement*>* statements() const { return statements_; }
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
-
- FeedbackSlot CompareOperationFeedbackSlot() { return feedback_slot_; }
-
private:
friend class AstNodeFactory;
CaseClause(Expression* label, ZoneList<Statement*>* statements);
- FeedbackSlot feedback_slot_;
Expression* label_;
ZoneList<Statement*>* statements_;
};
@@ -874,7 +804,6 @@ class IfStatement final : public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
- void set_condition(Expression* e) { condition_ = e; }
void set_then_statement(Statement* s) { then_statement_ = s; }
void set_else_statement(Statement* s) { else_statement_ = s; }
@@ -1036,25 +965,79 @@ class SloppyBlockFunctionStatement final : public Statement {
class Literal final : public Expression {
public:
+ enum Type {
+ kSmi,
+ kHeapNumber,
+ kBigInt,
+ kString,
+ kSymbol,
+ kBoolean,
+ kUndefined,
+ kNull,
+ kTheHole,
+ };
+
+ Type type() const { return TypeField::decode(bit_field_); }
+
// Returns true if literal represents a property name (i.e. cannot be parsed
// as array indices).
- bool IsPropertyName() const { return value_->IsPropertyName(); }
+ bool IsPropertyName() const;
+
+ // Returns true if literal represents an array index.
+ // Note, that in general the following statement is not true:
+ // key->IsPropertyName() != key->AsArrayIndex(...)
+ // but for non-computed LiteralProperty properties the following is true:
+ // property->key()->IsPropertyName() != property->key()->AsArrayIndex(...)
+ bool AsArrayIndex(uint32_t* index) const;
const AstRawString* AsRawPropertyName() {
DCHECK(IsPropertyName());
- return value_->AsString();
+ return string_;
+ }
+
+ Smi* AsSmiLiteral() const {
+ DCHECK_EQ(kSmi, type());
+ return Smi::FromInt(smi_);
+ }
+
+ // Returns true if literal represents a Number.
+ bool IsNumber() const { return type() == kHeapNumber || type() == kSmi; }
+ double AsNumber() const {
+ DCHECK(IsNumber());
+ switch (type()) {
+ case kSmi:
+ return smi_;
+ case kHeapNumber:
+ return number_;
+ default:
+ UNREACHABLE();
+ }
}
- Smi* AsSmiLiteral() {
- DCHECK(IsSmiLiteral());
- return raw_value()->AsSmi();
+ AstBigInt AsBigInt() const {
+ DCHECK_EQ(type(), kBigInt);
+ return bigint_;
}
- bool ToBooleanIsTrue() const { return raw_value()->BooleanValue(); }
- bool ToBooleanIsFalse() const { return !raw_value()->BooleanValue(); }
+ bool IsString() const { return type() == kString; }
+ const AstRawString* AsRawString() {
+ DCHECK_EQ(type(), kString);
+ return string_;
+ }
+
+ AstSymbol AsSymbol() {
+ DCHECK_EQ(type(), kSymbol);
+ return symbol_;
+ }
+
+ V8_EXPORT_PRIVATE bool ToBooleanIsTrue() const;
+ bool ToBooleanIsFalse() const { return !ToBooleanIsTrue(); }
- Handle<Object> value() const { return value_->value(); }
- const AstValue* raw_value() const { return value_; }
+ bool ToUint32(uint32_t* value) const;
+
+ // Returns an appropriate Object representing this Literal, allocating
+ // a heap object if needed.
+ Handle<Object> BuildValue(Isolate* isolate) const;
// Support for using Literal as a HashMap key. NOTE: Currently, this works
// only for string and number literals!
@@ -1064,29 +1047,59 @@ class Literal final : public Expression {
private:
friend class AstNodeFactory;
- Literal(const AstValue* value, int position)
- : Expression(position, kLiteral), value_(value) {}
+ class TypeField : public BitField<Type, Expression::kNextBitFieldIndex, 4> {};
+
+ Literal(int smi, int position) : Expression(position, kLiteral), smi_(smi) {
+ bit_field_ = TypeField::update(bit_field_, kSmi);
+ }
+
+ Literal(double number, int position)
+ : Expression(position, kLiteral), number_(number) {
+ bit_field_ = TypeField::update(bit_field_, kHeapNumber);
+ }
+
+ Literal(AstBigInt bigint, int position)
+ : Expression(position, kLiteral), bigint_(bigint) {
+ bit_field_ = TypeField::update(bit_field_, kBigInt);
+ }
+
+ Literal(const AstRawString* string, int position)
+ : Expression(position, kLiteral), string_(string) {
+ bit_field_ = TypeField::update(bit_field_, kString);
+ }
+
+ Literal(AstSymbol symbol, int position)
+ : Expression(position, kLiteral), symbol_(symbol) {
+ bit_field_ = TypeField::update(bit_field_, kSymbol);
+ }
+
+ Literal(bool boolean, int position)
+ : Expression(position, kLiteral), boolean_(boolean) {
+ bit_field_ = TypeField::update(bit_field_, kBoolean);
+ }
- const AstValue* value_;
+ Literal(Type type, int position) : Expression(position, kLiteral) {
+ DCHECK(type == kNull || type == kUndefined || type == kTheHole);
+ bit_field_ = TypeField::update(bit_field_, type);
+ }
+
+ union {
+ const AstRawString* string_;
+ int smi_;
+ double number_;
+ AstSymbol symbol_;
+ AstBigInt bigint_;
+ bool boolean_;
+ };
};
// Base class for literals that need space in the type feedback vector.
class MaterializedLiteral : public Expression {
public:
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- literal_slot_ = spec->AddLiteralSlot();
- }
-
- FeedbackSlot literal_slot() const { return literal_slot_; }
-
// A Materializedliteral is simple if the values consist of only
// constants and simple object and array literals.
bool IsSimple() const;
- private:
- FeedbackSlot literal_slot_;
-
protected:
MaterializedLiteral(int pos, NodeType type) : Expression(pos, type) {}
@@ -1199,25 +1212,8 @@ class LiteralProperty : public ZoneObject {
public:
Expression* key() const { return key_; }
Expression* value() const { return value_; }
- void set_key(Expression* e) { key_ = e; }
- void set_value(Expression* e) { value_ = e; }
bool is_computed_name() const { return is_computed_name_; }
-
- FeedbackSlot GetSlot(int offset = 0) const {
- DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
- return slots_[offset];
- }
-
- FeedbackSlot GetStoreDataPropertySlot() const;
-
- void SetSlot(FeedbackSlot slot, int offset = 0) {
- DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
- slots_[offset] = slot;
- }
-
- void SetStoreDataPropertySlot(FeedbackSlot slot);
-
bool NeedsSetFunctionName() const;
protected:
@@ -1226,7 +1222,6 @@ class LiteralProperty : public ZoneObject {
Expression* key_;
Expression* value_;
- FeedbackSlot slots_[2];
bool is_computed_name_;
};
@@ -1348,16 +1343,11 @@ class ObjectLiteral final : public AggregateLiteral {
static_cast<int>(kFastElements));
struct Accessors: public ZoneObject {
- Accessors() : getter(NULL), setter(NULL) {}
+ Accessors() : getter(nullptr), setter(nullptr) {}
ObjectLiteralProperty* getter;
ObjectLiteralProperty* setter;
};
- // Object literals need one feedback slot for each non-trivial value, as well
- // as some slots for home objects.
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
-
private:
friend class AstNodeFactory;
@@ -1414,7 +1404,9 @@ class AccessorTable
Iterator lookup(Literal* literal) {
Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
- if (it->second == NULL) it->second = new (zone_) ObjectLiteral::Accessors();
+ if (it->second == nullptr) {
+ it->second = new (zone_) ObjectLiteral::Accessors();
+ }
return it;
}
@@ -1467,10 +1459,6 @@ class ArrayLiteral final : public AggregateLiteral {
// Rewind an array literal omitting everything from the first spread on.
void RewindSpreads();
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
- FeedbackSlot LiteralFeedbackSlot() const { return literal_slot_; }
-
private:
friend class AstNodeFactory;
@@ -1480,7 +1468,6 @@ class ArrayLiteral final : public AggregateLiteral {
values_(values) {}
int first_spread_index_;
- FeedbackSlot literal_slot_;
Handle<ConstantElementsPair> constant_elements_;
ZoneList<Expression*>* values_;
};
@@ -1542,15 +1529,6 @@ class VariableProxy final : public Expression {
// Bind this proxy to the variable var.
void BindTo(Variable* var);
- bool UsesVariableFeedbackSlot() const {
- return var()->IsUnallocated() || var()->IsLookupSlot();
- }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, TypeofMode typeof_mode,
- FeedbackSlotCache* cache);
-
- FeedbackSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
-
void set_next_unresolved(VariableProxy* next) { next_unresolved_ = next; }
VariableProxy* next_unresolved() { return next_unresolved_; }
@@ -1580,7 +1558,6 @@ class VariableProxy final : public Expression {
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
- FeedbackSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
@@ -1607,25 +1584,11 @@ class Property final : public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- void set_obj(Expression* e) { obj_ = e; }
- void set_key(Expression* e) { key_ = e; }
-
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- if (key()->IsPropertyName()) {
- property_feedback_slot_ = spec->AddLoadICSlot();
- } else {
- property_feedback_slot_ = spec->AddKeyedLoadICSlot();
- }
- }
-
- FeedbackSlot PropertyFeedbackSlot() const { return property_feedback_slot_; }
-
// Returns the properties assign type.
static LhsKind GetAssignType(Property* property) {
- if (property == NULL) return VARIABLE;
+ if (property == nullptr) return VARIABLE;
bool super_access = property->IsSuperAccess();
return (property->key()->IsPropertyName())
? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY)
@@ -1639,7 +1602,6 @@ class Property final : public Expression {
: Expression(pos, kProperty), obj_(obj), key_(key) {
}
- FeedbackSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
};
@@ -1650,18 +1612,14 @@ class Call final : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- void set_expression(Expression* e) { expression_ = e; }
-
- // Type feedback information.
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
-
- FeedbackSlot CallFeedbackICSlot() const { return ic_slot_; }
-
bool is_possibly_eval() const {
return IsPossiblyEvalField::decode(bit_field_);
}
+ bool is_tagged_template() const {
+ return IsTaggedTemplateField::decode(bit_field_);
+ }
+
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
}
@@ -1685,6 +1643,8 @@ class Call final : public Expression {
// Helpers to determine how to handle the call.
CallType GetCallType() const;
+ enum class TaggedTemplateTag { kTrue };
+
private:
friend class AstNodeFactory;
@@ -1694,13 +1654,22 @@ class Call final : public Expression {
expression_(expression),
arguments_(arguments) {
bit_field_ |=
- IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL);
+ IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) |
+ IsTaggedTemplateField::encode(false);
+ }
+
+ Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
+ TaggedTemplateTag tag)
+ : Expression(pos, kCall), expression_(expression), arguments_(arguments) {
+ bit_field_ |= IsPossiblyEvalField::encode(false) |
+ IsTaggedTemplateField::encode(true);
}
class IsPossiblyEvalField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+ class IsTaggedTemplateField
+ : public BitField<bool, IsPossiblyEvalField::kNext, 1> {};
- FeedbackSlot ic_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
};
@@ -1711,21 +1680,6 @@ class CallNew final : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- void set_expression(Expression* e) { expression_ = e; }
-
- // Type feedback information.
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- // CallNew stores feedback in the exact same way as Call. We can
- // piggyback on the type feedback infrastructure for calls.
- callnew_feedback_slot_ = spec->AddCallICSlot();
- }
-
- FeedbackSlot CallNewFeedbackSlot() {
- DCHECK(!callnew_feedback_slot_.IsInvalid());
- return callnew_feedback_slot_;
- }
-
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
}
@@ -1739,7 +1693,6 @@ class CallNew final : public Expression {
arguments_(arguments) {
}
- FeedbackSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
};
@@ -1752,16 +1705,12 @@ class CallNew final : public Expression {
class CallRuntime final : public Expression {
public:
ZoneList<Expression*>* arguments() const { return arguments_; }
- bool is_jsruntime() const { return function_ == NULL; }
+ bool is_jsruntime() const { return function_ == nullptr; }
int context_index() const {
DCHECK(is_jsruntime());
return context_index_;
}
- void set_context_index(int index) {
- DCHECK(is_jsruntime());
- context_index_ = index;
- }
const Runtime::Function* function() const {
DCHECK(!is_jsruntime());
return function_;
@@ -1780,7 +1729,7 @@ class CallRuntime final : public Expression {
CallRuntime(int context_index, ZoneList<Expression*>* arguments, int pos)
: Expression(pos, kCallRuntime),
context_index_(context_index),
- function_(NULL),
+ function_(nullptr),
arguments_(arguments) {}
int context_index_;
@@ -1793,12 +1742,6 @@ class UnaryOperation final : public Expression {
public:
Token::Value op() const { return OperatorField::decode(bit_field_); }
Expression* expression() const { return expression_; }
- void set_expression(Expression* e) { expression_ = e; }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
-
- FeedbackSlot UnaryOperationFeedbackSlot() const { return feedback_slot_; }
private:
friend class AstNodeFactory;
@@ -1809,7 +1752,6 @@ class UnaryOperation final : public Expression {
DCHECK(Token::IsUnaryOp(op));
}
- FeedbackSlot feedback_slot_;
Expression* expression_;
class OperatorField
@@ -1821,14 +1763,7 @@ class BinaryOperation final : public Expression {
public:
Token::Value op() const { return OperatorField::decode(bit_field_); }
Expression* left() const { return left_; }
- void set_left(Expression* e) { left_ = e; }
Expression* right() const { return right_; }
- void set_right(Expression* e) { right_ = e; }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
-
- FeedbackSlot BinaryOperationFeedbackSlot() const { return feedback_slot_; }
// Returns true if one side is a Smi literal, returning the other side's
// sub-expression in |subexpr| and the literal Smi in |literal|.
@@ -1843,7 +1778,6 @@ class BinaryOperation final : public Expression {
DCHECK(Token::IsBinaryOp(op));
}
- FeedbackSlot feedback_slot_;
Expression* left_;
Expression* right_;
@@ -1851,6 +1785,65 @@ class BinaryOperation final : public Expression {
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
+class NaryOperation final : public Expression {
+ public:
+ Token::Value op() const { return OperatorField::decode(bit_field_); }
+ Expression* first() const { return first_; }
+ Expression* subsequent(size_t index) const {
+ return subsequent_[index].expression;
+ }
+
+ size_t subsequent_length() const { return subsequent_.size(); }
+ int subsequent_op_position(size_t index) const {
+ return subsequent_[index].op_position;
+ }
+
+ void AddSubsequent(Expression* expr, int pos) {
+ subsequent_.emplace_back(expr, pos);
+ }
+
+ private:
+ friend class AstNodeFactory;
+
+ NaryOperation(Zone* zone, Token::Value op, Expression* first,
+ size_t initial_subsequent_size)
+ : Expression(first->position(), kNaryOperation),
+ first_(first),
+ subsequent_(zone) {
+ bit_field_ |= OperatorField::encode(op);
+ DCHECK(Token::IsBinaryOp(op));
+ DCHECK_NE(op, Token::EXP);
+ subsequent_.reserve(initial_subsequent_size);
+ }
+
+ // Nary operations store the first (lhs) child expression inline, and the
+ // child expressions (rhs of each op) are stored out-of-line, along with
+ // their operation's position. Note that the Nary operation expression's
+ // position has no meaning.
+ //
+ // So an nary add:
+ //
+ // expr + expr + expr + ...
+ //
+ // is stored as:
+ //
+ // (expr) [(+ expr), (+ expr), ...]
+ // '-.--' '-----------.-----------'
+ // first subsequent entry list
+
+ Expression* first_;
+
+ struct NaryOperationEntry {
+ Expression* expression;
+ int op_position;
+ NaryOperationEntry(Expression* e, int pos)
+ : expression(e), op_position(pos) {}
+ };
+ ZoneVector<NaryOperationEntry> subsequent_;
+
+ class OperatorField
+ : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
+};
class CountOperation final : public Expression {
public:
@@ -1860,16 +1853,6 @@ class CountOperation final : public Expression {
Token::Value op() const { return TokenField::decode(bit_field_); }
Expression* expression() const { return expression_; }
- void set_expression(Expression* e) { expression_ = e; }
-
- // Feedback slot for binary operation is only used by ignition.
- FeedbackSlot CountBinaryOpFeedbackSlot() const {
- return binary_operation_slot_;
- }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
- FeedbackSlot CountSlot() const { return slot_; }
private:
friend class AstNodeFactory;
@@ -1883,8 +1866,6 @@ class CountOperation final : public Expression {
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class TokenField : public BitField<Token::Value, IsPrefixField::kNext, 7> {};
- FeedbackSlot slot_;
- FeedbackSlot binary_operation_slot_;
Expression* expression_;
};
@@ -1895,14 +1876,6 @@ class CompareOperation final : public Expression {
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- void set_left(Expression* e) { left_ = e; }
- void set_right(Expression* e) { right_ = e; }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
-
- FeedbackSlot CompareOperationFeedbackSlot() const { return feedback_slot_; }
-
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Literal** literal);
bool IsLiteralCompareUndefined(Expression** expr);
@@ -1918,7 +1891,6 @@ class CompareOperation final : public Expression {
DCHECK(Token::IsCompareOp(op));
}
- FeedbackSlot feedback_slot_;
Expression* left_;
Expression* right_;
@@ -1930,7 +1902,6 @@ class CompareOperation final : public Expression {
class Spread final : public Expression {
public:
Expression* expression() const { return expression_; }
- void set_expression(Expression* e) { expression_ = e; }
int expression_position() const { return expr_pos_; }
@@ -1953,10 +1924,6 @@ class Conditional final : public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
- void set_condition(Expression* e) { condition_ = e; }
- void set_then_expression(Expression* e) { then_expression_ = e; }
- void set_else_expression(Expression* e) { else_expression_ = e; }
-
private:
friend class AstNodeFactory;
@@ -1978,9 +1945,6 @@ class Assignment : public Expression {
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- void set_target(Expression* e) { target_ = e; }
- void set_value(Expression* e) { value_ = e; }
-
// The assignment was generated as part of block-scoped sloppy-mode
// function hoisting, see
// ES#sec-block-level-function-declarations-web-legacy-compatibility-semantics
@@ -1993,10 +1957,6 @@ class Assignment : public Expression {
LookupHoistingModeField::update(bit_field_, static_cast<bool>(mode));
}
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
- FeedbackSlot AssignmentSlot() const { return slot_; }
-
protected:
Assignment(NodeType type, Token::Value op, Expression* target,
Expression* value, int pos);
@@ -2009,7 +1969,6 @@ class Assignment : public Expression {
class LookupHoistingModeField : public BitField<bool, TokenField::kNext, 1> {
};
- FeedbackSlot slot_;
Expression* target_;
Expression* value_;
};
@@ -2060,17 +2019,22 @@ class RewritableExpression final : public Expression {
set_rewritten();
}
+ Scope* scope() const { return scope_; }
+ void set_scope(Scope* scope) { scope_ = scope; }
+
private:
friend class AstNodeFactory;
- explicit RewritableExpression(Expression* expression)
+ RewritableExpression(Expression* expression, Scope* scope)
: Expression(expression->position(), kRewritableExpression),
- expr_(expression) {
+ expr_(expression),
+ scope_(scope) {
bit_field_ |= IsRewrittenField::encode(false);
DCHECK(!expression->IsRewritableExpression());
}
Expression* expr_;
+ Scope* scope_;
class IsRewrittenField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
@@ -2100,8 +2064,6 @@ class Suspend : public Expression {
}
int suspend_id() const { return suspend_id_; }
-
- void set_expression(Expression* e) { expression_ = e; }
void set_suspend_id(int id) { suspend_id_ = id; }
inline bool IsInitialYield() const { return suspend_id_ == 0 && IsYield(); }
@@ -2165,63 +2127,6 @@ class YieldStar final : public Suspend {
return 1;
}
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- load_iterable_iterator_slot_ = spec->AddLoadICSlot();
- load_iterator_return_slot_ = spec->AddLoadICSlot();
- load_iterator_next_slot_ = spec->AddLoadICSlot();
- load_iterator_throw_slot_ = spec->AddLoadICSlot();
- load_output_done_slot_ = spec->AddLoadICSlot();
- load_output_value_slot_ = spec->AddLoadICSlot();
- call_iterable_iterator_slot_ = spec->AddCallICSlot();
- call_iterator_return_slot1_ = spec->AddCallICSlot();
- call_iterator_return_slot2_ = spec->AddCallICSlot();
- call_iterator_next_slot_ = spec->AddCallICSlot();
- call_iterator_throw_slot_ = spec->AddCallICSlot();
- if (IsAsyncGeneratorFunction(kind)) {
- load_iterable_async_iterator_slot_ = spec->AddLoadICSlot();
- call_iterable_async_iterator_slot_ = spec->AddCallICSlot();
- }
- }
-
- FeedbackSlot load_iterable_iterator_slot() const {
- return load_iterable_iterator_slot_;
- }
- FeedbackSlot load_iterator_return_slot() const {
- return load_iterator_return_slot_;
- }
- FeedbackSlot load_iterator_next_slot() const {
- return load_iterator_next_slot_;
- }
- FeedbackSlot load_iterator_throw_slot() const {
- return load_iterator_throw_slot_;
- }
- FeedbackSlot load_output_done_slot() const { return load_output_done_slot_; }
- FeedbackSlot load_output_value_slot() const {
- return load_output_value_slot_;
- }
- FeedbackSlot call_iterable_iterator_slot() const {
- return call_iterable_iterator_slot_;
- }
- FeedbackSlot call_iterator_return_slot1() const {
- return call_iterator_return_slot1_;
- }
- FeedbackSlot call_iterator_return_slot2() const {
- return call_iterator_return_slot2_;
- }
- FeedbackSlot call_iterator_next_slot() const {
- return call_iterator_next_slot_;
- }
- FeedbackSlot call_iterator_throw_slot() const {
- return call_iterator_throw_slot_;
- }
- FeedbackSlot load_iterable_async_iterator_slot() const {
- return load_iterable_async_iterator_slot_;
- }
- FeedbackSlot call_iterable_async_iterator_slot() const {
- return call_iterable_async_iterator_slot_;
- }
-
private:
friend class AstNodeFactory;
@@ -2231,21 +2136,6 @@ class YieldStar final : public Suspend {
await_iterator_close_suspend_id_(-1),
await_delegated_iterator_output_suspend_id_(-1) {}
- FeedbackSlot load_iterable_iterator_slot_;
- FeedbackSlot load_iterator_return_slot_;
- FeedbackSlot load_iterator_next_slot_;
- FeedbackSlot load_iterator_throw_slot_;
- FeedbackSlot load_output_done_slot_;
- FeedbackSlot load_output_value_slot_;
- FeedbackSlot call_iterable_iterator_slot_;
- FeedbackSlot call_iterator_return_slot1_;
- FeedbackSlot call_iterator_return_slot2_;
- FeedbackSlot call_iterator_next_slot_;
- FeedbackSlot call_iterator_throw_slot_;
-
- FeedbackSlot load_iterable_async_iterator_slot_;
- FeedbackSlot call_iterable_async_iterator_slot_;
-
int await_iterator_close_suspend_id_;
int await_delegated_iterator_output_suspend_id_;
};
@@ -2261,7 +2151,6 @@ class Await final : public Suspend {
class Throw final : public Expression {
public:
Expression* exception() const { return exception_; }
- void set_exception(Expression* e) { exception_ = e; }
private:
friend class AstNodeFactory;
@@ -2312,13 +2201,6 @@ class FunctionLiteral final : public Expression {
}
LanguageMode language_mode() const;
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- literal_feedback_slot_ = spec->AddCreateClosureSlot();
- }
-
- FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
-
static bool NeedsHomeObject(Expression* expr);
int expected_property_count() {
@@ -2339,19 +2221,15 @@ class FunctionLiteral final : public Expression {
return false;
}
- Handle<String> debug_name() const {
- if (raw_name_ != NULL && !raw_name_->IsEmpty()) {
- return raw_name_->string();
- }
- return inferred_name();
- }
+ // Returns either name or inferred name as a cstring.
+ std::unique_ptr<char[]> GetDebugName() const;
Handle<String> inferred_name() const {
if (!inferred_name_.is_null()) {
- DCHECK(raw_inferred_name_ == NULL);
+ DCHECK_NULL(raw_inferred_name_);
return inferred_name_;
}
- if (raw_inferred_name_ != NULL) {
+ if (raw_inferred_name_ != nullptr) {
return raw_inferred_name_->string();
}
UNREACHABLE();
@@ -2361,12 +2239,14 @@ class FunctionLiteral final : public Expression {
void set_inferred_name(Handle<String> inferred_name) {
DCHECK(!inferred_name.is_null());
inferred_name_ = inferred_name;
- DCHECK(raw_inferred_name_== NULL || raw_inferred_name_->IsEmpty());
- raw_inferred_name_ = NULL;
+ DCHECK(raw_inferred_name_ == nullptr || raw_inferred_name_->IsEmpty());
+ raw_inferred_name_ = nullptr;
}
+ const AstConsString* raw_inferred_name() { return raw_inferred_name_; }
+
void set_raw_inferred_name(const AstConsString* raw_inferred_name) {
- DCHECK(raw_inferred_name != NULL);
+ DCHECK_NOT_NULL(raw_inferred_name);
raw_inferred_name_ = raw_inferred_name;
DCHECK(inferred_name_.is_null());
inferred_name_ = Handle<String>();
@@ -2394,13 +2274,6 @@ class FunctionLiteral final : public Expression {
}
FunctionKind kind() const;
- void set_ast_properties(AstProperties* ast_properties) {
- ast_properties_ = *ast_properties;
- }
- const FeedbackVectorSpec* feedback_vector_spec() const {
- return ast_properties_.get_spec();
- }
-
bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
BailoutReason dont_optimize_reason() {
return DontOptimizeReasonField::decode(bit_field_);
@@ -2425,6 +2298,13 @@ class FunctionLiteral final : public Expression {
function_literal_id_ = function_literal_id;
}
+ void set_requires_instance_fields_initializer(bool value) {
+ bit_field_ = RequiresInstanceFieldsInitializer::update(bit_field_, value);
+ }
+ bool requires_instance_fields_initializer() const {
+ return RequiresInstanceFieldsInitializer::decode(bit_field_);
+ }
+
ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
return produced_preparsed_scope_data_;
}
@@ -2451,14 +2331,14 @@ class FunctionLiteral final : public Expression {
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_cons_string()),
- ast_properties_(zone),
function_literal_id_(function_literal_id),
produced_preparsed_scope_data_(produced_preparsed_scope_data) {
bit_field_ |= FunctionTypeBits::encode(function_type) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
- DontOptimizeReasonField::encode(kNoReason);
+ DontOptimizeReasonField::encode(kNoReason) |
+ RequiresInstanceFieldsInitializer::encode(false);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
}
@@ -2469,6 +2349,8 @@ class FunctionLiteral final : public Expression {
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class DontOptimizeReasonField
: public BitField<BailoutReason, HasDuplicateParameters::kNext, 8> {};
+ class RequiresInstanceFieldsInitializer
+ : public BitField<bool, DontOptimizeReasonField::kNext, 1> {};
int expected_property_count_;
int parameter_count_;
@@ -2482,9 +2364,7 @@ class FunctionLiteral final : public Expression {
ZoneList<Statement*>* body_;
const AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
- AstProperties ast_properties_;
int function_literal_id_;
- FeedbackSlot literal_feedback_slot_;
ProducedPreParsedScopeData* produced_preparsed_scope_data_;
};
@@ -2498,6 +2378,9 @@ class ClassLiteralProperty final : public LiteralProperty {
bool is_static() const { return is_static_; }
+ void set_computed_name_var(Variable* var) { computed_name_var_ = var; }
+ Variable* computed_name_var() const { return computed_name_var_; }
+
private:
friend class AstNodeFactory;
@@ -2506,6 +2389,21 @@ class ClassLiteralProperty final : public LiteralProperty {
Kind kind_;
bool is_static_;
+ Variable* computed_name_var_;
+};
+
+class InitializeClassFieldsStatement final : public Statement {
+ public:
+ typedef ClassLiteralProperty Property;
+ ZoneList<Property*>* fields() const { return fields_; }
+
+ private:
+ friend class AstNodeFactory;
+
+ InitializeClassFieldsStatement(ZoneList<Property*>* fields, int pos)
+ : Statement(pos, kInitializeClassFieldsStatement), fields_(fields) {}
+
+ ZoneList<Property*>* fields_;
};
class ClassLiteral final : public Expression {
@@ -2515,9 +2413,7 @@ class ClassLiteral final : public Expression {
Scope* scope() const { return scope_; }
Variable* class_variable() const { return class_variable_; }
Expression* extends() const { return extends_; }
- void set_extends(Expression* e) { extends_ = e; }
FunctionLiteral* constructor() const { return constructor_; }
- void set_constructor(FunctionLiteral* f) { constructor_ = f; }
ZoneList<Property*>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
@@ -2535,18 +2431,21 @@ class ClassLiteral final : public Expression {
return is_anonymous_expression();
}
- // Object literals need one feedback slot for each non-trivial value, as well
- // as some slots for home objects.
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache);
+ FunctionLiteral* static_fields_initializer() const {
+ return static_fields_initializer_;
+ }
- FeedbackSlot HomeObjectSlot() const { return home_object_slot_; }
+ FunctionLiteral* instance_fields_initializer_function() const {
+ return instance_fields_initializer_function_;
+ }
private:
friend class AstNodeFactory;
ClassLiteral(Scope* scope, Variable* class_variable, Expression* extends,
FunctionLiteral* constructor, ZoneList<Property*>* properties,
+ FunctionLiteral* static_fields_initializer,
+ FunctionLiteral* instance_fields_initializer_function,
int start_position, int end_position,
bool has_name_static_property, bool has_static_computed_names,
bool is_anonymous)
@@ -2556,20 +2455,23 @@ class ClassLiteral final : public Expression {
class_variable_(class_variable),
extends_(extends),
constructor_(constructor),
- properties_(properties) {
+ properties_(properties),
+ static_fields_initializer_(static_fields_initializer),
+ instance_fields_initializer_function_(
+ instance_fields_initializer_function) {
bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
HasStaticComputedNames::encode(has_static_computed_names) |
IsAnonymousExpression::encode(is_anonymous);
}
int end_position_;
- FeedbackSlot home_object_slot_;
Scope* scope_;
Variable* class_variable_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
-
+ FunctionLiteral* static_fields_initializer_;
+ FunctionLiteral* instance_fields_initializer_function_;
class HasNameStaticProperty
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class HasStaticComputedNames
@@ -2582,15 +2484,8 @@ class ClassLiteral final : public Expression {
class NativeFunctionLiteral final : public Expression {
public:
Handle<String> name() const { return name_->string(); }
+ const AstRawString* raw_name() const { return name_; }
v8::Extension* extension() const { return extension_; }
- FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- // TODO(mvstanton): The FeedbackSlotCache can be adapted
- // to always return the same slot for this case.
- literal_feedback_slot_ = spec->AddCreateClosureSlot();
- }
private:
friend class AstNodeFactory;
@@ -2601,7 +2496,6 @@ class NativeFunctionLiteral final : public Expression {
name_(name),
extension_(extension) {}
- FeedbackSlot literal_feedback_slot_;
const AstRawString* name_;
v8::Extension* extension_;
};
@@ -2617,9 +2511,7 @@ class ThisFunction final : public Expression {
class SuperPropertyReference final : public Expression {
public:
VariableProxy* this_var() const { return this_var_; }
- void set_this_var(VariableProxy* v) { this_var_ = v; }
Expression* home_object() const { return home_object_; }
- void set_home_object(Expression* e) { home_object_ = e; }
private:
friend class AstNodeFactory;
@@ -2641,11 +2533,8 @@ class SuperPropertyReference final : public Expression {
class SuperCallReference final : public Expression {
public:
VariableProxy* this_var() const { return this_var_; }
- void set_this_var(VariableProxy* v) { this_var_ = v; }
VariableProxy* new_target_var() const { return new_target_var_; }
- void set_new_target_var(VariableProxy* v) { new_target_var_ = v; }
VariableProxy* this_function_var() const { return this_function_var_; }
- void set_this_function_var(VariableProxy* v) { this_function_var_ = v; }
private:
friend class AstNodeFactory;
@@ -2671,7 +2560,6 @@ class SuperCallReference final : public Expression {
class ImportCallExpression final : public Expression {
public:
Expression* argument() const { return argument_; }
- void set_argument(Expression* argument) { argument_ = argument; }
private:
friend class AstNodeFactory;
@@ -2701,33 +2589,6 @@ class GetIterator final : public Expression {
IteratorType hint() const { return hint_; }
Expression* iterable() const { return iterable_; }
- void set_iterable(Expression* iterable) { iterable_ = iterable; }
-
- void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
- FunctionKind kind, FeedbackSlotCache* cache) {
- iterator_property_feedback_slot_ = spec->AddLoadICSlot();
- iterator_call_feedback_slot_ = spec->AddCallICSlot();
- if (hint() == IteratorType::kAsync) {
- async_iterator_property_feedback_slot_ = spec->AddLoadICSlot();
- async_iterator_call_feedback_slot_ = spec->AddCallICSlot();
- }
- }
-
- FeedbackSlot IteratorPropertyFeedbackSlot() const {
- return iterator_property_feedback_slot_;
- }
-
- FeedbackSlot IteratorCallFeedbackSlot() const {
- return iterator_call_feedback_slot_;
- }
-
- FeedbackSlot AsyncIteratorPropertyFeedbackSlot() const {
- return async_iterator_property_feedback_slot_;
- }
-
- FeedbackSlot AsyncIteratorCallFeedbackSlot() const {
- return async_iterator_call_feedback_slot_;
- }
Expression* iterable_for_call_printer() const {
return destructured_iterable_ != nullptr ? destructured_iterable_
@@ -2757,19 +2618,18 @@ class GetIterator final : public Expression {
// the raw value stored in the variable proxy. This is only used for
// pretty printing error messages.
Expression* destructured_iterable_;
-
- FeedbackSlot iterator_property_feedback_slot_;
- FeedbackSlot iterator_call_feedback_slot_;
- FeedbackSlot async_iterator_property_feedback_slot_;
- FeedbackSlot async_iterator_call_feedback_slot_;
};
// Represents the spec operation `GetTemplateObject(templateLiteral)`
// (defined at https://tc39.github.io/ecma262/#sec-gettemplateobject).
class GetTemplateObject final : public Expression {
public:
- ZoneList<Literal*>* cooked_strings() const { return cooked_strings_; }
- ZoneList<Literal*>* raw_strings() const { return raw_strings_; }
+ const ZoneList<const AstRawString*>* cooked_strings() const {
+ return cooked_strings_;
+ }
+ const ZoneList<const AstRawString*>* raw_strings() const {
+ return raw_strings_;
+ }
int hash() const { return hash_; }
Handle<TemplateObjectDescription> GetOrBuildDescription(Isolate* isolate);
@@ -2777,15 +2637,16 @@ class GetTemplateObject final : public Expression {
private:
friend class AstNodeFactory;
- GetTemplateObject(ZoneList<Literal*>* cooked_strings,
- ZoneList<Literal*>* raw_strings, int hash, int pos)
+ GetTemplateObject(const ZoneList<const AstRawString*>* cooked_strings,
+ const ZoneList<const AstRawString*>* raw_strings, int hash,
+ int pos)
: Expression(pos, kGetTemplateObject),
cooked_strings_(cooked_strings),
raw_strings_(raw_strings),
hash_(hash) {}
- ZoneList<Literal*>* cooked_strings_;
- ZoneList<Literal*>* raw_strings_;
+ const ZoneList<const AstRawString*>* cooked_strings_;
+ const ZoneList<const AstRawString*>* raw_strings_;
int hash_;
};
@@ -2813,12 +2674,12 @@ class AstVisitor BASE_EMBEDDED {
void VisitExpressions(ZoneList<Expression*>* expressions) {
for (int i = 0; i < expressions->length(); i++) {
- // The variable statement visiting code may pass NULL expressions
+ // The variable statement visiting code may pass null expressions
// to this code. Maybe this should be handled by introducing an
- // undefined expression or literal? Revisit this code if this
- // changes
+ // undefined expression or literal? Revisit this code if this
+ // changes.
Expression* expression = expressions->at(i);
- if (expression != NULL) Visit(expression);
+ if (expression != nullptr) Visit(expression);
}
}
@@ -2879,69 +2740,6 @@ class AstVisitor BASE_EMBEDDED {
\
private:
-#define DEFINE_AST_REWRITER_SUBCLASS_MEMBERS() \
- public: \
- AstNode* Rewrite(AstNode* node) { \
- DCHECK_NULL(replacement_); \
- DCHECK_NOT_NULL(node); \
- Visit(node); \
- if (HasStackOverflow()) return node; \
- if (replacement_ == nullptr) return node; \
- AstNode* result = replacement_; \
- replacement_ = nullptr; \
- return result; \
- } \
- \
- private: \
- void InitializeAstRewriter(Isolate* isolate) { \
- InitializeAstVisitor(isolate); \
- replacement_ = nullptr; \
- } \
- \
- void InitializeAstRewriter(uintptr_t stack_limit) { \
- InitializeAstVisitor(stack_limit); \
- replacement_ = nullptr; \
- } \
- \
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); \
- \
- protected: \
- AstNode* replacement_
-// Generic macro for rewriting things; `GET` is the expression to be
-// rewritten; `SET` is a command that should do the rewriting, i.e.
-// something sensible with the variable called `replacement`.
-#define AST_REWRITE(Type, GET, SET) \
- do { \
- DCHECK(!HasStackOverflow()); \
- DCHECK_NULL(replacement_); \
- Visit(GET); \
- if (HasStackOverflow()) return; \
- if (replacement_ == nullptr) break; \
- Type* replacement = reinterpret_cast<Type*>(replacement_); \
- do { \
- SET; \
- } while (false); \
- replacement_ = nullptr; \
- } while (false)
-
-// Macro for rewriting object properties; it assumes that `object` has
-// `property` with a public getter and setter.
-#define AST_REWRITE_PROPERTY(Type, object, property) \
- do { \
- auto _obj = (object); \
- AST_REWRITE(Type, _obj->property(), _obj->set_##property(replacement)); \
- } while (false)
-
-// Macro for rewriting list elements; it assumes that `list` has methods
-// `at` and `Set`.
-#define AST_REWRITE_LIST_ELEMENT(Type, list, index) \
- do { \
- auto _list = (list); \
- auto _index = (index); \
- AST_REWRITE(Type, _list->at(_index), _list->Set(_index, replacement)); \
- } while (false)
-
-
// ----------------------------------------------------------------------------
// AstNode factory
@@ -3100,36 +2898,38 @@ class AstNodeFactory final BASE_EMBEDDED {
}
Literal* NewStringLiteral(const AstRawString* string, int pos) {
- return new (zone_) Literal(ast_value_factory_->NewString(string), pos);
+ return new (zone_) Literal(string, pos);
}
// A JavaScript symbol (ECMA-262 edition 6).
Literal* NewSymbolLiteral(AstSymbol symbol, int pos) {
- return new (zone_) Literal(ast_value_factory_->NewSymbol(symbol), pos);
+ return new (zone_) Literal(symbol, pos);
}
- Literal* NewNumberLiteral(double number, int pos) {
- return new (zone_) Literal(ast_value_factory_->NewNumber(number), pos);
+ Literal* NewNumberLiteral(double number, int pos);
+
+ Literal* NewSmiLiteral(int number, int pos) {
+ return new (zone_) Literal(number, pos);
}
- Literal* NewSmiLiteral(uint32_t number, int pos) {
- return new (zone_) Literal(ast_value_factory_->NewSmi(number), pos);
+ Literal* NewBigIntLiteral(AstBigInt bigint, int pos) {
+ return new (zone_) Literal(bigint, pos);
}
Literal* NewBooleanLiteral(bool b, int pos) {
- return new (zone_) Literal(ast_value_factory_->NewBoolean(b), pos);
+ return new (zone_) Literal(b, pos);
}
Literal* NewNullLiteral(int pos) {
- return new (zone_) Literal(ast_value_factory_->NewNull(), pos);
+ return new (zone_) Literal(Literal::kNull, pos);
}
Literal* NewUndefinedLiteral(int pos) {
- return new (zone_) Literal(ast_value_factory_->NewUndefined(), pos);
+ return new (zone_) Literal(Literal::kUndefined, pos);
}
- Literal* NewTheHoleLiteral(int pos) {
- return new (zone_) Literal(ast_value_factory_->NewTheHole(), pos);
+ Literal* NewTheHoleLiteral() {
+ return new (zone_) Literal(Literal::kTheHole, kNoSourcePosition);
}
ObjectLiteral* NewObjectLiteral(
@@ -3198,6 +2998,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) Call(expression, arguments, pos, possibly_eval);
}
+ Call* NewTaggedTemplate(Expression* expression,
+ ZoneList<Expression*>* arguments, int pos) {
+ return new (zone_)
+ Call(expression, arguments, pos, Call::TaggedTemplateTag::kTrue);
+ }
+
CallNew* NewCallNew(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
@@ -3232,6 +3038,11 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) BinaryOperation(op, left, right, pos);
}
+ NaryOperation* NewNaryOperation(Token::Value op, Expression* first,
+ size_t initial_subsequent_size) {
+ return new (zone_) NaryOperation(zone_, op, first, initial_subsequent_size);
+ }
+
CountOperation* NewCountOperation(Token::Value op,
bool is_prefix,
Expression* expr,
@@ -3258,9 +3069,10 @@ class AstNodeFactory final BASE_EMBEDDED {
Conditional(condition, then_expression, else_expression, position);
}
- RewritableExpression* NewRewritableExpression(Expression* expression) {
+ RewritableExpression* NewRewritableExpression(Expression* expression,
+ Scope* scope) {
DCHECK_NOT_NULL(expression);
- return new (zone_) RewritableExpression(expression);
+ return new (zone_) RewritableExpression(expression, scope);
}
Assignment* NewAssignment(Token::Value op,
@@ -3343,18 +3155,19 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral::Property(key, value, kind, is_static, is_computed_name);
}
- ClassLiteral* NewClassLiteral(Scope* scope, Variable* variable,
- Expression* extends,
- FunctionLiteral* constructor,
- ZoneList<ClassLiteral::Property*>* properties,
- int start_position, int end_position,
- bool has_name_static_property,
- bool has_static_computed_names,
- bool is_anonymous) {
- return new (zone_)
- ClassLiteral(scope, variable, extends, constructor, properties,
- start_position, end_position, has_name_static_property,
- has_static_computed_names, is_anonymous);
+ ClassLiteral* NewClassLiteral(
+ Scope* scope, Variable* variable, Expression* extends,
+ FunctionLiteral* constructor,
+ ZoneList<ClassLiteral::Property*>* properties,
+ FunctionLiteral* static_fields_initializer,
+ FunctionLiteral* instance_fields_initializer_function, int start_position,
+ int end_position, bool has_name_static_property,
+ bool has_static_computed_names, bool is_anonymous) {
+ return new (zone_) ClassLiteral(
+ scope, variable, extends, constructor, properties,
+ static_fields_initializer, instance_fields_initializer_function,
+ start_position, end_position, has_name_static_property,
+ has_static_computed_names, is_anonymous);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3401,9 +3214,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) GetIterator(iterable, hint, pos);
}
- GetTemplateObject* NewGetTemplateObject(ZoneList<Literal*>* cooked_strings,
- ZoneList<Literal*>* raw_strings,
- int hash, int pos) {
+ GetTemplateObject* NewGetTemplateObject(
+ const ZoneList<const AstRawString*>* cooked_strings,
+ const ZoneList<const AstRawString*>* raw_strings, int hash, int pos) {
return new (zone_)
GetTemplateObject(cooked_strings, raw_strings, hash, pos);
}
@@ -3412,6 +3225,11 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) ImportCallExpression(args, pos);
}
+ InitializeClassFieldsStatement* NewInitializeClassFieldsStatement(
+ ZoneList<ClassLiteralProperty*>* args, int pos) {
+ return new (zone_) InitializeClassFieldsStatement(args, pos);
+ }
+
Zone* zone() const { return zone_; }
void set_zone(Zone* zone) { zone_ = zone; }
@@ -3428,38 +3246,39 @@ class AstNodeFactory final BASE_EMBEDDED {
// Type testing & conversion functions overridden by concrete subclasses.
// Inline functions for AstNode.
-#define DECLARE_NODE_FUNCTIONS(type) \
- bool AstNode::Is##type() const { \
- NodeType mine = node_type(); \
- if (mine == AstNode::kRewritableExpression && \
- AstNode::k##type != AstNode::kRewritableExpression) \
- mine = reinterpret_cast<const RewritableExpression*>(this) \
- ->expression() \
- ->node_type(); \
- return mine == AstNode::k##type; \
- } \
- type* AstNode::As##type() { \
- NodeType mine = node_type(); \
- AstNode* result = this; \
- if (mine == AstNode::kRewritableExpression && \
- AstNode::k##type != AstNode::kRewritableExpression) { \
- result = \
- reinterpret_cast<const RewritableExpression*>(this)->expression(); \
- mine = result->node_type(); \
- } \
- return mine == AstNode::k##type ? reinterpret_cast<type*>(result) : NULL; \
- } \
- const type* AstNode::As##type() const { \
- NodeType mine = node_type(); \
- const AstNode* result = this; \
- if (mine == AstNode::kRewritableExpression && \
- AstNode::k##type != AstNode::kRewritableExpression) { \
- result = \
- reinterpret_cast<const RewritableExpression*>(this)->expression(); \
- mine = result->node_type(); \
- } \
- return mine == AstNode::k##type ? reinterpret_cast<const type*>(result) \
- : NULL; \
+#define DECLARE_NODE_FUNCTIONS(type) \
+ bool AstNode::Is##type() const { \
+ NodeType mine = node_type(); \
+ if (mine == AstNode::kRewritableExpression && \
+ AstNode::k##type != AstNode::kRewritableExpression) \
+ mine = reinterpret_cast<const RewritableExpression*>(this) \
+ ->expression() \
+ ->node_type(); \
+ return mine == AstNode::k##type; \
+ } \
+ type* AstNode::As##type() { \
+ NodeType mine = node_type(); \
+ AstNode* result = this; \
+ if (mine == AstNode::kRewritableExpression && \
+ AstNode::k##type != AstNode::kRewritableExpression) { \
+ result = \
+ reinterpret_cast<const RewritableExpression*>(this)->expression(); \
+ mine = result->node_type(); \
+ } \
+ return mine == AstNode::k##type ? reinterpret_cast<type*>(result) \
+ : nullptr; \
+ } \
+ const type* AstNode::As##type() const { \
+ NodeType mine = node_type(); \
+ const AstNode* result = this; \
+ if (mine == AstNode::kRewritableExpression && \
+ AstNode::k##type != AstNode::kRewritableExpression) { \
+ result = \
+ reinterpret_cast<const RewritableExpression*>(this)->expression(); \
+ mine = result->node_type(); \
+ } \
+ return mine == AstNode::k##type ? reinterpret_cast<const type*>(result) \
+ : nullptr; \
}
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
diff --git a/deps/v8/src/ast/context-slot-cache.h b/deps/v8/src/ast/context-slot-cache.h
index b4e3590919..bf4a6d709e 100644
--- a/deps/v8/src/ast/context-slot-cache.h
+++ b/deps/v8/src/ast/context-slot-cache.h
@@ -36,8 +36,8 @@ class ContextSlotCache {
private:
ContextSlotCache() {
for (int i = 0; i < kLength; ++i) {
- keys_[i].data = NULL;
- keys_[i].name = NULL;
+ keys_[i].data = nullptr;
+ keys_[i].name = nullptr;
values_[i] = static_cast<uint32_t>(kNotFound);
}
}
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index d44bb46c75..465eca447f 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -21,8 +21,8 @@ class ModuleDescriptor : public ZoneObject {
public:
explicit ModuleDescriptor(Zone* zone)
: module_requests_(zone),
- special_exports_(1, zone),
- namespace_imports_(1, zone),
+ special_exports_(zone),
+ namespace_imports_(zone),
regular_exports_(zone),
regular_imports_(zone) {}
@@ -130,7 +130,7 @@ class ModuleDescriptor : public ZoneObject {
}
// Namespace imports.
- const ZoneList<const Entry*>& namespace_imports() const {
+ const ZoneVector<const Entry*>& namespace_imports() const {
return namespace_imports_;
}
@@ -140,7 +140,7 @@ class ModuleDescriptor : public ZoneObject {
}
// Star exports and explicitly indirect exports.
- const ZoneList<const Entry*>& special_exports() const {
+ const ZoneVector<const Entry*>& special_exports() const {
return special_exports_;
}
@@ -161,7 +161,7 @@ class ModuleDescriptor : public ZoneObject {
void AddSpecialExport(const Entry* entry, Zone* zone) {
DCHECK_NULL(entry->local_name);
DCHECK_LE(0, entry->module_request);
- special_exports_.Add(entry, zone);
+ special_exports_.push_back(entry);
}
void AddRegularImport(Entry* entry) {
@@ -179,7 +179,7 @@ class ModuleDescriptor : public ZoneObject {
DCHECK_NULL(entry->export_name);
DCHECK_NOT_NULL(entry->local_name);
DCHECK_LE(0, entry->module_request);
- namespace_imports_.Add(entry, zone);
+ namespace_imports_.push_back(entry);
}
Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
@@ -188,10 +188,9 @@ class ModuleDescriptor : public ZoneObject {
Handle<ModuleInfo> module_info);
private:
- // TODO(neis): Use STL datastructure instead of ZoneList?
ZoneMap<const AstRawString*, ModuleRequest> module_requests_;
- ZoneList<const Entry*> special_exports_;
- ZoneList<const Entry*> namespace_imports_;
+ ZoneVector<const Entry*> special_exports_;
+ ZoneVector<const Entry*> namespace_imports_;
ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
ZoneMap<const AstRawString*, Entry*> regular_imports_;
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index b3ab10aab9..f01ade8896 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -146,11 +146,11 @@ void CallPrinter::VisitWhileStatement(WhileStatement* node) {
void CallPrinter::VisitForStatement(ForStatement* node) {
- if (node->init() != NULL) {
+ if (node->init() != nullptr) {
Find(node->init());
}
- if (node->cond() != NULL) Find(node->cond());
- if (node->next() != NULL) Find(node->next());
+ if (node->cond() != nullptr) Find(node->cond());
+ if (node->next() != nullptr) Find(node->next());
Find(node->body());
}
@@ -198,6 +198,12 @@ void CallPrinter::VisitClassLiteral(ClassLiteral* node) {
}
}
+void CallPrinter::VisitInitializeClassFieldsStatement(
+ InitializeClassFieldsStatement* node) {
+ for (int i = 0; i < node->fields()->length(); i++) {
+ Find(node->fields()->at(i)->value());
+ }
+}
void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {}
@@ -213,7 +219,9 @@ void CallPrinter::VisitConditional(Conditional* node) {
void CallPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->value(), true);
+ // TODO(adamk): Teach Literal how to print its values without
+ // allocating on the heap.
+ PrintLiteral(node->BuildValue(isolate_), true);
}
@@ -279,10 +287,13 @@ void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); }
void CallPrinter::VisitProperty(Property* node) {
Expression* key = node->key();
Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->value()->IsInternalizedString()) {
+ if (literal != nullptr &&
+ literal->BuildValue(isolate_)->IsInternalizedString()) {
Find(node->obj(), true);
Print(".");
- PrintLiteral(literal->value(), false);
+ // TODO(adamk): Teach Literal how to print its values without
+ // allocating on the heap.
+ PrintLiteral(literal->BuildValue(isolate_), false);
} else {
Find(node->obj(), true);
Print("[");
@@ -377,6 +388,17 @@ void CallPrinter::VisitBinaryOperation(BinaryOperation* node) {
Print(")");
}
+void CallPrinter::VisitNaryOperation(NaryOperation* node) {
+ Print("(");
+ Find(node->first(), true);
+ for (size_t i = 0; i < node->subsequent_length(); ++i) {
+ Print(" ");
+ Print(Token::String(node->op()));
+ Print(" ");
+ Find(node->subsequent(i), true);
+ }
+ Print(")");
+}
void CallPrinter::VisitCompareOperation(CompareOperation* node) {
Print("(");
@@ -442,7 +464,7 @@ void CallPrinter::VisitRewritableExpression(RewritableExpression* node) {
void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
- if (statements == NULL) return;
+ if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Find(statements->at(i));
}
@@ -488,16 +510,6 @@ void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
#ifdef DEBUG
-// A helper for ast nodes that use FeedbackSlots.
-static int FormatSlotNode(Vector<char>* buf, Expression* node,
- const char* node_name, FeedbackSlot slot) {
- int pos = SNPrintF(*buf, "%s", node_name);
- if (!slot.IsInvalid()) {
- pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
- }
- return pos;
-}
-
const char* AstPrinter::Print(AstNode* node) {
Init();
Visit(node);
@@ -506,7 +518,7 @@ const char* AstPrinter::Print(AstNode* node) {
void AstPrinter::Init() {
if (size_ == 0) {
- DCHECK(output_ == NULL);
+ DCHECK_NULL(output_);
const int initial_size = 256;
output_ = NewArray<char>(initial_size);
size_ = initial_size;
@@ -542,7 +554,7 @@ void AstPrinter::Print(const char* format, ...) {
}
void AstPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
- if (labels != NULL) {
+ if (labels != nullptr) {
for (int i = 0; i < labels->length(); i++) {
PrintLiteral(labels->at(i), false);
Print(": ");
@@ -550,64 +562,70 @@ void AstPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
}
}
-void AstPrinter::PrintLiteral(MaybeHandle<Object> maybe_value, bool quote) {
- Handle<Object> value;
- if (!maybe_value.ToHandle(&value)) {
- Print("<nil>");
- return;
- }
- Object* object = *value;
- if (object->IsString()) {
- String* string = String::cast(object);
- if (quote) Print("\"");
- for (int i = 0; i < string->length(); i++) {
- Print("%c", string->Get(i));
- }
- if (quote) Print("\"");
- } else if (object->IsNull(isolate_)) {
- Print("null");
- } else if (object->IsTrue(isolate_)) {
- Print("true");
- } else if (object->IsFalse(isolate_)) {
- Print("false");
- } else if (object->IsUndefined(isolate_)) {
- Print("undefined");
- } else if (object->IsNumber()) {
- Print("%g", object->Number());
- } else if (object->IsJSObject()) {
- // regular expression
- if (object->IsJSFunction()) {
- Print("JS-Function");
- } else if (object->IsJSArray()) {
- Print("JS-array[%u]", Smi::ToInt(JSArray::cast(object)->length()));
- } else if (object->IsJSObject()) {
- Print("JS-Object");
- } else {
- Print("?UNKNOWN?");
- }
- } else if (object->IsFixedArray()) {
- Print("FixedArray");
- } else if (object->IsSymbol()) {
- // Symbols can only occur as literals if they were inserted by the parser.
- Symbol* symbol = Symbol::cast(object);
- if (symbol->name()->IsString()) {
- int length = 0;
- String* string = String::cast(symbol->name());
- std::unique_ptr<char[]> desc = string->ToCString(
- ALLOW_NULLS, FAST_STRING_TRAVERSAL, 0, string->length(), &length);
- Print("Symbol(%*s)", length, desc.get());
- } else {
- Print("Symbol()");
- }
- } else {
- Print("<unknown literal %p>", static_cast<void*>(object));
+void AstPrinter::PrintLiteral(Literal* literal, bool quote) {
+ switch (literal->type()) {
+ case Literal::kString:
+ PrintLiteral(literal->AsRawString(), quote);
+ break;
+ case Literal::kSymbol:
+ const char* symbol;
+ switch (literal->AsSymbol()) {
+ case AstSymbol::kHomeObjectSymbol:
+ symbol = "HomeObjectSymbol";
+ }
+ Print("%s", symbol);
+ break;
+ case Literal::kSmi:
+ Print("%d", Smi::ToInt(literal->AsSmiLiteral()));
+ break;
+ case Literal::kHeapNumber:
+ Print("%g", literal->AsNumber());
+ break;
+ case Literal::kBigInt:
+ Print("%sn", literal->AsBigInt().c_str());
+ break;
+ case Literal::kNull:
+ Print("null");
+ break;
+ case Literal::kUndefined:
+ Print("undefined");
+ break;
+ case Literal::kTheHole:
+ Print("the hole");
+ break;
+ case Literal::kBoolean:
+ if (literal->ToBooleanIsTrue()) {
+ Print("true");
+ } else {
+ Print("false");
+ }
+ break;
}
}
void AstPrinter::PrintLiteral(const AstRawString* value, bool quote) {
- PrintLiteral(value->string(), quote);
+ if (quote) Print("\"");
+ if (value != nullptr) {
+ const char* format = value->is_one_byte() ? "%c" : "%lc";
+ const int increment = value->is_one_byte() ? 1 : 2;
+ const unsigned char* raw_bytes = value->raw_data();
+ for (int i = 0; i < value->length(); i += increment) {
+ Print(format, raw_bytes[i]);
+ }
+ }
+ if (quote) Print("\"");
}
+void AstPrinter::PrintLiteral(const AstConsString* value, bool quote) {
+ if (quote) Print("\"");
+ if (value != nullptr) {
+ std::forward_list<const AstRawString*> strings = value->ToRawStrings();
+ for (const AstRawString* string : strings) {
+ PrintLiteral(string, false);
+ }
+ }
+ if (quote) Print("\"");
+}
//-----------------------------------------------------------------------------
@@ -638,13 +656,13 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter(Isolate* isolate)
- : isolate_(isolate), output_(nullptr), size_(0), pos_(0), indent_(0) {
- InitializeAstVisitor(isolate);
+AstPrinter::AstPrinter(uintptr_t stack_limit)
+ : output_(nullptr), size_(0), pos_(0), indent_(0) {
+ InitializeAstVisitor(stack_limit);
}
AstPrinter::~AstPrinter() {
- DCHECK(indent_ == 0);
+ DCHECK_EQ(indent_, 0);
DeleteArray(output_);
}
@@ -656,20 +674,33 @@ void AstPrinter::PrintIndented(const char* txt) {
Print("%s", txt);
}
-void AstPrinter::PrintLiteralIndented(const char* info,
- MaybeHandle<Object> maybe_value,
+void AstPrinter::PrintLiteralIndented(const char* info, Literal* literal,
bool quote) {
PrintIndented(info);
Print(" ");
- PrintLiteral(maybe_value, quote);
+ PrintLiteral(literal, quote);
Print("\n");
}
+void AstPrinter::PrintLiteralIndented(const char* info,
+ const AstRawString* value, bool quote) {
+ PrintIndented(info);
+ Print(" ");
+ PrintLiteral(value, quote);
+ Print("\n");
+}
-void AstPrinter::PrintLiteralWithModeIndented(const char* info,
- Variable* var,
- Handle<Object> value) {
- if (var == NULL) {
+void AstPrinter::PrintLiteralIndented(const char* info,
+ const AstConsString* value, bool quote) {
+ PrintIndented(info);
+ Print(" ");
+ PrintLiteral(value, quote);
+ Print("\n");
+}
+
+void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
+ const AstRawString* value) {
+ if (var == nullptr) {
PrintLiteralIndented(info, value, true);
} else {
EmbeddedVector<char, 256> buf;
@@ -683,7 +714,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) {
- if (labels == NULL || labels->length() == 0) return;
+ if (labels == nullptr || labels->length() == 0) return;
PrintIndented("LABELS ");
PrintLabels(labels);
Print("\n");
@@ -703,8 +734,13 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
Print(" %d\n", program->kind());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", program->suspend_count());
- PrintLiteralIndented("NAME", program->name(), true);
- PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
+ PrintLiteralIndented("NAME", program->raw_name(), true);
+ if (program->raw_inferred_name()) {
+ PrintLiteralIndented("INFERRED NAME", program->raw_inferred_name(), true);
+ }
+ if (program->requires_instance_fields_initializer()) {
+ Print(" REQUIRES INSTANCE FIELDS INITIALIZER\n");
+ }
PrintParameters(program->scope());
PrintDeclarations(program->scope()->declarations());
PrintStatements(program->body());
@@ -714,7 +750,7 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
void AstPrinter::PrintOut(Isolate* isolate, AstNode* node) {
- AstPrinter printer(isolate);
+ AstPrinter printer(isolate->stack_guard()->real_climit());
printer.Init();
printer.Visit(node);
PrintF("%s", printer.output_);
@@ -732,7 +768,7 @@ void AstPrinter::PrintParameters(DeclarationScope* scope) {
IndentedScope indent(this, "PARAMS");
for (int i = 0; i < scope->num_parameters(); i++) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
- scope->parameter(i)->name());
+ scope->parameter(i)->raw_name());
}
}
}
@@ -763,16 +799,16 @@ void AstPrinter::VisitBlock(Block* node) {
// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
PrintLiteralWithModeIndented("VARIABLE", node->proxy()->var(),
- node->proxy()->name());
+ node->proxy()->raw_name());
}
// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
PrintIndented("FUNCTION ");
- PrintLiteral(node->proxy()->name(), true);
+ PrintLiteral(node->proxy()->raw_name(), true);
Print(" = function ");
- PrintLiteral(node->fun()->name(), false);
+ PrintLiteral(node->fun()->raw_name(), false);
Print("\n");
}
@@ -925,7 +961,7 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
}
Print(" %s\n", prediction);
PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
- node->scope()->catch_variable()->name());
+ node->scope()->catch_variable()->raw_name());
PrintIndentedVisit("CATCH", node->catch_block());
}
@@ -942,8 +978,8 @@ void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
IndentedScope indent(this, "FUNC LITERAL", node->position());
- PrintLiteralIndented("NAME", node->name(), false);
- PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
+ PrintLiteralIndented("NAME", node->raw_name(), false);
+ PrintLiteralIndented("INFERRED NAME", node->raw_inferred_name(), false);
PrintParameters(node->scope());
// We don't want to see the function literal in this case: it
// will be printed via PrintProgram when the code for it is
@@ -954,13 +990,27 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
IndentedScope indent(this, "CLASS LITERAL", node->position());
- PrintLiteralIndented("NAME", node->constructor()->name(), false);
+ PrintLiteralIndented("NAME", node->constructor()->raw_name(), false);
if (node->extends() != nullptr) {
PrintIndentedVisit("EXTENDS", node->extends());
}
+ if (node->static_fields_initializer() != nullptr) {
+ PrintIndentedVisit("STATIC FIELDS INITIALIZER",
+ node->static_fields_initializer());
+ }
+ if (node->instance_fields_initializer_function() != nullptr) {
+ PrintIndentedVisit("INSTANCE FIELDS INITIALIZER",
+ node->instance_fields_initializer_function());
+ }
PrintClassProperties(node->properties());
}
+void AstPrinter::VisitInitializeClassFieldsStatement(
+ InitializeClassFieldsStatement* node) {
+ IndentedScope indent(this, "INITIALIZE CLASS FIELDS", node->position());
+ PrintClassProperties(node->fields());
+}
+
void AstPrinter::PrintClassProperties(
ZoneList<ClassLiteral::Property*>* properties) {
for (int i = 0; i < properties->length(); i++) {
@@ -992,7 +1042,7 @@ void AstPrinter::PrintClassProperties(
void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
IndentedScope indent(this, "NATIVE FUNC LITERAL", node->position());
- PrintLiteralIndented("NAME", node->name(), false);
+ PrintLiteralIndented("NAME", node->raw_name(), false);
}
@@ -1010,19 +1060,16 @@ void AstPrinter::VisitConditional(Conditional* node) {
}
-// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitLiteral(Literal* node) {
- PrintLiteralIndented("LITERAL", node->value(), true);
+ PrintLiteralIndented("LITERAL", node, true);
}
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
IndentedScope indent(this, "REGEXP LITERAL", node->position());
- EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
- PrintIndented(buf.start());
- PrintLiteralIndented("PATTERN", node->pattern(), false);
+ PrintLiteralIndented("PATTERN", node->raw_pattern(), false);
int i = 0;
+ EmbeddedVector<char, 128> buf;
if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
@@ -1037,9 +1084,6 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
IndentedScope indent(this, "OBJ LITERAL", node->position());
- EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
- PrintIndented(buf.start());
PrintObjectProperties(node->properties());
}
@@ -1082,10 +1126,6 @@ void AstPrinter::PrintObjectProperties(
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
IndentedScope indent(this, "ARRAY LITERAL", node->position());
-
- EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
- PrintIndented(buf.start());
if (node->values()->length() > 0) {
IndentedScope indent(this, "VALUES", node->position());
for (int i = 0; i < node->values()->length(); i++) {
@@ -1097,12 +1137,11 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
EmbeddedVector<char, 128> buf;
- int pos =
- FormatSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
+ int pos = SNPrintF(buf, "VAR PROXY");
if (!node->is_resolved()) {
SNPrintF(buf + pos, " unresolved");
- PrintLiteralWithModeIndented(buf.start(), nullptr, node->name());
+ PrintLiteralWithModeIndented(buf.start(), nullptr, node->raw_name());
} else {
Variable* var = node->var();
switch (var->location()) {
@@ -1125,7 +1164,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
SNPrintF(buf + pos, " module");
break;
}
- PrintLiteralWithModeIndented(buf.start(), var, node->name());
+ PrintLiteralWithModeIndented(buf.start(), var, node->raw_name());
}
}
@@ -1169,14 +1208,17 @@ void AstPrinter::VisitThrow(Throw* node) {
void AstPrinter::VisitProperty(Property* node) {
EmbeddedVector<char, 128> buf;
- FormatSlotNode(&buf, node, "PROPERTY", node->PropertyFeedbackSlot());
+ SNPrintF(buf, "PROPERTY");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->obj());
- Literal* literal = node->key()->AsLiteral();
- if (literal != NULL && literal->value()->IsInternalizedString()) {
- PrintLiteralIndented("NAME", literal->value(), false);
+ LhsKind property_kind = Property::GetAssignType(node);
+ if (property_kind == NAMED_PROPERTY ||
+ property_kind == NAMED_SUPER_PROPERTY) {
+ PrintLiteralIndented("NAME", node->key()->AsLiteral(), false);
} else {
+ DCHECK(property_kind == KEYED_PROPERTY ||
+ property_kind == KEYED_SUPER_PROPERTY);
PrintIndentedVisit("KEY", node->key());
}
}
@@ -1184,7 +1226,7 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
- FormatSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
+ SNPrintF(buf, "CALL");
IndentedScope indent(this, buf.start());
Visit(node->expression());
@@ -1229,6 +1271,13 @@ void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
Visit(node->right());
}
+void AstPrinter::VisitNaryOperation(NaryOperation* node) {
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
+ Visit(node->first());
+ for (size_t i = 0; i < node->subsequent_length(); ++i) {
+ Visit(node->subsequent(i));
+ }
+}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
IndentedScope indent(this, Token::Name(node->op()), node->position());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 58849a6052..97c2437877 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -64,7 +64,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
class AstPrinter final : public AstVisitor<AstPrinter> {
public:
- explicit AstPrinter(Isolate* isolate);
+ explicit AstPrinter(uintptr_t stack_limit);
~AstPrinter();
// The following routines print a node into a string.
@@ -89,7 +89,8 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void PrintLabels(ZoneList<const AstRawString*>* labels);
void PrintLiteral(const AstRawString* value, bool quote);
- void PrintLiteral(MaybeHandle<Object> maybe_value, bool quote);
+ void PrintLiteral(const AstConsString* value, bool quote);
+ void PrintLiteral(Literal* literal, bool quote);
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, AstNode* node);
@@ -98,11 +99,13 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void PrintParameters(DeclarationScope* scope);
void PrintArguments(ZoneList<Expression*>* arguments);
void PrintCaseClause(CaseClause* clause);
- void PrintLiteralIndented(const char* info, MaybeHandle<Object> maybe_value,
+ void PrintLiteralIndented(const char* info, Literal* literal, bool quote);
+ void PrintLiteralIndented(const char* info, const AstRawString* value,
bool quote);
- void PrintLiteralWithModeIndented(const char* info,
- Variable* var,
- Handle<Object> value);
+ void PrintLiteralIndented(const char* info, const AstConsString* value,
+ bool quote);
+ void PrintLiteralWithModeIndented(const char* info, Variable* var,
+ const AstRawString* value);
void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
void PrintObjectProperties(ZoneList<ObjectLiteral::Property*>* properties);
void PrintClassProperties(ZoneList<ClassLiteral::Property*>* properties);
@@ -112,7 +115,6 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- Isolate* isolate_;
char* output_; // output string buffer
int size_; // output_ size
int pos_; // current printing position
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 07eacd3fe9..d012ec90f1 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -98,12 +98,12 @@ void VariableMap::Add(Zone* zone, Variable* var) {
Variable* VariableMap::Lookup(const AstRawString* name) {
Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->Hash());
- if (p != NULL) {
+ if (p != nullptr) {
DCHECK(reinterpret_cast<const AstRawString*>(p->key) == name);
- DCHECK(p->value != NULL);
+ DCHECK_NOT_NULL(p->value);
return reinterpret_cast<Variable*>(p->value);
}
- return NULL;
+ return nullptr;
}
void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
@@ -197,7 +197,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
kModule) {
Zone* zone = ast_value_factory->zone();
module_descriptor_ = new (zone) ModuleDescriptor(zone);
- set_language_mode(STRICT);
+ set_language_mode(LanguageMode::kStrict);
DeclareThis(ast_value_factory);
}
@@ -208,7 +208,7 @@ ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
Isolate* isolate = scope_info->GetIsolate();
Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
- set_language_mode(STRICT);
+ set_language_mode(LanguageMode::kStrict);
module_descriptor_ = new (zone) ModuleDescriptor(zone);
// Deserialize special exports.
@@ -339,7 +339,7 @@ void Scope::SetDefaults() {
num_stack_slots_ = 0;
num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
- set_language_mode(SLOPPY);
+ set_language_mode(LanguageMode::kSloppy);
scope_calls_eval_ = false;
scope_nonlinear_ = false;
@@ -605,12 +605,10 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
auto declaration =
factory->NewVariableDeclaration(proxy, kNoSourcePosition);
// Based on the preceding checks, it doesn't matter what we pass as
- // allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
DeclareVariable(declaration, VAR,
- Variable::DefaultInitializationFlag(VAR), false, nullptr,
- &ok);
+ Variable::DefaultInitializationFlag(VAR), nullptr, &ok);
DCHECK(ok);
} else {
DCHECK(is_being_lazily_parsed_);
@@ -648,9 +646,12 @@ void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
}
void DeclarationScope::Analyze(ParseInfo* info) {
- RuntimeCallTimerScope runtimeTimer(info->runtime_call_stats(),
- &RuntimeCallStats::CompileScopeAnalysis);
- DCHECK(info->literal() != NULL);
+ RuntimeCallTimerScope runtimeTimer(
+ info->runtime_call_stats(),
+ info->on_background_thread()
+ ? &RuntimeCallStats::CompileBackgroundScopeAnalysis
+ : &RuntimeCallStats::CompileScopeAnalysis);
+ DCHECK_NOT_NULL(info->literal());
DeclarationScope* scope = info->literal()->scope();
base::Optional<AllowHandleDereference> allow_deref;
@@ -998,13 +999,11 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
}
Variable* Scope::Lookup(const AstRawString* name) {
- for (Scope* scope = this;
- scope != NULL;
- scope = scope->outer_scope()) {
+ for (Scope* scope = this; scope != nullptr; scope = scope->outer_scope()) {
Variable* var = scope->LookupLocal(name);
- if (var != NULL) return var;
+ if (var != nullptr) return var;
}
- return NULL;
+ return nullptr;
}
Variable* DeclarationScope::DeclareParameter(
@@ -1080,7 +1079,6 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
Variable* Scope::DeclareVariable(
Declaration* declaration, VariableMode mode, InitializationFlag init,
- bool allow_harmony_restrictive_generators,
bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
@@ -1089,8 +1087,8 @@ Variable* Scope::DeclareVariable(
if (mode == VAR && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
- declaration, mode, init, allow_harmony_restrictive_generators,
- sloppy_mode_block_scope_function_redefinition, ok);
+ declaration, mode, init, sloppy_mode_block_scope_function_redefinition,
+ ok);
}
DCHECK(!is_catch_scope());
DCHECK(!is_with_scope());
@@ -1098,7 +1096,7 @@ Variable* Scope::DeclareVariable(
(IsLexicalVariableMode(mode) && is_block_scope()));
VariableProxy* proxy = declaration->proxy();
- DCHECK(proxy->raw_name() != NULL);
+ DCHECK_NOT_NULL(proxy->raw_name());
const AstRawString* name = proxy->raw_name();
bool is_function_declaration = declaration->IsFunctionDeclaration();
@@ -1125,7 +1123,7 @@ Variable* Scope::DeclareVariable(
} else {
// Declare the variable in the declaration scope.
var = LookupLocal(name);
- if (var == NULL) {
+ if (var == nullptr) {
// Declare the name.
VariableKind kind = NORMAL_VARIABLE;
if (is_function_declaration) {
@@ -1151,8 +1149,7 @@ Variable* Scope::DeclareVariable(
map->Lookup(const_cast<AstRawString*>(name),
name->Hash()) != nullptr &&
!IsAsyncFunction(function_kind) &&
- !(allow_harmony_restrictive_generators &&
- IsGeneratorFunction(function_kind));
+ !IsGeneratorFunction(function_kind);
}
if (duplicate_allowed) {
*sloppy_mode_block_scope_function_redefinition = true;
@@ -1391,7 +1388,7 @@ bool DeclarationScope::AllowsLazyCompilation() const {
int Scope::ContextChainLength(Scope* scope) const {
int n = 0;
for (const Scope* s = this; s != scope; s = s->outer_scope_) {
- DCHECK(s != NULL); // scope must be in the scope chain
+ DCHECK_NOT_NULL(s); // scope must be in the scope chain
if (s->NeedsContext()) n++;
}
return n;
@@ -1446,16 +1443,6 @@ bool Scope::NeedsScopeInfo() const {
return NeedsContext();
}
-ModuleScope* Scope::GetModuleScope() {
- Scope* scope = this;
- DCHECK(!scope->is_script_scope());
- while (!scope->is_module_scope()) {
- scope = scope->outer_scope();
- DCHECK_NOT_NULL(scope);
- }
- return scope->AsModuleScope();
-}
-
DeclarationScope* Scope::GetReceiverScope() {
Scope* scope = this;
while (!scope->is_script_scope() &&
@@ -1544,7 +1531,10 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
VariableProxy* unresolved = nullptr;
- if (!outer_scope_->is_script_scope() || FLAG_preparser_scope_analysis) {
+ if (!outer_scope_->is_script_scope() ||
+ (FLAG_preparser_scope_analysis &&
+ produced_preparsed_scope_data_ != nullptr &&
+ produced_preparsed_scope_data_->ContainsInnerFunctions())) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
@@ -1556,12 +1546,6 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
unresolved = copy;
}
- // Clear arguments_ if unused. This is used as a signal for optimization.
- if (arguments_ != nullptr &&
- !(MustAllocate(arguments_) && !has_arguments_parameter_)) {
- arguments_ = nullptr;
- }
-
// Migrate function_ to the right Zone.
if (function_ != nullptr) {
function_ = ast_node_factory->CopyVariable(function_);
@@ -1982,8 +1966,8 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
}
// We should always have valid source positions.
- DCHECK(var->initializer_position() != kNoSourcePosition);
- DCHECK(proxy->position() != kNoSourcePosition);
+ DCHECK_NE(var->initializer_position(), kNoSourcePosition);
+ DCHECK_NE(proxy->position(), kNoSourcePosition);
if (var->scope()->is_nonlinear() ||
var->initializer_position() >= proxy->position()) {
@@ -2026,7 +2010,7 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
// unresolved references remaining, they just need to be resolved in outer
// scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
- DCHECK(variables_.occupancy() == 0);
+ DCHECK_EQ(variables_.occupancy(), 0);
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
@@ -2251,8 +2235,10 @@ void DeclarationScope::AllocateLocals() {
// allocated in the context, it must be the last slot in the context,
// because of the current ScopeInfo implementation (see
// ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
- if (function_ != nullptr) {
+ if (function_ != nullptr && MustAllocate(function_)) {
AllocateNonParameterLocal(function_);
+ } else {
+ function_ = nullptr;
}
DCHECK(!has_rest_ || !MustAllocate(rest_parameter()) ||
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index fe15508027..bcfd2187df 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -173,7 +173,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// ---------------------------------------------------------------------------
// Declarations
- // Lookup a variable in this scope. Returns the variable or NULL if not found.
+ // Lookup a variable in this scope. Returns the variable or nullptr if not
+ // found.
Variable* LookupLocal(const AstRawString* name) {
Variable* result = variables_.Lookup(name);
if (result != nullptr || scope_info_.is_null()) return result;
@@ -183,7 +184,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Variable* LookupInScopeInfo(const AstRawString* name);
// Lookup a variable in this scope or outer scopes.
- // Returns the variable or NULL if not found.
+ // Returns the variable or nullptr if not found.
Variable* Lookup(const AstRawString* name);
// Declare a local variable in this scope. If the variable has been
@@ -195,7 +196,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
InitializationFlag init,
- bool allow_harmony_restrictive_generators,
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
@@ -385,7 +385,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
- LanguageMode language_mode() const { return is_strict_ ? STRICT : SLOPPY; }
+ LanguageMode language_mode() const {
+ return is_strict_ ? LanguageMode::kStrict : LanguageMode::kSloppy;
+ }
// inner_scope() and sibling() together implement the inner scope list of a
// scope. Inner scope points to the an inner scope of the function, and
@@ -393,7 +395,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Scope* inner_scope() const { return inner_scope_; }
Scope* sibling() const { return sibling_; }
- // The scope immediately surrounding this scope, or NULL.
+ // The scope immediately surrounding this scope, or nullptr.
Scope* outer_scope() const { return outer_scope_; }
Variable* catch_variable() const {
@@ -439,9 +441,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// 'this' is bound, and what determines the function kind.
DeclarationScope* GetReceiverScope();
- // Find the module scope, assuming there is one.
- ModuleScope* GetModuleScope();
-
// Find the innermost outer scope that needs a context.
Scope* GetOuterScopeWithContext();
@@ -517,7 +516,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Zone* zone_;
// Scope tree.
- Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
+ Scope* outer_scope_; // the immediately enclosing outer scope, or nullptr
Scope* inner_scope_; // an inner scope of this scope
Scope* sibling_; // a sibling inner scope of the outer scope of this scope.
@@ -564,7 +563,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Scope-specific information computed during parsing.
//
// The language mode of this scope.
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
bool is_strict_ : 1;
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
@@ -762,11 +761,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
Variable* new_target_var() { return new_target_; }
// The variable holding the function literal for named function
- // literals, or NULL. Only valid for function scopes.
- Variable* function_var() const {
- DCHECK(is_function_scope());
- return function_;
- }
+ // literals, or nullptr. Only valid for function scopes.
+ Variable* function_var() const { return function_; }
Variable* generator_object_var() const {
DCHECK(is_function_scope() || is_module_scope());
@@ -813,7 +809,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
has_simple_parameters_ = false;
}
- // The local variable 'arguments' if we need to allocate it; NULL otherwise.
+ // The local variable 'arguments' if we need to allocate it; nullptr
+ // otherwise.
Variable* arguments() const {
DCHECK(!is_arrow_scope() || arguments_ == nullptr);
return arguments_;
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 09df57ad54..4d58c8fed9 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -43,7 +43,7 @@ class Variable final : public ZoneObject {
// The source code for an eval() call may refer to a variable that is
// in an outer scope about which we don't know anything (it may not
- // be the script scope). scope() is NULL in that case. Currently the
+ // be the script scope). scope() is nullptr in that case. Currently the
// scope is only used to follow the context chain length.
Scope* scope() const { return scope_; }
@@ -137,7 +137,7 @@ class Variable final : public ZoneObject {
}
Variable* local_if_not_shadowed() const {
- DCHECK(mode() == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
+ DCHECK(mode() == DYNAMIC_LOCAL && local_if_not_shadowed_ != nullptr);
return local_if_not_shadowed_;
}
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/bailout-reason.cc
index cd01851380..ac7bb929b9 100644
--- a/deps/v8/src/bailout-reason.cc
+++ b/deps/v8/src/bailout-reason.cc
@@ -9,7 +9,7 @@ namespace v8 {
namespace internal {
const char* GetBailoutReason(BailoutReason reason) {
- DCHECK(reason < kLastErrorMessage);
+ DCHECK_LT(reason, kLastErrorMessage);
#define ERROR_MESSAGES_TEXTS(C, T) T,
static const char* error_messages_[] = {
ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 593b4972e1..2bb92e1a2b 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -40,8 +40,6 @@ namespace internal {
"Expected optimized code cell or optimization sentinel") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
- V(kExternalStringExpectedButNotFound, \
- "External string expected, but not found") \
V(kForOfStatement, "ForOfStatement") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
@@ -97,8 +95,6 @@ namespace internal {
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kSmiAdditionOverflow, "Smi addition overflow") \
- V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kSpreadCall, "Call with spread argument") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index 3ab70a7936..d482d75cfb 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -24,7 +24,7 @@
],
},
}],
- ['asan==1 or cfi_vptr==1 or msan==1 or tsan==1', {
+ ['asan==1 or cfi_vptr==1 or msan==1 or tsan==1 or ubsan_vptr==1', {
'variables': {
'files': [
'../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
@@ -32,7 +32,7 @@
},
}],
# Workaround for https://code.google.com/p/swarming/issues/detail?id=211
- ['asan==0 or cfi_vptr==0 or msan==0 or tsan==0', {
+ ['asan==0 or cfi_vptr==0 or msan==0 or tsan==0 or ubsan_vptr==0 ', {
'variables': {},
}],
],
diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc
index 049dc4a1b1..fedbdb2d2d 100644
--- a/deps/v8/src/base/bits.cc
+++ b/deps/v8/src/base/bits.cc
@@ -18,7 +18,7 @@ uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
if (value) --value;
// Use computation based on leading zeros if we have compiler support for that.
#if V8_HAS_BUILTIN_CLZ || V8_CC_MSVC
- return 1u << (32 - CountLeadingZeros32(value));
+ return 1u << (32 - CountLeadingZeros(value));
#else
value |= value >> 1;
value |= value >> 2;
@@ -34,7 +34,7 @@ uint64_t RoundUpToPowerOfTwo64(uint64_t value) {
if (value) --value;
// Use computation based on leading zeros if we have compiler support for that.
#if V8_HAS_BUILTIN_CLZ
- return uint64_t{1} << (64 - CountLeadingZeros64(value));
+ return uint64_t{1} << (64 - CountLeadingZeros(value));
#else
value |= value >> 1;
value |= value >> 2;
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 504be0370a..731a7181d7 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -27,98 +27,32 @@ class CheckedNumeric;
namespace bits {
-// Define overloaded |Name| for |Name32| and |Name64|, depending on the size of
-// the given value.
-//
-// The overloads are only defined for input types of size 4 and 8, respectively,
-// using enable_if and SFINAE to disable them otherwise. enable_if<bool,
-// typename> only has a "type" member if the first parameter is true, in which
-// case "type" is a typedef to the second member (here, set to "unsigned").
-// Otherwise, enable_if::type doesn't exist, making the function signature
-// invalid, and so the entire function is thrown away (without an error) due to
-// SFINAE.
-//
-// Not that we cannot simply check sizeof(T) using an if statement, as we need
-// both branches of the if to be syntactically valid even if one of the branches
-// is dead.
-#define DEFINE_32_64_OVERLOADS(Name) \
- template <typename T> \
- inline typename std::enable_if<sizeof(T) == 4, unsigned>::type Name( \
- T value) { \
- return Name##32(value); \
- } \
- \
- template <typename T> \
- inline typename std::enable_if<sizeof(T) == 8, unsigned>::type Name( \
- T value) { \
- return Name##64(value); \
- }
-
-// CountPopulation32(value) returns the number of bits set in |value|.
-inline unsigned CountPopulation32(uint32_t value) {
+// CountPopulation(value) returns the number of bits set in |value|.
+template <typename T>
+constexpr inline
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountPopulation(T value) {
#if V8_HAS_BUILTIN_POPCOUNT
- return __builtin_popcount(value);
+ return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
+ : __builtin_popcount(static_cast<uint32_t>(value));
#else
- value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
- value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
- value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
- value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
- value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+ constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
+ 0x0f0f0f0f0f0f0f0f, 0x00ff00ff00ff00ff,
+ 0x0000ffff0000ffff, 0x00000000ffffffff};
+ value = ((value >> 1) & mask[0]) + (value & mask[0]);
+ value = ((value >> 2) & mask[1]) + (value & mask[1]);
+ value = ((value >> 4) & mask[2]) + (value & mask[2]);
+ if (sizeof(T) > 1)
+ value = ((value >> (sizeof(T) > 1 ? 8 : 0)) & mask[3]) + (value & mask[3]);
+ if (sizeof(T) > 2)
+ value = ((value >> (sizeof(T) > 2 ? 16 : 0)) & mask[4]) + (value & mask[4]);
+ if (sizeof(T) > 4)
+ value = ((value >> (sizeof(T) > 4 ? 32 : 0)) & mask[5]) + (value & mask[5]);
return static_cast<unsigned>(value);
#endif
}
-
-// CountPopulation64(value) returns the number of bits set in |value|.
-inline unsigned CountPopulation64(uint64_t value) {
-#if V8_HAS_BUILTIN_POPCOUNT
- return __builtin_popcountll(value);
-#else
- return CountPopulation32(static_cast<uint32_t>(value)) +
- CountPopulation32(static_cast<uint32_t>(value >> 32));
-#endif
-}
-
-DEFINE_32_64_OVERLOADS(CountPopulation)
-
-// CountLeadingZeros32(value) returns the number of zero bits following the most
-// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
-inline unsigned CountLeadingZeros32(uint32_t value) {
-#if V8_HAS_BUILTIN_CLZ
- return value ? __builtin_clz(value) : 32;
-#elif V8_CC_MSVC
- unsigned long result; // NOLINT(runtime/int)
- if (!_BitScanReverse(&result, value)) return 32;
- return static_cast<unsigned>(31 - result);
-#else
- value = value | (value >> 1);
- value = value | (value >> 2);
- value = value | (value >> 4);
- value = value | (value >> 8);
- value = value | (value >> 16);
- return CountPopulation32(~value);
-#endif
-}
-
-
-// CountLeadingZeros64(value) returns the number of zero bits following the most
-// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 64.
-inline unsigned CountLeadingZeros64(uint64_t value) {
-#if V8_HAS_BUILTIN_CLZ
- return value ? __builtin_clzll(value) : 64;
-#else
- value = value | (value >> 1);
- value = value | (value >> 2);
- value = value | (value >> 4);
- value = value | (value >> 8);
- value = value | (value >> 16);
- value = value | (value >> 32);
- return CountPopulation64(~value);
-#endif
-}
-
-DEFINE_32_64_OVERLOADS(CountLeadingZeros)
-
// ReverseBits(value) returns |value| in reverse bit order.
template <typename T>
T ReverseBits(T value) {
@@ -132,46 +66,73 @@ T ReverseBits(T value) {
return result;
}
-// CountTrailingZeros32(value) returns the number of zero bits preceding the
-// least significant 1 bit in |value| if |value| is non-zero, otherwise it
-// returns 32.
-inline unsigned CountTrailingZeros32(uint32_t value) {
-#if V8_HAS_BUILTIN_CTZ
- return value ? __builtin_ctz(value) : 32;
-#elif V8_CC_MSVC
- unsigned long result; // NOLINT(runtime/int)
- if (!_BitScanForward(&result, value)) return 32;
- return static_cast<unsigned>(result);
+// CountLeadingZeros(value) returns the number of zero bits following the most
+// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
+// {sizeof(T) * 8}.
+template <typename T, unsigned bits = sizeof(T) * 8>
+inline constexpr
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountLeadingZeros(T value) {
+ static_assert(bits > 0, "invalid instantiation");
+#if V8_HAS_BUILTIN_CLZ
+ return value == 0
+ ? bits
+ : bits == 64
+ ? __builtin_clzll(static_cast<uint64_t>(value))
+ : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
#else
- if (value == 0) return 32;
- unsigned count = 0;
- for (value ^= value - 1; value >>= 1; ++count) {
- }
- return count;
+ // Binary search algorithm taken from "Hacker's Delight" (by Henry S. Warren,
+ // Jr.), figures 5-11 and 5-12.
+ if (bits == 1) return static_cast<unsigned>(value) ^ 1;
+ T upper_half = value >> (bits / 2);
+ T next_value = upper_half != 0 ? upper_half : value;
+ unsigned add = upper_half != 0 ? 0 : bits / 2;
+ constexpr unsigned next_bits = bits == 1 ? 1 : bits / 2;
+ return CountLeadingZeros<T, next_bits>(next_value) + add;
#endif
}
+inline constexpr unsigned CountLeadingZeros32(uint32_t value) {
+ return CountLeadingZeros(value);
+}
+inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
+ return CountLeadingZeros(value);
+}
-// CountTrailingZeros64(value) returns the number of zero bits preceding the
+// CountTrailingZeros(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
-// returns 64.
-inline unsigned CountTrailingZeros64(uint64_t value) {
+// returns {sizeof(T) * 8}.
+template <typename T, unsigned bits = sizeof(T) * 8>
+inline constexpr
+ typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountTrailingZeros(T value) {
#if V8_HAS_BUILTIN_CTZ
- return value ? __builtin_ctzll(value) : 64;
+ return value == 0 ? bits
+ : bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
+ : __builtin_ctz(static_cast<uint32_t>(value));
#else
- if (value == 0) return 64;
- unsigned count = 0;
- for (value ^= value - 1; value >>= 1; ++count) {
- }
- return count;
+ // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
+ // chapter 5-4. On x64, since is faster than counting in a loop and faster
+ // than doing binary search.
+ using U = typename std::make_unsigned<T>::type;
+ U u = value;
+ return CountPopulation(static_cast<U>(~u & (u - 1u)));
#endif
}
-DEFINE_32_64_OVERLOADS(CountTrailingZeros)
+inline constexpr unsigned CountTrailingZeros32(uint32_t value) {
+ return CountTrailingZeros(value);
+}
+inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
+ return CountTrailingZeros(value);
+}
// Returns true iff |value| is a power of 2.
template <typename T,
- typename = typename std::enable_if<std::is_integral<T>::value>::type>
+ typename = typename std::enable_if<std::is_integral<T>::value ||
+ std::is_enum<T>::value>::type>
constexpr inline bool IsPowerOfTwo(T value) {
return value > 0 && (value & (value - 1)) == 0;
}
@@ -338,8 +299,6 @@ V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
// checks and returns the result.
V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
-#undef DEFINE_32_64_OVERLOADS
-
} // namespace bits
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index abbb824b6d..f449612e6a 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -75,8 +75,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \
- || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#if V8_OS_LINUX
@@ -116,7 +115,7 @@ static uint32_t ReadELFHWCaps() {
#else
// Read the ELF HWCAP flags by parsing /proc/self/auxv.
FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
struct { uint32_t tag; uint32_t value; } entry;
for (;;) {
size_t n = fread(&entry, sizeof(entry), 1, fp);
@@ -176,7 +175,7 @@ int __detect_mips_arch_revision(void) {
// Fall-back to the least common denominator which is mips32 revision 1.
return result ? 1 : 6;
}
-#endif
+#endif // V8_HOST_ARCH_MIPS
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo final {
@@ -187,7 +186,7 @@ class CPUInfo final {
// when using fseek(0, SEEK_END) + ftell(). Nor can the be mmap()-ed.
static const char PATHNAME[] = "/proc/cpuinfo";
FILE* fp = fopen(PATHNAME, "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
for (;;) {
char buffer[256];
size_t n = fread(buffer, 1, sizeof(buffer), fp);
@@ -202,7 +201,7 @@ class CPUInfo final {
// Read the contents of the cpuinfo file.
data_ = new char[datalen_ + 1];
fp = fopen(PATHNAME, "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
for (size_t offset = 0; offset < datalen_; ) {
size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
if (n == 0) {
@@ -224,17 +223,17 @@ class CPUInfo final {
// Extract the content of a the first occurrence of a given field in
// the content of the cpuinfo file and return it as a heap-allocated
// string that must be freed by the caller using delete[].
- // Return NULL if not found.
+ // Return nullptr if not found.
char* ExtractField(const char* field) const {
- DCHECK(field != NULL);
+ DCHECK_NOT_NULL(field);
// Look for first field occurrence, and ensure it starts the line.
size_t fieldlen = strlen(field);
char* p = data_;
for (;;) {
p = strstr(p, field);
- if (p == NULL) {
- return NULL;
+ if (p == nullptr) {
+ return nullptr;
}
if (p == data_ || p[-1] == '\n') {
break;
@@ -244,21 +243,21 @@ class CPUInfo final {
// Skip to the first colon followed by a space.
p = strchr(p + fieldlen, ':');
- if (p == NULL || !isspace(p[1])) {
- return NULL;
+ if (p == nullptr || !isspace(p[1])) {
+ return nullptr;
}
p += 2;
// Find the end of the line.
char* q = strchr(p, '\n');
- if (q == NULL) {
+ if (q == nullptr) {
q = data_ + datalen_;
}
// Copy the line into a heap-allocated buffer.
size_t len = q - p;
char* result = new char[len + 1];
- if (result != NULL) {
+ if (result != nullptr) {
memcpy(result, p, len);
result[len] = '\0';
}
@@ -270,13 +269,11 @@ class CPUInfo final {
size_t datalen_;
};
-#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
-
// Checks that a space-separated list of items contains one given 'item'.
static bool HasListItem(const char* list, const char* item) {
ssize_t item_len = strlen(item);
const char* p = list;
- if (p != NULL) {
+ if (p != nullptr) {
while (*p != '\0') {
// Skip whitespace.
while (isspace(*p)) ++p;
@@ -296,11 +293,9 @@ static bool HasListItem(const char* list, const char* item) {
return false;
}
-#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
-
#endif // V8_OS_LINUX
-#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
CPU::CPU()
: stepping_(0),
@@ -432,7 +427,7 @@ CPU::CPU()
// Extract implementor from the "CPU implementer" field.
char* implementer = cpu_info.ExtractField("CPU implementer");
- if (implementer != NULL) {
+ if (implementer != nullptr) {
char* end;
implementer_ = strtol(implementer, &end, 0);
if (end == implementer) {
@@ -442,7 +437,7 @@ CPU::CPU()
}
char* variant = cpu_info.ExtractField("CPU variant");
- if (variant != NULL) {
+ if (variant != nullptr) {
char* end;
variant_ = strtol(variant, &end, 0);
if (end == variant) {
@@ -453,7 +448,7 @@ CPU::CPU()
// Extract part number from the "CPU part" field.
char* part = cpu_info.ExtractField("CPU part");
- if (part != NULL) {
+ if (part != nullptr) {
char* end;
part_ = strtol(part, &end, 0);
if (end == part) {
@@ -469,7 +464,7 @@ CPU::CPU()
// $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
// same file.
char* architecture = cpu_info.ExtractField("CPU architecture");
- if (architecture != NULL) {
+ if (architecture != nullptr) {
char* end;
architecture_ = strtol(architecture, &end, 10);
if (end == architecture) {
@@ -572,7 +567,7 @@ CPU::CPU()
// QNX doesn't say if Thumb2 is available.
// Assume false for the architectures older than ARMv7.
}
- DCHECK(architecture_ >= 6);
+ DCHECK_GE(architecture_, 6);
has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
has_vfp_ = has_fpu_;
if (cpu_flags & ARM_CPU_FLAG_NEON) {
@@ -606,49 +601,16 @@ CPU::CPU()
#endif
#elif V8_HOST_ARCH_ARM64
-
- CPUInfo cpu_info;
-
- // Extract implementor from the "CPU implementer" field.
- char* implementer = cpu_info.ExtractField("CPU implementer");
- if (implementer != NULL) {
- char* end;
- implementer_ = static_cast<int>(strtol(implementer, &end, 0));
- if (end == implementer) {
- implementer_ = 0;
- }
- delete[] implementer;
- }
-
- char* variant = cpu_info.ExtractField("CPU variant");
- if (variant != NULL) {
- char* end;
- variant_ = static_cast<int>(strtol(variant, &end, 0));
- if (end == variant) {
- variant_ = -1;
- }
- delete[] variant;
- }
-
- // Extract part number from the "CPU part" field.
- char* part = cpu_info.ExtractField("CPU part");
- if (part != NULL) {
- char* end;
- part_ = static_cast<int>(strtol(part, &end, 0));
- if (end == part) {
- part_ = 0;
- }
- delete[] part;
- }
+// Implementer, variant and part are currently unused under ARM64.
#elif V8_HOST_ARCH_PPC
#ifndef USE_SIMULATOR
#if V8_OS_LINUX
// Read processor info from /proc/self/auxv.
- char* auxv_cpu_type = NULL;
+ char* auxv_cpu_type = nullptr;
FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
#if V8_TARGET_ARCH_PPC64
Elf64_auxv_t entry;
#else
diff --git a/deps/v8/src/base/debug/stack_trace.cc b/deps/v8/src/base/debug/stack_trace.cc
index 0a7a3f9ab9..2a3fb87a19 100644
--- a/deps/v8/src/base/debug/stack_trace.cc
+++ b/deps/v8/src/base/debug/stack_trace.cc
@@ -26,7 +26,7 @@ StackTrace::~StackTrace() {}
const void* const* StackTrace::Addresses(size_t* count) const {
*count = count_;
if (count_) return trace_;
- return NULL;
+ return nullptr;
}
std::string StackTrace::ToString() const {
diff --git a/deps/v8/src/base/debug/stack_trace_android.cc b/deps/v8/src/base/debug/stack_trace_android.cc
index e1d5fd2e57..16fbf9890c 100644
--- a/deps/v8/src/base/debug/stack_trace_android.cc
+++ b/deps/v8/src/base/debug/stack_trace_android.cc
@@ -63,7 +63,7 @@ bool EnableInProcessStackDumping() {
memset(&action, 0, sizeof(action));
action.sa_handler = SIG_IGN;
sigemptyset(&action.sa_mask);
- return (sigaction(SIGPIPE, &action, NULL) == 0);
+ return (sigaction(SIGPIPE, &action, nullptr) == 0);
}
void DisableSignalStackDump() {
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 87c0a73d19..67f86c634f 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -51,8 +51,8 @@ namespace internal {
// POSIX doesn't define any async-signal safe function for converting
// an integer to ASCII. We'll have to define our own version.
// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
-// conversion was successful or NULL otherwise. It never writes more than "sz"
-// bytes. Output will be truncated as needed, and a NUL character is always
+// conversion was successful or nullptr otherwise. It never writes more than
+// "sz" bytes. Output will be truncated as needed, and a NUL character is always
// appended.
char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
@@ -104,7 +104,7 @@ void DemangleSymbols(std::string* text) {
// Try to demangle the mangled symbol candidate.
int status = 0;
std::unique_ptr<char, FreeDeleter> demangled_symbol(
- abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
+ abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
if (status == 0) { // Demangling is successful.
// Remove the mangled symbol.
text->erase(mangled_start, mangled_end - mangled_start);
@@ -334,7 +334,7 @@ bool EnableInProcessStackDumping() {
memset(&sigpipe_action, 0, sizeof(sigpipe_action));
sigpipe_action.sa_handler = SIG_IGN;
sigemptyset(&sigpipe_action.sa_mask);
- bool success = (sigaction(SIGPIPE, &sigpipe_action, NULL) == 0);
+ bool success = (sigaction(SIGPIPE, &sigpipe_action, nullptr) == 0);
// Avoid hangs during backtrace initialization, see above.
WarmUpBacktrace();
@@ -345,12 +345,12 @@ bool EnableInProcessStackDumping() {
action.sa_sigaction = &StackDumpSignalHandler;
sigemptyset(&action.sa_mask);
- success &= (sigaction(SIGILL, &action, NULL) == 0);
- success &= (sigaction(SIGABRT, &action, NULL) == 0);
- success &= (sigaction(SIGFPE, &action, NULL) == 0);
- success &= (sigaction(SIGBUS, &action, NULL) == 0);
- success &= (sigaction(SIGSEGV, &action, NULL) == 0);
- success &= (sigaction(SIGSYS, &action, NULL) == 0);
+ success &= (sigaction(SIGILL, &action, nullptr) == 0);
+ success &= (sigaction(SIGABRT, &action, nullptr) == 0);
+ success &= (sigaction(SIGFPE, &action, nullptr) == 0);
+ success &= (sigaction(SIGBUS, &action, nullptr) == 0);
+ success &= (sigaction(SIGSEGV, &action, nullptr) == 0);
+ success &= (sigaction(SIGSYS, &action, nullptr) == 0);
dump_stack_in_signal_handler = true;
@@ -397,11 +397,11 @@ namespace internal {
char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write at least one NUL byte.
size_t n = 1;
- if (n > sz) return NULL;
+ if (n > sz) return nullptr;
if (base < 2 || base > 16) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
char* start = buf;
@@ -416,7 +416,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write the '-' character.
if (++n > sz) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
*start++ = '-';
}
@@ -428,7 +428,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
// Output the next digit.
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 64e6309122..7a7e4f5168 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -24,9 +24,9 @@ namespace debug {
namespace {
-// Previous unhandled filter. Will be called if not NULL when we intercept an
+// Previous unhandled filter. Will be called if not nullptr when we intercept an
// exception. Only used in unit tests.
-LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = NULL;
+LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = nullptr;
bool g_dump_stack_in_signal_handler = true;
bool g_initialized_symbols = false;
@@ -43,7 +43,7 @@ long WINAPI StackDumpExceptionFilter(EXCEPTION_POINTERS* info) { // NOLINT
}
void GetExePath(wchar_t* path_out) {
- GetModuleFileName(NULL, path_out, MAX_PATH);
+ GetModuleFileName(nullptr, path_out, MAX_PATH);
path_out[MAX_PATH - 1] = L'\0';
PathRemoveFileSpec(path_out);
}
@@ -54,7 +54,7 @@ bool InitializeSymbols() {
// Defer symbol load until they're needed, use undecorated names, and get line
// numbers.
SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
- if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+ if (!SymInitialize(GetCurrentProcess(), nullptr, TRUE)) {
g_init_error = GetLastError();
// TODO(awong): Handle error: SymInitialize can fail with
// ERROR_INVALID_PARAMETER.
@@ -174,7 +174,7 @@ void DisableSignalStackDump() {
StackTrace::StackTrace() {
// When walking our own stack, use CaptureStackBackTrace().
- count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, NULL);
+ count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, nullptr);
}
#if defined(V8_CC_MSVC)
@@ -216,13 +216,13 @@ void StackTrace::InitTrace(const CONTEXT* context_record) {
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &context_copy, NULL,
- &SymFunctionTableAccess64, &SymGetModuleBase64, NULL) &&
+ &stack_frame, &context_copy, nullptr,
+ &SymFunctionTableAccess64, &SymGetModuleBase64, nullptr) &&
count_ < arraysize(trace_)) {
trace_[count_++] = reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
}
- for (size_t i = count_; i < arraysize(trace_); ++i) trace_[i] = NULL;
+ for (size_t i = count_; i < arraysize(trace_); ++i) trace_[i] = nullptr;
}
void StackTrace::Print() const { OutputToStream(&std::cerr); }
diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc
index 03d198e9bf..4e0900fa24 100644
--- a/deps/v8/src/base/division-by-constant.cc
+++ b/deps/v8/src/base/division-by-constant.cc
@@ -53,7 +53,7 @@ template <class T>
MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
unsigned leading_zeros) {
STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
- DCHECK(d != 0);
+ DCHECK_NE(d, 0);
const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
const T ones = ~static_cast<T>(0) >> leading_zeros;
const T min = static_cast<T>(1) << (bits - 1);
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 889c6885b2..9f3a1e6991 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -192,7 +192,8 @@ EXPLICIT_CHECK_OP_INSTANTIATION(void const*)
#undef EXPLICIT_CHECK_OP_INSTANTIATION
// comparison_underlying_type provides the underlying integral type of an enum,
-// or std::decay<T>::type if T is not an enum.
+// or std::decay<T>::type if T is not an enum. Booleans are converted to
+// "unsigned int", to allow "unsigned int == bool" comparisons.
template <typename T>
struct comparison_underlying_type {
// std::underlying_type must only be used with enum types, thus use this
@@ -202,8 +203,15 @@ struct comparison_underlying_type {
static constexpr bool is_enum = std::is_enum<decay>::value;
using underlying = typename std::underlying_type<
typename std::conditional<is_enum, decay, Dummy>::type>::type;
- using type = typename std::conditional<is_enum, underlying, decay>::type;
+ using type_or_bool =
+ typename std::conditional<is_enum, underlying, decay>::type;
+ using type =
+ typename std::conditional<std::is_same<type_or_bool, bool>::value,
+ unsigned int, type_or_bool>::type;
};
+// Cast a value to its underlying type
+#define MAKE_UNDERLYING(Type, value) \
+ static_cast<typename comparison_underlying_type<Type>::type>(value)
// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
// signed, and Rhs is unsigned. False in all other cases.
@@ -233,11 +241,14 @@ struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
return IMPL; \
}
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
- lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) == rhs)
+ lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) ==
+ MAKE_UNDERLYING(Rhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT,
- lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) < rhs)
+ lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) <
+ MAKE_UNDERLYING(Rhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LE,
- lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <= rhs)
+ lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <=
+ MAKE_UNDERLYING(Rhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, NE, !CmpEQImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GT, !CmpLEImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GE, !CmpLTImpl(lhs, rhs))
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index 8008812d75..ea9c2fa88d 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -85,7 +85,8 @@ V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+ CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+ nullptr);
}
}
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
new file mode 100644
index 0000000000..5deaa67ce7
--- /dev/null
+++ b/deps/v8/src/base/platform/OWNERS
@@ -0,0 +1,6 @@
+set noparent
+
+hpayer@chromium.org
+mlippautz@chromium.org
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 6df8599def..165651aae1 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -28,7 +28,7 @@ ConditionVariable::ConditionVariable() {
DCHECK_EQ(0, result);
result = pthread_condattr_destroy(&attr);
#else
- int result = pthread_cond_init(&native_handle_, NULL);
+ int result = pthread_cond_init(&native_handle_, nullptr);
#endif
DCHECK_EQ(0, result);
USE(result);
diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h
index f9a9ef4361..3406831cbe 100644
--- a/deps/v8/src/base/platform/elapsed-timer.h
+++ b/deps/v8/src/base/platform/elapsed-timer.h
@@ -56,7 +56,7 @@ class ElapsedTimer final {
DCHECK(IsStarted());
TimeTicks ticks = Now();
TimeDelta elapsed = ticks - start_ticks_;
- DCHECK(elapsed.InMicroseconds() >= 0);
+ DCHECK_GE(elapsed.InMicroseconds(), 0);
start_ticks_ = ticks;
DCHECK(IsStarted());
return elapsed;
@@ -67,7 +67,7 @@ class ElapsedTimer final {
TimeDelta Elapsed() const {
DCHECK(IsStarted());
TimeDelta elapsed = Now() - start_ticks_;
- DCHECK(elapsed.InMicroseconds() >= 0);
+ DCHECK_GE(elapsed.InMicroseconds(), 0);
return elapsed;
}
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index 191f07ffb1..a044075c16 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -25,7 +25,7 @@ static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
result = pthread_mutexattr_destroy(&attr);
#else
// Use a fast mutex (default attributes).
- result = pthread_mutex_init(mutex, NULL);
+ result = pthread_mutex_init(mutex, nullptr);
#endif // defined(DEBUG)
DCHECK_EQ(0, result);
USE(result);
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 6c1bde7b85..39559552bb 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -49,116 +49,24 @@ const char* AIXTimezoneCache::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(floor(time / msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return "";
+ if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on AIX.
}
double AIXTimezoneCache::LocalTimeOffset() {
// On AIX, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- DCHECK(utc != -1);
+ time_t utc = time(nullptr);
+ DCHECK_NE(utc, -1);
struct tm tm;
struct tm* loc = localtime_r(&utc, &tm);
- DCHECK(loc != NULL);
+ DCHECK_NOT_NULL(loc);
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return nullptr;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
- if (mprotect(address, size, prot) == -1) return false;
-
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mprotect(address, size, PROT_NONE) != -1;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
@@ -193,7 +101,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
// There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
+ if (start_of_path == nullptr) continue;
buffer[bytes_read] = 0;
result.push_back(SharedLibraryAddress(start_of_path, start, end));
}
@@ -201,7 +109,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index f20c530d67..eabd53570f 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -28,25 +28,37 @@ namespace base {
namespace {
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
- void* hint) {
- LPVOID base = NULL;
-
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
- base = VirtualAlloc(hint, size, action, protection);
+// The memory allocation implementation is taken from platform-win32.cc.
+
+DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PAGE_NOACCESS;
+ case OS::MemoryPermission::kReadWrite:
+ return PAGE_READWRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
+ case OS::MemoryPermission::kReadExecute:
+ return PAGE_EXECUTE_READ;
+ }
+ UNREACHABLE();
+}
+
+uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
+ void* hint) {
+ LPVOID base = nullptr;
+
+ // For executable or reserved pages try to use the address hint.
+ if (protect != PAGE_READWRITE) {
+ base = VirtualAlloc(hint, size, flags, protect);
}
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+ // If that fails, let the OS find an address to use.
+ if (base == nullptr) {
+ base = VirtualAlloc(nullptr, size, flags, protect);
+ }
- return base;
+ return reinterpret_cast<uint8_t*>(base);
}
} // namespace
@@ -64,93 +76,97 @@ const char* CygwinTimezoneCache::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return "";
+ if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on Cygwin.
}
double CygwinTimezoneCache::LocalTimeOffset() {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- DCHECK(utc != -1);
+ time_t utc = time(nullptr);
+ DCHECK_NE(utc, -1);
struct tm tm;
struct tm* loc = localtime_r(&utc, &tm);
- DCHECK(loc != NULL);
+ DCHECK_NOT_NULL(loc);
// time - localtime includes any daylight savings offset, so subtract it.
return static_cast<double>((mktime(loc) - utc) * msPerSecond -
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- hint = AlignedAddress(hint, alignment);
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size, hint);
- if (address == NULL) {
- *allocated = 0;
- return nullptr;
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_LE(page_size, alignment);
+ address = AlignedAddress(address, alignment);
+
+ DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+ ? MEM_RESERVE
+ : MEM_RESERVE | MEM_COMMIT;
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+
+ // First, try an exact size aligned allocation.
+ uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // If address is suitably aligned, we're done.
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+ // Otherwise, free it and try a larger allocation.
+ CHECK(Free(base, size));
+
+ // Clear the hint. It's unlikely we can allocate at this address.
+ address = nullptr;
+
+ // Add the maximum misalignment so we are guaranteed an aligned base address
+ // in the allocated region.
+ size_t padded_size = size + (alignment - page_size);
+ const int kMaxAttempts = 3;
+ aligned_base = nullptr;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // Try to trim the allocation by freeing the padded allocation and then
+ // calling VirtualAlloc at the aligned base.
+ CHECK(Free(base, padded_size));
+ aligned_base = RoundUp(base, alignment);
+ base = reinterpret_cast<uint8_t*>(
+ VirtualAlloc(aligned_base, size, flags, protect));
+ // We might not get the reduced allocation due to a race. In that case,
+ // base will be nullptr.
+ if (base != nullptr) break;
}
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- DCHECK(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != nullptr) {
- request_size = size;
- DCHECK(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size, hint);
- if (address == nullptr) {
- *allocated = 0;
- return nullptr;
- }
- }
-
- *allocated = request_size;
- return static_cast<void*>(address);
+ DCHECK_EQ(base, aligned_base);
+ return reinterpret_cast<void*>(base);
}
// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, static_cast<uintptr_t>(address) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ USE(size);
+ return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
-bool OS::UncommitRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return VirtualFree(address, 0, MEM_RELEASE) != 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ if (access == MemoryPermission::kNoAccess) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+ }
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+ return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
@@ -165,7 +181,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
+ if (fp == nullptr) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -193,7 +209,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
ungetc(c, fp); // Push the '/' back into the stream to be read below.
// Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+ if (fgets(lib_name, kLibNameLen, fp) == nullptr) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
@@ -219,7 +235,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
// Nothing to do on Cygwin.
}
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index a1eb7e8928..2b9779b843 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -40,106 +40,8 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- hint = AlignedAddress(hint, alignment);
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
@@ -174,7 +76,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
// There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
+ if (start_of_path == nullptr) continue;
buffer[bytes_read] = 0;
result.push_back(SharedLibraryAddress(start_of_path, start, end));
}
@@ -182,7 +84,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 16e6f1d2b0..83a8a23c48 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -13,79 +13,73 @@
namespace v8 {
namespace base {
-TimezoneCache* OS::CreateTimezoneCache() {
- return new PosixDefaultTimezoneCache();
+namespace {
+
+uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return 0; // no permissions
+ case OS::MemoryPermission::kReadWrite:
+ return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+ ZX_VM_FLAG_PERM_EXECUTE;
+ case OS::MemoryPermission::kReadExecute:
+ return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE;
+ }
+ UNREACHABLE();
}
-// static
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
- return nullptr;
-}
+} // namespace
-// static
-void OS::Guard(void* address, size_t size) {
- CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- 0 /*no permissions*/));
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- zx_handle_t vmo;
- if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
- uintptr_t result;
- zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
- 0 /*no permissions*/, &result);
- zx_handle_close(vmo);
- if (status != ZX_OK) return nullptr;
- return reinterpret_cast<void*>(result);
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
}
// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ OS::MemoryPermission access) {
+ size_t page_size = OS::AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ address = AlignedAddress(address, alignment);
+ // Add the maximum misalignment so we are guaranteed an aligned base address.
+ size_t request_size = size + (alignment - page_size);
zx_handle_t vmo;
if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
- *allocated = 0;
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
strlen(kVirtualMemoryName));
uintptr_t reservation;
+ uint32_t prot = GetProtectionFromMemoryPermission(access);
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, request_size,
- 0 /*no permissions*/, &reservation);
+ prot, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
zx_handle_close(vmo);
if (status != ZX_OK) {
- *allocated = 0;
return nullptr;
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
+ DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
prefix_size);
request_size -= prefix_size;
}
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
+ size_t aligned_size = RoundUp(size, page_size);
if (aligned_size != request_size) {
+ DCHECK_LT(aligned_size, request_size);
size_t suffix_size = request_size - aligned_size;
zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
@@ -94,37 +88,33 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
}
DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
- (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
- return zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- prot) == ZX_OK;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- 0 /*no permissions*/) == ZX_OK;
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ return zx_vmar_unmap(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
-bool OS::ReleaseRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
return zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return zx_vmar_unmap(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ uint32_t prot = GetProtectionFromMemoryPermission(access);
+ return zx_vmar_protect(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size,
+ prot) == ZX_OK;
}
// static
@@ -138,7 +128,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
}
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 2299a2c3e3..725ad0c6eb 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -93,109 +93,13 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
- if (mbase == MAP_FAILED) return nullptr;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return nullptr;
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
+ if (fp == nullptr) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -203,11 +107,15 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
// This loop will terminate once the scanning hits an EOF.
while (true) {
- uintptr_t start, end;
+ uintptr_t start, end, offset;
char attr_r, attr_w, attr_x, attr_p;
// Parse the addresses and permission bits at the beginning of the line.
if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+ if (fscanf(fp, "%" V8PRIxPTR, &offset) != 1) break;
+
+ // Adjust {start} based on {offset}.
+ start -= offset;
int c;
if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
@@ -224,7 +132,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
ungetc(c, fp);
// Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+ if (fgets(lib_name, kLibNameLen, fp) == nullptr) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
@@ -250,7 +158,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -261,14 +169,14 @@ void OS::SignalCodeMovingGC(void* hint) {
// kernel log.
long size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
- if (f == NULL) {
+ if (f == nullptr) {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
- void* addr =
- mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
- OS::Free(addr, size);
+ CHECK(Free(addr, size));
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 3c19962186..081e434589 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -43,119 +43,12 @@
namespace v8 {
namespace base {
-
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the user
-// defined tag 255 This helps identify V8-allocated regions in memory analysis
-// tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
-
-// static
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return nullptr;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
- if (header == NULL) continue;
+ if (header == nullptr) continue;
#if V8_HOST_ARCH_X64
uint64_t size;
char* code_ptr = getsectdatafromheader_64(
@@ -165,7 +58,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
unsigned int size;
char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
- if (code_ptr == NULL) continue;
+ if (code_ptr == nullptr) continue;
const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
@@ -174,7 +67,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index 910d4a8104..9084c3075e 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -38,112 +38,13 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
+ if (fp == nullptr) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -171,7 +72,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
ungetc(c, fp); // Push the '/' back into the stream to be read below.
// Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+ if (fgets(lib_name, kLibNameLen, fp) == nullptr) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
@@ -197,7 +98,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -208,13 +109,13 @@ void OS::SignalCodeMovingGC(void* hint) {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
- if (f == NULL) {
+ if (f == nullptr) {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
void* addr =
- mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
- DCHECK_NE(MAP_FAILED, addr);
+ mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ DCHECK(addr != MAP_FAILED);
OS::Free(addr, size);
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-posix-time.cc b/deps/v8/src/base/platform/platform-posix-time.cc
index a960f7237e..54618810c2 100644
--- a/deps/v8/src/base/platform/platform-posix-time.cc
+++ b/deps/v8/src/base/platform/platform-posix-time.cc
@@ -19,7 +19,7 @@ const char* PosixDefaultTimezoneCache::LocalTimezone(double time) {
}
double PosixDefaultTimezoneCache::LocalTimeOffset() {
- time_t tv = time(NULL);
+ time_t tv = time(nullptr);
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 8f658b95cb..b873197d3b 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -27,8 +27,6 @@
#include <sys/sysctl.h> // NOLINT, for sysctl
#endif
-#undef MAP_TYPE
-
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
#include <android/log.h> // NOLINT
@@ -61,6 +59,22 @@
#include <sys/syscall.h>
#endif
+#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#if defined(V8_OS_SOLARIS)
+#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
+extern "C" int madvise(caddr_t, size_t, int);
+#else
+extern int madvise(caddr_t, size_t, int);
+#endif
+#endif
+
+#ifndef MADV_FREE
+#define MADV_FREE MADV_DONTNEED
+#endif
+
namespace v8 {
namespace base {
@@ -71,10 +85,96 @@ const pthread_t kNoThread = (pthread_t) 0;
bool g_hard_abort = false;
-const char* g_gc_fake_mmap = NULL;
+const char* g_gc_fake_mmap = nullptr;
+
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+#if !V8_OS_FUCHSIA
+#if V8_OS_MACOSX
+// kMmapFd is used to pass vm_alloc flags to tag the region with the user
+// defined tag 255 This helps identify V8-allocated regions in memory analysis
+// tools like vmmap(1).
+const int kMmapFd = VM_MAKE_TAG(255);
+#else // !V8_OS_MACOSX
+const int kMmapFd = -1;
+#endif // !V8_OS_MACOSX
+
+const int kMmapFdOffset = 0;
+
+int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PROT_NONE;
+ case OS::MemoryPermission::kReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case OS::MemoryPermission::kReadExecute:
+ return PROT_READ | PROT_EXEC;
+ }
+ UNREACHABLE();
+}
+
+int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ if (access == OS::MemoryPermission::kNoAccess) {
+#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
+ flags |= MAP_NORESERVE;
+#endif // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
+#if V8_OS_QNX
+ flags |= MAP_LAZY;
+#endif // V8_OS_QNX
+ }
+ return flags;
+}
+
+void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
+ const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
+ int prot = GetProtectionFromMemoryPermission(access);
+ int flags = GetFlagsForMemoryPermission(access);
+ void* result =
+ mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
+ if (result == MAP_FAILED) return nullptr;
+ return result;
+}
+
+int ReclaimInaccessibleMemory(void* address, size_t size) {
+#if defined(OS_MACOSX)
+ // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
+ // marks the pages with the reusable bit, which allows both Activity Monitor
+ // and memory-infra to correctly track the pages.
+ int ret = madvise(address, size, MADV_FREE_REUSABLE);
+#elif defined(_AIX) || defined(V8_OS_SOLARIS)
+ int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
+#else
+ int ret = madvise(address, size, MADV_FREE);
+#endif
+ if (ret != 0 && errno == EINVAL) {
+ // MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
+ // MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
+ // imply runtime support.
+#if defined(_AIX) || defined(V8_OS_SOLARIS)
+ ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
+#else
+ ret = madvise(address, size, MADV_DONTNEED);
+#endif
+ }
+ return ret;
+}
+
+#endif // !V8_OS_FUCHSIA
} // namespace
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap) {
+ if (random_seed) {
+ platform_random_number_generator.Pointer()->SetSeed(random_seed);
+ }
+ g_hard_abort = hard_abort;
+ g_gc_fake_mmap = gc_fake_mmap;
+}
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
@@ -95,77 +195,161 @@ int OS::ActivationFrameAlignment() {
#endif
}
+// static
+size_t OS::AllocatePageSize() {
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+}
-intptr_t OS::CommitPageSize() {
- static intptr_t page_size = getpagesize();
+// static
+size_t OS::CommitPageSize() {
+ static size_t page_size = getpagesize();
return page_size;
}
-void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable, void* hint) {
- return OS::Allocate(requested, allocated,
- is_executable ? OS::MemoryPermission::kReadWriteExecute
- : OS::MemoryPermission::kReadWrite,
- hint);
+// static
+void* OS::GetRandomMmapAddr() {
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Dynamic tools do not support custom mmap addresses.
+ return nullptr;
+#endif
+ uintptr_t raw_addr;
+ platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+ sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#elif V8_TARGET_ARCH_PPC64
+#if V8_OS_AIX
+ // AIX: 64 bits of virtual addressing, but we limit address range to:
+ // a) minimize Segment Lookaside Buffer (SLB) misses and
+ raw_addr &= V8_UINT64_C(0x3ffff000);
+ // Use extra address space to isolate the mmap regions.
+ raw_addr += V8_UINT64_C(0x400000000000);
+#elif V8_TARGET_BIG_ENDIAN
+ // Big-endian Linux: 44 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x03fffffff000);
+#else
+ // Little-endian Linux: 48 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#endif
+#elif V8_TARGET_ARCH_S390X
+ // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
+ // of virtual addressing. Truncate to 40 bits to allow kernel chance to
+ // fulfill request.
+ raw_addr &= V8_UINT64_C(0xfffffff000);
+#elif V8_TARGET_ARCH_S390
+ // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
+ // to fulfill request.
+ raw_addr &= 0x1ffff000;
+#else
+ raw_addr &= 0x3ffff000;
+
+#ifdef __sun
+ // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+ // half of the top half of the address space (that is, the third quarter).
+ // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+ // system will not fail to mmap() because something else happens to already
+ // be mapped at our random address. We deliberately set the hint high enough
+ // to get well above the system's break (that is, the heap); Solaris and
+ // illumos will try the hint and if that fails allocate as if there were
+ // no hint at all. The high hint prevents the break from getting hemmed in
+ // at low values, ceding half of the address space to the system heap.
+ raw_addr += 0x80000000;
+#elif V8_OS_AIX
+ // The range 0x30000000 - 0xD0000000 is available on AIX;
+ // choose the upper range.
+ raw_addr += 0x90000000;
+#else
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
+ // 10.6 and 10.7.
+ raw_addr += 0x20000000;
+#endif
+#endif
+ return reinterpret_cast<void*>(raw_addr);
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- DCHECK(result == 0);
-}
+// TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files.
+#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
+// static
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ address = AlignedAddress(address, alignment);
+ // Add the maximum misalignment so we are guaranteed an aligned base address.
+ size_t request_size = size + (alignment - page_size);
+ void* result = base::Allocate(address, request_size, access);
+ if (result == nullptr) return nullptr;
+
+ // Unmap memory allocated before the aligned base address.
+ uint8_t* base = static_cast<uint8_t*>(result);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ if (aligned_base != base) {
+ DCHECK_LT(base, aligned_base);
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ CHECK(Free(base, prefix_size));
+ request_size -= prefix_size;
+ }
+ // Unmap memory allocated after the potentially unaligned end.
+ if (size != request_size) {
+ DCHECK_LT(size, request_size);
+ size_t suffix_size = request_size - size;
+ CHECK(Free(aligned_base + size, suffix_size));
+ request_size -= suffix_size;
+ }
+ DCHECK_EQ(size, request_size);
+ return static_cast<void*>(aligned_base);
+}
-// Get rid of writable permission on code allocations.
-void OS::ProtectCode(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#else
- mprotect(address, size, PROT_READ | PROT_EXEC);
-#endif
+// static
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ return munmap(address, size) == 0;
}
+// static
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ return munmap(address, size) == 0;
+}
-// Create guard pages.
-#if !V8_OS_FUCHSIA
-void OS::Guard(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-#else
- mprotect(address, size, PROT_NONE);
-#endif
+// static
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+
+ int prot = GetProtectionFromMemoryPermission(access);
+ int ret = mprotect(address, size, prot);
+ if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
+ ret = ReclaimInaccessibleMemory(address, size);
+ }
+ return ret == 0;
}
-#endif // !V8_OS_FUCHSIA
-// Make a region of memory readable and writable.
-void OS::Unprotect(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READWRITE, &oldprotect);
+// static
+bool OS::HasLazyCommits() {
+#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
+ return true;
#else
- mprotect(address, size, PROT_READ | PROT_WRITE);
+ // TODO(bbudge) Return true for all POSIX platforms.
+ return false;
#endif
}
-
-void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
- g_hard_abort = hard_abort;
- g_gc_fake_mmap = gc_fake_mmap;
-}
-
+#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
const char* OS::GetGCFakeMMapFile() {
return g_gc_fake_mmap;
}
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(sysconf(_SC_PAGESIZE));
-}
-
-
void OS::Sleep(TimeDelta interval) {
usleep(static_cast<useconds_t>(interval.InMicroseconds()));
}
@@ -220,13 +404,14 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
if (FILE* file = fopen(name, "r+")) {
if (fseek(file, 0, SEEK_END) == 0) {
long size = ftell(file); // NOLINT(runtime/int)
if (size >= 0) {
- void* const memory = mmap(hint, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, fileno(file), 0);
+ void* const memory =
+ mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -239,13 +424,13 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
if (FILE* file = fopen(name, "w+")) {
size_t result = fwrite(initial, 1, size, file);
if (result == size && !ferror(file)) {
- void* memory = mmap(hint, result, PROT_READ | PROT_WRITE, MAP_SHARED,
- fileno(file), 0);
+ void* memory = mmap(OS::GetRandomMmapAddr(), result,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, result);
}
@@ -257,7 +442,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
+ if (memory_) CHECK(OS::Free(memory_, size_));
fclose(file_);
}
@@ -309,7 +494,7 @@ double PosixTimezoneCache::DaylightSavingsOffset(double time) {
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return std::numeric_limits<double>::quiet_NaN();
+ if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
@@ -325,16 +510,16 @@ int OS::GetLastError() {
FILE* OS::FOpen(const char* path, const char* mode) {
FILE* file = fopen(path, mode);
- if (file == NULL) return NULL;
+ if (file == nullptr) return nullptr;
struct stat file_stat;
if (fstat(fileno(file), &file_stat) != 0) {
fclose(file);
- return NULL;
+ return nullptr;
}
bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
if (is_regular_file) return file;
fclose(file);
- return NULL;
+ return nullptr;
}
@@ -462,7 +647,7 @@ class Thread::PlatformData {
Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
+ start_semaphore_(nullptr) {
if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
stack_size_ = PTHREAD_STACK_MIN;
}
@@ -487,8 +672,7 @@ static void SetThreadName(const char* name) {
int (*dynamic_pthread_setname_np)(const char*);
*reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (dynamic_pthread_setname_np == NULL)
- return;
+ if (dynamic_pthread_setname_np == nullptr) return;
// Mac OS X does not expose the length limit of the name, so hardcode it.
static const int kMaxNameLength = 63;
@@ -509,9 +693,9 @@ static void* ThreadEntry(void* arg) {
// one).
{ LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
- DCHECK(thread->data()->thread_ != kNoThread);
+ DCHECK_NE(thread->data()->thread_, kNoThread);
thread->NotifyStartedAndRun();
- return NULL;
+ return nullptr;
}
@@ -548,15 +732,11 @@ void Thread::Start() {
DCHECK_EQ(0, result);
result = pthread_attr_destroy(&attr);
DCHECK_EQ(0, result);
- DCHECK(data_->thread_ != kNoThread);
+ DCHECK_NE(data_->thread_, kNoThread);
USE(result);
}
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
+void Thread::Join() { pthread_join(data_->thread_, nullptr); }
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
@@ -595,7 +775,7 @@ static void InitializeTlsBaseOffset() {
char buffer[kBufferSize];
size_t buffer_size = kBufferSize;
int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+ if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
@@ -605,7 +785,7 @@ static void InitializeTlsBaseOffset() {
char* period_pos = strchr(buffer, '.');
*period_pos = '\0';
int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
+ static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
// The constants below are taken from pthreads.s from the XNU kernel
// sources archive at www.opensource.apple.com.
if (kernel_version_major < 11) {
@@ -633,7 +813,7 @@ static void CheckFastTls(Thread::LocalStorageKey key) {
V8_Fatal(__FILE__, __LINE__,
"V8 failed to initialize fast TLS on current kernel");
}
- Thread::SetThreadLocal(key, NULL);
+ Thread::SetThreadLocal(key, nullptr);
}
#endif // V8_FAST_TLS_SUPPORTED
@@ -648,7 +828,7 @@ Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
}
#endif
pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
+ int result = pthread_key_create(&key, nullptr);
DCHECK_EQ(0, result);
USE(result);
LocalStorageKey local_key = PthreadKeyToLocalKey(key);
@@ -681,17 +861,9 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
USE(result);
}
-int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
- switch (access) {
- case OS::MemoryPermission::kNoAccess:
- return PROT_NONE;
- case OS::MemoryPermission::kReadWrite:
- return PROT_READ | PROT_WRITE;
- case OS::MemoryPermission::kReadWriteExecute:
- return PROT_READ | PROT_WRITE | PROT_EXEC;
- }
- UNREACHABLE();
-}
+#undef LOG_TAG
+#undef MAP_ANONYMOUS
+#undef MADV_FREE
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index b092bb526d..55861bc9ac 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -21,8 +21,6 @@ class PosixTimezoneCache : public TimezoneCache {
static const int msPerSecond = 1000;
};
-int GetProtectionFromMemoryPermission(OS::MemoryPermission access);
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 68bc0efbf9..640b77c816 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -89,106 +89,9 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return false; }
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
- procfs_mapinfo *mapinfos = NULL, *mapinfo;
+ procfs_mapinfo *mapinfos = nullptr, *mapinfo;
int proc_fd, num, i;
struct {
@@ -205,14 +108,14 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
}
/* Get the number of map entries. */
- if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+ if (devctl(proc_fd, DCMD_PROC_MAPINFO, nullptr, 0, &num) != EOK) {
close(proc_fd);
return result;
}
mapinfos =
reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
- if (mapinfos == NULL) {
+ if (mapinfos == nullptr) {
close(proc_fd);
return result;
}
@@ -241,7 +144,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 2ea6ef4a6c..b81895a3fb 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -47,7 +47,7 @@ const char* SolarisTimezoneCache::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return "";
+ if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
}
@@ -58,111 +58,11 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index de1a27506f..e026d7edae 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -20,10 +20,12 @@
#include "src/base/win32-headers.h"
#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/timezone-cache.h"
+#include "src/base/utils/random-number-generator.h"
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
@@ -45,14 +47,14 @@ inline void MemoryFence() {
int localtime_s(tm* out_tm, const time_t* time) {
tm* posix_local_time_struct = localtime_r(time, out_tm);
- if (posix_local_time_struct == NULL) return 1;
+ if (posix_local_time_struct == nullptr) return 1;
return 0;
}
int fopen_s(FILE** pFile, const char* filename, const char* mode) {
*pFile = fopen(filename, mode);
- return *pFile != NULL ? 0 : 1;
+ return *pFile != nullptr ? 0 : 1;
}
int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
@@ -63,8 +65,8 @@ int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
- CHECK(source != NULL);
- CHECK(dest != NULL);
+ CHECK(source != nullptr);
+ CHECK(dest != nullptr);
CHECK_GT(dest_size, 0);
if (count == _TRUNCATE) {
@@ -137,11 +139,11 @@ class WindowsTimezoneCache : public TimezoneCache {
}
// Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1, std_tz_name_,
+ kTzNameSize, nullptr, nullptr);
std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1, dst_tz_name_,
+ kTzNameSize, nullptr, nullptr);
dst_tz_name_[kTzNameSize - 1] = '\0';
// If OS returned empty string or resource id (like "@tzres.dll,-211")
@@ -551,7 +553,7 @@ FILE* OS::FOpen(const char* path, const char* mode) {
if (fopen_s(&result, path, mode) == 0) {
return result;
} else {
- return NULL;
+ return nullptr;
}
}
@@ -572,13 +574,13 @@ FILE* OS::OpenTemporaryFile() {
char tempPathBuffer[MAX_PATH];
DWORD path_result = 0;
path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
- if (path_result > MAX_PATH || path_result == 0) return NULL;
+ if (path_result > MAX_PATH || path_result == 0) return nullptr;
UINT name_result = 0;
char tempNameBuffer[MAX_PATH];
name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
- if (name_result == 0) return NULL;
+ if (name_result == 0) return nullptr;
FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
- if (result != NULL) {
+ if (result != nullptr) {
Remove(tempNameBuffer); // Delete on close.
}
return result;
@@ -672,42 +674,81 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocatePageSize() {
+ static size_t allocate_alignment = 0;
+ if (allocate_alignment == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ allocate_alignment = info.dwAllocationGranularity;
+ }
+ return allocate_alignment;
+}
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
+size_t OS::CommitPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
- page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
+ page_size = info.dwPageSize;
+ DCHECK_EQ(4096, page_size);
}
return page_size;
}
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
- static size_t allocate_alignment = 0;
- if (allocate_alignment == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- allocate_alignment = info.dwAllocationGranularity;
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap) {
+ if (random_seed) {
+ platform_random_number_generator.Pointer()->SetSeed(random_seed);
}
- return allocate_alignment;
+ g_hard_abort = hard_abort;
}
-void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
- g_hard_abort = hard_abort;
+void* OS::GetRandomMmapAddr() {
+// The address range used to randomize RWX allocations in OS::Allocate
+// Try not to map pages into the default range that windows loads DLLs
+// Use a multiple of 64k to prevent committing unused memory.
+// Note: This does not guarantee RWX regions will be within the
+// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+ static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address;
+ platform_random_number_generator.Pointer()->NextBytes(&address,
+ sizeof(address));
+ address <<= kPageSizeBits;
+ address += kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void*>(address);
}
namespace {
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
- void* hint) {
- LPVOID base = NULL;
+DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PAGE_NOACCESS;
+ case OS::MemoryPermission::kReadWrite:
+ return PAGE_READWRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
+ case OS::MemoryPermission::kReadExecute:
+ return PAGE_EXECUTE_READ;
+ }
+ UNREACHABLE();
+}
+
+uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
+ void* hint) {
+ LPVOID base = nullptr;
static BOOL use_aslr = -1;
#ifdef V8_HOST_ARCH_32_BIT
// Don't bother randomizing on 32-bit hosts, because they lack the room and
@@ -718,146 +759,96 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
use_aslr = TRUE;
#endif
- if (use_aslr &&
- (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
- // For executable pages try and randomize the allocation address
- base = VirtualAlloc(hint, size, action, protection);
+ if (use_aslr && protect != PAGE_READWRITE) {
+ // For executable or reserved pages try to randomize the allocation address.
+ base = VirtualAlloc(hint, size, flags, protect);
}
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
- return base;
-}
-
-} // namespace
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable, void* hint) {
- return OS::Allocate(requested, allocated,
- is_executable ? OS::MemoryPermission::kReadWriteExecute
- : OS::MemoryPermission::kReadWrite,
- hint);
-}
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- // VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
- // Windows XP SP2 allows Data Excution Prevention (DEP).
- int prot = PAGE_NOACCESS;
- switch (access) {
- case OS::MemoryPermission::kNoAccess: {
- prot = PAGE_NOACCESS;
- break;
- }
- case OS::MemoryPermission::kReadWrite: {
- prot = PAGE_READWRITE;
- break;
- }
- case OS::MemoryPermission::kReadWriteExecute: {
- prot = PAGE_EXECUTE_READWRITE;
- break;
- }
+ // On failure, let the OS find an address to use.
+ if (base == nullptr) {
+ base = VirtualAlloc(nullptr, size, flags, protect);
}
-
- LPVOID mbase =
- RandomizedVirtualAlloc(msize, MEM_COMMIT | MEM_RESERVE, prot, hint);
-
- if (mbase == NULL) return NULL;
-
- DCHECK((reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment()) == 0);
-
- *allocated = msize;
- return mbase;
+ return reinterpret_cast<uint8_t*>(base);
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): VirtualFree has a return value which is ignored here.
- VirtualFree(address, 0, MEM_RELEASE);
- USE(size);
-}
-
-intptr_t OS::CommitPageSize() {
- return 4096;
-}
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-}
-
-void OS::Unprotect(void* address, const size_t size) {
- LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
- USE(result);
-}
+} // namespace
// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size, hint);
- if (address == nullptr) {
- *allocated = 0;
- return nullptr;
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_LE(page_size, alignment);
+ address = AlignedAddress(address, alignment);
+
+ DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+ ? MEM_RESERVE
+ : MEM_RESERVE | MEM_COMMIT;
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+
+ // First, try an exact size aligned allocation.
+ uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // If address is suitably aligned, we're done.
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+ // Otherwise, free it and try a larger allocation.
+ CHECK(Free(base, size));
+
+ // Clear the hint. It's unlikely we can allocate at this address.
+ address = nullptr;
+
+ // Add the maximum misalignment so we are guaranteed an aligned base address
+ // in the allocated region.
+ size_t padded_size = size + (alignment - page_size);
+ const int kMaxAttempts = 3;
+ aligned_base = nullptr;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // Try to trim the allocation by freeing the padded allocation and then
+ // calling VirtualAlloc at the aligned base.
+ CHECK(Free(base, padded_size));
+ aligned_base = RoundUp(base, alignment);
+ base = reinterpret_cast<uint8_t*>(
+ VirtualAlloc(aligned_base, size, flags, protect));
+ // We might not get the reduced allocation due to a race. In that case,
+ // base will be nullptr.
+ if (base != nullptr) break;
}
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- DCHECK(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != nullptr) {
- request_size = size;
- DCHECK(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size, hint);
- if (address == nullptr) {
- *allocated = 0;
- return nullptr;
- }
- }
-
- *allocated = request_size;
- return static_cast<void*>(address);
+ DCHECK_EQ(base, aligned_base);
+ return reinterpret_cast<void*>(base);
}
// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+ // TODO(bbudge) Add DCHECK_EQ(0, size % AllocatePageSize()) when callers
+ // pass the correct size on Windows.
+ USE(size);
+ return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
-bool OS::UncommitRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return VirtualFree(address, 0, MEM_RELEASE) != 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ if (access == MemoryPermission::kNoAccess) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+ }
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+ return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
@@ -916,18 +907,19 @@ class Win32MemoryMappedFile final : public OS::MemoryMappedFile {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
- if (file == INVALID_HANDLE_VALUE) return NULL;
+ FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+ OPEN_EXISTING, 0, nullptr);
+ if (file == INVALID_HANDLE_VALUE) return nullptr;
- DWORD size = GetFileSize(file, NULL);
+ DWORD size = GetFileSize(file, nullptr);
- // Create a file mapping for the physical file. Ignore hint on Windows.
+ // Create a file mapping for the physical file
HANDLE file_mapping =
- CreateFileMapping(file, NULL, PAGE_READWRITE, 0, size, NULL);
- if (file_mapping == NULL) return NULL;
+ CreateFileMapping(file, nullptr, PAGE_READWRITE, 0, size, nullptr);
+ if (file_mapping == nullptr) return nullptr;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
@@ -936,17 +928,17 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
- OPEN_ALWAYS, 0, NULL);
- if (file == NULL) return NULL;
- // Create a file mapping for the physical file. Ignore hint on Windows.
- HANDLE file_mapping = CreateFileMapping(file, NULL, PAGE_READWRITE, 0,
- static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
+ FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+ OPEN_ALWAYS, 0, nullptr);
+ if (file == nullptr) return nullptr;
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, nullptr, PAGE_READWRITE, 0,
+ static_cast<DWORD>(size), nullptr);
+ if (file_mapping == nullptr) return nullptr;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
if (memory) memmove(memory, initial, size);
@@ -1062,7 +1054,7 @@ typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
#undef VOID
// Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = nullptr;
DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
#undef DEF_DLL_FUNCTION
@@ -1079,7 +1071,7 @@ static bool LoadDbgHelpAndTlHelp32() {
// Load functions from the dbghelp.dll module.
module = LoadLibrary(TEXT("dbghelp.dll"));
- if (module == NULL) {
+ if (module == nullptr) {
return false;
}
@@ -1094,7 +1086,7 @@ DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
// Load functions from the kernel32.dll module (the TlHelp32.h function used
// to be in tlhelp32.dll but are now moved to kernel32.dll).
module = LoadLibrary(TEXT("kernel32.dll"));
- if (module == NULL) {
+ if (module == nullptr) {
return false;
}
@@ -1107,14 +1099,14 @@ TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
#undef LOAD_DLL_FUNC
// Check that all functions where loaded.
- bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != nullptr)&&
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+ DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
#undef DLL_FUNC_LOADED
- true;
+ true;
dbghelp_loaded = result;
return result;
@@ -1141,7 +1133,7 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
// Initialize the symbol engine.
ok = _SymInitialize(process_handle, // hProcess
- NULL, // UserSearchPath
+ nullptr, // UserSearchPath
false); // fInvadeProcess
if (!ok) return result;
@@ -1185,10 +1177,10 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
}
}
int lib_name_length = WideCharToMultiByte(
- CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+ CP_UTF8, 0, module_entry.szExePath, -1, nullptr, 0, nullptr, nullptr);
std::string lib_name(lib_name_length, 0);
WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
- lib_name_length, NULL, NULL);
+ lib_name_length, nullptr, nullptr);
result.push_back(OS::SharedLibraryAddress(
lib_name, reinterpret_cast<uintptr_t>(module_entry.modBaseAddr),
reinterpret_cast<uintptr_t>(module_entry.modBaseAddr +
@@ -1212,13 +1204,16 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return LoadSymbols(process_handle);
}
+void OS::SignalCodeMovingGC() {}
+
#else // __MINGW32__
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<OS::SharedLibraryAddress>();
}
+
+void OS::SignalCodeMovingGC() {}
#endif // __MINGW32__
-void OS::SignalCodeMovingGC(void* hint) {}
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
@@ -1261,8 +1256,7 @@ class Thread::PlatformData {
// handle until it is started.
Thread::Thread(const Options& options)
- : stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
+ : stack_size_(options.stack_size()), start_semaphore_(nullptr) {
data_ = new PlatformData(kNoThread);
set_name(options.name());
}
@@ -1286,12 +1280,8 @@ Thread::~Thread() {
// initialize thread specific structures in the C runtime library.
void Thread::Start() {
data_->thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL,
- static_cast<unsigned>(stack_size_),
- ThreadEntry,
- this,
- 0,
- &data_->thread_id_));
+ _beginthreadex(nullptr, static_cast<unsigned>(stack_size_), ThreadEntry,
+ this, 0, &data_->thread_id_));
}
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 0ff8599b0c..dd454ecd43 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -62,7 +62,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
kPointerSize * index));
}
intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- DCHECK(extra != 0);
+ DCHECK_NE(extra, 0);
return *reinterpret_cast<intptr_t*>(extra +
kPointerSize * (index - kMaxInlineSlots));
}
@@ -107,9 +107,11 @@ class TimezoneCache;
class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
+ // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
// - hard_abort: If true, OS::Abort() will crash instead of aborting.
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
- static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
+ static void Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap);
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
@@ -155,55 +157,47 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
- // Memory access permissions. Only the modes currently used by V8 are listed
- // here even though most systems support additional modes.
- enum class MemoryPermission { kNoAccess, kReadWrite, kReadWriteExecute };
-
- // Allocate/Free memory used by JS heap. Permissions are set according to the
- // is_* flags. Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested, size_t* allocated,
- MemoryPermission access, void* hint = nullptr);
- // Allocate/Free memory used by JS heap. Pages are readable/writable, but
- // they are not guaranteed to be executable unless 'executable' is true.
- // Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested, size_t* allocated,
- bool is_executable, void* hint = nullptr);
- static void Free(void* address, const size_t size);
-
- // Allocates a region of memory that is inaccessible. On Windows this reserves
- // but does not commit the memory. On POSIX systems it allocates memory as
- // PROT_NONE, which also prevents it from being committed.
- static void* AllocateGuarded(const size_t requested);
-
- // This is the granularity at which the ProtectCode(...) call can set page
- // permissions.
- static intptr_t CommitPageSize();
-
- // Mark code segments non-writable.
- static void ProtectCode(void* address, const size_t size);
-
- // Assign memory as a guard page so that access will cause an exception.
- static void Guard(void* address, const size_t size);
-
- // Make a region of memory readable and writable.
- static void Unprotect(void* address, const size_t size);
+ enum class MemoryPermission {
+ kNoAccess,
+ kReadWrite,
+ // TODO(hpayer): Remove this flag. Memory should never be rwx.
+ kReadWriteExecute,
+ kReadExecute
+ };
- // Get the Alignment guaranteed by Allocate().
- static size_t AllocateAlignment();
+ // Gets the page granularity for Allocate. Addresses returned by Allocate are
+ // aligned to this size.
+ static size_t AllocatePageSize();
- static void* ReserveRegion(size_t size, void* hint);
+ // Gets the granularity at which the permissions and commit calls can be made.
+ static size_t CommitPageSize();
- static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated);
+ // Generate a random address to be used for hinting allocation calls.
+ static void* GetRandomMmapAddr();
- static bool CommitRegion(void* address, size_t size, bool is_executable);
+ // Allocates memory. Permissions are set according to the access argument.
+ // The address parameter is a hint. The size and alignment parameters must be
+ // multiples of AllocatePageSize(). Returns the address of the allocated
+ // memory, with the specified size and alignment, or nullptr on failure.
+ V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
+ size_t alignment,
+ MemoryPermission access);
- static bool UncommitRegion(void* address, size_t size);
+ // Frees memory allocated by a call to Allocate. address and size must be
+ // multiples of AllocatePageSize(). Returns true on success, otherwise false.
+ V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
- static bool ReleaseRegion(void* address, size_t size);
+ // Releases memory that is no longer needed. The range specified by address
+ // and size must be part of an allocated memory region, and must be multiples
+ // of CommitPageSize(). Released memory is left in an undefined state, so it
+ // should not be accessed. Returns true on success, otherwise false.
+ V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
- // Release part of a reserved address range.
- static bool ReleasePartialRegion(void* address, size_t size);
+ // Sets permissions according to the access argument. address and size must be
+ // multiples of CommitPageSize(). Setting permission to kNoAccess may cause
+ // the memory contents to be lost. Returns true on success, otherwise false.
+ V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
+ MemoryPermission access);
static bool HasLazyCommits();
@@ -231,8 +225,8 @@ class V8_BASE_EXPORT OS {
virtual void* memory() const = 0;
virtual size_t size() const = 0;
- static MemoryMappedFile* open(const char* name, void* hint);
- static MemoryMappedFile* create(const char* name, void* hint, size_t size,
+ static MemoryMappedFile* open(const char* name);
+ static MemoryMappedFile* create(const char* name, size_t size,
void* initial);
};
@@ -271,7 +265,7 @@ class V8_BASE_EXPORT OS {
// process that a code moving garbage collection starts. Can do
// nothing, in which case the code objects must not move (e.g., by
// using --never-compact) if accurate profiling is desired.
- static void SignalCodeMovingGC(void* hint);
+ static void SignalCodeMovingGC();
// Support runtime detection of whether the hard float option of the
// EABI is used.
@@ -335,7 +329,7 @@ class V8_BASE_EXPORT Thread {
Start();
start_semaphore_->Wait();
delete start_semaphore_;
- start_semaphore_ = NULL;
+ start_semaphore_ = nullptr;
}
// Wait until thread terminates.
@@ -360,7 +354,7 @@ class V8_BASE_EXPORT Thread {
SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
}
static bool HasThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key) != NULL;
+ return GetThreadLocal(key) != nullptr;
}
#ifdef V8_FAST_TLS_SUPPORTED
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 346705fd02..9a7ef7a8f4 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -73,7 +73,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#elif V8_OS_POSIX
Semaphore::Semaphore(int count) {
- DCHECK(count >= 0);
+ DCHECK_GE(count, 0);
int result = sem_init(&native_handle_, 0, count);
DCHECK_EQ(0, result);
USE(result);
@@ -135,9 +135,9 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#elif V8_OS_WIN
Semaphore::Semaphore(int count) {
- DCHECK(count >= 0);
- native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- DCHECK(native_handle_ != NULL);
+ DCHECK_GE(count, 0);
+ native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7fffffff, nullptr);
+ DCHECK_NOT_NULL(native_handle_);
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 6695bf8e57..3529d55875 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -38,7 +38,7 @@ int64_t ComputeThreadTicks() {
THREAD_BASIC_INFO,
reinterpret_cast<thread_info_t>(&thread_info_data),
&thread_info_count);
- CHECK(kr == KERN_SUCCESS);
+ CHECK_EQ(kr, KERN_SUCCESS);
v8::base::CheckedNumeric<int64_t> absolute_micros(
thread_info_data.user_time.seconds +
@@ -195,7 +195,7 @@ TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
struct mach_timespec TimeDelta::ToMachTimespec() const {
struct mach_timespec ts;
- DCHECK(delta_ >= 0);
+ DCHECK_GE(delta_, 0);
ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
Time::kNanosecondsPerMicrosecond;
@@ -316,7 +316,7 @@ Time Time::FromFiletime(FILETIME ft) {
FILETIME Time::ToFiletime() const {
- DCHECK(us_ >= 0);
+ DCHECK_GE(us_, 0);
FILETIME ft;
if (IsNull()) {
ft.dwLowDateTime = 0;
@@ -338,7 +338,7 @@ FILETIME Time::ToFiletime() const {
Time Time::Now() {
struct timeval tv;
- int result = gettimeofday(&tv, NULL);
+ int result = gettimeofday(&tv, nullptr);
DCHECK_EQ(0, result);
USE(result);
return FromTimeval(tv);
@@ -351,8 +351,8 @@ Time Time::NowFromSystemTime() {
Time Time::FromTimespec(struct timespec ts) {
- DCHECK(ts.tv_nsec >= 0);
- DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
+ DCHECK_GE(ts.tv_nsec, 0);
+ DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
return Time();
}
@@ -384,7 +384,7 @@ struct timespec Time::ToTimespec() const {
Time Time::FromTimeval(struct timeval tv) {
- DCHECK(tv.tv_usec >= 0);
+ DCHECK_GE(tv.tv_usec, 0);
DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
if (tv.tv_usec == 0 && tv.tv_sec == 0) {
return Time();
@@ -577,7 +577,7 @@ static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
ThreadSafeInitOnceTrait>::type high_res_tick_clock =
LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
+// static
TimeTicks TimeTicks::Now() {
// Make sure we never return 0 here.
TimeTicks ticks(tick_clock.Pointer()->Now());
@@ -585,7 +585,7 @@ TimeTicks TimeTicks::Now() {
return ticks;
}
-
+// static
TimeTicks TimeTicks::HighResolutionNow() {
// Make sure we never return 0 here.
TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index ed1751268f..25dee1c419 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -280,7 +280,7 @@ class TimeBase {
class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
public:
- // Contains the NULL time. Use Time::Now() to get the current time.
+ // Contains the nullptr time. Use Time::Now() to get the current time.
Time() : TimeBase(0) {}
// Returns the current time. Watch out, the system might adjust its clock
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 1b6d39397e..28ff780dd3 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -33,7 +33,7 @@ int SysInfo::NumberOfProcessors() {
int mib[2] = {CTL_HW, HW_NCPU};
int ncpu = 0;
size_t len = sizeof(ncpu);
- if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
+ if (sysctl(mib, arraysize(mib), &ncpu, &len, nullptr, 0) != 0) {
return 1;
}
return ncpu;
@@ -57,15 +57,15 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
int mib[2] = {CTL_HW, HW_MEMSIZE};
int64_t memsize = 0;
size_t len = sizeof(memsize);
- if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
+ if (sysctl(mib, arraysize(mib), &memsize, &len, nullptr, 0) != 0) {
return 0;
}
return memsize;
#elif V8_OS_FREEBSD
int pages, page_size;
size_t size = sizeof(pages);
- sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
- sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, nullptr, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, nullptr, 0);
if (pages == -1 || page_size == -1) {
return 0;
}
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index a7bb290929..18850695cb 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -22,8 +22,9 @@ struct make_array_helper;
template <class Function, std::size_t... Indexes>
struct make_array_helper<Function, 0, Indexes...> {
- constexpr static auto make_array(Function f)
- -> std::array<decltype(f(std::size_t{0})), sizeof...(Indexes) + 1> {
+ constexpr static std::array<typename std::result_of<Function(size_t)>::type,
+ sizeof...(Indexes) + 1>
+ make_array(Function f) {
return {{f(0), f(Indexes)...}};
}
};
@@ -41,8 +42,8 @@ struct make_array_helper<Function, FirstIndex, Indexes...>
// [](std::size_t i) { return static_cast<int>(2 * i); });
// The resulting array will be constexpr if the passed function is constexpr.
template <std::size_t Size, class Function>
-constexpr auto make_array(Function f)
- -> std::array<decltype(f(std::size_t{0})), Size> {
+constexpr std::array<typename std::result_of<Function(size_t)>::type, Size>
+make_array(Function f) {
static_assert(Size > 0, "Can only create non-empty arrays");
return detail::make_array_helper<Function, Size - 1>::make_array(f);
}
@@ -93,6 +94,40 @@ struct has_output_operator {
static constexpr bool value = sizeof(__check_operator(ptr_t{nullptr})) == 1;
};
+namespace detail {
+
+template <typename Func, typename T, typename... Ts>
+struct fold_helper {
+ static_assert(sizeof...(Ts) == 0, "this is the base case");
+ using result_t = typename std::remove_reference<T>::type;
+ static constexpr T&& fold(Func func, T&& first) {
+ return std::forward<T>(first);
+ }
+};
+
+template <typename Func, typename T1, typename T2, typename... Ts>
+struct fold_helper<Func, T1, T2, Ts...> {
+ using folded_t = typename std::result_of<Func(T1, T2)>::type;
+ using next_fold_helper = fold_helper<Func, folded_t&&, Ts...>;
+ using result_t = typename next_fold_helper::result_t;
+ static constexpr result_t fold(Func func, T1&& first, T2&& second,
+ Ts&&... more) {
+ return next_fold_helper::fold(
+ func, func(std::forward<T1>(first), std::forward<T2>(second)),
+ std::forward<Ts>(more)...);
+ }
+};
+
+} // namespace detail
+
+// Fold all arguments from left to right with a given function.
+template <typename Func, typename... Ts>
+constexpr auto fold(Func func, Ts&&... more) ->
+ typename detail::fold_helper<Func, Ts...>::result_t {
+ return detail::fold_helper<Func, Ts...>::fold(func,
+ std::forward<Ts>(more)...);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 842b36a1a0..86c3694feb 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -7,6 +7,7 @@
#include <stdio.h>
#include <stdlib.h>
+#include <algorithm>
#include <new>
#include "src/base/bits.h"
@@ -18,8 +19,7 @@ namespace v8 {
namespace base {
static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-static RandomNumberGenerator::EntropySource entropy_source = NULL;
-
+static RandomNumberGenerator::EntropySource entropy_source = nullptr;
// static
void RandomNumberGenerator::SetEntropySource(EntropySource source) {
@@ -31,7 +31,7 @@ void RandomNumberGenerator::SetEntropySource(EntropySource source) {
RandomNumberGenerator::RandomNumberGenerator() {
// Check if embedder supplied an entropy source.
{ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
- if (entropy_source != NULL) {
+ if (entropy_source != nullptr) {
int64_t seed;
if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
sizeof(seed))) {
@@ -53,7 +53,7 @@ RandomNumberGenerator::RandomNumberGenerator() {
#else
// Gather entropy from /dev/urandom if available.
FILE* fp = fopen("/dev/urandom", "rb");
- if (fp != NULL) {
+ if (fp != nullptr) {
int64_t seed;
size_t n = fread(&seed, sizeof(seed), 1, fp);
fclose(fp);
@@ -115,6 +115,85 @@ void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
}
}
+static std::vector<uint64_t> ComplementSample(
+ const std::unordered_set<uint64_t>& set, uint64_t max) {
+ std::vector<uint64_t> result;
+ result.reserve(max - set.size());
+ for (uint64_t i = 0; i < max; i++) {
+ if (!set.count(i)) {
+ result.push_back(i);
+ }
+ }
+ return result;
+}
+
+std::vector<uint64_t> RandomNumberGenerator::NextSample(uint64_t max,
+ size_t n) {
+ CHECK_LE(n, max);
+
+ if (n == 0) {
+ return std::vector<uint64_t>();
+ }
+
+ // Choose to select or exclude, whatever needs fewer generator calls.
+ size_t smaller_part = static_cast<size_t>(
+ std::min(max - static_cast<uint64_t>(n), static_cast<uint64_t>(n)));
+ std::unordered_set<uint64_t> selected;
+
+ size_t counter = 0;
+ while (selected.size() != smaller_part && counter / 3 < smaller_part) {
+ uint64_t x = static_cast<uint64_t>(NextDouble() * max);
+ CHECK_LT(x, max);
+
+ selected.insert(x);
+ counter++;
+ }
+
+ if (selected.size() == smaller_part) {
+ if (smaller_part != n) {
+ return ComplementSample(selected, max);
+ }
+ return std::vector<uint64_t>(selected.begin(), selected.end());
+ }
+
+ // Failed to select numbers in smaller_part * 3 steps, try different approach.
+ return NextSampleSlow(max, n, selected);
+}
+
+std::vector<uint64_t> RandomNumberGenerator::NextSampleSlow(
+ uint64_t max, size_t n, const std::unordered_set<uint64_t>& excluded) {
+ CHECK_GE(max - excluded.size(), n);
+
+ std::vector<uint64_t> result;
+ result.reserve(max - excluded.size());
+
+ for (uint64_t i = 0; i < max; i++) {
+ if (!excluded.count(i)) {
+ result.push_back(i);
+ }
+ }
+
+ // Decrease result vector until it contains values to select or exclude,
+ // whatever needs fewer generator calls.
+ size_t larger_part = static_cast<size_t>(
+ std::max(max - static_cast<uint64_t>(n), static_cast<uint64_t>(n)));
+
+  // The excluded set may cause the initial result to already be smaller than
+  // larger_part.
+ while (result.size() != larger_part && result.size() > n) {
+ size_t x = static_cast<size_t>(NextDouble() * result.size());
+ CHECK_LT(x, result.size());
+
+ std::swap(result[x], result.back());
+ result.pop_back();
+ }
+
+ if (result.size() != n) {
+ return ComplementSample(
+ std::unordered_set<uint64_t>(result.begin(), result.end()), max);
+ }
+ return result;
+}
int RandomNumberGenerator::Next(int bits) {
DCHECK_LT(0, bits);
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 7a322b5332..285c5972e0 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -5,6 +5,9 @@
#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#include <unordered_set>
+#include <vector>
+
#include "src/base/base-export.h"
#include "src/base/macros.h"
@@ -85,6 +88,23 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// Fills the elements of a specified array of bytes with random numbers.
void NextBytes(void* buffer, size_t buflen);
+  // Returns the next pseudorandom set of n unique uint64 values smaller than
+  // max.
+  // n must be less than or equal to max.
+ std::vector<uint64_t> NextSample(uint64_t max, size_t n) WARN_UNUSED_RESULT;
+
+  // Returns the next pseudorandom set of n unique uint64 values smaller than
+  // max.
+  // n must be less than or equal to max.
+  // n must be less than or equal to max - |excluded|.
+ //
+ // Generates list of all possible values and removes random values from it
+ // until size reaches n.
+ std::vector<uint64_t> NextSampleSlow(
+ uint64_t max, size_t n,
+ const std::unordered_set<uint64_t>& excluded =
+ std::unordered_set<uint64_t>{}) WARN_UNUSED_RESULT;
+
// Override the current ssed.
void SetSeed(int64_t seed);
diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc
index 78ee7aa3e5..ccfe690a3a 100644
--- a/deps/v8/src/bignum-dtoa.cc
+++ b/deps/v8/src/bignum-dtoa.cc
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
static int NormalizedExponent(uint64_t significand, int exponent) {
- DCHECK(significand != 0);
+ DCHECK_NE(significand, 0);
while ((significand & Double::kHiddenBit) == 0) {
significand = significand << 1;
exponent = exponent - 1;
@@ -66,7 +66,7 @@ static void GenerateCountedDigits(int count, int* decimal_point,
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* decimal_point) {
- DCHECK(v > 0);
+ DCHECK_GT(v, 0);
DCHECK(!Double(v).IsSpecial());
uint64_t significand = Double(v).Significand();
bool is_even = (significand & 1) == 0;
@@ -97,7 +97,7 @@ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
// 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
// The maximum double is 1.7976931348623157e308 which needs fewer than
// 308*4 binary digits.
- DCHECK(Bignum::kMaxSignificantBits >= 324*4);
+ DCHECK_GE(Bignum::kMaxSignificantBits, 324 * 4);
bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST);
InitialScaledStartValues(v, estimated_power, need_boundary_deltas,
&numerator, &denominator,
@@ -157,7 +157,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
while (true) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
- DCHECK(digit <= 9); // digit is a uint16_t and therefore always positive.
+ DCHECK_LE(digit, 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[(*length)++] = digit + '0';
@@ -203,7 +203,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
// loop would have stopped earlier.
// We still have an assert here in case the preconditions were not
// satisfied.
- DCHECK(buffer[(*length) - 1] != '9');
+ DCHECK_NE(buffer[(*length) - 1], '9');
buffer[(*length) - 1]++;
} else {
// Halfway case.
@@ -214,7 +214,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
if ((buffer[(*length) - 1] - '0') % 2 == 0) {
// Round down => Do nothing.
} else {
- DCHECK(buffer[(*length) - 1] != '9');
+ DCHECK_NE(buffer[(*length) - 1], '9');
buffer[(*length) - 1]++;
}
}
@@ -228,7 +228,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
// stopped the loop earlier.
// We still have an DCHECK here, in case the preconditions were not
// satisfied.
- DCHECK(buffer[(*length) -1] != '9');
+ DCHECK_NE(buffer[(*length) - 1], '9');
buffer[(*length) - 1]++;
return;
}
@@ -245,11 +245,11 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length) {
- DCHECK(count >= 0);
+ DCHECK_GE(count, 0);
for (int i = 0; i < count - 1; ++i) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
- DCHECK(digit <= 9); // digit is a uint16_t and therefore always positive.
+ DCHECK_LE(digit, 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[i] = digit + '0';
@@ -381,7 +381,7 @@ static void InitialScaledStartValuesPositiveExponent(
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// A positive exponent implies a positive power.
- DCHECK(estimated_power >= 0);
+ DCHECK_GE(estimated_power, 0);
// Since the estimated_power is positive we simply multiply the denominator
// by 10^estimated_power.
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 9a4af3f497..087ec45323 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -24,7 +24,7 @@ static int BitSize(S value) {
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
- DCHECK(kBigitSize >= BitSize(value));
+ DCHECK_GE(kBigitSize, BitSize(value));
Zero();
if (value == 0) return;
@@ -169,7 +169,7 @@ void Bignum::AddBignum(const Bignum& other) {
EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
Chunk carry = 0;
int bigit_pos = other.exponent_ - exponent_;
- DCHECK(bigit_pos >= 0);
+ DCHECK_GE(bigit_pos, 0);
for (int i = 0; i < other.used_digits_; ++i) {
Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
bigits_[bigit_pos] = sum & kBigitMask;
@@ -234,7 +234,7 @@ void Bignum::MultiplyByUInt32(uint32_t factor) {
// The product of a bigit with the factor is of size kBigitSize + 32.
// Assert that this number + 1 (for the carry) fits into double chunk.
- DCHECK(kDoubleChunkSize >= kBigitSize + 32 + 1);
+ DCHECK_GE(kDoubleChunkSize, kBigitSize + 32 + 1);
DoubleChunk carry = 0;
for (int i = 0; i < used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
@@ -256,7 +256,7 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
Zero();
return;
}
- DCHECK(kBigitSize < 32);
+ DCHECK_LT(kBigitSize, 32);
uint64_t carry = 0;
uint64_t low = factor & 0xFFFFFFFF;
uint64_t high = factor >> 32;
@@ -296,7 +296,7 @@ void Bignum::MultiplyByPowerOfTen(int exponent) {
{ kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
- DCHECK(exponent >= 0);
+ DCHECK_GE(exponent, 0);
if (exponent == 0) return;
if (used_digits_ == 0) return;
@@ -380,7 +380,7 @@ void Bignum::Square() {
}
// Since the result was guaranteed to lie inside the number the
// accumulator must be 0 now.
- DCHECK(accumulator == 0);
+ DCHECK_EQ(accumulator, 0);
// Don't forget to update the used_digits and the exponent.
used_digits_ = product_length;
@@ -390,8 +390,8 @@ void Bignum::Square() {
void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
- DCHECK(base != 0);
- DCHECK(power_exponent >= 0);
+ DCHECK_NE(base, 0);
+ DCHECK_GE(power_exponent, 0);
if (power_exponent == 0) {
AssignUInt16(1);
return;
@@ -466,7 +466,7 @@ void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
DCHECK(IsClamped());
DCHECK(other.IsClamped());
- DCHECK(other.used_digits_ > 0);
+ DCHECK_GT(other.used_digits_, 0);
// Easy case: if we have less digits than the divisor than the result is 0.
// Note: this handles the case where this == 0, too.
@@ -528,7 +528,7 @@ uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
template<typename S>
static int SizeInHexChars(S number) {
- DCHECK(number > 0);
+ DCHECK_GT(number, 0);
int result = 0;
while (number != 0) {
number >>= 4;
@@ -541,7 +541,7 @@ static int SizeInHexChars(S number) {
bool Bignum::ToHexString(char* buffer, int buffer_size) const {
DCHECK(IsClamped());
// Each bigit must be printable as separate hex-character.
- DCHECK(kBigitSize % 4 == 0);
+ DCHECK_EQ(kBigitSize % 4, 0);
const int kHexCharsPerBigit = kBigitSize / 4;
if (used_digits_ == 0) {
@@ -683,15 +683,15 @@ void Bignum::Align(const Bignum& other) {
}
used_digits_ += zero_digits;
exponent_ -= zero_digits;
- DCHECK(used_digits_ >= 0);
- DCHECK(exponent_ >= 0);
+ DCHECK_GE(used_digits_, 0);
+ DCHECK_GE(exponent_, 0);
}
}
void Bignum::BigitsShiftLeft(int shift_amount) {
- DCHECK(shift_amount < kBigitSize);
- DCHECK(shift_amount >= 0);
+ DCHECK_LT(shift_amount, kBigitSize);
+ DCHECK_GE(shift_amount, 0);
Chunk carry = 0;
for (int i = 0; i < used_digits_; ++i) {
Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index fe7d63fa95..35e65e1053 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -19,6 +19,7 @@
#include "src/extensions/trigger-failure-extension.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
+#include "src/objects/js-regexp.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-js.h"
@@ -31,7 +32,7 @@ namespace v8 {
namespace internal {
void SourceCodeCache::Initialize(Isolate* isolate, bool create_heap_objects) {
- cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
+ cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : nullptr;
}
bool SourceCodeCache::Lookup(Vector<const char> name,
@@ -86,17 +87,17 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
static const char* GCFunctionName() {
- bool flag_given = FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0;
+ bool flag_given =
+ FLAG_expose_gc_as != nullptr && strlen(FLAG_expose_gc_as) != 0;
return flag_given ? FLAG_expose_gc_as : "gc";
}
-
-v8::Extension* Bootstrapper::free_buffer_extension_ = NULL;
-v8::Extension* Bootstrapper::gc_extension_ = NULL;
-v8::Extension* Bootstrapper::externalize_string_extension_ = NULL;
-v8::Extension* Bootstrapper::statistics_extension_ = NULL;
-v8::Extension* Bootstrapper::trigger_failure_extension_ = NULL;
-v8::Extension* Bootstrapper::ignition_statistics_extension_ = NULL;
+v8::Extension* Bootstrapper::free_buffer_extension_ = nullptr;
+v8::Extension* Bootstrapper::gc_extension_ = nullptr;
+v8::Extension* Bootstrapper::externalize_string_extension_ = nullptr;
+v8::Extension* Bootstrapper::statistics_extension_ = nullptr;
+v8::Extension* Bootstrapper::trigger_failure_extension_ = nullptr;
+v8::Extension* Bootstrapper::ignition_statistics_extension_ = nullptr;
void Bootstrapper::InitializeOncePerProcess() {
free_buffer_extension_ = new FreeBufferExtension;
@@ -116,17 +117,17 @@ void Bootstrapper::InitializeOncePerProcess() {
void Bootstrapper::TearDownExtensions() {
delete free_buffer_extension_;
- free_buffer_extension_ = NULL;
+ free_buffer_extension_ = nullptr;
delete gc_extension_;
- gc_extension_ = NULL;
+ gc_extension_ = nullptr;
delete externalize_string_extension_;
- externalize_string_extension_ = NULL;
+ externalize_string_extension_ = nullptr;
delete statistics_extension_;
- statistics_extension_ = NULL;
+ statistics_extension_ = nullptr;
delete trigger_failure_extension_;
- trigger_failure_extension_ = NULL;
+ trigger_failure_extension_ = nullptr;
delete ignition_statistics_extension_;
- ignition_statistics_extension_ = NULL;
+ ignition_statistics_extension_ = nullptr;
}
void Bootstrapper::TearDown() {
@@ -222,8 +223,8 @@ class Genesis BASE_EMBEDDED {
ElementsKind elements_kind);
bool InstallNatives(GlobalContextType context_type);
- void InstallTypedArray(const char* name, ElementsKind elements_kind,
- Handle<JSFunction>* fun);
+ Handle<JSFunction> InstallTypedArray(const char* name,
+ ElementsKind elements_kind);
bool InstallExtraNatives();
bool InstallExperimentalExtraNatives();
bool InstallDebuggerNatives();
@@ -306,13 +307,18 @@ Handle<Context> Bootstrapper::CreateEnvironment(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
GlobalContextType context_type) {
HandleScope scope(isolate_);
- Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
- context_snapshot_index, embedder_fields_deserializer,
- context_type);
- Handle<Context> env = genesis.result();
- if (env.is_null() || !InstallExtensions(env, extensions)) {
- return Handle<Context>();
+ Handle<Context> env;
+ {
+ Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
+ context_snapshot_index, embedder_fields_deserializer,
+ context_type);
+ env = genesis.result();
+ if (env.is_null() || !InstallExtensions(env, extensions)) {
+ return Handle<Context>();
+ }
}
+ // Log all maps created during bootstrapping.
+ if (FLAG_trace_maps) LOG(isolate_, LogMaps());
return scope.CloseAndEscape(env);
}
@@ -320,9 +326,14 @@ Handle<JSGlobalProxy> Bootstrapper::NewRemoteContext(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template) {
HandleScope scope(isolate_);
- Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template);
- Handle<JSGlobalProxy> global_proxy = genesis.global_proxy();
- if (global_proxy.is_null()) return Handle<JSGlobalProxy>();
+ Handle<JSGlobalProxy> global_proxy;
+ {
+ Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template);
+ global_proxy = genesis.global_proxy();
+ if (global_proxy.is_null()) return Handle<JSGlobalProxy>();
+ }
+ // Log all maps created during bootstrapping.
+ if (FLAG_trace_maps) LOG(isolate_, LogMaps());
return scope.CloseAndEscape(global_proxy);
}
@@ -345,11 +356,11 @@ namespace {
// Non-construct case.
V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
- Isolate* isolate, Builtins::Name call, Handle<String> name, int len) {
- Handle<Code> code = isolate->builtins()->builtin_handle(call);
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, code, false);
- shared->set_lazy_deserialization_builtin_id(call);
+ Isolate* isolate, Builtins::Name builtin_id, Handle<String> name, int len) {
+ Handle<Code> code = isolate->builtins()->builtin_handle(builtin_id);
+ const bool kNotConstructor = false;
+ Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
+ name, code, kNotConstructor, kNormalFunction, builtin_id);
shared->set_internal_formal_parameter_count(len);
shared->set_length(len);
return shared;
@@ -357,14 +368,14 @@ V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
// Construct case.
V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
- Isolate* isolate, Builtins::Name call, Handle<String> name,
+ Isolate* isolate, Builtins::Name builtin_id, Handle<String> name,
Handle<String> instance_class_name, int len) {
- Handle<Code> code = isolate->builtins()->builtin_handle(call);
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, code, false);
+ Handle<Code> code = isolate->builtins()->builtin_handle(builtin_id);
+ const bool kIsConstructor = true;
+ Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
+ name, code, kIsConstructor, kNormalFunction, builtin_id);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->set_instance_class_name(*instance_class_name);
- if (Builtins::IsLazy(call)) shared->set_lazy_deserialization_builtin_id(call);
shared->set_internal_formal_parameter_count(len);
shared->set_length(len);
return shared;
@@ -390,42 +401,55 @@ V8_NOINLINE void InstallFunction(Handle<JSObject> target,
V8_NOINLINE Handle<JSFunction> CreateFunction(
Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
- MaybeHandle<Object> maybe_prototype, Builtins::Name call) {
- Factory* factory = isolate->factory();
- Handle<Code> call_code(isolate->builtins()->builtin(call));
+ int inobject_properties, MaybeHandle<Object> maybe_prototype,
+ Builtins::Name builtin_id) {
+ Handle<Code> code(isolate->builtins()->builtin(builtin_id));
Handle<Object> prototype;
- Handle<JSFunction> result =
- maybe_prototype.ToHandle(&prototype)
- ? factory->NewFunction(name, call_code, prototype, type,
- instance_size, STRICT, IMMUTABLE)
- : factory->NewFunctionWithoutPrototype(name, call_code, STRICT);
- if (Builtins::IsLazy(call)) {
- result->shared()->set_lazy_deserialization_builtin_id(call);
+ Handle<JSFunction> result;
+
+ if (maybe_prototype.ToHandle(&prototype)) {
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
+ name, code, prototype, type, instance_size, inobject_properties,
+ builtin_id, IMMUTABLE);
+
+ result = isolate->factory()->NewFunction(args);
+ // Make the JSFunction's prototype object fast.
+ JSObject::MakePrototypesFast(handle(result->prototype(), isolate),
+ kStartAtReceiver, isolate);
+ } else {
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
+ name, code, builtin_id, LanguageMode::kStrict);
+ result = isolate->factory()->NewFunction(args);
}
+
+ // Make the resulting JSFunction object fast.
+ JSObject::MakePrototypesFast(result, kStartAtReceiver, isolate);
result->shared()->set_native(true);
return result;
}
V8_NOINLINE Handle<JSFunction> InstallFunction(
Handle<JSObject> target, Handle<Name> name, InstanceType type,
- int instance_size, MaybeHandle<Object> maybe_prototype, Builtins::Name call,
+ int instance_size, int inobject_properties,
+ MaybeHandle<Object> maybe_prototype, Builtins::Name call,
PropertyAttributes attributes) {
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
Handle<JSFunction> function =
CreateFunction(target->GetIsolate(), name_string, type, instance_size,
- maybe_prototype, call);
+ inobject_properties, maybe_prototype, call);
InstallFunction(target, name, function, name_string, attributes);
return function;
}
V8_NOINLINE Handle<JSFunction> InstallFunction(
Handle<JSObject> target, const char* name, InstanceType type,
- int instance_size, MaybeHandle<Object> maybe_prototype,
- Builtins::Name call) {
+ int instance_size, int inobject_properties,
+ MaybeHandle<Object> maybe_prototype, Builtins::Name call) {
Factory* const factory = target->GetIsolate()->factory();
PropertyAttributes attributes = DONT_ENUM;
return InstallFunction(target, factory->InternalizeUtf8String(name), type,
- instance_size, maybe_prototype, call, attributes);
+ instance_size, inobject_properties, maybe_prototype,
+ call, attributes);
}
V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
@@ -433,7 +457,7 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
Builtins::Name call,
int len, bool adapt) {
Handle<JSFunction> fun =
- CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
MaybeHandle<JSObject>(), call);
if (adapt) {
fun->shared()->set_internal_formal_parameter_count(len);
@@ -586,9 +610,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the empty function as the prototype for function according to
// ES#sec-properties-of-the-function-prototype-object
Handle<Code> code(BUILTIN_CODE(isolate, EmptyFunction));
- Handle<JSFunction> empty_function =
- factory->NewFunction(empty_function_map, factory->empty_string(), code);
- empty_function->shared()->set_language_mode(STRICT);
+ NewFunctionArgs args =
+ NewFunctionArgs::ForBuiltin(factory->empty_string(), code,
+ empty_function_map, Builtins::kEmptyFunction);
+ Handle<JSFunction> empty_function = factory->NewFunction(args);
// --- E m p t y ---
Handle<String> source = factory->NewStringFromStaticChars("() {}");
@@ -640,8 +665,9 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
}
Handle<String> name(factory()->empty_string());
Handle<Code> code = BUILTIN_CODE(isolate(), StrictPoisonPillThrower);
- Handle<JSFunction> function =
- factory()->NewFunctionWithoutPrototype(name, code, STRICT);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
+ name, code, Builtins::kStrictPoisonPillThrower, i::LanguageMode::kStrict);
+ Handle<JSFunction> function = factory()->NewFunction(args);
function->shared()->DontAdaptArguments();
// %ThrowTypeError% must not have a name property.
@@ -658,8 +684,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
.Assert();
- if (JSObject::PreventExtensions(function, Object::THROW_ON_ERROR)
- .IsNothing()) {
+ if (JSObject::PreventExtensions(function, kThrowOnError).IsNothing()) {
DCHECK(false);
}
@@ -728,12 +753,13 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
Factory* factory = isolate_->factory();
// --- O b j e c t ---
- int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
- int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
+ int inobject_properties = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
+ int instance_size =
+ JSObject::kHeaderSize + kPointerSize * inobject_properties;
Handle<JSFunction> object_fun = CreateFunction(
isolate_, factory->Object_string(), JS_OBJECT_TYPE, instance_size,
- factory->null_value(), Builtins::kObjectConstructor);
+ inobject_properties, factory->null_value(), Builtins::kObjectConstructor);
object_fun->shared()->set_length(1);
object_fun->shared()->DontAdaptArguments();
object_fun->shared()->SetConstructStub(
@@ -743,8 +769,6 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
{
// Finish setting up Object function's initial map.
Map* initial_map = object_fun->initial_map();
- initial_map->SetInObjectProperties(unused);
- initial_map->set_unused_property_fields(unused);
initial_map->set_elements_kind(HOLEY_ELEMENTS);
}
@@ -789,6 +813,19 @@ Handle<Map> CreateNonConstructorMap(Handle<Map> source_map,
Handle<JSObject> prototype,
const char* reason) {
Handle<Map> map = Map::Copy(source_map, reason);
+ // Ensure the resulting map has prototype slot (it is necessary for storing
+ // inital map even when the prototype property is not required).
+ if (!map->has_prototype_slot()) {
+ // Re-set the unused property fields after changing the instance size.
+ // TODO(ulan): Do not change instance size after map creation.
+ int unused_property_fields = map->UnusedPropertyFields();
+ map->set_instance_size(map->instance_size() + kPointerSize);
+ // The prototype slot shifts the in-object properties area by one slot.
+ map->SetInObjectPropertiesStartInWords(
+ map->GetInObjectPropertiesStartInWords() + 1);
+ map->set_has_prototype_slot(true);
+ map->SetInObjectUnusedPropertyFields(unused_property_fields);
+ }
map->set_is_constructor(false);
Map::SetPrototype(map, prototype);
return map;
@@ -1170,9 +1207,10 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
Handle<Code> code = BUILTIN_CODE(isolate(), Illegal);
Handle<JSObject> prototype =
factory()->NewFunctionPrototype(isolate()->object_function());
- js_global_object_function =
- factory()->NewFunction(name, code, prototype, JS_GLOBAL_OBJECT_TYPE,
- JSGlobalObject::kSize, STRICT);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
+ name, code, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize, 0,
+ Builtins::kIllegal, MUTABLE);
+ js_global_object_function = factory()->NewFunction(args);
#ifdef DEBUG
LookupIterator it(prototype, factory()->constructor_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -1200,9 +1238,11 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
if (global_proxy_template.IsEmpty()) {
Handle<String> name(factory()->empty_string());
Handle<Code> code = BUILTIN_CODE(isolate(), Illegal);
- global_proxy_function =
- factory()->NewFunction(name, code, JS_GLOBAL_PROXY_TYPE,
- JSGlobalProxy::SizeWithEmbedderFields(0));
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
+ name, code, factory()->the_hole_value(), JS_GLOBAL_PROXY_TYPE,
+ JSGlobalProxy::SizeWithEmbedderFields(0), 0, Builtins::kIllegal,
+ MUTABLE);
+ global_proxy_function = factory()->NewFunction(args);
} else {
Handle<ObjectTemplateInfo> data =
v8::Utils::OpenHandle(*global_proxy_template);
@@ -1278,7 +1318,7 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Factory* factory = isolate->factory();
Handle<JSFunction> error_fun = InstallFunction(
- global, name, JS_ERROR_TYPE, JSObject::kHeaderSize,
+ global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
error_fun->shared()->set_instance_class_name(*factory->Error_string());
error_fun->shared()->DontAdaptArguments();
@@ -1315,11 +1355,11 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<JSFunction> global_error = isolate->error_function();
CHECK(JSReceiver::SetPrototype(error_fun, global_error, false,
- Object::THROW_ON_ERROR)
+ kThrowOnError)
.FromMaybe(false));
CHECK(JSReceiver::SetPrototype(prototype,
handle(global_error->prototype(), isolate),
- false, Object::THROW_ON_ERROR)
+ false, kThrowOnError)
.FromMaybe(false));
}
}
@@ -1327,22 +1367,24 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<Map> initial_map(error_fun->initial_map());
Map::EnsureDescriptorSlack(initial_map, 1);
- PropertyAttributes attribs = DONT_ENUM;
- Handle<AccessorInfo> error_stack =
- Accessors::ErrorStackInfo(isolate, attribs);
{
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(error_stack->name())), error_stack, attribs);
+ Handle<AccessorInfo> info = factory->error_stack_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, DONT_ENUM);
initial_map->AppendDescriptor(&d);
}
}
namespace {
-void InstallMakeError(Isolate* isolate, Handle<Code> code, int context_index) {
- Handle<JSFunction> function =
- isolate->factory()->NewFunction(isolate->factory()->empty_string(), code,
- JS_OBJECT_TYPE, JSObject::kHeaderSize);
+void InstallMakeError(Isolate* isolate, int builtin_id, int context_index) {
+ Handle<Code> code(isolate->builtins()->builtin(builtin_id));
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
+ isolate->factory()->empty_string(), code,
+ isolate->factory()->the_hole_value(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, 0, builtin_id, MUTABLE);
+
+ Handle<JSFunction> function = isolate->factory()->NewFunction(args);
function->shared()->DontAdaptArguments();
isolate->native_context()->set(context_index, *function);
}
@@ -1357,7 +1399,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
- native_context()->set_previous(NULL);
+ native_context()->set_previous(nullptr);
// Set extension and global object.
native_context()->set_extension(*global_object);
// Security setup: Set the security token of the native context to the global
@@ -1481,9 +1523,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- F u n c t i o n ---
Handle<JSFunction> prototype = empty_function;
- Handle<JSFunction> function_fun =
- InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- prototype, Builtins::kFunctionConstructor);
+ Handle<JSFunction> function_fun = InstallFunction(
+ global, "Function", JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype, 0,
+ prototype, Builtins::kFunctionConstructor);
// Function instances are sloppy by default.
function_fun->set_prototype_or_initial_map(*isolate->sloppy_function_map());
function_fun->shared()->DontAdaptArguments();
@@ -1585,7 +1627,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- A r r a y ---
Handle<JSFunction> array_function = InstallFunction(
- global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
+ global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
isolate->initial_object_prototype(), Builtins::kArrayConstructor);
array_function->shared()->DontAdaptArguments();
array_function->shared()->set_builtin_function_id(kArrayConstructor);
@@ -1604,12 +1646,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- Handle<AccessorInfo> array_length =
- Accessors::ArrayLengthInfo(isolate, attribs);
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(array_length->name())), array_length,
- attribs);
+ factory->length_string(), factory->array_length_accessor(), attribs);
initial_map->AppendDescriptor(&d);
}
@@ -1644,7 +1683,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(proto, "push", Builtins::kFastArrayPush, 1, false);
SimpleInstallFunction(proto, "shift", Builtins::kFastArrayShift, 0, false);
SimpleInstallFunction(proto, "unshift", Builtins::kArrayUnshift, 1, false);
- SimpleInstallFunction(proto, "slice", Builtins::kArraySlice, 2, false);
+ if (FLAG_enable_experimental_builtins) {
+ SimpleInstallFunction(proto, "slice", Builtins::kFastArraySlice, 2,
+ false);
+ } else {
+ SimpleInstallFunction(proto, "slice", Builtins::kArraySlice, 2, false);
+ }
SimpleInstallFunction(proto, "splice", Builtins::kArraySplice, 2, false);
SimpleInstallFunction(proto, "includes", Builtins::kArrayIncludes, 1,
false);
@@ -1685,7 +1729,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> array_iterator_function = CreateFunction(
isolate, factory->ArrayIterator_string(),
- JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize,
+ JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
array_iterator_prototype, Builtins::kIllegal);
array_iterator_function->shared()->set_native(false);
array_iterator_function->shared()->set_instance_class_name(
@@ -1752,7 +1796,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- N u m b e r ---
Handle<JSFunction> number_fun = InstallFunction(
- global, "Number", JS_VALUE_TYPE, JSValue::kSize,
+ global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
number_fun->shared()->SetConstructStub(
@@ -1867,10 +1911,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- B o o l e a n ---
- Handle<JSFunction> boolean_fun =
- InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kBooleanConstructor);
+ Handle<JSFunction> boolean_fun = InstallFunction(
+ global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
+ isolate->initial_object_prototype(), Builtins::kBooleanConstructor);
boolean_fun->shared()->DontAdaptArguments();
boolean_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, BooleanConstructor_ConstructStub));
@@ -1897,7 +1940,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
- global, "String", JS_VALUE_TYPE, JSValue::kSize,
+ global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
string_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, StringConstructor_ConstructStub));
@@ -1913,12 +1956,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<AccessorInfo> string_length(
- Accessors::StringLengthInfo(isolate, attribs));
{ // Add length.
- Descriptor d = Descriptor::AccessorConstant(factory->length_string(),
- string_length, attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ factory->length_string(), factory->string_length_accessor(), attribs);
string_map->AppendDescriptor(&d);
}
@@ -1930,11 +1971,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(string_fun, "fromCodePoint",
Builtins::kStringFromCodePoint, 1, false);
+ // Install the String.raw function.
+ SimpleInstallFunction(string_fun, "raw", Builtins::kStringRaw, 1, false);
+
// Create the %StringPrototype%
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(string_fun, TENURED));
prototype->set_value(isolate->heap()->empty_string());
JSFunction::SetPrototype(string_fun, prototype);
+ native_context()->set_initial_string_prototype(*prototype);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory->constructor_string(), string_fun,
@@ -1977,6 +2022,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
true);
SimpleInstallFunction(prototype, "localeCompare",
Builtins::kStringPrototypeLocaleCompare, 1, true);
+ SimpleInstallFunction(prototype, "match", Builtins::kStringPrototypeMatch,
+ 1, true);
#ifdef V8_INTL_SUPPORT
SimpleInstallFunction(prototype, "normalize",
Builtins::kStringPrototypeNormalizeIntl, 0, false);
@@ -1984,10 +2031,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "normalize",
Builtins::kStringPrototypeNormalize, 0, false);
#endif // V8_INTL_SUPPORT
+ SimpleInstallFunction(prototype, "padEnd", Builtins::kStringPrototypePadEnd,
+ 1, false);
+ SimpleInstallFunction(prototype, "padStart",
+ Builtins::kStringPrototypePadStart, 1, false);
SimpleInstallFunction(prototype, "repeat", Builtins::kStringPrototypeRepeat,
1, true);
SimpleInstallFunction(prototype, "replace",
Builtins::kStringPrototypeReplace, 2, true);
+ SimpleInstallFunction(prototype, "search", Builtins::kStringPrototypeSearch,
+ 1, true);
SimpleInstallFunction(prototype, "slice", Builtins::kStringPrototypeSlice,
2, false);
SimpleInstallFunction(prototype, "small", Builtins::kStringPrototypeSmall,
@@ -2059,7 +2112,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> string_iterator_function = CreateFunction(
isolate, factory->NewStringFromAsciiChecked("StringIterator"),
- JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize,
+ JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize, 0,
string_iterator_prototype, Builtins::kIllegal);
string_iterator_function->shared()->set_native(false);
native_context()->set_string_iterator_map(
@@ -2068,7 +2121,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- S y m b o l ---
Handle<JSFunction> symbol_fun = InstallFunction(
- global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+ global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
symbol_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, SymbolConstructor_ConstructStub));
@@ -2119,7 +2172,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the @@toPrimitive function.
Handle<JSFunction> to_primitive = InstallFunction(
prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ JSObject::kHeaderSize, 0, MaybeHandle<JSObject>(),
Builtins::kSymbolPrototypeToPrimitive,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -2132,7 +2185,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- D a t e ---
Handle<JSFunction> date_fun =
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
+ InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, 0,
factory->the_hole_value(), Builtins::kDateConstructor);
InstallWithIntrinsicDefaultProto(isolate, date_fun,
Context::DATE_FUNCTION_INDEX);
@@ -2249,7 +2302,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the @@toPrimitive function.
Handle<JSFunction> to_primitive = InstallFunction(
prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ JSObject::kHeaderSize, 0, MaybeHandle<JSObject>(),
Builtins::kDatePrototypeToPrimitive,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -2276,7 +2329,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- P r o m i s e
Handle<JSFunction> promise_fun = InstallFunction(
global, "Promise", JS_PROMISE_TYPE, JSPromise::kSizeWithEmbedderFields,
- factory->the_hole_value(), Builtins::kPromiseConstructor);
+ 0, factory->the_hole_value(), Builtins::kPromiseConstructor);
InstallWithIntrinsicDefaultProto(isolate, promise_fun,
Context::PROMISE_FUNCTION_INDEX);
@@ -2408,8 +2461,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun = InstallFunction(
- global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- factory->the_hole_value(), Builtins::kRegExpConstructor);
+ global, "RegExp", JS_REGEXP_TYPE,
+ JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize,
+ JSRegExp::kInObjectFieldCount, factory->the_hole_value(),
+ Builtins::kRegExpConstructor);
InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
@@ -2431,6 +2486,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_regexp_exec_function(*fun);
}
+ SimpleInstallGetter(prototype, factory->dotAll_string(),
+ Builtins::kRegExpPrototypeDotAllGetter, true);
SimpleInstallGetter(prototype, factory->flags_string(),
Builtins::kRegExpPrototypeFlagsGetter, true);
SimpleInstallGetter(prototype, factory->global_string(),
@@ -2545,7 +2602,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
- DCHECK_EQ(0, initial_map->GetInObjectProperties());
+ DCHECK_EQ(1, initial_map->GetInObjectProperties());
Map::EnsureDescriptorSlack(initial_map, 1);
@@ -2557,12 +2614,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
writable, Representation::Tagged());
initial_map->AppendDescriptor(&d);
- static const int num_fields = JSRegExp::kInObjectFieldCount;
- initial_map->SetInObjectProperties(num_fields);
- initial_map->set_unused_property_fields(0);
- initial_map->set_instance_size(initial_map->instance_size() +
- num_fields * kPointerSize);
-
{ // Internal: RegExpInternalMatch
Handle<JSFunction> function =
SimpleCreateFunction(isolate, isolate->factory()->empty_string(),
@@ -2590,8 +2641,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- E r r o r
InstallError(isolate, global, factory->Error_string(),
Context::ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, BUILTIN_CODE(isolate, MakeError),
- Context::MAKE_ERROR_INDEX);
+ InstallMakeError(isolate, Builtins::kMakeError, Context::MAKE_ERROR_INDEX);
}
{ // -- E v a l E r r o r
@@ -2602,7 +2652,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R a n g e E r r o r
InstallError(isolate, global, factory->RangeError_string(),
Context::RANGE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, BUILTIN_CODE(isolate, MakeRangeError),
+ InstallMakeError(isolate, Builtins::kMakeRangeError,
Context::MAKE_RANGE_ERROR_INDEX);
}
@@ -2614,21 +2664,21 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- S y n t a x E r r o r
InstallError(isolate, global, factory->SyntaxError_string(),
Context::SYNTAX_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, BUILTIN_CODE(isolate, MakeSyntaxError),
+ InstallMakeError(isolate, Builtins::kMakeSyntaxError,
Context::MAKE_SYNTAX_ERROR_INDEX);
}
{ // -- T y p e E r r o r
InstallError(isolate, global, factory->TypeError_string(),
Context::TYPE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, BUILTIN_CODE(isolate, MakeTypeError),
+ InstallMakeError(isolate, Builtins::kMakeTypeError,
Context::MAKE_TYPE_ERROR_INDEX);
}
{ // -- U R I E r r o r
InstallError(isolate, global, factory->URIError_string(),
Context::URI_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, BUILTIN_CODE(isolate, MakeURIError),
+ InstallMakeError(isolate, Builtins::kMakeURIError,
Context::MAKE_URI_ERROR_INDEX);
}
@@ -2733,10 +2783,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- C o n s o l e
Handle<String> name = factory->InternalizeUtf8String("console");
- Handle<JSFunction> cons = factory->NewFunction(
- isolate->strict_function_map(), name, MaybeHandle<Code>());
+ NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate->strict_function_map(), LanguageMode::kStrict);
+ Handle<JSFunction> cons = factory->NewFunction(args);
+
Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
JSFunction::SetPrototype(cons, empty);
+
Handle<JSObject> console = factory->NewJSObject(cons, TENURED);
DCHECK(console->IsJSObject());
JSObject::AddProperty(global, name, console, DONT_ENUM);
@@ -2803,7 +2856,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> date_time_format_constructor = InstallFunction(
- intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
+ intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize, 0,
factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_date_time_format_function(
*date_time_format_constructor);
@@ -2819,7 +2872,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> number_format_constructor = InstallFunction(
- intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
+ intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize, 0,
factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_number_format_function(
*number_format_constructor);
@@ -2835,7 +2888,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> collator_constructor =
- InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize,
+ InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize, 0,
factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_collator_function(*collator_constructor);
@@ -2850,7 +2903,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
- intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize,
+ intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize, 0,
factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_v8_break_iterator_function(
*v8_break_iterator_constructor);
@@ -2929,7 +2982,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- T y p e d A r r a y
Handle<JSFunction> typed_array_fun =
CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
- JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, 0,
factory->the_hole_value(), Builtins::kIllegal);
typed_array_fun->shared()->set_native(false);
InstallSpeciesGetter(typed_array_fun);
@@ -2978,6 +3031,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeEvery, 1, false);
SimpleInstallFunction(prototype, "fill",
Builtins::kTypedArrayPrototypeFill, 1, false);
+ SimpleInstallFunction(prototype, "find", Builtins::kTypedArrayPrototypeFind,
+ 1, false);
+ SimpleInstallFunction(prototype, "findIndex",
+ Builtins::kTypedArrayPrototypeFindIndex, 1, false);
SimpleInstallFunction(prototype, "forEach",
Builtins::kTypedArrayPrototypeForEach, 1, false);
SimpleInstallFunction(prototype, "includes",
@@ -3005,8 +3062,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- T y p e d A r r a y s
#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
- Handle<JSFunction> fun; \
- InstallTypedArray(#Type "Array", TYPE##_ELEMENTS, &fun); \
+ Handle<JSFunction> fun = \
+ InstallTypedArray(#Type "Array", TYPE##_ELEMENTS); \
InstallWithIntrinsicDefaultProto(isolate, fun, \
Context::TYPE##_ARRAY_FUN_INDEX); \
}
@@ -3040,7 +3097,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- D a t a V i e w
Handle<JSFunction> data_view_fun = InstallFunction(
global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSizeWithEmbedderFields, factory->the_hole_value(),
+ JSDataView::kSizeWithEmbedderFields, 0, factory->the_hole_value(),
Builtins::kDataViewConstructor);
InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
@@ -3118,7 +3175,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
Handle<JSFunction> js_map_fun =
- InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+ InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize, 0,
factory->the_hole_value(), Builtins::kMapConstructor);
InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
Context::JS_MAP_FUN_INDEX);
@@ -3174,7 +3231,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- S e t
Handle<JSFunction> js_set_fun =
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+ InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize, 0,
factory->the_hole_value(), Builtins::kSetConstructor);
InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
Context::JS_SET_FUN_INDEX);
@@ -3224,8 +3281,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- J S M o d u l e N a m e s p a c e
- Handle<Map> map =
- factory->NewMap(JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize);
+ Handle<Map> map = factory->NewMap(
+ JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, JSModuleNamespace::kInObjectFieldCount);
Map::SetPrototype(map, isolate->factory()->null_value());
Map::EnsureDescriptorSlack(map, 1);
native_context()->set_js_module_namespace_map(*map);
@@ -3239,13 +3297,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
attribs, Representation::Tagged());
map->AppendDescriptor(&d);
}
-
- map->SetInObjectProperties(JSModuleNamespace::kInObjectFieldCount);
}
{ // -- I t e r a t o r R e s u l t
- Handle<Map> map =
- factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize);
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 2);
Map::SetPrototype(map, isolate->initial_object_prototype());
Map::EnsureDescriptorSlack(map, 2);
@@ -3264,21 +3320,31 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
map->SetConstructor(native_context()->object_function());
- map->SetInObjectProperties(2);
native_context()->set_iterator_result_map(*map);
}
{ // -- W e a k M a p
- Handle<JSFunction> cons =
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- factory->the_hole_value(), Builtins::kIllegal);
+ Handle<JSFunction> cons = InstallFunction(
+ global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize, 0,
+ factory->the_hole_value(), Builtins::kWeakMapConstructor);
InstallWithIntrinsicDefaultProto(isolate, cons,
Context::JS_WEAK_MAP_FUN_INDEX);
+
+ Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
+ shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
+ shared->set_instance_class_name(isolate->heap()->WeakMap_string());
+ shared->DontAdaptArguments();
+ shared->set_length(0);
+
// Setup %WeakMapPrototype%.
Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()));
+ SimpleInstallFunction(prototype, "delete",
+ Builtins::kWeakMapPrototypeDelete, 1, true);
SimpleInstallFunction(prototype, "get", Builtins::kWeakMapGet, 1, true);
SimpleInstallFunction(prototype, "has", Builtins::kWeakMapHas, 1, true);
+ SimpleInstallFunction(prototype, "set", Builtins::kWeakMapPrototypeSet, 2,
+ true);
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
@@ -3287,15 +3353,26 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- W e a k S e t
- Handle<JSFunction> cons =
- InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
- factory->the_hole_value(), Builtins::kIllegal);
+ Handle<JSFunction> cons = InstallFunction(
+ global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize, 0,
+ factory->the_hole_value(), Builtins::kWeakSetConstructor);
InstallWithIntrinsicDefaultProto(isolate, cons,
Context::JS_WEAK_SET_FUN_INDEX);
+
+ Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
+ shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
+ shared->set_instance_class_name(isolate->heap()->WeakSet_string());
+ shared->DontAdaptArguments();
+ shared->set_length(0);
+
// Setup %WeakSetPrototype%.
Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()));
+ SimpleInstallFunction(prototype, "delete",
+ Builtins::kWeakSetPrototypeDelete, 1, true);
SimpleInstallFunction(prototype, "has", Builtins::kWeakSetHas, 1, true);
+ SimpleInstallFunction(prototype, "add", Builtins::kWeakSetPrototypeAdd, 1,
+ true);
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
@@ -3306,14 +3383,27 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- P r o x y
CreateJSProxyMaps();
+ // Proxy function map has prototype slot for storing initial map but does
+ // not have a prototype property.
Handle<Map> proxy_function_map =
Map::Copy(isolate->strict_function_without_prototype_map(), "Proxy");
+ // Re-set the unused property fields after changing the instance size.
+ // TODO(ulan): Do not change instance size after map creation.
+ int unused_property_fields = proxy_function_map->UnusedPropertyFields();
+ proxy_function_map->set_instance_size(JSFunction::kSizeWithPrototype);
+ // The prototype slot shifts the in-object properties area by one slot.
+ proxy_function_map->SetInObjectPropertiesStartInWords(
+ proxy_function_map->GetInObjectPropertiesStartInWords() + 1);
+ proxy_function_map->set_has_prototype_slot(true);
proxy_function_map->set_is_constructor(true);
+ proxy_function_map->SetInObjectUnusedPropertyFields(unused_property_fields);
Handle<String> name = factory->Proxy_string();
Handle<Code> code(BUILTIN_CODE(isolate, ProxyConstructor));
- Handle<JSFunction> proxy_function =
- factory->NewFunction(proxy_function_map, name, code);
+
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltin(
+ name, code, proxy_function_map, Builtins::kProxyConstructor);
+ Handle<JSFunction> proxy_function = factory->NewFunction(args);
JSFunction::SetInitialMap(proxy_function, isolate->proxy_map(),
factory->null_value());
@@ -3374,7 +3464,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- B o u n d F u n c t i o n
Handle<Map> map =
- factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize);
+ factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 0);
+ map->SetConstructor(native_context()->object_function());
map->set_is_callable();
Map::SetPrototype(map, empty_function);
@@ -3382,21 +3474,19 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
Map::EnsureDescriptorSlack(map, 2);
- Handle<AccessorInfo> bound_length =
- Accessors::BoundFunctionLengthInfo(isolate, roc_attribs);
{ // length
- Descriptor d = Descriptor::AccessorConstant(factory->length_string(),
- bound_length, roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ factory->length_string(), factory->bound_function_length_accessor(),
+ roc_attribs);
map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> bound_name =
- Accessors::BoundFunctionNameInfo(isolate, roc_attribs);
+
{ // name
- Descriptor d = Descriptor::AccessorConstant(factory->name_string(),
- bound_name, roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ factory->name_string(), factory->bound_function_name_accessor(),
+ roc_attribs);
map->AppendDescriptor(&d);
}
- map->SetInObjectProperties(0);
native_context()->set_bound_function_without_constructor_map(*map);
map = Map::Copy(map, "IsConstructor");
@@ -3409,13 +3499,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
Handle<String> arguments_string = factory->Arguments_string();
- Handle<Code> code(BUILTIN_CODE(isolate, Illegal));
- Handle<JSFunction> function =
- factory->NewFunctionWithoutPrototype(arguments_string, code, STRICT);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
+ arguments_string, BUILTIN_CODE(isolate, Illegal),
+ isolate->initial_object_prototype(), JS_ARGUMENTS_TYPE,
+ JSSloppyArgumentsObject::kSize, 2, Builtins::kIllegal, MUTABLE);
+ Handle<JSFunction> function = factory->NewFunction(args);
function->shared()->set_instance_class_name(*arguments_string);
+ Handle<Map> map(function->initial_map());
- Handle<Map> map = factory->NewMap(
- JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, PACKED_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -3433,13 +3524,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
// @@iterator method is added later.
- map->SetInObjectProperties(2);
native_context()->set_sloppy_arguments_map(*map);
- DCHECK(!function->has_initial_map());
- JSFunction::SetInitialMap(function, map,
- isolate->initial_object_prototype());
-
DCHECK(!map->is_dictionary_map());
DCHECK(IsObjectElementsKind(map->elements_kind()));
}
@@ -3472,7 +3558,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(
- JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, PACKED_ELEMENTS);
+ JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, PACKED_ELEMENTS, 1);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -3492,7 +3578,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DCHECK_EQ(native_context()->object_function()->prototype(),
*isolate->initial_object_prototype());
Map::SetPrototype(map, isolate->initial_object_prototype());
- map->SetInObjectProperties(1);
// Copy constructor from the sloppy arguments boilerplate.
map->SetConstructor(
@@ -3506,9 +3591,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- context extension
// Create a function for the context extension objects.
- Handle<JSFunction> context_extension_fun = CreateFunction(
- isolate, factory->empty_string(), JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JSObject::kHeaderSize, factory->the_hole_value(), Builtins::kIllegal);
+ Handle<JSFunction> context_extension_fun =
+ CreateFunction(isolate, factory->empty_string(),
+ JS_CONTEXT_EXTENSION_OBJECT_TYPE, JSObject::kHeaderSize,
+ 0, factory->the_hole_value(), Builtins::kIllegal);
Handle<String> name = factory->InternalizeUtf8String("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
@@ -3531,8 +3617,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
} // NOLINT(readability/fn_size)
-void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
- Handle<JSFunction>* fun) {
+Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
+ ElementsKind elements_kind) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
Handle<JSObject> typed_array_prototype =
@@ -3540,21 +3626,29 @@ void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
Handle<JSFunction> typed_array_function =
Handle<JSFunction>(isolate()->typed_array_function());
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSFunction> result = InstallFunction(
global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithEmbedderFields,
- prototype, Builtins::kIllegal);
+ 0, factory()->the_hole_value(), Builtins::kIllegal);
result->initial_map()->set_elements_kind(elements_kind);
- CHECK(JSObject::SetPrototype(result, typed_array_function, false,
- Object::DONT_THROW)
+ CHECK(JSObject::SetPrototype(result, typed_array_function, false, kDontThrow)
.FromJust());
+ Handle<Smi> bytes_per_element(
+ Smi::FromInt(1 << ElementsKindToShiftSize(elements_kind)), isolate());
+
+ InstallConstant(isolate(), result, "BYTES_PER_ELEMENT", bytes_per_element);
+
+ // Setup prototype object.
+ DCHECK(result->prototype()->IsJSObject());
+ Handle<JSObject> prototype(JSObject::cast(result->prototype()), isolate());
+
CHECK(JSObject::SetPrototype(prototype, typed_array_prototype, false,
- Object::DONT_THROW)
+ kDontThrow)
.FromJust());
- *fun = result;
+
+ InstallConstant(isolate(), prototype, "BYTES_PER_ELEMENT", bytes_per_element);
+ return result;
}
@@ -3633,9 +3727,9 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(),
- MaybeHandle<Object>(), context, NULL, NULL,
- ScriptCompiler::kNoCompileOptions, natives_flag,
- MaybeHandle<FixedArray>());
+ MaybeHandle<Object>(), context, nullptr, nullptr,
+ ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
+ natives_flag, MaybeHandle<FixedArray>());
Handle<SharedFunctionInfo> function_info;
if (!maybe_function_info.ToHandle(&function_info)) return false;
@@ -3699,8 +3793,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(),
- MaybeHandle<Object>(), context, extension, NULL,
- ScriptCompiler::kNoCompileOptions, EXTENSION_CODE,
+ MaybeHandle<Object>(), context, extension, nullptr,
+ ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE,
MaybeHandle<FixedArray>());
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(name, function_info);
@@ -3727,7 +3822,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
Factory* factory = isolate->factory();
Handle<JSGlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
- if (period_pos == NULL) {
+ if (period_pos == nullptr) {
return Handle<JSObject>::cast(
Object::GetPropertyOrElement(
global, factory->InternalizeUtf8String(holder_expr))
@@ -3761,7 +3856,7 @@ void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
case FULL_CONTEXT: {
// We still need the utils object after deserialization.
if (isolate()->serializer_enabled()) return;
- if (FLAG_expose_natives_as == NULL) break;
+ if (FLAG_expose_natives_as == nullptr) break;
if (strlen(FLAG_expose_natives_as) == 0) break;
HandleScope scope(isolate());
Handle<String> natives_key =
@@ -3815,8 +3910,9 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
generator_function_prototype, NONE);
Handle<JSFunction> generator_function_function = InstallFunction(
- container, "GeneratorFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
- generator_function_prototype, Builtins::kGeneratorFunctionConstructor);
+ container, "GeneratorFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSizeWithPrototype, 0, generator_function_prototype,
+ Builtins::kGeneratorFunctionConstructor);
generator_function_function->set_prototype_or_initial_map(
native_context->generator_function_map());
generator_function_function->shared()->DontAdaptArguments();
@@ -3843,10 +3939,10 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> async_generator_function_prototype(
iter.GetCurrent<JSObject>());
- Handle<JSFunction> async_generator_function_function =
- InstallFunction(container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
- JSFunction::kSize, async_generator_function_prototype,
- Builtins::kAsyncGeneratorFunctionConstructor);
+ Handle<JSFunction> async_generator_function_function = InstallFunction(
+ container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSizeWithPrototype, 0, async_generator_function_prototype,
+ Builtins::kAsyncGeneratorFunctionConstructor);
async_generator_function_function->set_prototype_or_initial_map(
native_context->async_generator_function_map());
async_generator_function_function->shared()->DontAdaptArguments();
@@ -3890,7 +3986,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Setup SetIterator constructor.
Handle<JSFunction> set_iterator_function =
InstallFunction(container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
- JSSetIterator::kSize, prototype, Builtins::kIllegal);
+ JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
set_iterator_function->shared()->set_native(false);
set_iterator_function->shared()->set_instance_class_name(*name);
@@ -3926,7 +4022,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Setup MapIterator constructor.
Handle<JSFunction> map_iterator_function =
InstallFunction(container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
- JSMapIterator::kSize, prototype, Builtins::kIllegal);
+ JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
map_iterator_function->shared()->set_native(false);
map_iterator_function->shared()->set_instance_class_name(*name);
@@ -3949,7 +4045,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{ // -- S c r i p t
Handle<String> name = factory->InternalizeUtf8String("Script");
Handle<JSFunction> script_fun = InstallFunction(
- container, name, JS_VALUE_TYPE, JSValue::kSize,
+ container, name, JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kUnsupportedThrower, DONT_ENUM);
script_fun->shared()->set_instance_class_name(*name);
native_context->set_script_function(*script_fun);
@@ -3960,116 +4056,96 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<AccessorInfo> script_column =
- Accessors::ScriptColumnOffsetInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_column->name())), script_column,
- attribs);
+ { // column_offset
+ Handle<AccessorInfo> info = factory->script_column_offset_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_id = Accessors::ScriptIdInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_id->name())), script_id, attribs);
+ { // id
+ Handle<AccessorInfo> info = factory->script_id_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
-
- Handle<AccessorInfo> script_name =
- Accessors::ScriptNameInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
+ { // name
+ Handle<AccessorInfo> info = factory->script_name_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_line =
- Accessors::ScriptLineOffsetInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
+ { // line_offset
+ Handle<AccessorInfo> info = factory->script_line_offset_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_source =
- Accessors::ScriptSourceInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_source->name())), script_source,
- attribs);
+ { // source
+ Handle<AccessorInfo> info = factory->script_source_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_type =
- Accessors::ScriptTypeInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
+ { // type
+ Handle<AccessorInfo> info = factory->script_type_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_compilation_type =
- Accessors::ScriptCompilationTypeInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_compilation_type->name())),
- script_compilation_type, attribs);
+ { // compilation_type
+ Handle<AccessorInfo> info = factory->script_compilation_type_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_context_data =
- Accessors::ScriptContextDataInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_context_data->name())),
- script_context_data, attribs);
+ { // context_data
+ Handle<AccessorInfo> info = factory->script_context_data_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_eval_from_script =
- Accessors::ScriptEvalFromScriptInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_eval_from_script->name())),
- script_eval_from_script, attribs);
+ { // eval_from_script
+ Handle<AccessorInfo> info = factory->script_eval_from_script_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_eval_from_script_position =
- Accessors::ScriptEvalFromScriptPositionInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_eval_from_script_position->name())),
- script_eval_from_script_position, attribs);
+ { // eval_from_script_position
+ Handle<AccessorInfo> info =
+ factory->script_eval_from_script_position_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_eval_from_function_name =
- Accessors::ScriptEvalFromFunctionNameInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_eval_from_function_name->name())),
- script_eval_from_function_name, attribs);
+ { // eval_from_function_name
+ Handle<AccessorInfo> info =
+ factory->script_eval_from_function_name_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_source_url =
- Accessors::ScriptSourceUrlInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_source_url->name())),
- script_source_url, attribs);
+ { // source_url
+ Handle<AccessorInfo> info = factory->script_source_url_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_source_mapping_url =
- Accessors::ScriptSourceMappingUrlInfo(isolate, attribs);
- {
- Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(script_source_mapping_url->name())),
- script_source_mapping_url, attribs);
+ { // source_mapping_url
+ Handle<AccessorInfo> info = factory->script_source_mapping_url_accessor();
+ Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
+ info, attribs);
script_map->AppendDescriptor(&d);
}
}
@@ -4080,8 +4156,9 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
Handle<JSFunction> async_function_constructor = InstallFunction(
- container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
- async_function_prototype, Builtins::kAsyncFunctionConstructor);
+ container, "AsyncFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSizeWithPrototype, 0, async_function_prototype,
+ Builtins::kAsyncFunctionConstructor);
async_function_constructor->set_prototype_or_initial_map(
native_context->async_function_map());
async_function_constructor->shared()->DontAdaptArguments();
@@ -4151,7 +4228,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// CallSiteUtils::Construct to create CallSite objects.
Handle<JSFunction> callsite_fun = InstallFunction(
- container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kUnsupportedThrower);
callsite_fun->shared()->DontAdaptArguments();
isolate->native_context()->set_callsite_function(*callsite_fun);
@@ -4205,7 +4282,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
Handle<JSFunction> plural_rules_constructor = InstallFunction(
- container, "PluralRules", JS_OBJECT_TYPE, PluralRules::kSize,
+ container, "PluralRules", JS_OBJECT_TYPE, PluralRules::kSize, 0,
plural_rules_prototype, Builtins::kIllegal);
JSObject::AddProperty(plural_rules_prototype, factory->constructor_string(),
plural_rules_constructor, DONT_ENUM);
@@ -4219,19 +4296,15 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_template_escapes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strict_legacy_accessor_builtins)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -4340,29 +4413,14 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
}
}
-void Genesis::InitializeGlobal_harmony_regexp_dotall() {
- if (!FLAG_harmony_regexp_dotall) return;
-
- Handle<JSFunction> constructor(native_context()->regexp_function());
- Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
-
- SimpleInstallGetter(prototype, isolate()->factory()->dotAll_string(),
- Builtins::kRegExpPrototypeDotAllGetter, true);
-
- // The regexp prototype map has changed because we added a property
- // to it, so we update the saved map.
- Handle<Map> prototype_map(prototype->map());
- Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
- native_context()->set_regexp_prototype_map(*prototype_map);
-}
-
void Genesis::InitializeGlobal_harmony_bigint() {
if (!FLAG_harmony_bigint) return;
+ Factory* factory = isolate()->factory();
Handle<JSGlobalObject> global(native_context()->global_object());
- Handle<JSFunction> bigint_fun = InstallFunction(
- global, "BigInt", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->factory()->the_hole_value(), Builtins::kBigIntConstructor);
+ Handle<JSFunction> bigint_fun =
+ InstallFunction(global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
+ factory->the_hole_value(), Builtins::kBigIntConstructor);
bigint_fun->shared()->DontAdaptArguments();
bigint_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate(), BigIntConstructor_ConstructStub));
@@ -4398,6 +4456,10 @@ void Genesis::InitializeGlobal_harmony_bigint() {
// valueOf()
SimpleInstallFunction(prototype, "valueOf", Builtins::kBigIntPrototypeValueOf,
0, false);
+ // @@toStringTag
+ JSObject::AddProperty(prototype, factory->to_string_tag_symbol(),
+ factory->BigInt_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
#ifdef V8_INTL_SUPPORT
@@ -4410,7 +4472,7 @@ void Genesis::InitializeGlobal_harmony_number_format_to_parts() {
InstallFunction(number_format_prototype,
SimpleCreateFunction(
isolate(), name,
- Builtins::kNumberFormatPrototypeFormatToParts, 0, false),
+ Builtins::kNumberFormatPrototypeFormatToParts, 1, false),
name);
}
@@ -4444,7 +4506,7 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
// Allocate the constructor with the given {prototype}.
Handle<JSFunction> array_buffer_fun =
CreateFunction(isolate(), name, JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithEmbedderFields, prototype,
+ JSArrayBuffer::kSizeWithEmbedderFields, 0, prototype,
Builtins::kArrayBufferConstructor);
Handle<Code> code =
BUILTIN_CODE(isolate(), ArrayBufferConstructor_ConstructStub);
@@ -4483,7 +4545,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSFunction> array_function =
- InstallFunction(target, name, JS_ARRAY_TYPE, JSArray::kSize, prototype,
+ InstallFunction(target, name, JS_ARRAY_TYPE, JSArray::kSize, 0, prototype,
Builtins::kInternalArrayConstructor);
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
@@ -4502,11 +4564,10 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- Handle<AccessorInfo> array_length =
- Accessors::ArrayLengthInfo(isolate(), attribs);
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
+ factory()->length_string(), factory()->array_length_accessor(),
+ attribs);
initial_map->AppendDescriptor(&d);
}
@@ -4550,7 +4611,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSFunction> opaque_reference_fun =
CreateFunction(isolate(), factory()->empty_string(), JS_VALUE_TYPE,
- JSValue::kSize, prototype, Builtins::kIllegal);
+ JSValue::kSize, 0, prototype, Builtins::kIllegal);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -4579,8 +4640,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_fast_template_instantiations_cache(
*fast_template_instantiations_cache);
- auto slow_template_instantiations_cache = UnseededNumberDictionary::New(
- isolate(), ApiNatives::kInitialFunctionCacheSize);
+ auto slow_template_instantiations_cache =
+ NumberDictionary::New(isolate(), ApiNatives::kInitialFunctionCacheSize);
native_context()->set_slow_template_instantiations_cache(
*slow_template_instantiations_cache);
@@ -4656,7 +4717,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Verification of important array prototype properties.
Object* length = proto->length();
CHECK(length->IsSmi());
- CHECK(Smi::ToInt(length) == 0);
+ CHECK_EQ(Smi::ToInt(length), 0);
CHECK(proto->HasSmiOrObjectElements());
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
@@ -4678,7 +4739,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
// AccessorPropertyDescriptor initial map.
Handle<Map> map =
- factory()->NewMap(JS_OBJECT_TYPE, JSAccessorPropertyDescriptor::kSize);
+ factory()->NewMap(JS_OBJECT_TYPE, JSAccessorPropertyDescriptor::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 4);
// Create the descriptor array for the property descriptor object.
Map::EnsureDescriptorSlack(map, 4);
@@ -4711,8 +4773,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Map::SetPrototype(map, isolate()->initial_object_prototype());
map->SetConstructor(native_context()->object_function());
- map->SetInObjectProperties(4);
- map->set_unused_property_fields(0);
native_context()->set_accessor_property_descriptor_map(*map);
}
@@ -4723,7 +4783,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
// DataPropertyDescriptor initial map.
Handle<Map> map =
- factory()->NewMap(JS_OBJECT_TYPE, JSDataPropertyDescriptor::kSize);
+ factory()->NewMap(JS_OBJECT_TYPE, JSDataPropertyDescriptor::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 4);
// Create the descriptor array for the property descriptor object.
Map::EnsureDescriptorSlack(map, 4);
@@ -4757,8 +4818,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Map::SetPrototype(map, isolate()->initial_object_prototype());
map->SetConstructor(native_context()->object_function());
- map->SetInObjectProperties(4);
- map->set_unused_property_fields(0);
native_context()->set_data_property_descriptor_map(*map);
}
@@ -4774,8 +4833,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
JSObject::cast(array_constructor->instance_prototype()));
// Add initial map.
- Handle<Map> initial_map =
- factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+ Handle<Map> initial_map = factory()->NewMap(
+ JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND, 2);
initial_map->SetConstructor(*array_constructor);
// Set prototype on map.
@@ -4792,7 +4851,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Handle<String> length = factory()->length_string();
int old = array_descriptors->SearchWithCache(
isolate(), *length, array_function->initial_map());
- DCHECK(old != DescriptorArray::kNotFound);
+ DCHECK_NE(old, DescriptorArray::kNotFound);
Descriptor d = Descriptor::AccessorConstant(
length, handle(array_descriptors->GetValue(old), isolate()),
array_descriptors->GetDetails(old).attributes());
@@ -4812,9 +4871,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
initial_map->AppendDescriptor(&d);
}
- initial_map->SetInObjectProperties(2);
- initial_map->set_unused_property_fields(0);
-
native_context()->set_regexp_result_map(*initial_map);
}
@@ -4822,7 +4878,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
PropertyAttributes attribs = DONT_ENUM;
Handle<AccessorInfo> arguments_iterator =
- Accessors::ArgumentsIteratorInfo(isolate(), attribs);
+ factory()->arguments_iterator_accessor();
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
arguments_iterator, attribs);
@@ -4981,7 +5037,7 @@ Genesis::ExtensionStates::ExtensionStates() : map_(8) {}
Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
RegisteredExtension* extension) {
base::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension));
- if (entry == NULL) {
+ if (entry == nullptr) {
return UNVISITED;
}
return static_cast<ExtensionTraversalState>(
@@ -5020,8 +5076,7 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
bool Genesis::InstallAutoExtensions(Isolate* isolate,
ExtensionStates* extension_states) {
for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
- it != NULL;
- it = it->next()) {
+ it != nullptr; it = it->next()) {
if (it->extension()->auto_enable() &&
!InstallExtension(isolate, it, extension_states)) {
return false;
@@ -5047,8 +5102,7 @@ bool Genesis::InstallExtension(Isolate* isolate,
const char* name,
ExtensionStates* extension_states) {
for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
- it != NULL;
- it = it->next()) {
+ it != nullptr; it = it->next()) {
if (strcmp(name, it->extension()->name()) == 0) {
return InstallExtension(isolate, it, extension_states);
}
@@ -5353,15 +5407,6 @@ Genesis::Genesis(
AddToWeakNativeContextList(*native_context());
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
-#if V8_TRACE_MAPS
- if (FLAG_trace_maps) {
- Handle<JSFunction> object_fun = isolate->object_function();
- PrintF("[TraceMap: InitialMap map= %p SFI= %d_Object ]\n",
- reinterpret_cast<void*>(object_fun->initial_map()),
- object_fun->shared()->unique_id());
- Map::TraceAllTransitions(object_fun->initial_map());
- }
-#endif
if (context_snapshot_index == 0) {
Handle<JSGlobalObject> global_object =
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index d49180190e..8afd0a0601 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -20,7 +20,7 @@ namespace internal {
// generate an index for each native JS file.
class SourceCodeCache final BASE_EMBEDDED {
public:
- explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
+ explicit SourceCodeCache(Script::Type type) : type_(type), cache_(nullptr) {}
void Initialize(Isolate* isolate, bool create_heap_objects);
diff --git a/deps/v8/src/boxed-float.h b/deps/v8/src/boxed-float.h
index 1b62551f05..18ee98a9c0 100644
--- a/deps/v8/src/boxed-float.h
+++ b/deps/v8/src/boxed-float.h
@@ -7,6 +7,7 @@
#include <cmath>
#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -17,9 +18,7 @@ namespace internal {
// the exact bit pattern during deoptimization when passing this value.
class Float32 {
public:
- Float32() : bit_pattern_(0) {}
-
- explicit Float32(uint32_t bit_pattern) : bit_pattern_(bit_pattern) {}
+ Float32() = default;
// This constructor does not guarantee that bit pattern of the input value
// is preserved if the input is a NaN.
@@ -33,29 +32,68 @@ class Float32 {
float get_scalar() const { return bit_cast<float>(bit_pattern_); }
- static Float32 FromBits(uint32_t bits) { return Float32(bits); }
+ bool is_nan() const {
+ // Even though {get_scalar()} might flip the quiet NaN bit, it's ok here,
+ // because this does not change the is_nan property.
+ return std::isnan(get_scalar());
+ }
+
+ // Return a pointer to the field storing the bit pattern. Used in code
+ // generation tests to store generated values there directly.
+ uint32_t* get_bits_address() { return &bit_pattern_; }
+
+ static constexpr Float32 FromBits(uint32_t bits) { return Float32(bits); }
private:
- uint32_t bit_pattern_;
+ uint32_t bit_pattern_ = 0;
+
+ explicit constexpr Float32(uint32_t bit_pattern)
+ : bit_pattern_(bit_pattern) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(Float32),
+ "Float32 should be trivially copyable");
+
// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
-// the exact bit pattern during deoptimization when passing this value. Note
-// that there is intentionally no way to construct it from a {double} value.
+// the exact bit pattern during deoptimization when passing this value.
// TODO(ahaas): Unify this class with Double in double.h
class Float64 {
public:
- Float64() : bit_pattern_(0) {}
+ Float64() = default;
+
+ // This constructor does not guarantee that bit pattern of the input value
+ // is preserved if the input is a NaN.
+ explicit Float64(double value) : bit_pattern_(bit_cast<uint64_t>(value)) {
+ // Check that the provided value is not a NaN, because the bit pattern of a
+ // NaN may be changed by a bit_cast, e.g. for signalling NaNs on ia32.
+ DCHECK(!std::isnan(value));
+ }
+
uint64_t get_bits() const { return bit_pattern_; }
double get_scalar() const { return bit_cast<double>(bit_pattern_); }
bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
- static Float64 FromBits(uint64_t bits) { return Float64(bits); }
+ bool is_nan() const {
+ // Even though {get_scalar()} might flip the quiet NaN bit, it's ok here,
+ // because this does not change the is_nan property.
+ return std::isnan(get_scalar());
+ }
+
+ // Return a pointer to the field storing the bit pattern. Used in code
+ // generation tests to store generated values there directly.
+ uint64_t* get_bits_address() { return &bit_pattern_; }
+
+ static constexpr Float64 FromBits(uint64_t bits) { return Float64(bits); }
private:
- explicit Float64(uint64_t bit_pattern) : bit_pattern_(bit_pattern) {}
- uint64_t bit_pattern_;
+ uint64_t bit_pattern_ = 0;
+
+ explicit constexpr Float64(uint64_t bit_pattern)
+ : bit_pattern_(bit_pattern) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(Float64),
+ "Float64 should be trivially copyable");
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index e8fa690660..1c31009d93 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -5,7 +5,7 @@
#if V8_TARGET_ARCH_ARM
#include "src/assembler-inl.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -23,9 +23,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
#if defined(__thumb__)
// Thumb mode builtin.
- DCHECK((reinterpret_cast<intptr_t>(
- ExternalReference(address, masm->isolate()).address()) &
- 1) == 1);
+ DCHECK_EQ(1, reinterpret_cast<intptr_t>(
+ ExternalReference(address, masm->isolate()).address()) &
+ 1);
#endif
__ mov(r5, Operand(ExternalReference(address, masm->isolate())));
if (exit_frame_type == BUILTIN_EXIT) {
@@ -62,10 +62,10 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects r0 to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ add(r0, r0, Operand(num_extra_args + 1));
+ __ add(r0, r0, Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
// Insert extra arguments.
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ SmiTag(r0);
__ Push(r0, r1, r3);
__ SmiUntag(r0);
@@ -466,7 +466,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the value to pass to the generator
// -- r1 : the JSGeneratorObject to resume
- // -- r2 : the resume mode (tagged)
// -- lr : return address
// -----------------------------------
__ AssertGeneratorObject(r1);
@@ -476,9 +475,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
-
// Load suspended function and context.
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
__ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
@@ -505,13 +501,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ b(eq, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ b(lo, &stack_overflow);
+
// Push receiver.
__ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
- // -- r2 : the resume mode (tagged)
// -- r4 : generator function
// -- cp : generator context
// -- lr : return address
@@ -561,9 +562,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r2, r4);
+ __ Push(r1, r4);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(r1, r2);
+ __ Pop(r1);
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -571,12 +572,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_suspended_generator);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r2);
+ __ Push(r1);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(r1, r2);
+ __ Pop(r1);
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0); // This should be unreachable.
+ }
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -585,32 +593,19 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
-// Clobbers r2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
- IsTagged argc_is_tagged) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch,
+ Label* stack_overflow) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- if (argc_is_tagged == kArgcIsSmiTagged) {
- __ cmp(r2, Operand::PointerOffsetFromSmiKey(argc));
- } else {
- DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
- __ cmp(r2, Operand(argc, LSL, kPointerSizeLog2));
- }
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow);
-
- __ bind(&okay);
+ __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ b(le, stack_overflow); // Signed comparison.
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
@@ -641,7 +636,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Clobbers r2.
- Generate_CheckStackOverflow(masm, r3, kArgcIsUntaggedInt);
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, r3, r2, &stack_overflow);
+ __ b(&enough_stack_space);
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+
+ __ bind(&enough_stack_space);
// Remember new.target.
__ mov(r5, r0);
@@ -806,7 +809,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
+ Code::kCodeDataContainerOffset));
+ __ ldr(
+ scratch2,
+ FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
@@ -1041,21 +1047,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(&bytecode_array_loaded);
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ b(le, stack_overflow); // Signed comparison.
-}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args, Register index,
Register limit, Register scratch) {
@@ -1496,19 +1487,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(r4);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ push(r0);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ pop(r0);
- }
-
- __ mov(pc, lr); // Jump to ContinueToBuiltin stub
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1613,9 +1591,8 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, FieldMemOperand(
- r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code start + osr_offset
__ add(lr, r0, Operand::SmiUntag(r1));
@@ -2507,7 +2484,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- const RegList gp_regs = r0.bit() | r1.bit() | r2.bit() | r3.bit();
+ constexpr RegList gp_regs = Register::ListOf<r0, r1, r2, r3>();
constexpr DwVfpRegister lowest_fp_reg = d0;
constexpr DwVfpRegister highest_fp_reg = d7;
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 7aaa2d0003..875f261835 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -5,7 +5,7 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -69,18 +69,18 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects x0 to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ Add(x0, x0, num_extra_args + 1);
+ __ Add(x0, x0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
// Insert extra arguments.
- __ SmiTag(x0);
- __ Push(x0, x1, x3);
- __ SmiUntag(x0);
+ Register padding = x10;
+ __ LoadRoot(padding, Heap::kTheHoleValueRootIndex);
+ __ SmiTag(x11, x0);
+ __ Push(padding, x11, x1, x3);
// Jump to the C entry runtime stub directly here instead of using
// JumpToExternalReference. We have already loaded entry point to x5
// in Generate_adaptor.
- __ mov(x1, x5);
+ __ Mov(x1, x5);
CEntryStub stub(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
exit_frame_type == Builtins::BUILTIN_EXIT);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -169,13 +169,14 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
// Push a copy of the target function and the new target.
// Push another copy as a parameter to the runtime call.
__ SmiTag(x0);
- __ Push(x0, x1, x3, x1);
+ __ Push(x0, x1, x3, padreg);
+ __ PushArgument(x1);
__ CallRuntime(function_id, 1);
__ Move(x2, x0);
// Restore target function and new target.
- __ Pop(x3, x1, x0);
+ __ Pop(padreg, x3, x1, x0);
__ SmiUntag(x0);
}
@@ -202,61 +203,92 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label already_aligned;
+ Register argc = x0;
+
+ if (__ emit_debug_code()) {
+ // Check that FrameScope pushed the context on to the stack already.
+ __ Peek(x2, 0);
+ __ Cmp(x2, cp);
+ __ Check(eq, kUnexpectedValue);
+ }
+
+ // Push number of arguments.
+ __ SmiTag(x11, argc);
+ __ Push(x11, padreg);
+
+ // Add a slot for the receiver, and round up to maintain alignment.
+ Register slot_count = x2;
+ Register slot_count_without_rounding = x12;
+ __ Add(slot_count_without_rounding, argc, 2);
+ __ Bic(slot_count, slot_count_without_rounding, 1);
+ __ Claim(slot_count);
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
// Preserve the incoming parameters on the stack.
- __ SmiTag(x11, x0);
- __ Push(cp, x11, x10);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+
+ // Compute a pointer to the slot immediately above the location on the
+ // stack to which arguments will be later copied.
+ __ SlotAddress(x2, argc);
- // Set up pointer to last argument.
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+ // Poke the hole (receiver) in the highest slot.
+ __ Str(x10, MemOperand(x2));
+ __ Tbnz(slot_count_without_rounding, 0, &already_aligned);
- // Copy arguments and receiver to the expression stack.
- // Copy 2 values every loop to use ldp/stp.
+ // Store padding, if needed.
+ __ Str(padreg, MemOperand(x2, 1 * kPointerSize));
+ __ Bind(&already_aligned);
+
+ // Copy arguments to the expression stack.
+ {
+ Register count = x2;
+ Register dst = x10;
+ Register src = x11;
+ __ Mov(count, argc);
+ __ SlotAddress(dst, 0);
+ __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
+ __ CopyDoubleWords(dst, src, count);
+ }
- // Compute pointer behind the first argument.
- __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
- Label loop, entry, done_copying_arguments;
// ----------- S t a t e -------------
- // -- x0: number of arguments (untagged)
- // -- x1: constructor function
- // -- x3: new target
- // -- x2: pointer to last argument (caller sp)
- // -- x4: pointer to argument last copied
- // -- sp[0*kPointerSize]: the hole (receiver)
- // -- sp[1*kPointerSize]: number of arguments (tagged)
- // -- sp[2*kPointerSize]: context
+ // -- x0: number of arguments (untagged)
+ // -- x1: constructor function
+ // -- x3: new target
+ // If argc is odd:
+ // -- sp[0*kPointerSize]: argument n - 1
+ // -- ...
+ // -- sp[(n-1)*kPointerSize]: argument 0
+ // -- sp[(n+0)*kPointerSize]: the hole (receiver)
+ // -- sp[(n+1)*kPointerSize]: padding
+ // -- sp[(n+2)*kPointerSize]: padding
+ // -- sp[(n+3)*kPointerSize]: number of arguments (tagged)
+ // -- sp[(n+4)*kPointerSize]: context (pushed by FrameScope)
+ // If argc is even:
+ // -- sp[0*kPointerSize]: argument n - 1
+ // -- ...
+ // -- sp[(n-1)*kPointerSize]: argument 0
+ // -- sp[(n+0)*kPointerSize]: the hole (receiver)
+ // -- sp[(n+1)*kPointerSize]: padding
+ // -- sp[(n+2)*kPointerSize]: number of arguments (tagged)
+ // -- sp[(n+3)*kPointerSize]: context (pushed by FrameScope)
// -----------------------------------
- __ B(&entry);
- __ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
- __ Push(x11, x10);
- __ Bind(&entry);
- __ Cmp(x4, x2);
- __ B(gt, &loop);
- // Because we copied values 2 by 2 we may have copied one extra value.
- // Drop it if that is the case.
- __ B(eq, &done_copying_arguments);
- __ Drop(1);
- __ Bind(&done_copying_arguments);
// Call the function.
- // x0: number of arguments
- // x1: constructor function
- // x3: new target
- ParameterCount actual(x0);
+ ParameterCount actual(argc);
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// Restore the context from the frame.
__ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
- // Restore smi-tagged arguments count from the frame.
- __ Peek(x1, 0);
+ // Restore smi-tagged arguments count from the frame. Use fp relative
+ // addressing to avoid the circular dependency between padding existence and
+ // argc parity.
+ __ Ldrsw(x1,
+ UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- __ DropBySMI(x1);
- __ Drop(1);
+ __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
__ Ret();
}
@@ -272,28 +304,36 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
- ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+ ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ if (__ emit_debug_code()) {
+ // Check that FrameScope pushed the context on to the stack already.
+ __ Peek(x2, 0);
+ __ Cmp(x2, cp);
+ __ Check(eq, kUnexpectedValue);
+ }
+
// Preserve the incoming parameters on the stack.
__ SmiTag(x0);
- __ Push(cp, x0, x1, x3);
+ __ Push(x0, x1, padreg, x3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- x1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- x1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context (pushed by FrameScope)
// -----------------------------------
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(w4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
- __ B(ne, &not_create_implicit_receiver);
+ __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kDerivedConstructorMask,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -303,7 +343,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ B(&post_instantiation_deopt_entry);
// Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
+ __ Bind(&not_create_implicit_receiver);
__ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
// ----------- S t a t e -------------
@@ -316,72 +356,77 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
- // Restore new target.
- __ Pop(x3);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(x0, x0);
+ __ Bind(&post_instantiation_deopt_entry);
- // ----------- S t a t e -------------
- // -- x3: new target
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
- // -----------------------------------
+ // Restore new target from the top of the stack.
+ __ Peek(x3, 0 * kPointerSize);
// Restore constructor function and argument count.
__ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ Ldr(x0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(x0);
-
- // Set up pointer to last argument.
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
-
- // Copy arguments and receiver to the expression stack.
- // Copy 2 values every loop to use ldp/stp.
+ __ Ldrsw(x12,
+ UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+
+ // Copy arguments to the expression stack. The called function pops the
+ // receiver along with its arguments, so we need an extra receiver on the
+ // stack, in case we have to return it later.
+
+ // Overwrite the new target with a receiver.
+ __ Poke(x0, 0);
+
+ // Push two further copies of the receiver. One will be popped by the called
+ // function. The second acts as padding if the number of arguments plus
+ // receiver is odd - pushing receiver twice avoids branching. It also means
+ // that we don't have to handle the even and odd cases specially on
+ // InvokeFunction's return, as top of stack will be the receiver in either
+ // case.
+ __ Push(x0, x0);
- // Compute pointer behind the first argument.
- __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
- Label loop, entry, done_copying_arguments;
// ----------- S t a t e -------------
- // -- x0: number of arguments (untagged)
// -- x3: new target
- // -- x2: pointer to last argument (caller sp)
- // -- x4: pointer to argument last copied
- // -- sp[0*kPointerSize]: implicit receiver
+ // -- x12: number of arguments (untagged)
+ // -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
// -- sp[1*kPointerSize]: implicit receiver
- // -- x1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: implicit receiver
+ // -- x1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
- __ B(&entry);
- __ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
- __ Push(x11, x10);
- __ Bind(&entry);
- __ Cmp(x4, x2);
- __ B(gt, &loop);
- // Because we copied values 2 by 2 we may have copied one extra value.
- // Drop it if that is the case.
- __ B(eq, &done_copying_arguments);
- __ Drop(1);
- __ Bind(&done_copying_arguments);
+
+ // Round the number of arguments down to the next even number, and claim
+ // slots for the arguments. If the number of arguments was odd, the last
+ // argument will overwrite one of the receivers pushed above.
+ __ Bic(x10, x12, 1);
+ __ Claim(x10);
+
+ // Copy the arguments.
+ {
+ Register count = x2;
+ Register dst = x10;
+ Register src = x11;
+ __ Mov(count, x12);
+ __ SlotAddress(dst, 0);
+ __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
+ __ CopyDoubleWords(dst, src, count);
+ }
// Call the function.
+ __ Mov(x0, x12);
ParameterCount actual(x0);
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// ----------- S t a t e -------------
- // -- x0: constructor result
+ // If argc is odd:
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: constructor function
// -- sp[2*kPointerSize]: number of arguments
// -- sp[3*kPointerSize]: context
+ // If argc is even:
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -416,13 +461,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(w4, Operand(SharedFunctionInfo::kClassConstructorMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ B(eq, &use_receiver);
+ __ TestAndBranchIfAllClear(w4, SharedFunctionInfo::kClassConstructorMask,
+ &use_receiver);
} else {
- __ B(ne, &use_receiver);
+ __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kClassConstructorMask,
+ &use_receiver);
__ CallRuntime(
Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
@@ -440,12 +486,12 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
- __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ Ldrsw(x1,
+ UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- __ DropBySMI(x1);
- __ Drop(1);
+ __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
__ Ret();
}
} // namespace
@@ -467,7 +513,7 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ PushArgument(x1);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -476,7 +522,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the value to pass to the generator
// -- x1 : the JSGeneratorObject to resume
- // -- x2 : the resume mode (tagged)
// -- lr : return address
// -----------------------------------
__ AssertGeneratorObject(x1);
@@ -486,9 +531,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
-
// Load suspended function and context.
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
__ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
@@ -511,28 +553,50 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
&prepare_step_in_suspended_generator);
__ Bind(&stepping_prepared);
- // Push receiver.
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ __ B(lo, &stack_overflow);
+
+ // Get number of arguments for generator function.
+ __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w10,
+ FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
+
+ // Claim slots for arguments and receiver.
+ __ Add(x11, x10, 1);
+ __ Claim(x11);
+
+ // Poke receiver into highest claimed slot.
__ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
- __ Push(x5);
+ __ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
// ----------- S t a t e -------------
- // -- x1 : the JSGeneratorObject to resume
- // -- x2 : the resume mode (tagged)
- // -- x4 : generator function
- // -- cp : generator context
- // -- lr : return address
- // -- jssp[0] : generator receiver
+ // -- x1 : the JSGeneratorObject to resume
+ // -- x4 : generator function
+ // -- x10 : argument count
+ // -- cp : generator context
+ // -- lr : return address
+ // -- jssp[arg count] : generator receiver
+ // -- jssp[0 .. arg count - 1] : claimed for args
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
- __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w10,
- FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
- __ PushMultipleTimes(x11, w10);
+ {
+ Label loop, done;
+ __ Cbz(x10, &done);
+ __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
+
+ __ Bind(&loop);
+ __ Sub(x10, x10, 1);
+ __ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
+ __ Cbnz(x10, &loop);
+ __ Bind(&done);
+ }
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
@@ -560,9 +624,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x2, x4);
+ __ Push(x1);
+ __ PushArgument(x4);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(x2, x1);
+ __ Pop(x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -570,43 +635,39 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x2);
+ __ Push(x1);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(x2, x1);
+ __ Pop(x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable(); // This should be unreachable.
+ }
}
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Label* stack_overflow) {
+ DCHECK(masm->StackPointer().Is(jssp));
+
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
-// Clobbers x10, x15; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
- IsTagged argc_is_tagged) {
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
Label enough_stack_space;
- __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
- // Make x10 the space we have left. The stack might already be overflowed
- // here which will cause x10 to become negative.
- // TODO(jbramley): Check that the stack usage here is safe.
- __ Sub(x10, jssp, x10);
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ Sub(scratch, masm->StackPointer(), scratch);
// Check if the arguments will overflow the stack.
- if (argc_is_tagged == kArgcIsSmiTagged) {
- __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
- } else {
- DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
- __ Cmp(x10, Operand(argc, LSL, kPointerSizeLog2));
- }
- __ B(gt, &enough_stack_space);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // We should never return from the APPLY_OVERFLOW builtin.
- if (__ emit_debug_code()) {
- __ Unreachable();
- }
-
- __ Bind(&enough_stack_space);
+ __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ B(le, stack_overflow);
}
// Input:
@@ -626,6 +687,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Register argc = x3;
Register argv = x4;
Register scratch = x10;
+ Register slots_to_claim = x11;
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -640,28 +702,54 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ InitializeRootRegister();
- // Push the function and the receiver onto the stack.
- __ Push(function, receiver);
+ // Claim enough space for the arguments, the receiver and the function,
+ // including an optional slot of padding.
+ __ Add(slots_to_claim, argc, 3);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
// Check if we have enough stack space to push all arguments.
- // Expects argument count in eax. Clobbers ecx, edx, edi.
- Generate_CheckStackOverflow(masm, argc, kArgcIsUntaggedInt);
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
+ __ B(&enough_stack_space);
+
+ __ Bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
+
+ __ Bind(&enough_stack_space);
+ __ Claim(slots_to_claim);
+
+ // Store padding (which might be overwritten).
+ __ SlotAddress(scratch, slots_to_claim);
+ __ Str(padreg, MemOperand(scratch, -kPointerSize));
+
+ // Store receiver and function on the stack.
+ __ SlotAddress(scratch, argc);
+ __ Stp(receiver, function, MemOperand(scratch));
// Copy arguments to the stack in a loop, in reverse order.
// x3: argc.
// x4: argv.
- Label loop, entry;
- // Compute the copy end address.
- __ Add(scratch, argv, Operand(argc, LSL, kPointerSizeLog2));
+ Label loop, done;
+
+ // Skip the argument set up if we have no arguments.
+ __ Cbz(argc, &done);
+
+ // scratch has been set to point to the location of the receiver, which
+ // marks the end of the argument copy.
- __ B(&entry);
__ Bind(&loop);
+ // Load the handle.
__ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
- __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
- __ Push(x12); // Push the argument.
- __ Bind(&entry);
- __ Cmp(scratch, argv);
- __ B(ne, &loop);
+ // Dereference the handle.
+ __ Ldr(x11, MemOperand(x11));
+ // Poke the result into the stack.
+ __ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
+ // Loop if we've not reached the end of copy marker.
+ __ Cmp(__ StackPointer(), scratch);
+ __ B(lt, &loop);
+
+ __ Bind(&done);
__ Mov(scratch, argc);
__ Mov(argc, new_target);
@@ -718,19 +806,24 @@ static void ReplaceClosureCodeWithOptimizedCode(
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
+ Register args_size = scratch;
// Get the arguments + receiver count.
- __ ldr(args_count,
+ __ Ldr(args_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Ldr(args_count.W(),
- FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ Ldr(args_size.W(),
+ FieldMemOperand(args_size, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ Drop(args_count, 1);
+ if (__ emit_debug_code()) {
+ __ Tst(args_size, kPointerSize - 1);
+ __ Check(eq, kUnexpectedValue);
+ }
+ __ Lsr(args_size, args_size, kPointerSizeLog2);
+ __ DropArguments(args_size);
}
// Tail-call |function_id| if |smi_entry| == |marker|
@@ -812,7 +905,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
Label found_deoptimized_code;
__ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
+ Code::kCodeDataContainerOffset));
+ __ Ldr(
+ scratch2,
+ FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
@@ -1050,44 +1146,70 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(&bytecode_array_loaded);
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch,
- Label* stack_overflow) {
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ Sub(scratch, jssp, scratch);
- // Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ B(le, stack_overflow);
-}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register num_args, Register index,
- Register last_arg, Register stack_addr,
- Register scratch) {
- __ Mov(scratch, num_args);
- __ lsl(scratch, scratch, kPointerSizeLog2);
- __ sub(last_arg, index, scratch);
-
- // Set stack pointer and where to stop.
- __ Mov(stack_addr, jssp);
- __ Claim(scratch, 1);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ B(&loop_check);
- __ Bind(&loop_header);
- // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
- __ Ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
- __ Str(scratch, MemOperand(stack_addr, -kPointerSize, PreIndex));
- __ Bind(&loop_check);
- __ Cmp(index, last_arg);
- __ B(gt, &loop_header);
+ Register num_args,
+ Register first_arg_index,
+ Register spread_arg_out,
+ ConvertReceiverMode receiver_mode,
+ InterpreterPushArgsMode mode) {
+ Register last_arg_addr = x10;
+ Register stack_addr = x11;
+ Register slots_to_claim = x12;
+ Register slots_to_copy = x13; // May include receiver, unlike num_args.
+
+ DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
+ slots_to_claim, slots_to_copy));
+ // spread_arg_out may alias with the first_arg_index input.
+ DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
+ slots_to_copy));
+
+ // Add one slot for the receiver.
+ __ Add(slots_to_claim, num_args, 1);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Exclude final spread from slots to claim and the number of arguments.
+ __ Sub(slots_to_claim, slots_to_claim, 1);
+ __ Sub(num_args, num_args, 1);
+ }
+
+ // Add a stack check before pushing arguments.
+ Label stack_overflow, done;
+ Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
+ __ B(&done);
+ __ Bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
+ __ Bind(&done);
+
+ // TODO(arm64): Claim one extra slot for padding and store padreg to the
+ // padding slot.
+ __ Claim(slots_to_claim);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Store "undefined" as the receiver arg if we need to.
+ Register receiver = x14;
+ __ LoadRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ SlotAddress(stack_addr, num_args);
+ __ Str(receiver, MemOperand(stack_addr));
+ __ Mov(slots_to_copy, num_args);
+ } else {
+ // If we're not given an explicit receiver to store, we'll need to copy it
+ // together with the rest of the arguments.
+ __ Add(slots_to_copy, num_args, 1);
+ }
+
+ __ Sub(last_arg_addr, first_arg_index,
+ Operand(slots_to_copy, LSL, kPointerSizeLog2));
+ __ Add(last_arg_addr, last_arg_addr, kPointerSize);
+
+ // Load the final spread argument into spread_arg_out, if necessary.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kPointerSize));
+ }
+
+ // Copy the rest of the arguments.
+ __ SlotAddress(stack_addr, 0);
+ __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
}
// static
@@ -1101,28 +1223,16 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// they are to be pushed onto the stack.
// -- x1 : the target to call (can be any Object).
// -----------------------------------
- Label stack_overflow;
- // Add one for the receiver.
- __ Add(x3, x0, 1);
-
- // Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, x3, x6, &stack_overflow);
-
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10);
- __ Mov(x3, x0); // Argument count is correct.
- }
-
- // Push the arguments. x2, x4, x5, x6 will be modified.
- Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(x2); // Pass the spread in a register
- __ Sub(x0, x0, 1); // Subtract one for spread
- }
+ // Push the arguments. num_args may be updated according to mode.
+ // spread_arg_out will be updated to contain the last spread argument, when
+ // mode == InterpreterPushArgsMode::kWithFinalSpread.
+ Register num_args = x0;
+ Register first_arg_index = x2;
+ Register spread_arg_out =
+ (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
+ Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
+ receiver_mode, mode);
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
@@ -1136,12 +1246,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
-
- __ bind(&stack_overflow);
- {
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ Unreachable();
- }
}
// static
@@ -1154,23 +1258,17 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -- x2 : allocation site feedback if available, undefined otherwise
// -- x4 : address of the first argument
// -----------------------------------
- Label stack_overflow;
-
- // Push a slot for the receiver.
- __ Push(xzr);
-
- // Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, x0, x7, &stack_overflow);
-
- // Push the arguments. x5, x4, x6, x7 will be modified.
- Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(x2); // Pass the spread in a register
- __ Sub(x0, x0, 1); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(x2, x6);
- }
+ __ AssertUndefinedOrAllocationSite(x2);
+
+ // Push the arguments. num_args may be updated according to mode.
+ // spread_arg_out will be updated to contain the last spread argument, when
+ // mode == InterpreterPushArgsMode::kWithFinalSpread.
+ Register num_args = x0;
+ Register first_arg_index = x4;
+ Register spread_arg_out =
+ (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
+ Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
+ ConvertReceiverMode::kNullOrUndefined, mode);
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(x1);
@@ -1190,12 +1288,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
-
- __ bind(&stack_overflow);
- {
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ Unreachable();
- }
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
@@ -1469,7 +1561,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
Label at_least_one_arg;
Label three_args;
- DCHECK(Smi::kZero == 0);
+ DCHECK_NULL(Smi::kZero);
__ Cbnz(argc, &at_least_one_arg);
// No arguments.
@@ -1514,8 +1606,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
scope.GenerateLeaveFrame();
// Drop arguments and receiver.
- __ Add(x4, x4, 1);
- __ DropArguments(x4);
+ __ DropArguments(x4, TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ Bind(&failed);
@@ -1530,51 +1621,61 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(x4);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ Push(x0);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ Pop(x0);
- }
-
- // Jump to the ContinueToBuiltin stub. Deoptimizer::EntryGenerator::Generate
- // loads this into lr before it jumps here.
- __ Br(lr);
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
bool with_result) {
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
+ int frame_size = BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp +
+ (allocatable_register_count +
+ BuiltinContinuationFrameConstants::PaddingSlotCount(
+ allocatable_register_count)) *
+ kPointerSize;
+
+ // Set up frame pointer.
+ __ Add(fp, jssp, frame_size);
+
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
- __ Str(x0, MemOperand(
- jssp,
- config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
+ __ Str(x0,
+ MemOperand(fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
}
- for (int i = allocatable_register_count - 1; i >= 0; --i) {
- int code = config->GetAllocatableGeneralCode(i);
- __ Pop(Register::from_code(code));
- if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
- __ SmiUntag(Register::from_code(code));
- }
+
+ // Restore registers in pairs.
+ int offset = -BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
+ allocatable_register_count * kPointerSize;
+ for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
+ int code1 = config->GetAllocatableGeneralCode(i);
+ int code2 = config->GetAllocatableGeneralCode(i - 1);
+ Register reg1 = Register::from_code(code1);
+ Register reg2 = Register::from_code(code2);
+ __ Ldp(reg1, reg2, MemOperand(fp, offset));
+ offset += 2 * kPointerSize;
+ }
+
+ // Restore first register separately, if number of registers is odd.
+ if (allocatable_register_count % 2 != 0) {
+ int code = config->GetAllocatableGeneralCode(0);
+ __ Ldr(Register::from_code(code), MemOperand(fp, offset));
}
- __ ldr(fp,
- MemOperand(jssp,
- BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
- __ Pop(ip0);
- __ Add(jssp, jssp,
- Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
- __ Pop(lr);
- __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Br(ip0);
+
+ if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
+
+ // Load builtin object.
+ UseScratchRegisterScope temps(masm);
+ Register builtin = temps.AcquireX();
+ __ Ldr(builtin,
+ MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
+
+ // Restore fp, lr.
+ __ Mov(__ StackPointer(), fp);
+ __ Pop(fp, lr);
+
+ // Call builtin.
+ __ Add(builtin, builtin, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(builtin);
}
} // namespace
@@ -1621,7 +1722,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
- __ Push(x0);
+ __ PushArgument(x0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -1646,7 +1747,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ Ldrsw(w1, UntagSmiFieldMemOperand(
x1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1689,24 +1790,35 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
- // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
- // consistent state for a simple pop operation.
- __ Claim(2);
- __ Drop(argc);
-
- // ----------- S t a t e -------------
- // -- x0 : argc
- // -- jssp[0] : argArray (dummy value if argc <= 1)
- // -- jssp[8] : thisArg (dummy value if argc == 0)
- // -- jssp[16] : receiver
- // -----------------------------------
- __ Cmp(argc, 1);
- __ Pop(arg_array, this_arg); // Overwrites argc.
- __ CmovX(this_arg, undefined_value, lo); // undefined if argc == 0.
- __ CmovX(arg_array, undefined_value, ls); // undefined if argc <= 1.
-
- __ Peek(receiver, 0);
- __ Poke(this_arg, 0);
+ Register saved_argc = x10;
+ Register scratch = x11;
+
+ // Push two undefined values on the stack, to put it in a consistent state
+ // so that we can always read three arguments from it.
+ __ Push(undefined_value, undefined_value);
+
+ // The state of the stack (with arrows pointing to the slots we will read)
+ // is as follows:
+ //
+ // argc = 0 argc = 1 argc = 2
+ // -> sp[16]: receiver -> sp[24]: receiver -> sp[32]: receiver
+ // -> sp[8]: undefined -> sp[16]: this_arg -> sp[24]: this_arg
+ // -> sp[0]: undefined -> sp[8]: undefined -> sp[16]: arg_array
+ // sp[0]: undefined sp[8]: undefined
+ // sp[0]: undefined
+ //
+ // There are now always three arguments to read, in the slots starting from
+ // slot argc.
+ __ SlotAddress(scratch, argc);
+
+ __ Mov(saved_argc, argc);
+ __ Ldp(arg_array, this_arg, MemOperand(scratch)); // Overwrites argc.
+ __ Ldr(receiver, MemOperand(scratch, 2 * kPointerSize));
+
+ __ Drop(2); // Drop the undefined values we pushed above.
+ __ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
+
+ __ PushArgument(this_arg);
}
// ----------- S t a t e -------------
@@ -1767,7 +1879,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label loop;
// Calculate the copy start address (destination). Copy end address is jssp.
- __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
+ __ SlotAddress(scratch2, argc);
__ Sub(scratch1, scratch2, kPointerSize);
__ Bind(&loop);
@@ -1807,26 +1919,44 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
- // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
- // consistent state for a simple pop operation.
- __ Claim(3);
- __ Drop(argc);
-
- // ----------- S t a t e -------------
- // -- x0 : argc
- // -- jssp[0] : argumentsList (dummy value if argc <= 2)
- // -- jssp[8] : thisArgument (dummy value if argc <= 1)
- // -- jssp[16] : target (dummy value if argc == 0)
- // -- jssp[24] : receiver
- // -----------------------------------
- __ Adds(x10, argc, 0); // Preserve argc, and set the Z flag if it is zero.
- __ Pop(arguments_list, this_argument, target); // Overwrites argc.
- __ CmovX(target, undefined_value, eq); // undefined if argc == 0.
- __ Cmp(x10, 2);
- __ CmovX(this_argument, undefined_value, lo); // undefined if argc <= 1.
- __ CmovX(arguments_list, undefined_value, ls); // undefined if argc <= 2.
-
- __ Poke(this_argument, 0); // Overwrite receiver.
+ // Push four undefined values on the stack, to put it in a consistent state
+ // so that we can always read the three arguments we need from it. The
+ // fourth value is used for stack alignment.
+ __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
+
+ // The state of the stack (with arrows pointing to the slots we will read)
+ // is as follows:
+ //
+ // argc = 0 argc = 1 argc = 2
+ // sp[32]: receiver sp[40]: receiver sp[48]: receiver
+ // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
+ // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: this_argument
+ // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
+ // sp[0]: undefined sp[8]: undefined sp[16]: undefined
+ // sp[0]: undefined sp[8]: undefined
+ // sp[0]: undefined
+ // argc = 3
+ // sp[56]: receiver
+ // -> sp[48]: target
+ // -> sp[40]: this_argument
+ // -> sp[32]: arguments_list
+ // sp[24]: undefined
+ // sp[16]: undefined
+ // sp[8]: undefined
+ // sp[0]: undefined
+ //
+ // There are now always three arguments to read, in the slots starting from
+ // slot (argc + 1).
+ Register scratch = x10;
+ __ SlotAddress(scratch, argc);
+ __ Ldp(arguments_list, this_argument,
+ MemOperand(scratch, 1 * kPointerSize));
+ __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
+
+ __ Drop(4); // Drop the undefined values we pushed above.
+ __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+
+ __ PushArgument(this_argument);
}
// ----------- S t a t e -------------
@@ -1867,26 +1997,47 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
- // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
- // consistent state for a simple pop operation.
- __ Claim(3);
- __ Drop(argc);
-
- // ----------- S t a t e -------------
- // -- x0 : argc
- // -- jssp[0] : new.target (dummy value if argc <= 2)
- // -- jssp[8] : argumentsList (dummy value if argc <= 1)
- // -- jssp[16] : target (dummy value if argc == 0)
- // -- jssp[24] : receiver
- // -----------------------------------
- __ Adds(x10, argc, 0); // Preserve argc, and set the Z flag if it is zero.
- __ Pop(new_target, arguments_list, target); // Overwrites argc.
- __ CmovX(target, undefined_value, eq); // undefined if argc == 0.
- __ Cmp(x10, 2);
- __ CmovX(arguments_list, undefined_value, lo); // undefined if argc <= 1.
- __ CmovX(new_target, target, ls); // target if argc <= 2.
-
- __ Poke(undefined_value, 0); // Overwrite receiver.
+ // Push four undefined values on the stack, to put it in a consistent state
+ // so that we can always read the three arguments we need from it. The
+ // fourth value is used for stack alignment.
+ __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
+
+ // The state of the stack (with arrows pointing to the slots we will read)
+ // is as follows:
+ //
+ // argc = 0 argc = 1 argc = 2
+ // sp[32]: receiver sp[40]: receiver sp[48]: receiver
+ // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
+ // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: arguments_list
+ // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
+ // sp[0]: undefined sp[8]: undefined sp[16]: undefined
+ // sp[0]: undefined sp[8]: undefined
+ // sp[0]: undefined
+ // argc = 3
+ // sp[56]: receiver
+ // -> sp[48]: target
+ // -> sp[40]: arguments_list
+ // -> sp[32]: new_target
+ // sp[24]: undefined
+ // sp[16]: undefined
+ // sp[8]: undefined
+ // sp[0]: undefined
+ //
+ // There are now always three arguments to read, in the slots starting from
+ // slot (argc + 1).
+ Register scratch = x10;
+ __ SlotAddress(scratch, argc);
+ __ Ldp(new_target, arguments_list, MemOperand(scratch, 1 * kPointerSize));
+ __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
+
+ __ Cmp(argc, 2);
+ __ CmovX(new_target, target, ls); // target if argc <= 2.
+
+ __ Drop(4); // Drop the undefined values we pushed above.
+ __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+
+ // Push receiver (undefined).
+ __ PushArgument(undefined_value);
}
// ----------- S t a t e -------------
@@ -1931,11 +2082,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Pop(fp, lr);
// Drop actual parameters and receiver.
- // TODO(all): This will need to be rounded up to a multiple of two when using
- // the CSP, as we will have claimed an even number of slots in total for the
- // parameters.
- __ DropBySMI(x10, kXRegSize);
- __ Drop(1);
+ __ SmiUntag(x10);
+ __ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
}
// static
@@ -2022,7 +2170,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
- __ Push(x3);
+ __ PushArgument(x3);
__ CallRuntime(Runtime::kThrowNotConstructor);
}
__ Bind(&new_target_constructor);
@@ -2055,14 +2203,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ B(le, &stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, x6, x2, &stack_overflow);
+ Generate_StackOverflowCheck(masm, x6, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
__ Add(x5, x5, kPointerSize);
__ Add(x0, x0, x6);
- __ bind(&loop);
+ __ Bind(&loop);
{
__ Ldr(x4, MemOperand(x5, x6, LSL, kPointerSizeLog2));
__ Push(x4);
@@ -2176,7 +2324,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(padreg, x1);
+ __ PushArgument(x1);
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
}
}
@@ -2205,7 +2353,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x4 : the number of [[BoundArguments]]
// -----------------------------------
- // Reserve stack space for the [[BoundArguments]].
{
Label done;
__ Claim(x4);
@@ -2224,33 +2371,26 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
+ UseScratchRegisterScope temps(masm);
+ Register argc = temps.AcquireX();
// Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ Mov(x5, 0);
- __ Bind(&loop);
- __ Cmp(x5, x0);
- __ B(gt, &done_loop);
- __ Peek(x10, Operand(x4, LSL, kPointerSizeLog2));
- __ Poke(x10, Operand(x5, LSL, kPointerSizeLog2));
- __ Add(x4, x4, 1);
- __ Add(x5, x5, 1);
- __ B(&loop);
- __ Bind(&done_loop);
- }
+ __ Mov(argc, x0);
+ __ CopySlots(0, x4, argc);
- // Copy [[BoundArguments]] to the stack (below the arguments).
+ // Copy [[BoundArguments]] to the stack (below the arguments). The first
+ // element of the array is copied to the highest address.
{
Label loop;
__ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
__ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SlotAddress(x11, x0);
+ __ Add(x0, x0, x4);
__ Bind(&loop);
__ Sub(x4, x4, 1);
__ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
- __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x0, x0, 1);
- __ Cmp(x4, 0);
- __ B(gt, &loop);
+ // Poke into claimed area of stack.
+ __ Str(x10, MemOperand(x11, kPointerSize, PostIndex));
+ __ Cbnz(x4, &loop);
}
}
__ Bind(&no_bound_arguments);
@@ -2320,7 +2460,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ PushArgument(x1);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -2433,7 +2573,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
__ SmiTag(x1);
- __ Push(x1);
+ __ PushArgument(x1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2460,7 +2600,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- __ Push(x1);
+ __ PushArgument(x1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
@@ -2473,7 +2613,7 @@ void Builtins::Generate_AbortJS(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- __ Push(x1);
+ __ PushArgument(x1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbortJS);
}
@@ -2559,7 +2699,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// number of actual arguments and the receiver.
__ RecordComment("-- Stack check --");
__ Add(scratch1, argc_expected, 2);
- Generate_StackOverflowCheck(masm, scratch1, scratch2, &stack_overflow);
+ Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
// Round up number of slots to be even, to maintain stack alignment.
__ RecordComment("-- Allocate callee frame slots --");
@@ -2675,10 +2815,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- const RegList gp_regs = x0.bit() | x1.bit() | x2.bit() | x3.bit() |
- x4.bit() | x5.bit() | x6.bit() | x7.bit();
- const RegList fp_regs = d0.bit() | d1.bit() | d2.bit() | d3.bit() |
- d4.bit() | d5.bit() | d6.bit() | d7.bit();
+ constexpr RegList gp_regs =
+ Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
+ constexpr RegList fp_regs =
+ Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
__ PushXRegList(gp_regs);
__ PushDRegList(fp_regs);
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 52023efd65..d50e045069 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -205,7 +205,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
for (int i = 0; i < argc; ++i) {
argv[cursor--] = *args[i];
}
- DCHECK(cursor == BuiltinArguments::kArgcOffset);
+ DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
+ argv[BuiltinArguments::kPaddingOffset] = isolate->heap()->the_hole_value();
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc);
argv[BuiltinArguments::kTargetOffset] = *function;
argv[BuiltinArguments::kNewTargetOffset] = *new_target;
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 96d52e6db2..7db8b971d7 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -78,7 +78,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
base_size += FixedArray::kHeaderSize;
element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode);
}
- bool empty = IsIntPtrOrSmiConstantZero(arguments_count);
+ bool empty = IsIntPtrOrSmiConstantZero(arguments_count, mode);
DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
Node* size =
empty ? IntPtrConstant(base_size)
@@ -136,7 +136,7 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
Node* unused;
std::tie(result, elements, unused) =
AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size);
- DCHECK(unused == nullptr);
+ DCHECK_NULL(unused);
CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode);
VARIABLE(offset, MachineType::PointerRepresentation());
offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 46d20e57eb..5fec0abfa5 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -31,6 +31,32 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
PostLoopAction;
+ void FindResultGenerator() { a_.Bind(UndefinedConstant()); }
+
+ Node* FindProcessor(Node* k_value, Node* k) {
+ Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
+ this_arg(), k_value, k, o());
+ Label false_continue(this), return_true(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
+ BIND(&return_true);
+ ReturnFromBuiltin(k_value);
+ BIND(&false_continue);
+ return a();
+ }
+
+ void FindIndexResultGenerator() { a_.Bind(SmiConstant(-1)); }
+
+ Node* FindIndexProcessor(Node* k_value, Node* k) {
+ Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
+ this_arg(), k_value, k, o());
+ Label false_continue(this), return_true(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
+ BIND(&return_true);
+ ReturnFromBuiltin(k);
+ BIND(&false_continue);
+ return a();
+ }
+
void ForEachResultGenerator() { a_.Bind(UndefinedConstant()); }
Node* ForEachProcessor(Node* k_value, Node* k) {
@@ -92,8 +118,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void FilterResultGenerator() {
// 7. Let A be ArraySpeciesCreate(O, 0).
- Node* len = SmiConstant(0);
- ArraySpeciesCreate(len);
+ // This version of ArraySpeciesCreate will create with the correct
+ // ElementsKind in the fast case.
+ ArraySpeciesCreate();
}
Node* FilterProcessor(Node* k_value, Node* k) {
@@ -305,7 +332,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&slow);
CallRuntime(Runtime::kSetProperty, context(), a(), k, mapped_value,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
Goto(&done);
BIND(&detached);
@@ -362,10 +389,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// TODO(danno): Seriously? Do we really need to throw the exact error
// message on null and undefined so that the webkit tests pass?
Label throw_null_undefined_exception(this, Label::kDeferred);
- GotoIf(WordEqual(receiver(), NullConstant()),
- &throw_null_undefined_exception);
- GotoIf(WordEqual(receiver(), UndefinedConstant()),
- &throw_null_undefined_exception);
+ GotoIf(IsNullOrUndefined(receiver()), &throw_null_undefined_exception);
// By the book: taken directly from the ECMAScript 2015 specification
@@ -534,11 +558,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
{
if (direction == ForEachDirection::kForward) {
// 8. Repeat, while k < len
- GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop);
+ GotoIfNumericGreaterThanOrEqual(k(), len_, &after_loop);
} else {
// OR
// 10. Repeat, while k >= 0
- GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
+ GotoIfNumericGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
}
Label done_element(this, &to_);
@@ -743,13 +767,61 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void ArraySpeciesCreate(Node* len) {
+ // This version is specialized to create a zero length array
+ // of the elements kind of the input array.
+ void ArraySpeciesCreate() {
+ Label runtime(this, Label::kDeferred), done(this);
+
+ TNode<Smi> len = SmiConstant(0);
+ TNode<Map> original_map = LoadMap(o());
+ GotoIfNot(
+ InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
+ &runtime);
+
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
+ &runtime);
+
+ Node* species_protector = SpeciesProtectorConstant();
+ Node* value =
+ LoadObjectField(species_protector, PropertyCell::kValueOffset);
+ TNode<Smi> const protector_invalid =
+ SmiConstant(Isolate::kProtectorInvalid);
+ GotoIf(WordEqual(value, protector_invalid), &runtime);
+
+ // Respect the ElementsKind of the input array.
+ TNode<Int32T> elements_kind = LoadMapElementsKind(original_map);
+ GotoIfNot(IsFastElementsKind(elements_kind), &runtime);
+ TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Map> array_map =
+ CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ TNode<JSArray> array =
+ CAST(AllocateJSArray(GetInitialFastElementsKind(), array_map, len, len,
+ nullptr, CodeStubAssembler::SMI_PARAMETERS));
+ a_.Bind(array);
+
+ Goto(&done);
+
+ BIND(&runtime);
+ {
+ // 5. Let A be ? ArraySpeciesCreate(O, len).
+ Node* constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
+ a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
+ constructor, len));
+ Goto(&fully_spec_compliant_);
+ }
+
+ BIND(&done);
+ }
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ void ArraySpeciesCreate(SloppyTNode<Smi> len) {
Label runtime(this, Label::kDeferred), done(this);
Node* const original_map = LoadMap(o());
- GotoIf(Word32NotEqual(LoadMapInstanceType(original_map),
- Int32Constant(JS_ARRAY_TYPE)),
- &runtime);
+ GotoIfNot(
+ InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
+ &runtime);
GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
&runtime);
@@ -769,8 +841,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
- Node* const native_context = LoadNativeContext(context());
- Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+ TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Map> array_map =
+ CAST(LoadJSArrayElementsMap(elements_kind, native_context));
a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
@@ -809,8 +882,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
- UndefinedConstant()));
+ CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* receiver = args.GetReceiver();
@@ -829,8 +901,7 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(
- LoadObjectField(receiver, JSArray::kLengthOffset)));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
@@ -920,8 +991,7 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
// arguments are reordered.
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
- UndefinedConstant()));
+ CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* receiver = args.GetReceiver();
@@ -954,7 +1024,7 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
@@ -1001,7 +1071,7 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
@@ -1021,7 +1091,7 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
[this, receiver, context](Node* arg) {
Node* length = LoadJSArrayLength(receiver);
CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
},
arg_index);
args.PopAndReturn(LoadJSArrayLength(receiver));
@@ -1036,11 +1106,342 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
}
}
+class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit FastArraySliceCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count,
+ Label* slow) {
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Label done(this);
+
+ GotoIf(TaggedIsNotSmi(from), slow);
+ GotoIf(TaggedIsNotSmi(count), slow);
+
+ Label try_fast_arguments(this), try_simple_slice(this);
+
+ Node* map = LoadMap(array);
+ GotoIfNot(IsJSArrayMap(map), &try_fast_arguments);
+
+ // Check prototype chain if receiver does not have packed elements
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), slow);
+
+ GotoIf(IsNoElementsProtectorCellInvalid(), slow);
+
+ GotoIf(IsSpeciesProtectorCellInvalid(), slow);
+
+ // Bailout if receiver has slow elements.
+ Node* elements_kind = LoadMapElementsKind(map);
+ GotoIfNot(IsFastElementsKind(elements_kind), &try_simple_slice);
+
+ // Make sure that the length hasn't been changed by side-effect.
+ Node* array_length = LoadJSArrayLength(array);
+ GotoIf(TaggedIsNotSmi(array_length), slow);
+ GotoIf(SmiAbove(SmiAdd(from, count), array_length), slow);
+
+ CSA_ASSERT(this, SmiGreaterThanOrEqual(from, SmiConstant(0)));
+
+ result.Bind(CallStub(CodeFactory::ExtractFastJSArray(isolate()), context,
+ array, from, count));
+ Goto(&done);
+
+ BIND(&try_fast_arguments);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const fast_aliasted_arguments_map = LoadContextElement(
+ native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+ GotoIf(WordNotEqual(map, fast_aliasted_arguments_map), &try_simple_slice);
+
+ Node* sloppy_elements = LoadElements(array);
+ Node* sloppy_elements_length = LoadFixedArrayBaseLength(sloppy_elements);
+ Node* parameter_map_length =
+ SmiSub(sloppy_elements_length,
+ SmiConstant(SloppyArgumentsElements::kParameterMapStart));
+ VARIABLE(index_out, MachineType::PointerRepresentation());
+
+ int max_fast_elements =
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
+ AllocationMemento::kSize) /
+ kPointerSize;
+ GotoIf(SmiAboveOrEqual(count, SmiConstant(max_fast_elements)),
+ &try_simple_slice);
+
+ GotoIf(SmiLessThan(from, SmiConstant(0)), slow);
+
+ Node* end = SmiAdd(from, count);
+
+ Node* unmapped_elements = LoadFixedArrayElement(
+ sloppy_elements, SloppyArgumentsElements::kArgumentsIndex);
+ Node* unmapped_elements_length =
+ LoadFixedArrayBaseLength(unmapped_elements);
+
+ GotoIf(SmiAbove(end, unmapped_elements_length), slow);
+
+ Node* array_map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, native_context);
+ result.Bind(AllocateJSArray(HOLEY_ELEMENTS, array_map, count, count,
+ nullptr, SMI_PARAMETERS));
+
+ index_out.Bind(IntPtrConstant(0));
+ Node* result_elements = LoadElements(result.value());
+ Node* from_mapped = SmiMin(parameter_map_length, from);
+ Node* to = SmiMin(parameter_map_length, end);
+ Node* arguments_context = LoadFixedArrayElement(
+ sloppy_elements, SloppyArgumentsElements::kContextIndex);
+ VariableList var_list({&index_out}, zone());
+ BuildFastLoop(
+ var_list, from_mapped, to,
+ [this, result_elements, arguments_context, sloppy_elements,
+ unmapped_elements, &index_out](Node* current) {
+ Node* context_index = LoadFixedArrayElement(
+ sloppy_elements, current,
+ kPointerSize * SloppyArgumentsElements::kParameterMapStart,
+ SMI_PARAMETERS);
+ Label is_the_hole(this), done(this);
+ GotoIf(IsTheHole(context_index), &is_the_hole);
+ Node* mapped_argument =
+ LoadContextElement(arguments_context, SmiUntag(context_index));
+ StoreFixedArrayElement(result_elements, index_out.value(),
+ mapped_argument, SKIP_WRITE_BARRIER);
+ Goto(&done);
+ BIND(&is_the_hole);
+ Node* argument = LoadFixedArrayElement(unmapped_elements, current, 0,
+ SMI_PARAMETERS);
+ StoreFixedArrayElement(result_elements, index_out.value(), argument,
+ SKIP_WRITE_BARRIER);
+ Goto(&done);
+ BIND(&done);
+ index_out.Bind(IntPtrAdd(index_out.value(), IntPtrConstant(1)));
+ },
+ 1, SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ Node* unmapped_from = SmiMin(SmiMax(parameter_map_length, from), end);
+
+ BuildFastLoop(
+ var_list, unmapped_from, end,
+ [this, unmapped_elements, result_elements, &index_out](Node* current) {
+ Node* argument = LoadFixedArrayElement(unmapped_elements, current, 0,
+ SMI_PARAMETERS);
+ StoreFixedArrayElement(result_elements, index_out.value(), argument,
+ SKIP_WRITE_BARRIER);
+ index_out.Bind(IntPtrAdd(index_out.value(), IntPtrConstant(1)));
+ },
+ 1, SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ Goto(&done);
+
+ BIND(&try_simple_slice);
+ Node* simple_result = CallRuntime(Runtime::kTrySliceSimpleNonFastElements,
+ context, array, from, count);
+ GotoIfNumber(simple_result, slow);
+ result.Bind(simple_result);
+
+ Goto(&done);
+
+ BIND(&done);
+ return result.value();
+ }
+
+ void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) {
+ // b. Let kPresent be HasProperty(O, Pk).
+ // c. ReturnIfAbrupt(kPresent).
+ Node* k_present = HasProperty(o, p_k, context, kHasProperty);
+
+ // d. If kPresent is true, then
+ Label done_element(this);
+ GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
+
+ // i. Let kValue be Get(O, Pk).
+ // ii. ReturnIfAbrupt(kValue).
+ Node* k_value = GetProperty(context, o, p_k);
+
+ // iii. Let status be CreateDataPropertyOrThrow(A, ToString(n), kValue).
+ // iv. ReturnIfAbrupt(status).
+ CallRuntime(Runtime::kCreateDataProperty, context, a, n.value(), k_value);
+
+ Goto(&done_element);
+ BIND(&done_element);
+ }
+};
+
+TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Label slow(this, Label::kDeferred), fast_elements_kind(this);
+
+ CodeStubArguments args(this, argc);
+ Node* receiver = args.GetReceiver();
+
+ VARIABLE(o, MachineRepresentation::kTagged);
+ VARIABLE(len, MachineRepresentation::kTagged);
+ Label length_done(this), generic_length(this), check_arguments_length(this),
+ load_arguments_length(this);
+
+ GotoIf(TaggedIsSmi(receiver), &generic_length);
+ GotoIfNot(IsJSArray(receiver), &check_arguments_length);
+
+ o.Bind(receiver);
+ len.Bind(LoadJSArrayLength(receiver));
+
+ // Check for the array clone case. There can be no arguments to slice, the
+ // array prototype chain must be intact and have no elements, the array has to
+ // have fast elements.
+ GotoIf(WordNotEqual(argc, IntPtrConstant(0)), &length_done);
+
+ Label clone(this);
+ BranchIfFastJSArrayForCopy(receiver, context, &clone, &length_done);
+ BIND(&clone);
+
+ args.PopAndReturn(
+ CallStub(CodeFactory::CloneFastJSArray(isolate()), context, receiver));
+
+ BIND(&check_arguments_length);
+
+ Node* map = LoadMap(receiver);
+ Node* native_context = LoadNativeContext(context);
+ GotoIfContextElementEqual(map, native_context,
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX,
+ &load_arguments_length);
+ GotoIfContextElementEqual(map, native_context,
+ Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX,
+ &load_arguments_length);
+ GotoIfContextElementEqual(map, native_context,
+ Context::STRICT_ARGUMENTS_MAP_INDEX,
+ &load_arguments_length);
+ GotoIfContextElementEqual(map, native_context,
+ Context::SLOPPY_ARGUMENTS_MAP_INDEX,
+ &load_arguments_length);
+
+ Goto(&generic_length);
+
+ BIND(&load_arguments_length);
+ Node* arguments_length =
+ LoadObjectField(receiver, JSArgumentsObject::kLengthOffset);
+ GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
+ o.Bind(receiver);
+ len.Bind(arguments_length);
+ Goto(&length_done);
+
+ BIND(&generic_length);
+ // 1. Let O be ToObject(this value).
+ // 2. ReturnIfAbrupt(O).
+ o.Bind(CallBuiltin(Builtins::kToObject, context, receiver));
+
+ // 3. Let len be ToLength(Get(O, "length")).
+ // 4. ReturnIfAbrupt(len).
+ len.Bind(ToLength_Inline(
+ context,
+ GetProperty(context, o.value(), isolate()->factory()->length_string())));
+ Goto(&length_done);
+
+ BIND(&length_done);
+
+ // 5. Let relativeStart be ToInteger(start).
+ // 6. ReturnIfAbrupt(relativeStart).
+ Node* arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ Node* relative_start = ToInteger(context, arg0);
+
+ // 7. If relativeStart < 0, let k be max((len + relativeStart),0);
+ // else let k be min(relativeStart, len.value()).
+ VARIABLE(k, MachineRepresentation::kTagged);
+ Label relative_start_positive(this), relative_start_done(this);
+ GotoIfNumericGreaterThanOrEqual(relative_start, SmiConstant(0),
+ &relative_start_positive);
+ k.Bind(NumberMax(NumberAdd(len.value(), relative_start), NumberConstant(0)));
+ Goto(&relative_start_done);
+ BIND(&relative_start_positive);
+ k.Bind(NumberMin(relative_start, len.value()));
+ Goto(&relative_start_done);
+ BIND(&relative_start_done);
+
+ // 8. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ToInteger(end).
+ // 9. ReturnIfAbrupt(relativeEnd).
+ Node* end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Label end_undefined(this), end_done(this);
+ VARIABLE(relative_end, MachineRepresentation::kTagged);
+ GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
+ relative_end.Bind(ToInteger(context, end));
+ Goto(&end_done);
+ BIND(&end_undefined);
+ relative_end.Bind(len.value());
+ Goto(&end_done);
+ BIND(&end_done);
+
+ // 10. If relativeEnd < 0, let final be max((len + relativeEnd),0);
+ // else let final be min(relativeEnd, len).
+ VARIABLE(final, MachineRepresentation::kTagged);
+ Label relative_end_positive(this), relative_end_done(this);
+ GotoIfNumericGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
+ &relative_end_positive);
+ final.Bind(NumberMax(NumberAdd(len.value(), relative_end.value()),
+ NumberConstant(0)));
+ Goto(&relative_end_done);
+ BIND(&relative_end_positive);
+ final.Bind(NumberMin(relative_end.value(), len.value()));
+ Goto(&relative_end_done);
+ BIND(&relative_end_done);
+
+ // 11. Let count be max(final – k, 0).
+ Node* count =
+ NumberMax(NumberSub(final.value(), k.value()), NumberConstant(0));
+
+ // Handle FAST_ELEMENTS
+ Label non_fast(this);
+ Node* fast_result =
+ HandleFastSlice(context, o.value(), k.value(), count, &non_fast);
+ args.PopAndReturn(fast_result);
+
+ // 12. Let A be ArraySpeciesCreate(O, count).
+ // 13. ReturnIfAbrupt(A).
+ BIND(&non_fast);
+
+ Node* constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context, o.value());
+ Node* a = ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
+ count);
+
+ // 14. Let n be 0.
+ VARIABLE(n, MachineRepresentation::kTagged);
+ n.Bind(SmiConstant(0));
+
+ Label loop(this, {&k, &n});
+ Label after_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // 15. Repeat, while k < final
+ GotoIfNumericGreaterThanOrEqual(k.value(), final.value(), &after_loop);
+
+ Node* p_k = k.value(); // ToString(context, k.value()) is no-op
+
+ CopyOneElement(context, o.value(), a, p_k, n);
+
+ // e. Increase k by 1.
+ k.Bind(NumberInc(k.value()));
+
+ // f. Increase n by 1.
+ n.Bind(NumberInc(n.value()));
+
+ Goto(&loop);
+ }
+
+ BIND(&after_loop);
+
+ // 16. Let setStatus be Set(A, "length", n, true).
+ // 17. ReturnIfAbrupt(setStatus).
+ CallRuntime(Runtime::kSetProperty, context, a,
+ HeapConstant(isolate()->factory()->length_string()), n.value(),
+ SmiConstant(static_cast<int>(LanguageMode::kStrict)));
+
+ args.PopAndReturn(a);
+}
+
TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
- UndefinedConstant()));
+ CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* receiver = args.GetReceiver();
@@ -1060,8 +1461,7 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(
- LoadObjectField(receiver, JSArray::kLengthOffset)));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
fast_elements_smi(this);
@@ -1167,17 +1567,16 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
BIND(&fast_elements_smi);
{
Node* value = LoadFixedArrayElement(elements, 0);
- int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* memmove =
- ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
- Node* start = IntPtrAdd(
- BitcastTaggedToWord(elements),
- ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_SMI_ELEMENTS,
- INTPTR_PARAMETERS, header_size));
- CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::Pointer(), MachineType::UintPtr(), memmove,
- start, IntPtrAdd(start, IntPtrConstant(kPointerSize)),
- IntPtrMul(new_length, IntPtrConstant(kPointerSize)));
+ BuildFastLoop(IntPtrConstant(0), new_length,
+ [&](Node* index) {
+ StoreFixedArrayElement(
+ elements, index,
+ LoadFixedArrayElement(
+ elements, IntPtrAdd(index, IntPtrConstant(1))),
+ SKIP_WRITE_BARRIER);
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS,
+ IndexAdvanceMode::kPost);
StoreFixedArrayElement(elements, new_length, TheHoleConstant());
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
args.PopAndReturn(value);
@@ -1196,6 +1595,72 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
}
}
+TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
+ ParameterMode mode = OptimalParameterMode();
+ Node* context = Parameter(Descriptor::kContext);
+ Node* array = Parameter(Descriptor::kSource);
+ Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode);
+ Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode);
+
+ CSA_ASSERT(this, IsJSArray(array));
+ CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
+
+ Return(ExtractFastJSArray(context, array, begin, count, mode));
+}
+
+TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* array = Parameter(Descriptor::kSource);
+
+ CSA_ASSERT(this, IsJSArray(array));
+ CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
+
+ ParameterMode mode = OptimalParameterMode();
+ Return(CloneFastJSArray(context, array, mode));
+}
+
+// ES #sec-get-%typedarray%.prototype.find
+TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingTypedArrayBuiltinBody(
+ "%TypedArray%.prototype.find",
+ &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::FindProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+}
+
+// ES #sec-get-%typedarray%.prototype.findIndex
+TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingTypedArrayBuiltinBody(
+ "%TypedArray%.prototype.findIndex",
+ &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+}
+
TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1558,6 +2023,66 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* to = Parameter(Descriptor::kTo);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
+ receiver, initial_k, len, to));
+}
+
+TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* value_k = Parameter(Descriptor::kValueK);
+ Node* result = Parameter(Descriptor::kResult);
+
+ VARIABLE(to, MachineRepresentation::kTagged, Parameter(Descriptor::kTo));
+
+ // This custom lazy deopt point is right after the callback. filter() needs
+ // to pick up at the next step, which is setting the callback result in
+ // the output array. After incrementing k and to, we can glide into the loop
+ // continuation builtin.
+
+ Label true_continue(this, &to), false_continue(this);
+
+ // iii. If selected is true, then...
+ BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
+ BIND(&true_continue);
+ {
+ // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
+ CallRuntime(Runtime::kCreateDataProperty, context, array, to.value(),
+ value_k);
+ // 2. Increase to by 1.
+ to.Bind(NumberInc(to.value()));
+ Goto(&false_continue);
+ }
+ BIND(&false_continue);
+
+ // Increment k.
+ initial_k = NumberInc(initial_k);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
+ receiver, initial_k, len, to.value()));
+}
+
TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
@@ -1689,18 +2214,17 @@ TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
GotoIf(TaggedIsSmi(object), &return_false);
TNode<Word32T> instance_type = LoadInstanceType(CAST(object));
- GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)),
- &return_true);
+ GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &return_true);
// TODO(verwaest): Handle proxies in-place.
- Branch(Word32Equal(instance_type, Int32Constant(JS_PROXY_TYPE)),
- &call_runtime, &return_false);
+ Branch(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), &call_runtime,
+ &return_false);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
BIND(&call_runtime);
Return(CallRuntime(Runtime::kArrayIsArray, context, object));
@@ -1898,32 +2422,32 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
BIND(&string_loop);
{
- CSA_ASSERT(this, IsString(search_element));
+ TNode<String> search_element_string = CAST(search_element);
Label continue_loop(this), next_iteration(this, &index_var),
slow_compare(this), runtime(this, Label::kDeferred);
- Node* search_length = LoadStringLength(search_element);
+ TNode<IntPtrT> search_length =
+ LoadStringLengthAsWord(search_element_string);
Goto(&next_iteration);
BIND(&next_iteration);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
&return_not_found);
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIf(WordEqual(search_element, element_k), &return_found);
+ GotoIf(WordEqual(search_element_string, element_k), &return_found);
Node* element_k_type = LoadInstanceType(element_k);
GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
- Branch(WordEqual(search_length, LoadStringLength(element_k)),
+ Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
&slow_compare, &continue_loop);
BIND(&slow_compare);
StringBuiltinsAssembler string_asm(state());
- string_asm.StringEqual_Core(context, search_element, search_type,
- search_length, element_k, element_k_type,
+ string_asm.StringEqual_Core(context, search_element_string, search_type,
+ element_k, element_k_type, search_length,
&return_found, &continue_loop, &runtime);
BIND(&runtime);
TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
- search_element, element_k);
- Branch(WordEqual(BooleanConstant(true), result), &return_found,
- &continue_loop);
+ search_element_string, element_k);
+ Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
BIND(&continue_loop);
Increment(&index_var);
@@ -1934,10 +2458,16 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
{
GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
&return_not_found);
+
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- TNode<Object> result = CallRuntime(Runtime::kBigIntEqual, context,
+ Label continue_loop(this);
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsBigInt(element_k), &continue_loop);
+ TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
search_element, element_k);
- GotoIf(WordEqual(result, TrueConstant()), &return_found);
+ Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+
+ BIND(&continue_loop);
Increment(&index_var);
Goto(&bigint_loop);
}
@@ -2178,11 +2708,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// (22.1.5.3), throw a TypeError exception
GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
TNode<Int32T> instance_type = LoadInstanceType(iterator);
- GotoIf(
- Uint32LessThan(
- Int32Constant(LAST_ARRAY_ITERATOR_TYPE - FIRST_ARRAY_ITERATOR_TYPE),
- Int32Sub(instance_type, Int32Constant(FIRST_ARRAY_ITERATOR_TYPE))),
- &throw_bad_receiver);
+ GotoIf(IsArrayIteratorInstanceType(instance_type), &throw_bad_receiver);
// Let a be O.[[IteratedObject]].
Node* array =
@@ -2201,10 +2727,10 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_isfastarray);
{
- CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(array_map),
- Int32Constant(JS_ARRAY_TYPE)));
+ CSA_ASSERT(
+ this, InstanceTypeEqual(LoadMapInstanceType(array_map), JS_ARRAY_TYPE));
- Node* length = LoadObjectField(array, JSArray::kLengthOffset);
+ Node* length = LoadJSArrayLength(array);
CSA_ASSERT(this, TaggedIsSmi(length));
CSA_ASSERT(this, TaggedIsSmi(index));
@@ -2262,8 +2788,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&holey_object_values);
{
- // Check the array_protector cell, and take the slow path if it's invalid.
- GotoIf(IsArrayProtectorCellInvalid(), &generic_values);
+ // Check the no_elements_protector cell, and take the slow path if it's
+ // invalid.
+ GotoIf(IsNoElementsProtectorCellInvalid(), &generic_values);
var_value.Bind(UndefinedConstant());
Node* value = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
@@ -2274,8 +2801,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&holey_double_values);
{
- // Check the array_protector cell, and take the slow path if it's invalid.
- GotoIf(IsArrayProtectorCellInvalid(), &generic_values);
+ // Check the no_elements_protector cell, and take the slow path if it's
+ // invalid.
+ GotoIf(IsNoElementsProtectorCellInvalid(), &generic_values);
var_value.Bind(UndefinedConstant());
Node* value = LoadFixedDoubleArrayElement(
@@ -2291,11 +2819,11 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Label if_istypedarray(this), if_isgeneric(this);
// If a is undefined, return CreateIterResultObject(undefined, true)
- GotoIf(WordEqual(array, UndefinedConstant()), &allocate_iterator_result);
+ GotoIf(IsUndefined(array), &allocate_iterator_result);
Node* array_type = LoadInstanceType(array);
- Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &if_istypedarray, &if_isgeneric);
+ Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
+ &if_isgeneric);
BIND(&if_isgeneric);
{
@@ -2305,12 +2833,12 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
{
VARIABLE(var_length, MachineRepresentation::kTagged);
Label if_isarray(this), if_isnotarray(this), done(this);
- Branch(Word32Equal(array_type, Int32Constant(JS_ARRAY_TYPE)),
- &if_isarray, &if_isnotarray);
+ Branch(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_isarray,
+ &if_isnotarray);
BIND(&if_isarray);
{
- var_length.Bind(LoadObjectField(array, JSArray::kLengthOffset));
+ var_length.Bind(LoadJSArrayLength(array));
// Invalidate protector cell if needed
Branch(WordNotEqual(orig_map, UndefinedConstant()), &if_wasfastarray,
@@ -2349,7 +2877,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
length = var_length.value();
}
- GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
+ GotoIfNumericGreaterThanOrEqual(index, length, &set_done);
StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
NumberInc(index));
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index ddd47fc480..70ee2326f5 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -48,28 +48,6 @@ inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
return false;
}
-inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
- int* out) {
- Context* context = *isolate->native_context();
- Map* map = object->map();
- if (map != context->sloppy_arguments_map() &&
- map != context->strict_arguments_map() &&
- map != context->fast_aliased_arguments_map()) {
- return false;
- }
- DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
- Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
- if (!len_obj->IsSmi()) return false;
- *out = Max(0, Smi::ToInt(len_obj));
-
- FixedArray* parameters = FixedArray::cast(object->elements());
- if (object->HasSloppyArgumentsElements()) {
- FixedArray* arguments = FixedArray::cast(parameters->get(1));
- return *out <= arguments->length();
- }
- return *out <= parameters->length();
-}
-
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
JSArray* receiver) {
return JSObject::PrototypeHasNoElements(isolate, receiver);
@@ -281,8 +259,8 @@ BUILTIN(ArraySlice) {
}
len = Smi::ToInt(array->length());
} else if (receiver->IsJSObject() &&
- GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
- &len)) {
+ JSSloppyArgumentsObject::GetSloppyArgumentsLength(
+ isolate, Handle<JSObject>::cast(receiver), &len)) {
// Array.prototype.slice.call(arguments, ...) is quite a common idiom
// (notably more than 50% of invocations in Web apps).
// Treat it in C++ as well.
@@ -363,7 +341,7 @@ BUILTIN(ArraySplice) {
// given as a request to delete all the elements from the start.
// And it differs from the case of undefined delete count.
// This does not follow ECMA-262, but we do the same for compatibility.
- DCHECK(len - actual_start >= 0);
+ DCHECK_GE(len - actual_start, 0);
actual_delete_count = len - actual_start;
} else {
int delete_count = 0;
@@ -437,9 +415,8 @@ class ArrayConcatVisitor {
if (!is_fixed_array()) {
LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
- MAYBE_RETURN(
- JSReceiver::CreateDataProperty(&it, elm, Object::THROW_ON_ERROR),
- false);
+ MAYBE_RETURN(JSReceiver::CreateDataProperty(&it, elm, kThrowOnError),
+ false);
return true;
}
@@ -456,13 +433,12 @@ class ArrayConcatVisitor {
// Fall-through to dictionary mode.
}
DCHECK(!fast_elements());
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(*storage_));
+ Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
- Handle<SeededNumberDictionary> result =
- SeededNumberDictionary::Set(dict, index, elm, not_a_prototype_holder);
+ Handle<NumberDictionary> result =
+ NumberDictionary::Set(dict, index, elm, not_a_prototype_holder);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -512,7 +488,7 @@ class ArrayConcatVisitor {
RETURN_ON_EXCEPTION(
isolate_,
JSReceiver::SetProperty(result, isolate_->factory()->length_string(),
- length, STRICT),
+ length, LanguageMode::kStrict),
JSReceiver);
return result;
}
@@ -525,8 +501,8 @@ class ArrayConcatVisitor {
void SetDictionaryMode() {
DCHECK(fast_elements() && is_fixed_array());
Handle<FixedArray> current_storage = storage_fixed_array();
- Handle<SeededNumberDictionary> slow_storage(
- SeededNumberDictionary::New(isolate_, current_storage->length()));
+ Handle<NumberDictionary> slow_storage(
+ NumberDictionary::New(isolate_, current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
FOR_WITH_HANDLE_SCOPE(
isolate_, uint32_t, i = 0, i, i < current_length, i++, {
@@ -535,9 +511,8 @@ class ArrayConcatVisitor {
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
- Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::Set(slow_storage, i, element,
- not_a_prototype_holder);
+ Handle<NumberDictionary> new_storage = NumberDictionary::Set(
+ slow_storage, i, element, not_a_prototype_holder);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -594,7 +569,7 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
case HOLEY_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
- DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
+ DCHECK_GE(static_cast<int32_t>(FixedArray::kMaxLength), 0);
int fast_length = static_cast<int>(length);
Isolate* isolate = array->GetIsolate();
FixedArray* elements = FixedArray::cast(array->elements());
@@ -607,10 +582,10 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
case HOLEY_DOUBLE_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
- DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+ DCHECK_GE(static_cast<int32_t>(FixedDoubleArray::kMaxLength), 0);
int fast_length = static_cast<int>(length);
if (array->elements()->IsFixedArray()) {
- DCHECK(FixedArray::cast(array->elements())->length() == 0);
+ DCHECK_EQ(FixedArray::cast(array->elements())->length(), 0);
break;
}
FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements());
@@ -620,8 +595,7 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(array->elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(array->elements());
Isolate* isolate = dictionary->GetIsolate();
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
@@ -674,7 +648,7 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
case HOLEY_DOUBLE_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
if (object->elements()->IsFixedArray()) {
- DCHECK(object->elements()->length() == 0);
+ DCHECK_EQ(object->elements()->length(), 0);
break;
}
Handle<FixedDoubleArray> elements(
@@ -690,8 +664,7 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
case DICTIONARY_ELEMENTS: {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict =
- SeededNumberDictionary::cast(object->elements());
+ NumberDictionary* dict = NumberDictionary::cast(object->elements());
uint32_t capacity = dict->Capacity();
FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
Object* k = dict->KeyAt(j);
@@ -773,7 +746,7 @@ bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
uint32_t length, ArrayConcatVisitor* visitor) {
FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
- if (!maybe.IsJust()) return false;
+ if (maybe.IsNothing()) return false;
if (maybe.FromJust()) {
Handle<Object> element_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -836,7 +809,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (!visitor->visit(j, element_value)) return false;
} else {
Maybe<bool> maybe = JSReceiver::HasElement(array, j);
- if (!maybe.IsJust()) return false;
+ if (maybe.IsNothing()) return false;
if (maybe.FromJust()) {
// Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
@@ -856,7 +829,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
if (array->elements()->IsFixedArray()) {
- DCHECK(array->elements()->length() == 0);
+ DCHECK_EQ(array->elements()->length(), 0);
break;
}
Handle<FixedDoubleArray> elements(
@@ -871,7 +844,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (!visitor->visit(j, element_value)) return false;
} else {
Maybe<bool> maybe = JSReceiver::HasElement(array, j);
- if (!maybe.IsJust()) return false;
+ if (maybe.IsNothing()) return false;
if (maybe.FromJust()) {
// Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
@@ -887,7 +860,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
}
case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(array->element_dictionary());
+ Handle<NumberDictionary> dict(array->element_dictionary());
std::vector<uint32_t> indices;
indices.reserve(dict->Capacity() / 2);
@@ -1094,7 +1067,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
storage =
isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
} else if (is_array_species) {
- storage = SeededNumberDictionary::New(isolate, estimate_nof);
+ storage = NumberDictionary::New(isolate, estimate_nof);
} else {
DCHECK(species->IsConstructor());
Handle<Object> length(Smi::kZero, isolate);
@@ -1182,7 +1155,7 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
// The Array length is guaranted to be <= kHalfOfMaxInt thus we won't
// overflow.
result_len += Smi::ToInt(array->length());
- DCHECK(result_len >= 0);
+ DCHECK_GE(result_len, 0);
// Throw an Error if we overflow the FixedArray limits
if (FixedDoubleArray::kMaxLength < result_len ||
FixedArray::kMaxLength < result_len) {
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index cbb2d7b3e5..0d0e34ee0d 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -59,9 +59,14 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
LoadObjectField(generator, JSGeneratorObject::kContinuationOffset),
SmiConstant(JSGeneratorObject::kGeneratorClosed)));
+ // Remember the {resume_mode} for the {generator}.
+ StoreObjectFieldNoWriteBarrier(generator,
+ JSGeneratorObject::kResumeModeOffset,
+ SmiConstant(resume_mode));
+
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode));
+ CallStub(callable, context, sent_value, generator);
// The resulting Promise is a throwaway, so it doesn't matter what it
// resolves to. What is important is that we don't end up keeping the
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index a42bade80f..060696ee5d 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -31,42 +31,15 @@ Node* AsyncBuiltinsAssembler::Await(
Node* const native_context = LoadNativeContext(context);
-#ifdef DEBUG
- {
- Node* const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const instance_size = LoadMapInstanceSize(map);
- // Assert that the strict function map has an instance size is
- // JSFunction::kSize
- CSA_ASSERT(this, WordEqual(instance_size, IntPtrConstant(JSFunction::kSize /
- kPointerSize)));
- }
-#endif
-
-#ifdef DEBUG
- {
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Node* const map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const instance_size = LoadMapInstanceSize(map);
- // Assert that the JSPromise map has an instance size is
- // JSPromise::kSize
- CSA_ASSERT(this,
- WordEqual(instance_size,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kPointerSize)));
- }
-#endif
-
static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
static const int kThrowawayPromiseOffset =
kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kResolveClosureOffset =
kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kRejectClosureOffset =
- kResolveClosureOffset + JSFunction::kSize;
- static const int kTotalSize = kRejectClosureOffset + JSFunction::kSize;
+ kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
+ static const int kTotalSize =
+ kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
Node* const base = AllocateInNewSpace(kTotalSize);
Node* const closure_context = base;
@@ -79,16 +52,21 @@ Node* AsyncBuiltinsAssembler::Await(
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
Node* const promise_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ // Assert that the JSPromise map has an instance size is
+ // JSPromise::kSizeWithEmbedderFields.
+ CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
+ kPointerSize)));
Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
{
// Initialize Promise
StoreMapNoWriteBarrier(wrapped_value, promise_map);
InitializeJSObjectFromMap(
wrapped_value, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields),
- EmptyFixedArrayConstant(), EmptyFixedArrayConstant());
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
PromiseInit(wrapped_value);
}
@@ -98,8 +76,7 @@ Node* AsyncBuiltinsAssembler::Await(
StoreMapNoWriteBarrier(throwaway, promise_map);
InitializeJSObjectFromMap(
throwaway, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields),
- EmptyFixedArrayConstant(), EmptyFixedArrayConstant());
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
PromiseInit(throwaway);
}
@@ -147,7 +124,7 @@ Node* AsyncBuiltinsAssembler::Await(
Node* const key =
HeapConstant(factory()->promise_forwarding_handler_symbol());
CallRuntime(Runtime::kSetProperty, context, on_reject, key,
- TrueConstant(), SmiConstant(STRICT));
+ TrueConstant(), SmiConstant(LanguageMode::kStrict));
GotoIf(IsFalse(is_predicted_as_caught), &common);
PromiseSetHandledHint(value);
@@ -161,7 +138,7 @@ Node* AsyncBuiltinsAssembler::Await(
Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
}
Goto(&do_perform_promise_then);
@@ -179,6 +156,11 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Node* context_index) {
Node* const function_map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ // Ensure that we don't have to initialize prototype_or_initial_map field of
+ // JSFunction.
+ CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
+ IntPtrConstant(JSFunction::kSizeWithoutPrototype /
+ kPointerSize)));
StoreMapNoWriteBarrier(function, function_map);
StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -186,8 +168,6 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(function, JSFunction::kFeedbackVectorOffset,
Heap::kUndefinedCellRootIndex);
- StoreObjectFieldRoot(function, JSFunction::kPrototypeOrInitialMapOffset,
- Heap::kTheHoleValueRootIndex);
Node* shared_info = LoadContextElement(native_context, context_index);
CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 230da6bbe7..70726a5f9d 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -237,8 +237,12 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator,
- SmiConstant(resume_mode));
+ // Remember the {resume_mode} for the {generator}.
+ StoreObjectFieldNoWriteBarrier(generator,
+ JSGeneratorObject::kResumeModeOffset,
+ SmiConstant(resume_mode));
+
+ CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
@@ -489,8 +493,11 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
BIND(&resume_generator);
{
+ // Remember the {resume_type} for the {generator}.
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSGeneratorObject::kResumeModeOffset, resume_type);
CallStub(CodeFactory::ResumeGenerator(isolate()), context,
- LoadValueFromAsyncGeneratorRequest(next), generator, resume_type);
+ LoadValueFromAsyncGeneratorRequest(next), generator);
var_state.Bind(LoadGeneratorState(generator));
var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator));
Goto(&start);
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index a2526795c3..6d9bb6e797 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -15,29 +15,25 @@ BUILTIN(BigIntConstructor) {
HandleScope scope(isolate);
Handle<Object> value = args.atOrUndefined(isolate, 1);
- // TODO(jkummerow): Implement properly.
+ if (value->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
+ ToPrimitiveHint::kNumber));
+ }
- // Dummy implementation only takes Smi args.
- if (!value->IsSmi()) return isolate->heap()->undefined_value();
- int num = Smi::ToInt(*value);
- return *isolate->factory()->NewBigIntFromInt(num);
+ if (value->IsNumber()) {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
+ } else {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
+ }
}
BUILTIN(BigIntConstructor_ConstructStub) {
HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- DCHECK(*target == target->native_context()->bigint_function());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
-
- // TODO(jkummerow): Implement.
- USE(value);
- USE(result);
-
- UNIMPLEMENTED();
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->BigInt_string()));
}
BUILTIN(BigIntParseInt) {
@@ -48,8 +44,7 @@ BUILTIN(BigIntParseInt) {
// Convert {string} to a String and flatten it.
// Fast path: avoid back-and-forth conversion for Smi inputs.
if (string->IsSmi() && radix->IsUndefined(isolate)) {
- int num = Smi::ToInt(*string);
- return *isolate->factory()->NewBigIntFromInt(num);
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, string));
}
Handle<String> subject;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
@@ -65,7 +60,7 @@ BUILTIN(BigIntParseInt) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewSyntaxError(MessageTemplate::kToRadixFormatRange));
}
- RETURN_RESULT_OR_FAILURE(isolate, StringToBigInt(isolate, subject, radix32));
+ RETURN_RESULT_OR_FAILURE(isolate, BigIntParseInt(isolate, subject, radix32));
}
BUILTIN(BigIntAsUintN) {
@@ -73,11 +68,16 @@ BUILTIN(BigIntAsUintN) {
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
Handle<Object> bigint_obj = args.atOrUndefined(isolate, 2);
- // TODO(jkummerow): Implement.
- USE(bits_obj);
- USE(bigint_obj);
+ Handle<Object> bits;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, bits,
+ Object::ToIndex(isolate, bits_obj, MessageTemplate::kInvalidIndex));
- UNIMPLEMENTED();
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
+ BigInt::FromObject(isolate, bigint_obj));
+
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::AsUintN(bits->Number(), bigint));
}
BUILTIN(BigIntAsIntN) {
@@ -85,11 +85,16 @@ BUILTIN(BigIntAsIntN) {
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
Handle<Object> bigint_obj = args.atOrUndefined(isolate, 2);
- // TODO(jkummerow): Implement.
- USE(bits_obj);
- USE(bigint_obj);
+ Handle<Object> bits;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, bits,
+ Object::ToIndex(isolate, bits_obj, MessageTemplate::kInvalidIndex));
- UNIMPLEMENTED();
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
+ BigInt::FromObject(isolate, bigint_obj));
+
+ return *BigInt::AsIntN(bits->Number(), bigint);
}
BUILTIN(BigIntPrototypeToLocaleString) {
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index ab428e8caa..d4a7153d74 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -155,7 +155,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
// protector is intact and our prototype is the Array.prototype actually.
GotoIfNot(IsPrototypeInitialArrayPrototype(context, arguments_list_map),
&if_runtime);
- Branch(IsArrayProtectorCellInvalid(), &if_runtime, &if_done);
+ Branch(IsNoElementsProtectorCellInvalid(), &if_runtime, &if_done);
}
BIND(&if_arguments);
@@ -165,8 +165,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
LoadObjectField(arguments_list, JSArgumentsObject::kLengthOffset);
Node* elements =
LoadObjectField(arguments_list, JSArgumentsObject::kElementsOffset);
- Node* elements_length =
- LoadObjectField(elements, FixedArray::kLengthOffset);
+ Node* elements_length = LoadFixedArrayBaseLength(elements);
GotoIfNot(WordEqual(length, elements_length), &if_runtime);
var_elements.Bind(elements);
var_length.Bind(SmiToWord32(length));
@@ -291,8 +290,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
// Check that the map of the initial array iterator hasn't changed.
Node* native_context = LoadNativeContext(context);
- Node* arr_it_proto_map = LoadMap(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ Node* arr_it_proto_map = LoadMap(CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
Node* initial_map = LoadContextElement(
native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX);
GotoIfNot(WordEqual(arr_it_proto_map, initial_map), &if_runtime);
@@ -311,9 +310,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
&if_runtime);
Branch(Word32And(kind, Int32Constant(1)), &if_holey, &if_done);
- // Check the ArrayProtector cell for holey arrays.
+ // Check the NoElementsProtector cell for holey arrays.
BIND(&if_holey);
- { Branch(IsArrayProtectorCellInvalid(), &if_runtime, &if_done); }
+ { Branch(IsNoElementsProtectorCellInvalid(), &if_runtime, &if_done); }
BIND(&if_runtime);
{
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 4aa7fa310b..aec265dc35 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -13,22 +13,486 @@ namespace v8 {
namespace internal {
using compiler::Node;
+template <class T>
+using TNode = compiler::TNode<T>;
+template <class T>
+using TVariable = compiler::TypedCodeAssemblerVariable<T>;
-class CollectionsBuiltinsAssembler : public CodeStubAssembler {
+class BaseCollectionsAssembler : public CodeStubAssembler {
public:
- explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ explicit BaseCollectionsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
+ virtual ~BaseCollectionsAssembler() {}
+
protected:
- Node* AllocateJSMap(Node* js_map_function);
+ enum Variant { kMap, kSet };
+
+ // Adds an entry to a collection. For Maps, properly handles extracting the
+ // key and value from the entry (see LoadKeyValue()).
+ TNode<Object> AddConstructorEntry(Variant variant, TNode<Context> context,
+ TNode<Object> collection,
+ TNode<Object> add_function,
+ TNode<Object> key_value,
+ Label* if_exception = nullptr,
+ TVariable<Object>* var_exception = nullptr);
+
+ // Adds constructor entries to a collection. Choosing a fast path when
+ // possible.
+ void AddConstructorEntries(Variant variant, TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> collection,
+ TNode<Object> initial_entries,
+ TNode<BoolT> is_fast_jsarray);
+
+ // Fast path for adding constructor entries. Assumes the entries are a fast
+ // JS array (see CodeStubAssembler::BranchIfFastJSArray()).
+ void AddConstructorEntriesFromFastJSArray(Variant variant,
+ TNode<Context> context,
+ TNode<Object> collection,
+ TNode<JSArray> fast_jsarray);
+
+ // Adds constructor entries to a collection using the iterator protocol.
+ void AddConstructorEntriesFromIterable(Variant variant,
+ TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> collection,
+ TNode<Object> iterable);
+
+ // Constructs a collection instance. Choosing a fast path when possible.
+ TNode<Object> AllocateJSCollection(TNode<Context> context,
+ TNode<Context> native_context,
+ int constructor_function_index,
+ TNode<Object> new_target);
+
+ // Fast path for constructing a collection instance if the constructor
+ // function has not been modified.
+ TNode<Object> AllocateJSCollectionFast(TNode<HeapObject> constructor);
+
+ // Fallback for constructing a collection instance if the constructor function
+ // has been modified.
+ TNode<Object> AllocateJSCollectionSlow(TNode<Context> context,
+ TNode<HeapObject> constructor,
+ TNode<Object> new_target);
+
+ // Allocates the backing store for a collection.
+ virtual TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
+ TNode<IntPtrT> at_least_space_for) = 0;
+
+ // Main entry point for a collection constructor builtin.
+ void GenerateConstructor(Variant variant,
+ const int constructor_function_index,
+ Handle<String> constructor_function_name,
+ int collection_tableoffset);
+
+ // Retrieves the collection function that adds an entry. `set` for Maps and
+ // `add` for Sets.
+ TNode<Object> GetAddFunction(Variant variant, TNode<Context> context,
+ TNode<Object> collection);
+
+ // Estimates the number of entries the collection will have after adding the
+ // entries passed in the constructor. AllocateTable() can use this to avoid
+ // the time of growing/rehashing when adding the constructor entries.
+ TNode<IntPtrT> EstimatedInitialSize(TNode<Object> initial_entries,
+ TNode<BoolT> is_fast_jsarray);
+
+ void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver);
+
+ // Loads an element from a fixed array. If the element is the hole, returns
+ // `undefined`.
+ TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<Object> elements,
+ TNode<IntPtrT> index);
+
+ // Loads an element from a fixed double array. If the element is the hole,
+ // returns `undefined`.
+ TNode<Object> LoadAndNormalizeFixedDoubleArrayElement(TNode<Object> elements,
+ TNode<IntPtrT> index);
+
+ // Loads key and value variables with the first and second elements of an
+ // array. If the array lacks 2 elements, undefined is used.
+ void LoadKeyValue(TNode<Context> context, TNode<Object> maybe_array,
+ TVariable<Object>* key, TVariable<Object>* value,
+ Label* if_exception = nullptr,
+ TVariable<Object>* var_exception = nullptr);
+};
+
+TNode<Object> BaseCollectionsAssembler::AddConstructorEntry(
+ Variant variant, TNode<Context> context, TNode<Object> collection,
+ TNode<Object> add_function, TNode<Object> key_value, Label* if_exception,
+ TVariable<Object>* var_exception) {
+ CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
+ if (variant == kMap) {
+ Label exit(this), if_notobject(this, Label::kDeferred);
+ GotoIfNotJSReceiver(key_value, &if_notobject);
+
+ TVARIABLE(Object, key);
+ TVARIABLE(Object, value);
+ LoadKeyValue(context, key_value, &key, &value, if_exception, var_exception);
+ Node* key_n = key;
+ Node* value_n = value;
+ TNode<Object> add_call =
+ UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
+ add_function, collection, key_n, value_n));
+ Goto(&exit);
+
+ BIND(&if_notobject);
+ {
+ Node* ret = CallRuntime(
+ Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIteratorValueNotAnObject), key_value);
+ if (if_exception != nullptr) {
+ DCHECK(var_exception != nullptr);
+ GotoIfException(ret, if_exception, var_exception);
+ }
+ Unreachable();
+ }
+ BIND(&exit);
+ return add_call;
+
+ } else { // variant == kSet
+ DCHECK(variant == kSet);
+ return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
+ add_function, collection, key_value));
+ }
+}
+
+void BaseCollectionsAssembler::AddConstructorEntries(
+ Variant variant, TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> collection, TNode<Object> initial_entries,
+ TNode<BoolT> is_fast_jsarray) {
+ Label exit(this), slow_loop(this, Label::kDeferred);
+ GotoIf(IsNullOrUndefined(initial_entries), &exit);
+
+ // TODO(mvstanton): Re-enable the fast path when a fix is found for
+ // crbug.com/798026.
+ {
+ AddConstructorEntriesFromIterable(variant, context, native_context,
+ collection, initial_entries);
+ Goto(&exit);
+ }
+ BIND(&exit);
+}
+
+void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
+ Variant variant, TNode<Context> context, TNode<Object> collection,
+ TNode<JSArray> fast_jsarray) {
+ TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(fast_jsarray));
+ TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
+ TNode<Object> add_func = GetAddFunction(variant, context, collection);
+
+ CSA_ASSERT(this, IsFastJSArray(fast_jsarray, context));
+ CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
+
+ Label exit(this), if_doubles(this), if_smiorobjects(this);
+ Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
+ &if_doubles);
+ BIND(&if_smiorobjects);
+ {
+ auto set_entry = [&](Node* index) {
+ TNode<Object> element = LoadAndNormalizeFixedArrayElement(
+ elements, UncheckedCast<IntPtrT>(index));
+ AddConstructorEntry(variant, context, collection, add_func, element);
+ };
+ BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
+ ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Goto(&exit);
+ }
+ BIND(&if_doubles);
+ {
+ // A Map constructor requires entries to be arrays (ex. [key, value]),
+ // so a FixedDoubleArray can never succeed.
+ if (variant == kMap) {
+ TNode<Float64T> element =
+ UncheckedCast<Float64T>(LoadFixedDoubleArrayElement(
+ elements, IntPtrConstant(0), MachineType::Float64(), 0,
+ INTPTR_PARAMETERS));
+ ThrowTypeError(context, MessageTemplate::kIteratorValueNotAnObject,
+ AllocateHeapNumberWithValue(element));
+ } else {
+ auto set_entry = [&](Node* index) {
+ TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
+ elements, UncheckedCast<IntPtrT>(index));
+ AddConstructorEntry(kSet, context, collection, add_func, entry);
+ };
+ BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
+ ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Goto(&exit);
+ }
+ }
+ BIND(&exit);
+}
+
+void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
+ Variant variant, TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> collection, TNode<Object> iterable) {
+ Label exit(this), loop(this), if_exception(this, Label::kDeferred);
+ CSA_ASSERT(this, Word32BinaryNot(IsNullOrUndefined(iterable)));
+
+ TNode<Object> add_func = GetAddFunction(variant, context, collection);
+ IteratorBuiltinsAssembler iterator_assembler(this->state());
+ TNode<Object> iterator =
+ CAST(iterator_assembler.GetIterator(context, iterable));
+
+ CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator)));
+
+ TNode<Object> fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TVARIABLE(Object, var_exception);
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ TNode<Object> next = CAST(iterator_assembler.IteratorStep(
+ context, iterator, &exit, fast_iterator_result_map));
+ TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map));
+ TNode<Object> add_result =
+ AddConstructorEntry(variant, context, collection, add_func, next_value,
+ &if_exception, &var_exception);
+ GotoIfException(add_result, &if_exception, &var_exception);
+ Goto(&loop);
+ }
+ BIND(&if_exception);
+ {
+ iterator_assembler.IteratorCloseOnException(context, iterator,
+ &var_exception);
+ }
+ BIND(&exit);
+}
+
+TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
+ TNode<Context> context, TNode<Context> native_context,
+ int constructor_function_index, TNode<Object> new_target) {
+ TNode<HeapObject> constructor =
+ CAST(LoadContextElement(native_context, constructor_function_index));
+ TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
+
+ return Select<Object>(is_target_unmodified,
+ [=] { return AllocateJSCollectionFast(constructor); },
+ [=] {
+ return AllocateJSCollectionSlow(context, constructor,
+ new_target);
+ },
+ MachineRepresentation::kTagged);
+}
+
+TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
+ TNode<HeapObject> constructor) {
+ CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor)));
+ TNode<Object> initial_map =
+ LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ return CAST(AllocateJSObjectFromMap(initial_map));
+}
+
+TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
+ TNode<Context> context, TNode<HeapObject> constructor,
+ TNode<Object> new_target) {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ return CAST(constructor_assembler.EmitFastNewObject(context, constructor,
+ new_target));
+}
+
+void BaseCollectionsAssembler::GenerateConstructor(
+ Variant variant, const int constructor_function_index,
+ Handle<String> constructor_function_name, int collection_tableoffset) {
+ const int kIterableArg = 0;
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ TNode<Object> iterable = args.GetOptionalArgumentValue(kIterableArg);
+ TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Label if_undefined(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &if_undefined);
+
+ TNode<BoolT> is_fast_jsarray = IsFastJSArray(iterable, context);
+ TNode<IntPtrT> at_least_space_for =
+ EstimatedInitialSize(iterable, is_fast_jsarray);
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> collection = AllocateJSCollection(
+ context, native_context, constructor_function_index, new_target);
+ TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+
+ StoreObjectField(collection, collection_tableoffset, table);
+ AddConstructorEntries(variant, context, native_context, collection, iterable,
+ is_fast_jsarray);
+ Return(collection);
+
+ BIND(&if_undefined);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
+ HeapConstant(constructor_function_name));
+}
+
+TNode<Object> BaseCollectionsAssembler::GetAddFunction(
+ Variant variant, TNode<Context> context, TNode<Object> collection) {
+ // TODO(pwong): Consider calling the builtin directly when the prototype is
+ // unmodified. This will require tracking WeakMap/WeakSet prototypes on the
+ // native context.
+ Handle<String> add_func_name = variant == kMap
+ ? isolate()->factory()->set_string()
+ : isolate()->factory()->add_string();
+ TNode<Object> add_func =
+ CAST(GetProperty(context, collection, add_func_name));
+
+ Label exit(this), if_notcallable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(add_func), &if_notcallable);
+ GotoIfNot(IsCallable(add_func), &if_notcallable);
+ Goto(&exit);
+
+ BIND(&if_notcallable);
+ ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, add_func,
+ HeapConstant(add_func_name), collection);
+
+ BIND(&exit);
+ return add_func;
+}
+
+TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
+ TNode<Object> initial_entries, TNode<BoolT> is_fast_jsarray) {
+ return Select<IntPtrT>(
+ is_fast_jsarray,
+ [=] { return SmiUntag(LoadFastJSArrayLength(CAST(initial_entries))); },
+ [=] { return IntPtrConstant(0); }, MachineType::PointerRepresentation());
+}
+
+void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
+ Label* if_not_receiver) {
+ GotoIf(TaggedIsSmi(obj), if_not_receiver);
+ GotoIfNot(IsJSReceiver(obj), if_not_receiver);
+}
+
+TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
+ TNode<Object> elements, TNode<IntPtrT> index) {
+ TNode<Object> element = CAST(LoadFixedArrayElement(elements, index));
+ return Select<Object>(IsTheHole(element), [=] { return UndefinedConstant(); },
+ [=] { return element; },
+ MachineRepresentation::kTagged);
+}
+
+TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
+ TNode<Object> elements, TNode<IntPtrT> index) {
+ TVARIABLE(Object, entry);
+ Label if_hole(this, Label::kDeferred), next(this);
+ TNode<Float64T> element = UncheckedCast<Float64T>(LoadFixedDoubleArrayElement(
+ elements, index, MachineType::Float64(), 0, INTPTR_PARAMETERS, &if_hole));
+ { // not hole
+ entry = AllocateHeapNumberWithValue(element);
+ Goto(&next);
+ }
+ BIND(&if_hole);
+ {
+ entry = UndefinedConstant();
+ Goto(&next);
+ }
+ BIND(&next);
+ return entry;
+}
+
+void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
+ TNode<Object> maybe_array,
+ TVariable<Object>* key,
+ TVariable<Object>* value,
+ Label* if_exception,
+ TVariable<Object>* var_exception) {
+ CSA_ASSERT(this, Word32BinaryNot(IsTheHole(maybe_array)));
+
+ Label exit(this), if_fast(this), if_slow(this, Label::kDeferred);
+ BranchIfFastJSArray(maybe_array, context, &if_fast, &if_slow);
+ BIND(&if_fast);
+ {
+ TNode<JSArray> array = CAST(maybe_array);
+ TNode<Smi> length = LoadFastJSArrayLength(array);
+ TNode<FixedArrayBase> elements = LoadElements(array);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(array));
+
+ Label if_smiorobjects(this), if_doubles(this);
+ Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
+ &if_doubles);
+ BIND(&if_smiorobjects);
+ {
+ Label if_one(this), if_two(this);
+ GotoIf(SmiGreaterThan(length, SmiConstant(1)), &if_two);
+ GotoIf(SmiEqual(length, SmiConstant(1)), &if_one);
+ { // empty array
+ *key = UndefinedConstant();
+ *value = UndefinedConstant();
+ Goto(&exit);
+ }
+ BIND(&if_one);
+ {
+ *key = LoadAndNormalizeFixedArrayElement(elements, IntPtrConstant(0));
+ *value = UndefinedConstant();
+ Goto(&exit);
+ }
+ BIND(&if_two);
+ {
+ *key = LoadAndNormalizeFixedArrayElement(elements, IntPtrConstant(0));
+ *value = LoadAndNormalizeFixedArrayElement(elements, IntPtrConstant(1));
+ Goto(&exit);
+ }
+ }
+ BIND(&if_doubles);
+ {
+ Label if_one(this), if_two(this);
+ GotoIf(SmiGreaterThan(length, SmiConstant(1)), &if_two);
+ GotoIf(SmiEqual(length, SmiConstant(1)), &if_one);
+ { // empty array
+ *key = UndefinedConstant();
+ *value = UndefinedConstant();
+ Goto(&exit);
+ }
+ BIND(&if_one);
+ {
+ *key = LoadAndNormalizeFixedDoubleArrayElement(elements,
+ IntPtrConstant(0));
+ *value = UndefinedConstant();
+ Goto(&exit);
+ }
+ BIND(&if_two);
+ {
+ *key = LoadAndNormalizeFixedDoubleArrayElement(elements,
+ IntPtrConstant(0));
+ *value = LoadAndNormalizeFixedDoubleArrayElement(elements,
+ IntPtrConstant(1));
+ Goto(&exit);
+ }
+ }
+ }
+ BIND(&if_slow);
+ {
+ *key = UncheckedCast<Object>(
+ GetProperty(context, maybe_array, isolate()->factory()->zero_string()));
+ if (if_exception != nullptr) {
+ DCHECK(var_exception != nullptr);
+ GotoIfException(*key, if_exception, var_exception);
+ }
+
+ *value = UncheckedCast<Object>(
+ GetProperty(context, maybe_array, isolate()->factory()->one_string()));
+ if (if_exception != nullptr) {
+ DCHECK(var_exception != nullptr);
+ GotoIfException(*value, if_exception, var_exception);
+ }
+ Goto(&exit);
+ }
+ BIND(&exit);
+}
+
+class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
+ public:
+ explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : BaseCollectionsAssembler(state) {}
+ protected:
template <typename CollectionType>
Node* AllocateOrderedHashTable();
- Node* AllocateJSCollection(Node* js_map_function);
template <typename IteratorType>
Node* AllocateJSCollectionIterator(Node* context, int map_index,
Node* collection);
-
+ TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
+ TNode<IntPtrT> at_least_space_for);
Node* GetHash(Node* const key);
Node* CallGetHashRaw(Node* const key);
Node* CallGetOrCreateHashRaw(Node* const key);
@@ -152,14 +616,11 @@ Node* CollectionsBuiltinsAssembler::AllocateOrderedHashTable() {
// Allocate the table and add the proper map.
const ElementsKind elements_kind = HOLEY_ELEMENTS;
Node* const length_intptr = IntPtrConstant(kFixedArrayLength);
- Node* const table = AllocateFixedArray(elements_kind, length_intptr);
- CSA_ASSERT(this,
- IntPtrLessThanOrEqual(
- length_intptr, IntPtrConstant(FixedArray::kMaxRegularLength)));
- Heap::RootListIndex map_index = Heap::kOrderedHashTableMapRootIndex;
- // TODO(gsathya): Directly store correct in AllocateFixedArray,
- // instead of overwriting here.
- StoreMapNoWriteBarrier(table, map_index);
+ Node* const fixed_array_map = LoadRoot(
+ static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex()));
+ Node* const table =
+ AllocateFixedArray(elements_kind, length_intptr, INTPTR_PARAMETERS,
+ kAllowLargeObjectAllocation, fixed_array_map);
// Initialize the OrderedHashTable fields.
const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
@@ -191,19 +652,6 @@ Node* CollectionsBuiltinsAssembler::AllocateOrderedHashTable() {
return table;
}
-Node* CollectionsBuiltinsAssembler::AllocateJSCollection(
- Node* js_map_function) {
- CSA_ASSERT(this, IsConstructorMap(LoadMap(js_map_function)));
- Node* const initial_map = LoadObjectField(
- js_map_function, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const instance = AllocateJSObjectFromMap(initial_map);
-
- StoreObjectFieldRoot(instance, JSMap::kTableOffset,
- Heap::kUndefinedValueRootIndex);
-
- return instance;
-}
-
template <typename IteratorType>
Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
Node* context, int map_index, Node* collection) {
@@ -222,225 +670,21 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
return iterator;
}
-TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- const int kIterableArg = 0;
-
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
-
- Node* const iterable = args.GetOptionalArgumentValue(kIterableArg);
- Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
-
- Label if_target_is_undefined(this, Label::kDeferred);
- GotoIf(IsUndefined(new_target), &if_target_is_undefined);
-
- Node* const native_context = LoadNativeContext(context);
- Node* const js_map_fun =
- LoadContextElement(native_context, Context::JS_MAP_FUN_INDEX);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- Label init(this), exit(this), if_targetisnotmodified(this),
- if_targetismodified(this);
- Branch(WordEqual(js_map_fun, new_target), &if_targetisnotmodified,
- &if_targetismodified);
-
- BIND(&if_targetisnotmodified);
- {
- Node* const instance = AllocateJSCollection(js_map_fun);
- var_result.Bind(instance);
- Goto(&init);
- }
-
- BIND(&if_targetismodified);
- {
- ConstructorBuiltinsAssembler constructor_assembler(this->state());
- Node* const instance = constructor_assembler.EmitFastNewObject(
- context, js_map_fun, new_target);
- var_result.Bind(instance);
- Goto(&init);
- }
-
- BIND(&init);
- Node* table = AllocateOrderedHashTable<OrderedHashMap>();
- StoreObjectField(var_result.value(), JSMap::kTableOffset, table);
-
- GotoIf(Word32Or(IsUndefined(iterable), IsNull(iterable)), &exit);
-
- Label if_notcallable(this);
- // TODO(gsathya): Add fast path for unmodified maps.
- Node* const adder = GetProperty(context, var_result.value(),
- isolate()->factory()->set_string());
- GotoIf(TaggedIsSmi(adder), &if_notcallable);
- GotoIfNot(IsCallable(adder), &if_notcallable);
-
- IteratorBuiltinsAssembler iterator_assembler(this->state());
- Node* const iterator = iterator_assembler.GetIterator(context, iterable);
- GotoIf(IsUndefined(iterator), &exit);
-
- Node* const fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
-
- VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
-
- Label loop(this), if_notobject(this), if_exception(this);
- Goto(&loop);
-
- BIND(&loop);
- {
- Node* const next = iterator_assembler.IteratorStep(
- context, iterator, &exit, fast_iterator_result_map);
-
- Node* const next_value = iterator_assembler.IteratorValue(
- context, next, fast_iterator_result_map);
-
- GotoIf(TaggedIsSmi(next_value), &if_notobject);
- GotoIfNot(IsJSReceiver(next_value), &if_notobject);
-
- Node* const k =
- GetProperty(context, next_value, isolate()->factory()->zero_string());
- GotoIfException(k, &if_exception, &var_exception);
-
- Node* const v =
- GetProperty(context, next_value, isolate()->factory()->one_string());
- GotoIfException(v, &if_exception, &var_exception);
-
- Node* add_call = CallJS(CodeFactory::Call(isolate()), context, adder,
- var_result.value(), k, v);
- GotoIfException(add_call, &if_exception, &var_exception);
- Goto(&loop);
-
- BIND(&if_notobject);
- {
- Node* ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIteratorValueNotAnObject), next_value);
- GotoIfException(ret, &if_exception, &var_exception);
- Unreachable();
- }
- }
-
- BIND(&if_exception);
- {
- iterator_assembler.IteratorCloseOnException(context, iterator,
- &var_exception);
- }
-
- BIND(&if_notcallable);
- {
- Node* const receiver_str = HeapConstant(isolate()->factory()->add_string());
- ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, adder,
- receiver_str, var_result.value());
- }
-
- BIND(&if_target_is_undefined);
- ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
- HeapConstant(isolate()->factory()->Map_string()));
+TNode<Object> CollectionsBuiltinsAssembler::AllocateTable(
+ Variant variant, TNode<Context> context,
+ TNode<IntPtrT> at_least_space_for) {
+ return CAST(variant == kMap ? AllocateOrderedHashTable<OrderedHashMap>()
+ : AllocateOrderedHashTable<OrderedHashSet>());
+}
- BIND(&exit);
- args.PopAndReturn(var_result.value());
+TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
+ GenerateConstructor(kMap, Context::JS_MAP_FUN_INDEX,
+ isolate()->factory()->Map_string(), JSMap::kTableOffset);
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- const int kIterableArg = 0;
-
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
-
- Node* const iterable = args.GetOptionalArgumentValue(kIterableArg);
- Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
-
- Label if_target_is_undefined(this, Label::kDeferred);
- GotoIf(IsUndefined(new_target), &if_target_is_undefined);
-
- Node* const native_context = LoadNativeContext(context);
- Node* const js_set_fun =
- LoadContextElement(native_context, Context::JS_SET_FUN_INDEX);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- Label init(this), exit(this), if_targetisnotmodified(this),
- if_targetismodified(this);
- Branch(WordEqual(js_set_fun, new_target), &if_targetisnotmodified,
- &if_targetismodified);
-
- BIND(&if_targetisnotmodified);
- {
- Node* const instance = AllocateJSCollection(js_set_fun);
- var_result.Bind(instance);
- Goto(&init);
- }
-
- BIND(&if_targetismodified);
- {
- ConstructorBuiltinsAssembler constructor_assembler(this->state());
- Node* const instance = constructor_assembler.EmitFastNewObject(
- context, js_set_fun, new_target);
- var_result.Bind(instance);
- Goto(&init);
- }
-
- BIND(&init);
- Node* table = AllocateOrderedHashTable<OrderedHashSet>();
- StoreObjectField(var_result.value(), JSSet::kTableOffset, table);
-
- GotoIf(Word32Or(IsUndefined(iterable), IsNull(iterable)), &exit);
-
- Label if_notcallable(this);
- // TODO(gsathya): Add fast path for unmodified maps.
- Node* const adder = GetProperty(context, var_result.value(),
- isolate()->factory()->add_string());
- GotoIf(TaggedIsSmi(adder), &if_notcallable);
- GotoIfNot(IsCallable(adder), &if_notcallable);
-
- IteratorBuiltinsAssembler iterator_assembler(this->state());
- Node* const iterator = iterator_assembler.GetIterator(context, iterable);
- GotoIf(IsUndefined(iterator), &exit);
-
- Node* const fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
-
- VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
-
- Label loop(this), if_notobject(this), if_exception(this);
- Goto(&loop);
-
- BIND(&loop);
- {
- Node* const next = iterator_assembler.IteratorStep(
- context, iterator, &exit, fast_iterator_result_map);
-
- Node* const next_value = iterator_assembler.IteratorValue(
- context, next, fast_iterator_result_map);
-
- Node* add_call = CallJS(CodeFactory::Call(isolate()), context, adder,
- var_result.value(), next_value);
-
- GotoIfException(add_call, &if_exception, &var_exception);
- Goto(&loop);
- }
-
- BIND(&if_exception);
- {
- iterator_assembler.IteratorCloseOnException(context, iterator,
- &var_exception);
- }
-
- BIND(&if_notcallable);
- ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, adder,
- HeapConstant(isolate()->factory()->add_string()),
- var_result.value());
-
- BIND(&if_target_is_undefined);
- ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
- HeapConstant(isolate()->factory()->Set_string()));
-
- BIND(&exit);
- args.PopAndReturn(var_result.value());
+ GenerateConstructor(kSet, Context::JS_SET_FUN_INDEX,
+ isolate()->factory()->Set_string(), JSSet::kTableOffset);
}
Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
@@ -473,30 +717,24 @@ Node* CollectionsBuiltinsAssembler::CallGetHashRaw(Node* const key) {
}
Node* CollectionsBuiltinsAssembler::GetHash(Node* const key) {
- VARIABLE(var_result, MachineType::PointerRepresentation());
- Label if_jsobject(this), other(this), done(this);
- Node* instance_type = LoadMapInstanceType(LoadMap(key));
- Branch(IsJSObjectInstanceType(instance_type), &if_jsobject, &other);
+ VARIABLE(var_hash, MachineType::PointerRepresentation());
+ Label if_receiver(this), if_other(this), done(this);
+ Branch(IsJSReceiver(key), &if_receiver, &if_other);
- BIND(&if_jsobject);
+ BIND(&if_receiver);
{
- Node* hash = LoadHashForJSObject(key, instance_type);
- // TODO(gsathya): Change all uses of -1 to PropertyArray::kNoHashSentinel.
- var_result.Bind(SelectConstant(
- Word32Equal(hash, Int32Constant(PropertyArray::kNoHashSentinel)),
- IntPtrConstant(-1), ChangeInt32ToIntPtr(hash),
- MachineType::PointerRepresentation()));
+ var_hash.Bind(LoadJSReceiverIdentityHash(key));
Goto(&done);
}
- BIND(&other);
+ BIND(&if_other);
{
- var_result.Bind(CallGetHashRaw(key));
+ var_hash.Bind(CallGetHashRaw(key));
Goto(&done);
}
BIND(&done);
- return var_result.value();
+ return var_hash.value();
}
void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
@@ -591,6 +829,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
Node* context, Node* table, Node* key, Variable* result, Label* entry_found,
Label* not_found) {
Node* hash = GetHash(key);
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
result->Bind(hash);
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -641,8 +880,8 @@ void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key,
GotoIf(TaggedIsSmi(candidate_key), if_not_same);
GotoIfNot(IsBigInt(candidate_key), if_not_same);
- Branch(WordEqual(CallRuntime(Runtime::kBigIntEqual, NoContextConstant(), key,
- candidate_key),
+ Branch(WordEqual(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), key, candidate_key),
TrueConstant()),
if_same, if_not_same);
}
@@ -755,7 +994,7 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
Label return_index(this), return_zero(this);
// Check if we need to update the {index}.
- GotoIfNot(SmiLessThan(SmiConstant(Smi::kZero), index), &return_zero);
+ GotoIfNot(SmiLessThan(SmiConstant(0), index), &return_zero);
// Check if the {table} was cleared.
Node* number_of_deleted_elements = LoadAndUntagObjectField(
@@ -784,7 +1023,7 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
Return(var_index.value());
BIND(&return_zero);
- Return(SmiConstant(Smi::kZero));
+ Return(SmiConstant(0));
}
template <typename TableType>
@@ -973,8 +1212,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
BIND(&not_found);
{
// If we have a hash code, we can start adding the new entry.
- GotoIf(IntPtrGreaterThanOrEqual(entry_start_position_or_hash.value(),
- IntPtrConstant(0)),
+ GotoIf(IntPtrGreaterThan(entry_start_position_or_hash.value(),
+ IntPtrConstant(0)),
&add_entry);
// Otherwise, go to runtime to compute the hash code.
@@ -1139,8 +1378,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
BIND(&not_found);
{
// If we have a hash code, we can start adding the new entry.
- GotoIf(IntPtrGreaterThanOrEqual(entry_start_position_or_hash.value(),
- IntPtrConstant(0)),
+ GotoIf(IntPtrGreaterThan(entry_start_position_or_hash.value(),
+ IntPtrConstant(0)),
&add_entry);
// Otherwise, go to runtime to compute the hash code.
@@ -1438,7 +1677,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&return_end);
{
StoreObjectFieldRoot(receiver, JSMapIterator::kTableOffset,
- Heap::kEmptyOrderedHashTableRootIndex);
+ Heap::kEmptyOrderedHashMapRootIndex);
Goto(&return_value);
}
}
@@ -1645,7 +1884,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&return_end);
{
StoreObjectFieldRoot(receiver, JSSetIterator::kTableOffset,
- Heap::kEmptyOrderedHashTableRootIndex);
+ Heap::kEmptyOrderedHashSetRootIndex);
Goto(&return_value);
}
}
@@ -1713,57 +1952,307 @@ TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
Return(SmiConstant(-1));
}
-TF_BUILTIN(WeakMapLookupHashIndex, CollectionsBuiltinsAssembler) {
- Node* const table = Parameter(Descriptor::kTable);
- Node* const key = Parameter(Descriptor::kKey);
+class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
+ public:
+ explicit WeakCollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : BaseCollectionsAssembler(state) {}
- Label if_found(this), if_not_found(this);
+ protected:
+ void AddEntry(TNode<Object> table, TNode<IntPtrT> key_index,
+ TNode<Object> key, TNode<Object> value,
+ TNode<IntPtrT> number_of_elements);
+
+ TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
+ TNode<IntPtrT> at_least_space_for);
+
+ // Generates and sets the identity for a JSRececiver.
+ TNode<Smi> CreateIdentityHash(TNode<Object> receiver);
+ TNode<IntPtrT> EntryMask(TNode<IntPtrT> capacity);
+
+ // Builds code that finds the ObjectHashTable entry for a {key} using the
+ // comparison code generated by {key_compare}. The key index is returned if
+ // the {key} is found.
+ typedef std::function<void(TNode<Object> entry_key, Label* if_same)>
+ KeyComparator;
+ TNode<IntPtrT> FindKeyIndex(TNode<Object> table, TNode<IntPtrT> key_hash,
+ TNode<IntPtrT> entry_mask,
+ const KeyComparator& key_compare);
+
+ // Builds code that finds an ObjectHashTable entry available for a new entry.
+ TNode<IntPtrT> FindKeyIndexForInsertion(TNode<Object> table,
+ TNode<IntPtrT> key_hash,
+ TNode<IntPtrT> entry_mask);
+
+ // Builds code that finds the ObjectHashTable entry with key that matches
+ // {key} and returns the entry's key index. If {key} cannot be found, jumps to
+ // {if_not_found}.
+ TNode<IntPtrT> FindKeyIndexForKey(TNode<Object> table, TNode<Object> key,
+ TNode<IntPtrT> hash,
+ TNode<IntPtrT> entry_mask,
+ Label* if_not_found);
+
+ TNode<Word32T> InsufficientCapacityToAdd(TNode<IntPtrT> capacity,
+ TNode<IntPtrT> number_of_elements,
+ TNode<IntPtrT> number_of_deleted);
+ TNode<IntPtrT> KeyIndexFromEntry(TNode<IntPtrT> entry);
+
+ TNode<IntPtrT> LoadNumberOfElements(TNode<Object> table, int offset);
+ TNode<IntPtrT> LoadNumberOfDeleted(TNode<Object> table, int offset = 0);
+ TNode<Object> LoadTable(SloppyTNode<Object> collection);
+ TNode<IntPtrT> LoadTableCapacity(TNode<Object> table);
+
+ void RemoveEntry(TNode<Object> table, TNode<IntPtrT> key_index,
+ TNode<IntPtrT> number_of_elements);
+ TNode<BoolT> ShouldRehash(TNode<IntPtrT> number_of_elements,
+ TNode<IntPtrT> number_of_deleted);
+ TNode<Word32T> ShouldShrink(TNode<IntPtrT> capacity,
+ TNode<IntPtrT> number_of_elements);
+ TNode<IntPtrT> ValueIndexFromKeyIndex(TNode<IntPtrT> key_index);
+};
+
+void WeakCollectionsBuiltinsAssembler::AddEntry(
+ TNode<Object> table, TNode<IntPtrT> key_index, TNode<Object> key,
+ TNode<Object> value, TNode<IntPtrT> number_of_elements) {
+ // See ObjectHashTable::AddEntry().
+ TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
+ StoreFixedArrayElement(table, key_index, key);
+ StoreFixedArrayElement(table, value_index, value);
+
+ // See HashTableBase::ElementAdded().
+ StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
+ SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+}
+
+TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
+ Variant variant, TNode<Context> context,
+ TNode<IntPtrT> at_least_space_for) {
+ // See HashTable::New().
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(IntPtrConstant(0), at_least_space_for));
+ TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
+
+ // See HashTable::NewInternal().
+ TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
+ TNode<Object> table = CAST(AllocateFixedArray(
+ HOLEY_ELEMENTS, length, INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+ Heap::RootListIndex map_root_index =
+ static_cast<Heap::RootListIndex>(ObjectHashTableShape::GetMapRootIndex());
+ StoreMapNoWriteBarrier(table, map_root_index);
+ StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
+ SmiConstant(0), SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
+ SmiConstant(0), SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(table, ObjectHashTable::kCapacityIndex,
+ SmiFromWord(capacity), SKIP_WRITE_BARRIER);
+
+ TNode<IntPtrT> start = KeyIndexFromEntry(IntPtrConstant(0));
+ FillFixedArrayWithValue(HOLEY_ELEMENTS, table, start, length,
+ Heap::kUndefinedValueRootIndex);
+ return table;
+}
- Node* const capacity =
- SmiUntag(LoadFixedArrayElement(table, WeakHashTable::kCapacityIndex));
- Node* const mask = IntPtrSub(capacity, IntPtrConstant(1));
+TNode<Smi> WeakCollectionsBuiltinsAssembler::CreateIdentityHash(
+ TNode<Object> key) {
+ TNode<ExternalReference> function_addr = ExternalConstant(
+ ExternalReference::jsreceiver_create_identity_hash(isolate()));
+ TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+
+ MachineType type_ptr = MachineType::Pointer();
+ MachineType type_tagged = MachineType::AnyTagged();
- Node* const hash = GetHash(key);
+ return CAST(CallCFunction2(type_tagged, type_ptr, type_tagged, function_addr,
+ isolate_ptr, key));
+}
- GotoIf(IntPtrLessThan(hash, IntPtrConstant(0)), &if_not_found);
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::EntryMask(
+ TNode<IntPtrT> capacity) {
+ return IntPtrSub(capacity, IntPtrConstant(1));
+}
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
+ TNode<Object> table, TNode<IntPtrT> key_hash, TNode<IntPtrT> entry_mask,
+ const KeyComparator& key_compare) {
// See HashTable::FirstProbe().
- Node* entry = WordAnd(hash, mask);
+ TVARIABLE(IntPtrT, var_entry, WordAnd(key_hash, entry_mask));
+ TVARIABLE(IntPtrT, var_count, IntPtrConstant(0));
- VARIABLE(var_count, MachineType::PointerRepresentation(), IntPtrConstant(0));
- VARIABLE(var_entry, MachineType::PointerRepresentation(), entry);
Variable* loop_vars[] = {&var_count, &var_entry};
- Label loop(this, arraysize(loop_vars), loop_vars);
+ Label loop(this, arraysize(loop_vars), loop_vars), if_found(this);
Goto(&loop);
BIND(&loop);
- Node* index;
+ TNode<IntPtrT> key_index;
{
- Node* entry = var_entry.value();
-
- index = IntPtrMul(entry, IntPtrConstant(WeakHashTable::kEntrySize));
- index =
- IntPtrAdd(index, IntPtrConstant(WeakHashTable::kElementsStartIndex));
+ key_index = KeyIndexFromEntry(var_entry);
+ TNode<Object> entry_key = CAST(LoadFixedArrayElement(table, key_index));
- Node* current = LoadFixedArrayElement(table, index);
- GotoIf(WordEqual(current, UndefinedConstant()), &if_not_found);
- GotoIf(WordEqual(current, key), &if_found);
+ key_compare(entry_key, &if_found);
// See HashTable::NextProbe().
Increment(&var_count);
- entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
-
- var_entry.Bind(entry);
+ var_entry = WordAnd(IntPtrAdd(UncheckedCast<IntPtrT>(var_entry),
+ UncheckedCast<IntPtrT>(var_count)),
+ entry_mask);
Goto(&loop);
}
+ BIND(&if_found);
+ return key_index;
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndexForInsertion(
+ TNode<Object> table, TNode<IntPtrT> key_hash, TNode<IntPtrT> entry_mask) {
+ // See HashTable::FindInsertionEntry().
+ auto is_not_live = [&](TNode<Object> entry_key, Label* if_found) {
+ // This is the the negative form BaseShape::IsLive().
+ GotoIf(Word32Or(IsTheHole(entry_key), IsUndefined(entry_key)), if_found);
+ };
+ return FindKeyIndex(table, key_hash, entry_mask, is_not_live);
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndexForKey(
+ TNode<Object> table, TNode<Object> key, TNode<IntPtrT> hash,
+ TNode<IntPtrT> entry_mask, Label* if_not_found) {
+ // See HashTable::FindEntry().
+ auto match_key_or_exit_on_empty = [&](TNode<Object> entry_key,
+ Label* if_same) {
+ GotoIf(IsUndefined(entry_key), if_not_found);
+ GotoIf(WordEqual(entry_key, key), if_same);
+ };
+ return FindKeyIndex(table, hash, entry_mask, match_key_or_exit_on_empty);
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::KeyIndexFromEntry(
+ TNode<IntPtrT> entry) {
+ // See HashTable::KeyAt().
+ // (entry * kEntrySize) + kElementsStartIndex + kEntryKeyIndex
+ return IntPtrAdd(
+ IntPtrMul(entry, IntPtrConstant(ObjectHashTable::kEntrySize)),
+ IntPtrConstant(ObjectHashTable::kElementsStartIndex +
+ ObjectHashTable::kEntryKeyIndex));
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfElements(
+ TNode<Object> table, int offset) {
+ TNode<IntPtrT> number_of_elements = SmiUntag(
+ LoadFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex));
+ return IntPtrAdd(number_of_elements, IntPtrConstant(offset));
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfDeleted(
+ TNode<Object> table, int offset) {
+ TNode<IntPtrT> number_of_deleted = SmiUntag(LoadFixedArrayElement(
+ table, ObjectHashTable::kNumberOfDeletedElementsIndex));
+ return IntPtrAdd(number_of_deleted, IntPtrConstant(offset));
+}
+
+TNode<Object> WeakCollectionsBuiltinsAssembler::LoadTable(
+ SloppyTNode<Object> collection) {
+ return LoadObjectField(CAST(collection), JSWeakCollection::kTableOffset);
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadTableCapacity(
+ TNode<Object> table) {
+ return SmiUntag(
+ LoadFixedArrayElement(table, ObjectHashTable::kCapacityIndex));
+}
+
+TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
+ TNode<IntPtrT> capacity, TNode<IntPtrT> number_of_elements,
+ TNode<IntPtrT> number_of_deleted) {
+ // This is the negative form of HashTable::HasSufficientCapacityToAdd().
+ // Return true if:
+ // - more than 50% of the available space are deleted elements
+ // - less than 50% will be available
+ TNode<IntPtrT> available = IntPtrSub(capacity, number_of_elements);
+ TNode<IntPtrT> half_available = WordShr(available, 1);
+ TNode<IntPtrT> needed_available = WordShr(number_of_elements, 1);
+ return Word32Or(
+ // deleted > half
+ IntPtrGreaterThan(number_of_deleted, half_available),
+ // elements + needed available > capacity
+ IntPtrGreaterThan(IntPtrAdd(number_of_elements, needed_available),
+ capacity));
+}
+
+void WeakCollectionsBuiltinsAssembler::RemoveEntry(
+ TNode<Object> table, TNode<IntPtrT> key_index,
+ TNode<IntPtrT> number_of_elements) {
+ // See ObjectHashTable::RemoveEntry().
+ TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
+ StoreFixedArrayElement(table, key_index, TheHoleConstant());
+ StoreFixedArrayElement(table, value_index, TheHoleConstant());
+
+ // See HashTableBase::ElementRemoved().
+ TNode<IntPtrT> number_of_deleted = LoadNumberOfDeleted(table, 1);
+ StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
+ SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
+ SmiFromWord(number_of_deleted), SKIP_WRITE_BARRIER);
+}
+
+TNode<BoolT> WeakCollectionsBuiltinsAssembler::ShouldRehash(
+ TNode<IntPtrT> number_of_elements, TNode<IntPtrT> number_of_deleted) {
+ // Rehash if more than 33% of the entries are deleted.
+ return IntPtrGreaterThanOrEqual(WordShl(number_of_deleted, 1),
+ number_of_elements);
+}
+
+TNode<Word32T> WeakCollectionsBuiltinsAssembler::ShouldShrink(
+ TNode<IntPtrT> capacity, TNode<IntPtrT> number_of_elements) {
+ // See HashTable::Shrink().
+ TNode<IntPtrT> quarter_capacity = WordShr(capacity, 2);
+ return Word32And(
+ // Shrink to fit the number of elements if only a quarter of the
+ // capacity is filled with elements.
+ IntPtrLessThanOrEqual(number_of_elements, quarter_capacity),
+
+ // Allocate a new dictionary with room for at least the current
+ // number of elements. The allocation method will make sure that
+ // there is extra room in the dictionary for additions. Don't go
+ // lower than room for 16 elements.
+ IntPtrGreaterThanOrEqual(number_of_elements, IntPtrConstant(16)));
+}
+
+TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
+ TNode<IntPtrT> key_index) {
+ return IntPtrAdd(key_index,
+ IntPtrConstant(ObjectHashTableShape::kEntryValueIndex -
+ ObjectHashTable::kEntryKeyIndex));
+}
+
+TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
+ GenerateConstructor(kMap, Context::JS_WEAK_MAP_FUN_INDEX,
+ isolate()->factory()->WeakMap_string(),
+ JSWeakMap::kTableOffset);
+}
+
+TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
+ GenerateConstructor(kSet, Context::JS_WEAK_SET_FUN_INDEX,
+ isolate()->factory()->WeakSet_string(),
+ JSWeakSet::kTableOffset);
+}
+
+TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
+ TNode<Object> table = CAST(Parameter(Descriptor::kTable));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+
+ Label if_not_found(this);
+
+ GotoIfNotJSReceiver(key, &if_not_found);
+
+ TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(key, &if_not_found);
+ TNode<IntPtrT> capacity = LoadTableCapacity(table);
+ TNode<IntPtrT> key_index =
+ FindKeyIndexForKey(table, key, hash, EntryMask(capacity), &if_not_found);
+ Return(SmiTag(ValueIndexFromKeyIndex(key_index)));
+
BIND(&if_not_found);
Return(SmiConstant(-1));
-
- BIND(&if_found);
- Return(SmiTag(Signed(IntPtrAdd(index, IntPtrConstant(1)))));
}
-TF_BUILTIN(WeakMapGet, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1773,11 +2262,7 @@ TF_BUILTIN(WeakMapGet, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.get");
- GotoIf(TaggedIsSmi(key), &return_undefined);
- GotoIfNot(IsJSReceiver(key), &return_undefined);
-
- Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
-
+ Node* const table = LoadTable(receiver);
Node* const index =
CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
@@ -1789,7 +2274,7 @@ TF_BUILTIN(WeakMapGet, CollectionsBuiltinsAssembler) {
Return(UndefinedConstant());
}
-TF_BUILTIN(WeakMapHas, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(WeakMapHas, WeakCollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1797,13 +2282,9 @@ TF_BUILTIN(WeakMapHas, CollectionsBuiltinsAssembler) {
Label return_false(this);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
- "WeakMap.prototype.get");
-
- GotoIf(TaggedIsSmi(key), &return_false);
- GotoIfNot(IsJSReceiver(key), &return_false);
-
- Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+ "WeakMap.prototype.has");
+ Node* const table = LoadTable(receiver);
Node* const index =
CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
@@ -1815,7 +2296,149 @@ TF_BUILTIN(WeakMapHas, CollectionsBuiltinsAssembler) {
Return(FalseConstant());
}
-TF_BUILTIN(WeakSetHas, CollectionsBuiltinsAssembler) {
+// Helper that removes the entry with a given key from the backing store
+// (ObjectHashTable) of a WeakMap or WeakSet.
+TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> collection = CAST(Parameter(Descriptor::kCollection));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+
+ Label call_runtime(this), if_not_found(this);
+
+ GotoIfNotJSReceiver(key, &if_not_found);
+
+ TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(key, &if_not_found);
+ TNode<Object> table = LoadTable(collection);
+ TNode<IntPtrT> capacity = LoadTableCapacity(table);
+ TNode<IntPtrT> key_index =
+ FindKeyIndexForKey(table, key, hash, EntryMask(capacity), &if_not_found);
+ TNode<IntPtrT> number_of_elements = LoadNumberOfElements(table, -1);
+ GotoIf(ShouldShrink(capacity, number_of_elements), &call_runtime);
+
+ RemoveEntry(table, key_index, number_of_elements);
+ Return(TrueConstant());
+
+ BIND(&if_not_found);
+ Return(FalseConstant());
+
+ BIND(&call_runtime);
+ Return(CallRuntime(Runtime::kWeakCollectionDelete, context, collection, key,
+ SmiTag(hash)));
+}
+
+// Helper that sets the key and value to the backing store (ObjectHashTable) of
+// a WeakMap or WeakSet.
+TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> collection = CAST(Parameter(Descriptor::kCollection));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+
+ CSA_ASSERT(this, IsJSReceiver(key));
+
+ Label call_runtime(this), if_no_hash(this), if_not_found(this);
+
+ TNode<Object> table = LoadTable(collection);
+ TNode<IntPtrT> capacity = LoadTableCapacity(table);
+ TNode<IntPtrT> entry_mask = EntryMask(capacity);
+
+ TVARIABLE(IntPtrT, var_hash, LoadJSReceiverIdentityHash(key, &if_no_hash));
+ TNode<IntPtrT> key_index =
+ FindKeyIndexForKey(table, key, var_hash, entry_mask, &if_not_found);
+
+ StoreFixedArrayElement(table, ValueIndexFromKeyIndex(key_index), value);
+ Return(collection);
+
+ BIND(&if_no_hash);
+ {
+ var_hash = SmiUntag(CreateIdentityHash(key));
+ Goto(&if_not_found);
+ }
+ BIND(&if_not_found);
+ {
+ TNode<IntPtrT> number_of_deleted = LoadNumberOfDeleted(table);
+ TNode<IntPtrT> number_of_elements = LoadNumberOfElements(table, 1);
+
+ // TODO(pwong): Port HashTable's Rehash() and EnsureCapacity() to CSA.
+ GotoIf(Word32Or(ShouldRehash(number_of_elements, number_of_deleted),
+ InsufficientCapacityToAdd(capacity, number_of_elements,
+ number_of_deleted)),
+ &call_runtime);
+
+ TNode<IntPtrT> insertion_key_index =
+ FindKeyIndexForInsertion(table, var_hash, entry_mask);
+ AddEntry(table, insertion_key_index, key, value, number_of_elements);
+ Return(collection);
+ }
+ BIND(&call_runtime);
+ {
+ CallRuntime(Runtime::kWeakCollectionSet, context, collection, key, value,
+ SmiTag(var_hash));
+ Return(collection);
+ }
+}
+
+TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
+ "WeakMap.prototype.delete");
+
+ Return(CallBuiltin(Builtins::kWeakCollectionDelete, context, receiver, key));
+}
+
+TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
+ "WeakMap.prototype.set");
+
+ Label throw_invalid_key(this);
+ GotoIfNotJSReceiver(key, &throw_invalid_key);
+
+ Return(
+ CallBuiltin(Builtins::kWeakCollectionSet, context, receiver, key, value));
+
+ BIND(&throw_invalid_key);
+ ThrowTypeError(context, MessageTemplate::kInvalidWeakMapKey, key);
+}
+
+TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
+ "WeakSet.prototype.add");
+
+ Label throw_invalid_value(this);
+ GotoIfNotJSReceiver(value, &throw_invalid_value);
+
+ Return(CallBuiltin(Builtins::kWeakCollectionSet, context, receiver, value,
+ TrueConstant()));
+
+ BIND(&throw_invalid_value);
+ ThrowTypeError(context, MessageTemplate::kInvalidWeakSetValue, value);
+}
+
+TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
+ "WeakSet.prototype.delete");
+
+ Return(
+ CallBuiltin(Builtins::kWeakCollectionDelete, context, receiver, value));
+}
+
+TF_BUILTIN(WeakSetHas, WeakCollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1823,13 +2446,9 @@ TF_BUILTIN(WeakSetHas, CollectionsBuiltinsAssembler) {
Label return_false(this);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
- "WeakSet.prototype.get");
-
- GotoIf(TaggedIsSmi(key), &return_false);
- GotoIfNot(IsJSReceiver(key), &return_false);
-
- Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+ "WeakSet.prototype.has");
+ Node* const table = LoadTable(receiver);
Node* const index =
CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index d55f26163c..75ad302d3d 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -35,10 +35,7 @@ namespace internal {
V(Profile, profile) \
V(ProfileEnd, profileEnd) \
V(Timeline, timeline) \
- V(TimelineEnd, timelineEnd) \
- V(Time, time) \
- V(TimeEnd, timeEnd) \
- V(TimeStamp, timeStamp)
+ V(TimelineEnd, timelineEnd)
namespace {
void ConsoleCall(
@@ -63,6 +60,20 @@ void ConsoleCall(
wrapper,
v8::debug::ConsoleContext(context_id, Utils::ToLocal(context_name)));
}
+
+void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
+ Logger::StartEnd se) {
+ if (!isolate->logger()->is_logging()) return;
+ HandleScope scope(isolate);
+ std::unique_ptr<char[]> name;
+ const char* raw_name = "default";
+ if (args.length() > 1 && args[1]->IsString()) {
+ // Try converting the first argument to a string.
+ name = args.at<String>(1)->ToCString();
+ raw_name = name.get();
+ }
+ LOG(isolate, TimerEvent(se, raw_name));
+}
} // namespace
#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name) \
@@ -74,19 +85,42 @@ void ConsoleCall(
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
+BUILTIN(ConsoleTime) {
+ LogTimerEvent(isolate, args, Logger::START);
+ ConsoleCall(isolate, args, &debug::ConsoleDelegate::Time);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->undefined_value();
+}
+
+BUILTIN(ConsoleTimeEnd) {
+ LogTimerEvent(isolate, args, Logger::END);
+ ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeEnd);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->undefined_value();
+}
+
+BUILTIN(ConsoleTimeStamp) {
+ LogTimerEvent(isolate, args, Logger::STAMP);
+ ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeStamp);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->undefined_value();
+}
+
namespace {
void InstallContextFunction(Handle<JSObject> target, const char* name,
- Builtins::Name call, int context_id,
+ Builtins::Name builtin_id, int context_id,
Handle<Object> context_name) {
Factory* const factory = target->GetIsolate()->factory();
- Handle<Code> call_code(target->GetIsolate()->builtins()->builtin(call));
+ Handle<Code> code(target->GetIsolate()->builtins()->builtin(builtin_id));
Handle<String> name_string =
Name::ToFunctionName(factory->InternalizeUtf8String(name))
.ToHandleChecked();
- Handle<JSFunction> fun =
- factory->NewFunctionWithoutPrototype(name_string, call_code, SLOPPY);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
+ name_string, code, builtin_id, i::LanguageMode::kSloppy);
+ Handle<JSFunction> fun = factory->NewFunction(args);
+
fun->shared()->set_native(true);
fun->shared()->DontAdaptArguments();
fun->shared()->set_length(1);
@@ -107,9 +141,13 @@ BUILTIN(ConsoleContext) {
Factory* const factory = isolate->factory();
Handle<String> name = factory->InternalizeUtf8String("Context");
- Handle<JSFunction> cons = factory->NewFunction(name);
- Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
- JSFunction::SetPrototype(cons, empty);
+ NewFunctionArgs arguments = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate->sloppy_function_map(), LanguageMode::kSloppy);
+ Handle<JSFunction> cons = factory->NewFunction(arguments);
+
+ Handle<JSObject> prototype = factory->NewJSObject(isolate->object_function());
+ JSFunction::SetPrototype(cons, prototype);
+
Handle<JSObject> context = factory->NewJSObject(cons, TENURED);
DCHECK(context->IsJSObject());
int id = isolate->last_console_context_id() + 1;
@@ -120,6 +158,12 @@ BUILTIN(ConsoleContext) {
args.at(1));
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_SETUP)
#undef CONSOLE_BUILTIN_SETUP
+ InstallContextFunction(context, "time", Builtins::kConsoleTime, id,
+ args.at(1));
+ InstallContextFunction(context, "timeEnd", Builtins::kConsoleTimeEnd, id,
+ args.at(1));
+ InstallContextFunction(context, "timeStamp", Builtins::kConsoleTimeStamp, id,
+ args.at(1));
return *context;
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 67a87271c2..2722f7b7a7 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -55,38 +55,6 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
typedef compiler::Node Node;
-Node* ConstructorBuiltinsAssembler::CopyFixedArrayBase(Node* fixed_array) {
- Label if_fixed_array(this), if_fixed_double_array(this), done(this);
- VARIABLE(result, MachineRepresentation::kTagged);
- Node* capacity = LoadAndUntagFixedArrayBaseLength(fixed_array);
- Branch(IsFixedDoubleArrayMap(LoadMap(fixed_array)), &if_fixed_double_array,
- &if_fixed_array);
- BIND(&if_fixed_double_array);
- {
- ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
- Node* copy = AllocateFixedArray(kind, capacity);
- CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
- SKIP_WRITE_BARRIER);
- result.Bind(copy);
- Goto(&done);
- }
-
- BIND(&if_fixed_array);
- {
- ElementsKind kind = PACKED_ELEMENTS;
- Node* copy = AllocateFixedArray(kind, capacity);
- CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
- UPDATE_WRITE_BARRIER);
- result.Bind(copy);
- Goto(&done);
- }
- BIND(&done);
- // Manually copy over the map of the incoming array to preserve the elements
- // kind.
- StoreMap(result.value(), LoadMap(fixed_array));
- return result.value();
-}
-
Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Node* feedback_vector,
Node* slot,
@@ -116,11 +84,12 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
// Create a new closure from the given function info in new space
Node* instance_size_in_bytes =
- TimesPointerSize(LoadMapInstanceSize(function_map));
+ TimesPointerSize(LoadMapInstanceSizeInWords(function_map));
Node* result = Allocate(instance_size_in_bytes);
StoreMapNoWriteBarrier(result, function_map);
- InitializeJSObjectBody(result, function_map, instance_size_in_bytes,
- JSFunction::kSize);
+ InitializeJSObjectBodyNoSlackTracking(result, function_map,
+ instance_size_in_bytes,
+ JSFunction::kSizeWithoutPrototype);
// Initialize the rest of the function.
Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
@@ -128,6 +97,20 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
empty_fixed_array);
+ {
+ // Set function prototype if necessary.
+ Label done(this), init_prototype(this);
+ Branch(IsFunctionWithPrototypeSlotMap(function_map), &init_prototype,
+ &done);
+
+ BIND(&init_prototype);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+ Goto(&done);
+
+ BIND(&done);
+ }
+
Node* literals_cell = LoadFeedbackVectorSlot(
feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
{
@@ -153,8 +136,6 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
}
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
literals_cell);
- StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
@@ -256,12 +237,8 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
}
BIND(&instantiate_map);
-
- Node* object = AllocateJSObjectFromMap(initial_map, properties.value());
-
- // Perform in-object slack tracking if requested.
- HandleSlackTracking(context, object, initial_map, JSObject::kHeaderSize);
- return object;
+ return AllocateJSObjectFromMap(initial_map, properties.value(), nullptr,
+ kNone, kWithSlackTracking);
}
Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
@@ -378,28 +355,6 @@ TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
Return(result);
}
-Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
- Node* boilerplate, Node* boilerplate_map, Node* boilerplate_elements,
- Node* allocation_site, Node* capacity, ElementsKind kind) {
- ParameterMode param_mode = OptimalParameterMode();
-
- Node* length = LoadJSArrayLength(boilerplate);
- capacity = TaggedToParameter(capacity, param_mode);
-
- Node *array, *elements;
- std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- kind, boilerplate_map, length, allocation_site, capacity, param_mode);
-
- length = TaggedToParameter(length, param_mode);
-
- Comment("copy boilerplate elements");
- CopyFixedArrayElements(kind, boilerplate_elements, elements, length,
- SKIP_WRITE_BARRIER, param_mode);
- IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
-
- return array;
-}
-
Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
Node* feedback_vector, Node* slot, Node* context, Label* call_runtime,
AllocationSiteMode allocation_site_mode) {
@@ -412,73 +367,12 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
- Node* boilerplate_map = LoadMap(boilerplate);
- CSA_ASSERT(this, IsJSArrayMap(boilerplate_map));
- Node* boilerplate_elements = LoadElements(boilerplate);
- Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
allocation_site =
allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
- Node* zero = SmiConstant(0);
- GotoIf(SmiEqual(capacity, zero), &zero_capacity);
-
- Node* elements_map = LoadMap(boilerplate_elements);
- GotoIf(IsFixedCOWArrayMap(elements_map), &cow_elements);
-
- GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
- {
- Comment("fast double elements path");
- if (FLAG_debug_code) CSA_CHECK(this, IsFixedDoubleArrayMap(elements_map));
- Node* array =
- NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
- allocation_site, capacity, PACKED_DOUBLE_ELEMENTS);
- result.Bind(array);
- Goto(&return_result);
- }
-
- BIND(&fast_elements);
- {
- Comment("fast elements path");
- Node* array =
- NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
- allocation_site, capacity, PACKED_ELEMENTS);
- result.Bind(array);
- Goto(&return_result);
- }
-
- VARIABLE(length, MachineRepresentation::kTagged);
- VARIABLE(elements, MachineRepresentation::kTagged);
- Label allocate_without_elements(this);
-
- BIND(&cow_elements);
- {
- Comment("fixed cow path");
- length.Bind(LoadJSArrayLength(boilerplate));
- elements.Bind(boilerplate_elements);
-
- Goto(&allocate_without_elements);
- }
-
- BIND(&zero_capacity);
- {
- Comment("zero capacity path");
- length.Bind(zero);
- elements.Bind(LoadRoot(Heap::kEmptyFixedArrayRootIndex));
-
- Goto(&allocate_without_elements);
- }
-
- BIND(&allocate_without_elements);
- {
- Node* array = AllocateUninitializedJSArrayWithoutElements(
- boilerplate_map, length.value(), allocation_site);
- StoreObjectField(array, JSObject::kElementsOffset, elements.value());
- result.Bind(array);
- Goto(&return_result);
- }
-
- BIND(&return_result);
- return result.value();
+ CSA_ASSERT(this, IsJSArrayMap(LoadMap(boilerplate)));
+ ParameterMode mode = OptimalParameterMode();
+ return CloneFastJSArray(context, boilerplate, mode, allocation_site);
}
TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
@@ -602,7 +496,11 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&if_copy_elements);
CSA_ASSERT(this, Word32BinaryNot(
IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
- var_elements.Bind(CopyFixedArrayBase(boilerplate_elements));
+ ExtractFixedArrayFlags flags;
+ flags |= ExtractFixedArrayFlag::kAllFixedArrays;
+ flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly;
+ flags |= ExtractFixedArrayFlag::kDontCopyCOW;
+ var_elements.Bind(CloneFixedArray(boilerplate_elements, flags));
Goto(&done);
BIND(&done);
}
@@ -610,7 +508,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
// Ensure new-space allocation for a fresh JSObject so we can skip write
// barriers when copying all object fields.
STATIC_ASSERT(JSObject::kMaxInstanceSize < kMaxRegularHeapObjectSize);
- Node* instance_size = TimesPointerSize(LoadMapInstanceSize(boilerplate_map));
+ Node* instance_size =
+ TimesPointerSize(LoadMapInstanceSizeInWords(boilerplate_map));
Node* allocation_size = instance_size;
bool needs_allocation_memento = FLAG_allocation_site_pretenuring;
if (needs_allocation_memento) {
@@ -800,7 +699,8 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_zero);
Node* context = Parameter(BuiltinDescriptor::kContext);
- args.PopAndReturn(ToNumber(context, args.AtIndex(0)));
+ args.PopAndReturn(
+ ToNumber(context, args.AtIndex(0), BigIntHandling::kConvertToNumber));
BIND(&return_zero);
args.PopAndReturn(SmiConstant(0));
@@ -816,28 +716,19 @@ TF_BUILTIN(NumberConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Label return_zero(this), wrap(this);
+ Label wrap(this);
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_zero);
- {
- var_result.Bind(ToNumber(context, args.AtIndex(0)));
- Goto(&wrap);
- }
+ VARIABLE(var_result, MachineRepresentation::kTagged, SmiConstant(0));
- BIND(&return_zero);
- {
- var_result.Bind(SmiConstant(0));
- Goto(&wrap);
- }
+ GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &wrap);
+ var_result.Bind(
+ ToNumber(context, args.AtIndex(0), BigIntHandling::kConvertToNumber));
+ Goto(&wrap);
BIND(&wrap);
- {
- Node* result = EmitFastNewObject(context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, var_result.value());
- args.PopAndReturn(result);
- }
+ Node* result = EmitFastNewObject(context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, var_result.value());
+ args.PopAndReturn(result);
}
Node* ConstructorBuiltinsAssembler::EmitConstructString(Node* argc,
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index b889100148..ac13dcbb6d 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -42,11 +42,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
bool convert_symbol);
private:
- Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
- Node* boilerplate_elements, Node* allocation_site,
- Node* capacity, ElementsKind kind);
- Node* CopyFixedArrayBase(Node* elements);
-
Node* NotHasBoilerplate(Node* literal_site);
Node* LoadAllocationSiteBoilerplate(Node* allocation_site);
};
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index c61ea70cf4..823e6ca937 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -33,9 +33,7 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
// Check if {exotic_to_prim} is neither null nor undefined.
Label ordinary_to_primitive(this);
- GotoIf(WordEqual(exotic_to_prim, NullConstant()), &ordinary_to_primitive);
- GotoIf(WordEqual(exotic_to_prim, UndefinedConstant()),
- &ordinary_to_primitive);
+ GotoIf(IsNullOrUndefined(exotic_to_prim), &ordinary_to_primitive);
{
// Invoke the {exotic_to_prim} method on the {input} with a string
// representation of the {hint}.
@@ -121,6 +119,22 @@ TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) {
Return(NonNumberToNumber(context, input));
}
+TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(NonNumberToNumeric(context, input));
+}
+
+TF_BUILTIN(ToNumeric, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(Select(IsNumber(input), [=] { return input; },
+ [=] { return NonNumberToNumeric(context, input); },
+ MachineRepresentation::kTagged));
+}
+
// ES6 section 7.1.3 ToNumber ( argument )
TF_BUILTIN(ToNumber, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -129,6 +143,15 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
Return(ToNumber(context, input));
}
+// ES section #sec-tostring-applied-to-the-number-type
+TF_BUILTIN(NumberToString, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(NumberToString(context, input));
+}
+
+// ES section #sec-tostring
TF_BUILTIN(ToString, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
@@ -215,10 +238,10 @@ TF_BUILTIN(ToBoolean, CodeStubAssembler) {
BranchIfToBooleanIsTrue(value, &return_true, &return_false);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
// ES6 section 7.1.2 ToBoolean ( argument )
@@ -231,10 +254,10 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
BranchIfToBooleanIsTrue(value, &return_true, &return_false);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
TF_BUILTIN(ToLength, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 579d537b73..f6f3563d55 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -29,8 +29,8 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
GotoIf(TaggedIsSmi(receiver), &receiver_not_date);
Node* receiver_instance_type = LoadInstanceType(receiver);
- GotoIf(Word32NotEqual(receiver_instance_type, Int32Constant(JS_DATE_TYPE)),
- &receiver_not_date);
+ GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_DATE_TYPE),
+ &receiver_not_date);
// Load the specified date field, falling back to the runtime as necessary.
if (field_index == JSDate::kDateValue) {
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index c46a44d0d3..5f9f31e10b 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -891,7 +891,7 @@ BUILTIN(DatePrototypeToJson) {
isolate, NewTypeError(MessageTemplate::kCalledNonCallable, name));
}
RETURN_RESULT_OR_FAILURE(
- isolate, Execution::Call(isolate, function, receiver_obj, 0, NULL));
+ isolate, Execution::Call(isolate, function, receiver_obj, 0, nullptr));
}
}
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index cc89c4e365..2b2cc407b5 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -99,6 +99,8 @@ namespace internal {
TFS(StringIndexOf, kReceiver, kSearchString, kPosition) \
TFC(StringLessThan, Compare, 1) \
TFC(StringLessThanOrEqual, Compare, 1) \
+ TFS(StringRepeat, kString, kCount) \
+ TFS(SubString, kString, kFrom, kTo) \
\
/* OrderedHashTable helpers */ \
TFS(OrderedHashTableHealIndex, kTable, kIndex) \
@@ -124,7 +126,6 @@ namespace internal {
ASM(DeserializeLazy) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
- ASM(NotifyBuiltinContinuation) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
/* to continue in a JavaScript builtin to finish the functionality of a */ \
@@ -183,7 +184,10 @@ namespace internal {
TFC(StringToNumber, TypeConversion, 1) \
TFC(ToName, TypeConversion, 1) \
TFC(NonNumberToNumber, TypeConversion, 1) \
+ TFC(NonNumberToNumeric, TypeConversion, 1) \
TFC(ToNumber, TypeConversion, 1) \
+ TFC(ToNumeric, TypeConversion, 1) \
+ TFC(NumberToString, TypeConversion, 1) \
TFC(ToString, TypeConversion, 1) \
TFC(ToInteger, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
@@ -199,8 +203,8 @@ namespace internal {
TFH(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray) \
TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
TFH(KeyedLoadIC_Miss, LoadWithVector) \
+ TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
TFH(KeyedLoadIC_Slow, LoadWithVector) \
- TFH(KeyedLoadIC_IndexedString, LoadWithVector) \
TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \
TFH(KeyedStoreIC_Miss, StoreWithVector) \
TFH(KeyedStoreIC_Slow, StoreWithVector) \
@@ -208,13 +212,13 @@ namespace internal {
TFH(LoadGlobalIC_Slow, LoadGlobalWithVector) \
TFH(LoadField, LoadField) \
TFH(LoadIC_FunctionPrototype, LoadWithVector) \
- ASM(LoadIC_Getter_ForDeopt) \
TFH(LoadIC_Miss, LoadWithVector) \
TFH(LoadIC_Slow, LoadWithVector) \
TFH(LoadIC_StringLength, LoadWithVector) \
+ TFH(LoadIC_StringWrapperLength, LoadWithVector) \
TFH(LoadIC_Uninitialized, LoadWithVector) \
+ TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Miss, StoreWithVector) \
- ASM(StoreIC_Setter_ForDeopt) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
\
/* Promise helpers */ \
@@ -260,10 +264,14 @@ namespace internal {
TFJ(FastArrayShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
CPP(ArraySlice) \
+ TFJ(FastArraySlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.splice */ \
CPP(ArraySplice) \
/* ES6 #sec-array.prototype.unshift */ \
CPP(ArrayUnshift) \
+ /* Support for Array.from and other array-copying idioms */ \
+ TFS(CloneFastJSArray, kSource) \
+ TFS(ExtractFastJSArray, kSource, kBegin, kCount) \
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayForEachLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
@@ -284,6 +292,10 @@ namespace internal {
TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
TFJ(ArrayFilter, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayFilterLoopEagerDeoptContinuation, 6, kCallbackFn, kThisArg, kArray, \
+ kInitialK, kLength, kTo) \
+ TFJ(ArrayFilterLoopLazyDeoptContinuation, 8, kCallbackFn, kThisArg, kArray, \
+ kInitialK, kLength, kValueK, kTo, kResult) \
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
@@ -676,6 +688,7 @@ namespace internal {
TFC(Multiply, BinaryOp, 1) \
TFC(Divide, BinaryOp, 1) \
TFC(Modulus, BinaryOp, 1) \
+ TFC(Exponentiate, BinaryOp, 1) \
TFC(BitwiseAnd, BinaryOp, 1) \
TFC(BitwiseOr, BinaryOp, 1) \
TFC(BitwiseXor, BinaryOp, 1) \
@@ -687,7 +700,12 @@ namespace internal {
TFC(GreaterThan, Compare, 1) \
TFC(GreaterThanOrEqual, Compare, 1) \
TFC(Equal, Compare, 1) \
+ TFC(SameValue, Compare, 1) \
TFC(StrictEqual, Compare, 1) \
+ TFS(BitwiseNot, kValue) \
+ TFS(Decrement, kValue) \
+ TFS(Increment, kValue) \
+ TFS(Negate, kValue) \
\
/* Object */ \
TFJ(ObjectConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -793,7 +811,7 @@ namespace internal {
CPP(ReflectGet) \
CPP(ReflectGetOwnPropertyDescriptor) \
CPP(ReflectGetPrototypeOf) \
- CPP(ReflectHas) \
+ TFJ(ReflectHas, 2, kTarget, kKey) \
CPP(ReflectIsExtensible) \
CPP(ReflectOwnKeys) \
CPP(ReflectPreventExtensions) \
@@ -853,8 +871,10 @@ namespace internal {
TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* RegExp helpers */ \
TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \
+ TFS(RegExpMatchFast, kReceiver, kPattern) \
TFS(RegExpPrototypeExecSlow, kReceiver, kString) \
TFS(RegExpReplace, kRegExp, kString, kReplaceValue) \
+ TFS(RegExpSearchFast, kReceiver, kPattern) \
TFS(RegExpSplit, kRegExp, kString, kLimit) \
\
/* Set */ \
@@ -932,12 +952,21 @@ namespace internal {
CPP(StringPrototypeLastIndexOf) \
/* ES6 #sec-string.prototype.link */ \
TFJ(StringPrototypeLink, 1, kValue) \
+ /* ES6 #sec-string.prototype.match */ \
+ TFJ(StringPrototypeMatch, 1, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
+ /* ES6 #sec-string.prototype.padEnd */ \
+ TFJ(StringPrototypePadEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-string.prototype.padStart */ \
+ TFJ(StringPrototypePadStart, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.repeat */ \
TFJ(StringPrototypeRepeat, 1, kCount) \
/* ES6 #sec-string.prototype.replace */ \
TFJ(StringPrototypeReplace, 2, kSearch, kReplace) \
+ /* ES6 #sec-string.prototype.search */ \
+ TFJ(StringPrototypeSearch, 1, kRegexp) \
/* ES6 #sec-string.prototype.slice */ \
TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.small */ \
@@ -966,6 +995,8 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.valueof */ \
TFJ(StringPrototypeValueOf, 0) \
+ /* ES6 #sec-string.raw */ \
+ CPP(StringRaw) \
/* ES6 #sec-string.prototype-@@iterator */ \
TFJ(StringPrototypeIterator, 0) \
\
@@ -1015,6 +1046,12 @@ namespace internal {
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
CPP(TypedArrayPrototypeFill) \
+ /* ES6 %TypedArray%.prototype.find */ \
+ TFJ(TypedArrayPrototypeFind, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.findIndex */ \
+ TFJ(TypedArrayPrototypeFindIndex, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-%typedarray%.prototype.includes */ \
CPP(TypedArrayPrototypeIncludes) \
/* ES6 #sec-%typedarray%.prototype.indexof */ \
@@ -1060,12 +1097,22 @@ namespace internal {
TFC(ThrowWasmTrapFuncSigMismatch, WasmRuntimeCall, 1) \
\
/* WeakMap */ \
+ TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
TFJ(WeakMapGet, 1, kKey) \
TFJ(WeakMapHas, 1, kKey) \
+ TFJ(WeakMapPrototypeSet, 2, kKey, kValue) \
+ TFJ(WeakMapPrototypeDelete, 1, kKey) \
\
/* WeakSet */ \
+ TFJ(WeakSetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(WeakSetHas, 1, kKey) \
+ TFJ(WeakSetPrototypeAdd, 1, kValue) \
+ TFJ(WeakSetPrototypeDelete, 1, kValue) \
+ \
+ /* WeakSet / WeakMap Helpers */ \
+ TFS(WeakCollectionDelete, kCollection, kKey) \
+ TFS(WeakCollectionSet, kCollection, kKey, kValue) \
\
/* AsyncGenerator */ \
\
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 6d33d88f3f..a80409794c 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -42,6 +42,8 @@ BUILTIN(ErrorCaptureStackTrace) {
HandleScope scope(isolate);
Handle<Object> object_obj = args.atOrUndefined(isolate, 1);
+ isolate->CountUsage(v8::Isolate::kErrorCaptureStackTrace);
+
if (!object_obj->IsJSObject()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument, object_obj));
@@ -60,19 +62,18 @@ BUILTIN(ErrorCaptureStackTrace) {
// Add the stack accessors.
- Handle<AccessorInfo> error_stack =
- Accessors::ErrorStackInfo(isolate, DONT_ENUM);
+ Handle<AccessorInfo> error_stack = isolate->factory()->error_stack_accessor();
+ Handle<Name> name(Name::cast(error_stack->name()), isolate);
// Explicitly check for frozen objects. Other access checks are performed by
// the LookupIterator in SetAccessor below.
if (!JSObject::IsExtensible(object)) {
return isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kDefineDisallowed,
- handle(error_stack->name(), isolate)));
+ MessageTemplate::kDefineDisallowed, name));
}
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSObject::SetAccessor(object, error_stack));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSObject::SetAccessor(object, name, error_stack, DONT_ENUM));
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index f5a173c71e..0b98a7169b 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -28,13 +28,11 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Node* receiver_map = LoadMap(receiver);
{
- Label fast(this);
Node* instance_type = LoadMapInstanceType(receiver_map);
- GotoIf(Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE)), &fast);
- GotoIf(Word32Equal(instance_type, Int32Constant(JS_BOUND_FUNCTION_TYPE)),
- &fast);
- Goto(&slow);
- BIND(&fast);
+ GotoIfNot(
+ Word32Or(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
+ InstanceTypeEqual(instance_type, JS_BOUND_FUNCTION_TYPE)),
+ &slow);
}
// Disallow binding of slow-mode functions. We need to figure out whether the
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index b94220603c..771c7243ac 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -234,7 +234,7 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
Handle<Object> length(Smi::kZero, isolate);
Maybe<PropertyAttributes> attributes =
JSReceiver::GetPropertyAttributes(&length_lookup);
- if (!attributes.IsJust()) return isolate->heap()->exception();
+ if (attributes.IsNothing()) return isolate->heap()->exception();
if (attributes.FromJust() != ABSENT) {
Handle<Object> target_length;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_length,
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 2dbf34fcff..b063b314b5 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -31,8 +31,7 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
Label if_receiverisincompatible(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
Node* receiver_instance_type = LoadInstanceType(receiver);
- GotoIfNot(Word32Equal(receiver_instance_type,
- Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
+ GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_GENERATOR_OBJECT_TYPE),
&if_receiverisincompatible);
// Check if the {receiver} is running or already closed.
@@ -46,11 +45,15 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
JSGeneratorObject::kGeneratorClosed);
GotoIf(SmiLessThan(receiver_continuation, closed), &if_receiverisrunning);
+ // Remember the {resume_mode} for the {receiver}.
+ StoreObjectFieldNoWriteBarrier(receiver, JSGeneratorObject::kResumeModeOffset,
+ SmiConstant(resume_mode));
+
// Resume the {receiver} using our trampoline.
VARIABLE(var_exception, MachineRepresentation::kTagged, UndefinedConstant());
Label if_exception(this, Label::kDeferred), if_final_return(this);
Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
- value, receiver, SmiConstant(resume_mode));
+ value, receiver);
// Make sure we close the generator if there was an exception.
GotoIfException(result, &if_exception, &var_exception);
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 1f16d81fe3..4d85be9f91 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -5,7 +5,6 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
#include "src/objects-inl.h"
@@ -14,32 +13,14 @@ namespace v8 {
namespace internal {
TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
- Node* value = Parameter(Descriptor::kReceiver);
- Node* string = LoadJSValueValue(value);
- Node* result = LoadStringLength(string);
- Return(result);
+ Node* string = Parameter(Descriptor::kReceiver);
+ Return(LoadStringLengthAsSmi(string));
}
-TF_BUILTIN(KeyedLoadIC_IndexedString, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* index = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- Label miss(this);
-
- Node* index_intptr = TryToIntptr(index, &miss);
- Node* length = SmiUntag(LoadStringLength(receiver));
- GotoIf(UintPtrGreaterThanOrEqual(index_intptr, length), &miss);
-
- Node* code = StringCharCodeAt(receiver, index_intptr, INTPTR_PARAMETERS);
- Node* result = StringFromCharCode(code);
- Return(result);
-
- BIND(&miss);
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, index, slot,
- vector);
+TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
+ Node* value = Parameter(Descriptor::kReceiver);
+ Node* string = LoadJSValueValue(value);
+ Return(LoadStringLengthAsSmi(string));
}
TF_BUILTIN(KeyedLoadIC_Miss, CodeStubAssembler) {
@@ -115,10 +96,6 @@ TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name, slot, vector);
}
-void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
-}
-
TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -163,8 +140,18 @@ TF_BUILTIN(StoreIC_Miss, CodeStubAssembler) {
receiver, name);
}
-void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
+TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ TailCallRuntime(Runtime::kStoreGlobalIC_Slow, context, value, slot, vector,
+ receiver, name);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 70dc34e302..536a7f31ed 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -28,6 +28,7 @@ IC_BUILTIN(LoadICTrampoline)
IC_BUILTIN(LoadField)
IC_BUILTIN(KeyedLoadICTrampoline)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
+IC_BUILTIN(KeyedLoadIC_PolymorphicName)
IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(KeyedStoreIC)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 00e7422e59..bc9723700c 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -32,36 +32,9 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
// Load the {object}s elements.
Node* source = LoadObjectField(object, JSObject::kElementsOffset);
-
- ParameterMode mode = OptimalParameterMode();
- Node* length = TaggedToParameter(LoadFixedArrayBaseLength(source), mode);
-
- // Check if we can allocate in new space.
- ElementsKind kind = PACKED_ELEMENTS;
- int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
- Label if_newspace(this), if_lospace(this, Label::kDeferred);
- Branch(UintPtrOrSmiLessThan(length, IntPtrOrSmiConstant(max_elements, mode),
- mode),
- &if_newspace, &if_lospace);
-
- BIND(&if_newspace);
- {
- Node* target = AllocateFixedArray(kind, length, mode);
- CopyFixedArrayElements(kind, source, target, length, SKIP_WRITE_BARRIER,
- mode);
- StoreObjectField(object, JSObject::kElementsOffset, target);
- Return(target);
- }
-
- BIND(&if_lospace);
- {
- Node* target =
- AllocateFixedArray(kind, length, mode, kAllowLargeObjectAllocation);
- CopyFixedArrayElements(kind, source, target, length, UPDATE_WRITE_BARRIER,
- mode);
- StoreObjectField(object, JSObject::kElementsOffset, target);
- Return(target);
- }
+ Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
+ StoreObjectField(object, JSObject::kElementsOffset, target);
+ Return(target);
}
TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
@@ -219,7 +192,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Label exit(this);
Label* black = &exit;
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK_EQ(strcmp(Marking::kBlackBitPattern, "11"), 0);
Node* cell;
Node* mask;
@@ -258,14 +231,15 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
Node* IsWhite(Node* object) {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
Node* cell;
Node* mask;
GetMarkBit(object, &cell, &mask);
+ mask = TruncateWordToWord32(mask);
// Non-white has 1 for the first bit, so we only need to check for the first
// bit.
- return WordEqual(WordAnd(Load(MachineType::Pointer(), cell), mask),
- IntPtrConstant(0));
+ return Word32Equal(Word32And(Load(MachineType::Int32(), cell), mask),
+ Int32Constant(0));
}
void GetMarkBit(Node* object, Node** cell, Node** mask) {
@@ -561,8 +535,9 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
BIND(&dont_delete);
{
- STATIC_ASSERT(LANGUAGE_END == 2);
- GotoIf(SmiNotEqual(language_mode, SmiConstant(SLOPPY)), &slow);
+ STATIC_ASSERT(LanguageModeSize == 2);
+ GotoIf(SmiNotEqual(language_mode, SmiConstant(LanguageMode::kSloppy)),
+ &slow);
Return(FalseConstant());
}
}
@@ -618,5 +593,19 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) {
Return(UndefinedConstant());
}
+TF_BUILTIN(SameValue, CodeStubAssembler) {
+ Node* lhs = Parameter(Descriptor::kLeft);
+ Node* rhs = Parameter(Descriptor::kRight);
+
+ Label if_true(this), if_false(this);
+ BranchIfSameValue(lhs, rhs, &if_true, &if_false);
+
+ BIND(&if_true);
+ Return(TrueConstant());
+
+ BIND(&if_false);
+ Return(FalseConstant());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index e9c90f5b31..3c7956246b 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -28,8 +28,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
// Early exit on empty strings.
- Node* const length = SmiUntag(LoadStringLength(string));
- GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_string);
+ TNode<Smi> const length = LoadStringLengthAsSmi(string);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &return_string);
// Unpack strings if possible, and bail to runtime unless we get a one-byte
// flat string.
@@ -47,8 +47,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Node* const dst = AllocateSeqOneByteString(context, length);
const int kMaxShortStringLength = 24; // Determined empirically.
- GotoIf(IntPtrGreaterThan(length, IntPtrConstant(kMaxShortStringLength)),
- &call_c);
+ GotoIf(SmiGreaterThan(length, SmiConstant(kMaxShortStringLength)), &call_c);
{
Node* const dst_ptr = PointerToSeqStringData(dst);
@@ -56,7 +55,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
IntPtrConstant(0));
Node* const start_address = to_direct.PointerToData(&call_c);
- Node* const end_address = IntPtrAdd(start_address, length);
+ TNode<IntPtrT> const end_address =
+ Signed(IntPtrAdd(start_address, SmiUntag(length)));
Node* const to_lower_table_addr = ExternalConstant(
ExternalReference::intl_to_latin1_lower_table(isolate()));
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 79dc039b8b..45471171c7 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -83,7 +83,7 @@ BUILTIN(StringPrototypeNormalizeIntl) {
const icu::Normalizer2* normalizer =
icu::Normalizer2::getInstance(nullptr, form_name, form_mode, status);
DCHECK(U_SUCCESS(status));
- CHECK(normalizer != nullptr);
+ CHECK_NOT_NULL(normalizer);
int32_t normalized_prefix_length =
normalizer->spanQuickCheckYes(input, status);
// Quick return if the input is already normalized.
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 344aee3786..f186cf2d76 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -57,43 +57,33 @@ Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator,
// 3. If Type(result) is not Object, throw a TypeError exception.
Label if_notobject(this, Label::kDeferred), return_result(this);
GotoIf(TaggedIsSmi(result), &if_notobject);
- GotoIfNot(IsJSReceiver(result), &if_notobject);
-
- VARIABLE(var_done, MachineRepresentation::kTagged);
+ Node* result_map = LoadMap(result);
if (fast_iterator_result_map != nullptr) {
// Fast iterator result case:
Label if_generic(this);
// 4. Return result.
- Node* map = LoadMap(result);
- GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
+ GotoIfNot(WordEqual(result_map, fast_iterator_result_map), &if_generic);
// IteratorComplete
// 2. Return ToBoolean(? Get(iterResult, "done")).
Node* done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
- CSA_ASSERT(this, IsBoolean(done));
- var_done.Bind(done);
- Goto(&return_result);
+ BranchIfToBooleanIsTrue(done, if_done, &return_result);
BIND(&if_generic);
}
// Generic iterator result case:
{
+ // 3. If Type(result) is not Object, throw a TypeError exception.
+ GotoIfNot(IsJSReceiverMap(result_map), &if_notobject);
+
// IteratorComplete
// 2. Return ToBoolean(? Get(iterResult, "done")).
Node* done = GetProperty(context, result, factory()->done_string());
GotoIfException(done, if_exception, exception);
- var_done.Bind(done);
-
- Label to_boolean(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(done), &to_boolean);
- Branch(IsBoolean(done), &return_result, &to_boolean);
-
- BIND(&to_boolean);
- var_done.Bind(CallBuiltin(Builtins::kToBoolean, context, done));
- Goto(&return_result);
+ BranchIfToBooleanIsTrue(done, if_done, &return_result);
}
BIND(&if_notobject);
@@ -105,7 +95,6 @@ Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator,
}
BIND(&return_result);
- GotoIf(IsTrue(var_done.value()), if_done);
return result;
}
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 3e22a138eb..706fa4f3a8 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-math-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
@@ -14,24 +16,6 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.2.2 Function Properties of the Math Object
-class MathBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit MathBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void MathRoundingOperation(
- Node* context, Node* x,
- TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
- void MathUnaryOperation(
- Node* context, Node* x,
- TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
- void MathMaxMin(Node* context, Node* argc,
- TNode<Float64T> (CodeStubAssembler::*float64op)(
- SloppyTNode<Float64T>, SloppyTNode<Float64T>),
- double default_val);
-};
-
// ES6 #sec-math.abs
TF_BUILTIN(MathAbs, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -53,7 +37,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
BIND(&if_xissmi);
{
Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
- Node* pair = NULL;
+ Node* pair = nullptr;
// check if support abs function
if (IsIntPtrAbsWithOverflowSupported()) {
@@ -405,16 +389,19 @@ TF_BUILTIN(MathLog2, MathBuiltinsAssembler) {
MathUnaryOperation(context, x, &CodeStubAssembler::Float64Log2);
}
+CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context,
+ Node* base,
+ Node* exponent) {
+ Node* base_value = TruncateTaggedToFloat64(context, base);
+ Node* exponent_value = TruncateTaggedToFloat64(context, exponent);
+ Node* value = Float64Pow(base_value, exponent_value);
+ return ChangeFloat64ToTagged(value);
+}
+
// ES6 #sec-math.pow
-TF_BUILTIN(MathPow, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kBase);
- Node* y = Parameter(Descriptor::kExponent);
- Node* x_value = TruncateTaggedToFloat64(context, x);
- Node* y_value = TruncateTaggedToFloat64(context, y);
- Node* value = Float64Pow(x_value, y_value);
- Node* result = ChangeFloat64ToTagged(value);
- Return(result);
+TF_BUILTIN(MathPow, MathBuiltinsAssembler) {
+ Return(MathPow(Parameter(Descriptor::kContext), Parameter(Descriptor::kBase),
+ Parameter(Descriptor::kExponent)));
}
// ES6 #sec-math.random
diff --git a/deps/v8/src/builtins/builtins-math-gen.h b/deps/v8/src/builtins/builtins-math-gen.h
new file mode 100644
index 0000000000..7b9079b6e9
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-math-gen.h
@@ -0,0 +1,36 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_MATH_GEN_H_
+#define V8_BUILTINS_BUILTINS_MATH_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class MathBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit MathBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* MathPow(Node* context, Node* base, Node* exponent);
+
+ protected:
+ void MathRoundingOperation(
+ Node* context, Node* x,
+ TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
+ void MathUnaryOperation(
+ Node* context, Node* x,
+ TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
+ void MathMaxMin(Node* context, Node* argc,
+ TNode<Float64T> (CodeStubAssembler::*float64op)(
+ SloppyTNode<Float64T>, SloppyTNode<Float64T>),
+ double default_val);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_MATH_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 3988204936..821dac9cc0 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-math-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
@@ -20,41 +21,50 @@ class NumberBuiltinsAssembler : public CodeStubAssembler {
protected:
template <typename Descriptor>
- void BitwiseOp(std::function<Node*(Node* lhs, Node* rhs)> body,
- Signedness signed_result = kSigned) {
+ void EmitBitwiseOp(Operation op) {
Node* left = Parameter(Descriptor::kLeft);
Node* right = Parameter(Descriptor::kRight);
Node* context = Parameter(Descriptor::kContext);
- Node* lhs_value = TruncateTaggedToWord32(context, left);
- Node* rhs_value = TruncateTaggedToWord32(context, right);
- Node* value = body(lhs_value, rhs_value);
- Node* result = signed_result == kSigned ? ChangeInt32ToTagged(value)
- : ChangeUint32ToTagged(value);
- Return(result);
- }
-
- template <typename Descriptor>
- void BitwiseShiftOp(std::function<Node*(Node* lhs, Node* shift_count)> body,
- Signedness signed_result = kSigned) {
- BitwiseOp<Descriptor>(
- [=](Node* lhs, Node* rhs) {
- Node* shift_count = Word32And(rhs, Int32Constant(0x1f));
- return body(lhs, shift_count);
- },
- signed_result);
+ VARIABLE(var_left_word32, MachineRepresentation::kWord32);
+ VARIABLE(var_right_word32, MachineRepresentation::kWord32);
+ VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left);
+ VARIABLE(var_right_bigint, MachineRepresentation::kTagged);
+ Label if_left_number(this), do_number_op(this);
+ Label if_left_bigint(this), do_bigint_op(this);
+
+ TaggedToWord32OrBigInt(context, left, &if_left_number, &var_left_word32,
+ &if_left_bigint, &var_left_bigint);
+ BIND(&if_left_number);
+ TaggedToWord32OrBigInt(context, right, &do_number_op, &var_right_word32,
+ &do_bigint_op, &var_right_bigint);
+ BIND(&do_number_op);
+ Return(BitwiseOp(var_left_word32.value(), var_right_word32.value(), op));
+
+ // BigInt cases.
+ BIND(&if_left_bigint);
+ TaggedToNumeric(context, right, &do_bigint_op, &var_right_bigint);
+
+ BIND(&do_bigint_op);
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context,
+ var_left_bigint.value(), var_right_bigint.value(),
+ SmiConstant(op)));
}
template <typename Descriptor>
- void RelationalComparisonBuiltin(RelationalComparisonMode mode) {
+ void RelationalComparisonBuiltin(Operation op) {
Node* lhs = Parameter(Descriptor::kLeft);
Node* rhs = Parameter(Descriptor::kRight);
Node* context = Parameter(Descriptor::kContext);
- Return(RelationalComparison(mode, lhs, rhs, context));
+ Return(RelationalComparison(op, lhs, rhs, context));
}
template <typename Descriptor>
+ void UnaryOp(Variable* var_input, Label* do_smi, Label* do_double,
+ Variable* var_input_double, Label* do_bigint);
+
+ template <typename Descriptor>
void BinaryOp(Label* smis, Variable* var_left, Variable* var_right,
Label* doubles, Variable* var_left_double,
Variable* var_right_double, Label* bigints);
@@ -78,10 +88,10 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
&return_true);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
TF_BUILTIN(AllocateHeapNumber, CodeStubAssembler) {
@@ -112,10 +122,10 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
&return_true, &return_false);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
// ES6 #sec-number.isnan
@@ -135,10 +145,10 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
// ES6 #sec-number.issafeinteger
@@ -170,10 +180,10 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
&return_true, &return_false);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
// ES6 #sec-number.parsefloat
@@ -277,7 +287,7 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
// Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
Label if_radix10(this), if_generic(this, Label::kDeferred);
- GotoIf(WordEqual(radix, UndefinedConstant()), &if_radix10);
+ GotoIf(IsUndefined(radix), &if_radix10);
GotoIf(WordEqual(radix, SmiConstant(10)), &if_radix10);
GotoIf(WordEqual(radix, SmiConstant(0)), &if_radix10);
Goto(&if_generic);
@@ -371,8 +381,8 @@ class AddStubAssembler : public CodeStubAssembler {
void ConvertNonReceiverAndLoop(Variable* var_value, Label* loop,
Node* context) {
- var_value->Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, var_value->value()));
+ var_value->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context,
+ var_value->value()));
Goto(loop);
}
@@ -401,7 +411,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
VARIABLE(var_right_double, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive, ToString and/or
- // ToNumber conversions.
+ // ToNumeric conversions.
VARIABLE(var_result, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_left, &var_right};
Label loop(this, 2, loop_vars),
@@ -485,7 +495,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
GotoIf(IsStringInstanceType(left_instance_type),
&string_add_convert_right);
GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add);
- // {left} is neither a Number nor a String, and {right} is a Smi.
+ // {left} is neither a Numeric nor a String, and {right} is a Smi.
ConvertAndLoop(&var_left, left_instance_type, &loop, context);
}
} // if_right_smi
@@ -520,14 +530,14 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&if_left_not_number);
{
+ Label if_left_bigint(this);
Node* left_instance_type = LoadMapInstanceType(left_map);
GotoIf(IsStringInstanceType(left_instance_type),
&string_add_convert_right);
Node* right_instance_type = LoadMapInstanceType(right_map);
GotoIf(IsStringInstanceType(right_instance_type),
&string_add_convert_left);
- GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add);
- GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
+ GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
Label if_left_not_receiver(this, Label::kDeferred);
Label if_right_not_receiver(this, Label::kDeferred);
GotoIfNot(IsJSReceiverInstanceType(left_instance_type),
@@ -535,6 +545,15 @@ TF_BUILTIN(Add, AddStubAssembler) {
// {left} is a JSReceiver, convert it first.
ConvertReceiverAndLoop(&var_left, &loop, context);
+ BIND(&if_left_bigint);
+ {
+ // {right} is a HeapObject, but not a String. Jump to
+ // {do_bigint_add} if {right} is already a Numeric.
+ GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
+ GotoIf(IsHeapNumberMap(right_map), &do_bigint_add);
+ ConvertAndLoop(&var_right, right_instance_type, &loop, context);
+ }
+
BIND(&if_left_not_receiver);
GotoIfNot(IsJSReceiverInstanceType(right_instance_type),
&if_right_not_receiver);
@@ -568,7 +587,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&do_bigint_add);
{
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Token::ADD)));
+ var_right.value(), SmiConstant(Operation::kAdd)));
}
BIND(&do_double_add);
@@ -579,19 +598,56 @@ TF_BUILTIN(Add, AddStubAssembler) {
}
template <typename Descriptor>
+void NumberBuiltinsAssembler::UnaryOp(Variable* var_input, Label* do_smi,
+ Label* do_double,
+ Variable* var_input_double,
+ Label* do_bigint) {
+ DCHECK_EQ(var_input->rep(), MachineRepresentation::kTagged);
+ DCHECK_IMPLIES(var_input_double != nullptr,
+ var_input_double->rep() == MachineRepresentation::kFloat64);
+
+ Node* context = Parameter(Descriptor::kContext);
+ var_input->Bind(Parameter(Descriptor::kValue));
+
+ // We might need to loop for ToNumeric conversion.
+ Label loop(this, {var_input});
+ Goto(&loop);
+ BIND(&loop);
+ Node* input = var_input->value();
+
+ Label not_number(this);
+ GotoIf(TaggedIsSmi(input), do_smi);
+ GotoIfNot(IsHeapNumber(input), &not_number);
+ if (var_input_double != nullptr) {
+ var_input_double->Bind(LoadHeapNumberValue(input));
+ }
+ Goto(do_double);
+
+ BIND(&not_number);
+ GotoIf(IsBigInt(input), do_bigint);
+ var_input->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, input));
+ Goto(&loop);
+}
+
+template <typename Descriptor>
void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
Variable* var_right, Label* doubles,
Variable* var_left_double,
Variable* var_right_double,
Label* bigints) {
- DCHECK(var_left->rep() == MachineRepresentation::kTagged);
- DCHECK(var_right->rep() == MachineRepresentation::kTagged);
+ DCHECK_EQ(var_left->rep(), MachineRepresentation::kTagged);
+ DCHECK_EQ(var_right->rep(), MachineRepresentation::kTagged);
+ DCHECK_IMPLIES(var_left_double != nullptr,
+ var_left_double->rep() == MachineRepresentation::kFloat64);
+ DCHECK_IMPLIES(var_right_double != nullptr,
+ var_right_double->rep() == MachineRepresentation::kFloat64);
+ DCHECK_EQ(var_left_double == nullptr, var_right_double == nullptr);
Node* context = Parameter(Descriptor::kContext);
var_left->Bind(Parameter(Descriptor::kLeft));
var_right->Bind(Parameter(Descriptor::kRight));
- // We might need to loop for ToNumber conversions.
+ // We might need to loop for ToNumeric conversions.
Label loop(this, {var_left, var_right});
Goto(&loop);
BIND(&loop);
@@ -603,8 +659,10 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
// At this point, var_left is a Smi but var_right is not.
GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
- var_left_double->Bind(SmiToFloat64(var_left->value()));
- var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ if (var_left_double != nullptr) {
+ var_left_double->Bind(SmiToFloat64(var_left->value()));
+ var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ }
Goto(doubles);
BIND(&left_not_smi);
@@ -613,33 +671,48 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi);
// At this point, var_left is a HeapNumber and var_right is a Smi.
- var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
- var_right_double->Bind(SmiToFloat64(var_right->value()));
+ if (var_left_double != nullptr) {
+ var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
+ var_right_double->Bind(SmiToFloat64(var_right->value()));
+ }
Goto(doubles);
}
BIND(&right_not_smi);
{
GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
- var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
- var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ if (var_left_double != nullptr) {
+ var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
+ var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ }
Goto(doubles);
}
BIND(&left_not_number);
{
- GotoIf(IsBigInt(var_left->value()), bigints);
- // TODO(jkummerow): Here and below, this should call NonNumericToNumeric.
+ Label left_bigint(this);
+ GotoIf(IsBigInt(var_left->value()), &left_bigint);
var_left->Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, var_left->value()));
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value()));
Goto(&loop);
+
+ BIND(&left_bigint);
+ {
+ // Jump to {bigints} if {var_right} is already a Numeric.
+ GotoIf(TaggedIsSmi(var_right->value()), bigints);
+ GotoIf(IsBigInt(var_right->value()), bigints);
+ GotoIf(IsHeapNumber(var_right->value()), bigints);
+ var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context,
+ var_right->value()));
+ Goto(&loop);
+ }
}
BIND(&right_not_number);
{
GotoIf(IsBigInt(var_right->value()), bigints);
- var_right->Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, var_right->value()));
+ var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context,
+ var_right->value()));
Goto(&loop);
}
}
@@ -684,7 +757,91 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
{
Node* context = Parameter(Descriptor::kContext);
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Token::SUB)));
+ var_right.value(), SmiConstant(Operation::kSubtract)));
+ }
+}
+
+TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ VARIABLE(var_input, MachineRepresentation::kTagged);
+ Label do_number(this), do_bigint(this);
+
+ UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
+
+ BIND(&do_number);
+ {
+ TailCallBuiltin(Builtins::kBitwiseXor, context, var_input.value(),
+ SmiConstant(-1));
+ }
+
+ BIND(&do_bigint);
+ {
+ Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
+ SmiConstant(Operation::kBitwiseNot)));
+ }
+}
+
+TF_BUILTIN(Decrement, NumberBuiltinsAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ VARIABLE(var_input, MachineRepresentation::kTagged);
+ Label do_number(this), do_bigint(this);
+
+ UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
+
+ BIND(&do_number);
+ {
+ TailCallBuiltin(Builtins::kSubtract, context, var_input.value(),
+ SmiConstant(1));
+ }
+
+ BIND(&do_bigint);
+ {
+ Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
+ SmiConstant(Operation::kDecrement)));
+ }
+}
+
+TF_BUILTIN(Increment, NumberBuiltinsAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ VARIABLE(var_input, MachineRepresentation::kTagged);
+ Label do_number(this), do_bigint(this);
+
+ UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
+
+ BIND(&do_number);
+ {
+ TailCallBuiltin(Builtins::kAdd, context, var_input.value(), SmiConstant(1));
+ }
+
+ BIND(&do_bigint);
+ {
+ Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
+ SmiConstant(Operation::kIncrement)));
+ }
+}
+
+TF_BUILTIN(Negate, NumberBuiltinsAssembler) {
+ VARIABLE(var_input, MachineRepresentation::kTagged);
+ VARIABLE(var_input_double, MachineRepresentation::kFloat64);
+ Label do_smi(this), do_double(this), do_bigint(this);
+
+ UnaryOp<Descriptor>(&var_input, &do_smi, &do_double, &var_input_double,
+ &do_bigint);
+
+ BIND(&do_smi);
+ { Return(SmiMul(var_input.value(), SmiConstant(-1))); }
+
+ BIND(&do_double);
+ {
+ Node* value = Float64Mul(var_input_double.value(), Float64Constant(-1));
+ Return(AllocateHeapNumberWithValue(value));
+ }
+
+ BIND(&do_bigint);
+ {
+ Node* context = Parameter(Descriptor::kContext);
+ Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
+ SmiConstant(Operation::kNegate)));
}
}
@@ -710,7 +867,7 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
{
Node* context = Parameter(Descriptor::kContext);
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Token::MUL)));
+ var_right.value(), SmiConstant(Operation::kMultiply)));
}
}
@@ -794,7 +951,7 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
{
Node* context = Parameter(Descriptor::kContext);
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Token::DIV)));
+ var_right.value(), SmiConstant(Operation::kDivide)));
}
}
@@ -819,57 +976,68 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
{
Node* context = Parameter(Descriptor::kContext);
Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Token::MOD)));
+ var_right.value(), SmiConstant(Operation::kModulus)));
}
}
+TF_BUILTIN(Exponentiate, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ Label do_number_exp(this), do_bigint_exp(this);
+ Node* context = Parameter(Descriptor::kContext);
+
+ BinaryOp<Descriptor>(&do_number_exp, &var_left, &var_right, &do_number_exp,
+ nullptr, nullptr, &do_bigint_exp);
+
+ BIND(&do_number_exp);
+ {
+ MathBuiltinsAssembler math_asm(state());
+ Return(math_asm.MathPow(context, var_left.value(), var_right.value()));
+ }
+
+ BIND(&do_bigint_exp);
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
+ var_right.value(), SmiConstant(Operation::kExponentiate)));
+}
+
TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
- BitwiseShiftOp<Descriptor>([=](Node* lhs, Node* shift_count) {
- return Word32Shl(lhs, shift_count);
- });
+ EmitBitwiseOp<Descriptor>(Operation::kShiftLeft);
}
TF_BUILTIN(ShiftRight, NumberBuiltinsAssembler) {
- BitwiseShiftOp<Descriptor>([=](Node* lhs, Node* shift_count) {
- return Word32Sar(lhs, shift_count);
- });
+ EmitBitwiseOp<Descriptor>(Operation::kShiftRight);
}
TF_BUILTIN(ShiftRightLogical, NumberBuiltinsAssembler) {
- BitwiseShiftOp<Descriptor>(
- [=](Node* lhs, Node* shift_count) { return Word32Shr(lhs, shift_count); },
- kUnsigned);
+ EmitBitwiseOp<Descriptor>(Operation::kShiftRightLogical);
}
TF_BUILTIN(BitwiseAnd, NumberBuiltinsAssembler) {
- BitwiseOp<Descriptor>(
- [=](Node* lhs, Node* rhs) { return Word32And(lhs, rhs); });
+ EmitBitwiseOp<Descriptor>(Operation::kBitwiseAnd);
}
TF_BUILTIN(BitwiseOr, NumberBuiltinsAssembler) {
- BitwiseOp<Descriptor>(
- [=](Node* lhs, Node* rhs) { return Word32Or(lhs, rhs); });
+ EmitBitwiseOp<Descriptor>(Operation::kBitwiseOr);
}
TF_BUILTIN(BitwiseXor, NumberBuiltinsAssembler) {
- BitwiseOp<Descriptor>(
- [=](Node* lhs, Node* rhs) { return Word32Xor(lhs, rhs); });
+ EmitBitwiseOp<Descriptor>(Operation::kBitwiseXor);
}
TF_BUILTIN(LessThan, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(kLessThan);
+ RelationalComparisonBuiltin<Descriptor>(Operation::kLessThan);
}
TF_BUILTIN(LessThanOrEqual, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(kLessThanOrEqual);
+ RelationalComparisonBuiltin<Descriptor>(Operation::kLessThanOrEqual);
}
TF_BUILTIN(GreaterThan, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(kGreaterThan);
+ RelationalComparisonBuiltin<Descriptor>(Operation::kGreaterThan);
}
TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(kGreaterThanOrEqual);
+ RelationalComparisonBuiltin<Descriptor>(Operation::kGreaterThanOrEqual);
}
TF_BUILTIN(Equal, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index b1af0cf8ab..65170d321d 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -27,6 +27,13 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
void AddToDictionaryIf(Node* condition, Node* name_dictionary,
Handle<Name> name, Node* value, Label* bailout);
Node* FromPropertyDescriptor(Node* context, Node* desc);
+ Node* FromPropertyDetails(Node* context, Node* raw_value, Node* details,
+ Label* if_bailout);
+ Node* ConstructAccessorDescriptor(Node* context, Node* getter, Node* setter,
+ Node* enumerable, Node* configurable);
+ Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
+ Node* enumerable, Node* configurable);
+ Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -41,6 +48,55 @@ void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
rhs));
}
+Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context,
+ Node* getter,
+ Node* setter,
+ Node* enumerable,
+ Node* configurable) {
+ Node* native_context = LoadNativeContext(context);
+ Node* map = LoadContextElement(
+ native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX);
+ Node* js_desc = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kGetOffset, getter);
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kSetOffset, setter);
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kEnumerableOffset,
+ SelectBooleanConstant(enumerable));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kConfigurableOffset,
+ SelectBooleanConstant(configurable));
+
+ return js_desc;
+}
+
+Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
+ Node* value,
+ Node* writable,
+ Node* enumerable,
+ Node* configurable) {
+ Node* native_context = LoadNativeContext(context);
+ Node* map = LoadContextElement(native_context,
+ Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX);
+ Node* js_desc = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(js_desc,
+ JSDataPropertyDescriptor::kValueOffset, value);
+ StoreObjectFieldNoWriteBarrier(js_desc,
+ JSDataPropertyDescriptor::kWritableOffset,
+ SelectBooleanConstant(writable));
+ StoreObjectFieldNoWriteBarrier(js_desc,
+ JSDataPropertyDescriptor::kEnumerableOffset,
+ SelectBooleanConstant(enumerable));
+ StoreObjectFieldNoWriteBarrier(js_desc,
+ JSDataPropertyDescriptor::kConfigurableOffset,
+ SelectBooleanConstant(configurable));
+
+ return js_desc;
+}
+
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
@@ -101,10 +157,10 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Branch(IsName(key), &return_false, &call_runtime);
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
BIND(&call_runtime);
Return(CallRuntime(Runtime::kObjectHasOwnProperty, context, object, key));
@@ -132,7 +188,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
// Ensure that the {object} doesn't have any elements.
CSA_ASSERT(this, IsJSObjectMap(object_map));
- Node* object_elements = LoadObjectField(object, JSObject::kElementsOffset);
+ Node* object_elements = LoadElements(object);
GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
&if_slow);
@@ -247,7 +303,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
if_error(this), if_function(this), if_number(this, Label::kDeferred),
if_object(this), if_primitive(this), if_proxy(this, Label::kDeferred),
if_regexp(this), if_string(this), if_symbol(this, Label::kDeferred),
- if_value(this);
+ if_value(this), if_bigint(this, Label::kDeferred);
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
@@ -371,19 +427,19 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&if_primitive);
{
- Label return_null(this), return_undefined(this);
+ Label return_undefined(this);
GotoIf(IsStringInstanceType(receiver_instance_type), &if_string);
+ GotoIf(IsBigIntInstanceType(receiver_instance_type), &if_bigint);
GotoIf(IsBooleanMap(receiver_map), &if_boolean);
GotoIf(IsHeapNumberMap(receiver_map), &if_number);
GotoIf(IsSymbolMap(receiver_map), &if_symbol);
- Branch(IsUndefined(receiver), &return_undefined, &return_null);
+ GotoIf(IsUndefined(receiver), &return_undefined);
+ CSA_ASSERT(this, IsNull(receiver));
+ Return(LoadRoot(Heap::knull_to_stringRootIndex));
BIND(&return_undefined);
Return(LoadRoot(Heap::kundefined_to_stringRootIndex));
-
- BIND(&return_null);
- Return(LoadRoot(Heap::knull_to_stringRootIndex));
}
BIND(&if_proxy);
@@ -451,6 +507,20 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
Goto(&checkstringtag);
}
+ BIND(&if_bigint);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* bigint_constructor =
+ LoadContextElement(native_context, Context::BIGINT_FUNCTION_INDEX);
+ Node* bigint_initial_map = LoadObjectField(
+ bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* bigint_prototype =
+ LoadObjectField(bigint_initial_map, Map::kPrototypeOffset);
+ var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+ var_holder.Bind(bigint_prototype);
+ Goto(&checkstringtag);
+ }
+
BIND(&if_value);
{
Node* receiver_value = LoadJSValueValue(receiver);
@@ -458,7 +528,12 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
Node* receiver_value_map = LoadMap(receiver_value);
GotoIf(IsHeapNumberMap(receiver_value_map), &if_number);
GotoIf(IsBooleanMap(receiver_value_map), &if_boolean);
- Branch(IsSymbolMap(receiver_value_map), &if_symbol, &if_string);
+ GotoIf(IsSymbolMap(receiver_value_map), &if_symbol);
+ Node* receiver_value_instance_type =
+ LoadMapInstanceType(receiver_value_map);
+ GotoIf(IsBigIntInstanceType(receiver_value_instance_type), &if_bigint);
+ CSA_ASSERT(this, IsStringInstanceType(receiver_value_instance_type));
+ Goto(&if_string);
}
BIND(&checkstringtag);
@@ -521,7 +596,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
no_properties(this);
{
Comment("Argument 1 check: prototype");
- GotoIf(WordEqual(prototype, NullConstant()), &prototype_valid);
+ GotoIf(IsNull(prototype), &prototype_valid);
BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
}
@@ -531,7 +606,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
// Check that we have a simple object
GotoIf(TaggedIsSmi(properties), &call_runtime);
// Undefined implies no properties.
- GotoIf(WordEqual(properties, UndefinedConstant()), &no_properties);
+ GotoIf(IsUndefined(properties), &no_properties);
Node* properties_map = LoadMap(properties);
GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
// Stay on the fast path only if there are no elements.
@@ -552,7 +627,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
VARIABLE(properties, MachineRepresentation::kTagged);
Label non_null_proto(this), instantiate_map(this), good(this);
- Branch(WordEqual(prototype, NullConstant()), &good, &non_null_proto);
+ Branch(IsNull(prototype), &good, &non_null_proto);
BIND(&good);
{
@@ -578,7 +653,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
Comment("Load ObjectCreateMap from PrototypeInfo");
Node* weak_cell =
LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
- GotoIf(WordEqual(weak_cell, UndefinedConstant()), &call_runtime);
+ GotoIf(IsUndefined(weak_cell), &call_runtime);
map.Bind(LoadWeakCellValue(weak_cell, &call_runtime));
Goto(&instantiate_map);
}
@@ -669,9 +744,10 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
+ Label runtime(this);
+ GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime);
Node* maybe_map =
LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset);
- Label runtime(this);
GotoIf(DoesntHaveInstanceType(maybe_map, MAP_TYPE), &runtime);
Node* shared =
@@ -684,9 +760,9 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* register_file = AllocateFixedArray(HOLEY_ELEMENTS, size);
FillFixedArrayWithValue(HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
size, Heap::kUndefinedValueRootIndex);
-
- Node* const result = AllocateJSObjectFromMap(maybe_map);
-
+ // TODO(cbruni): support start_offset to avoid double initialization.
+ Node* result = AllocateJSObjectFromMap(maybe_map, nullptr, nullptr, kNone,
+ kWithSlackTracking);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kFunctionOffset,
closure);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContextOffset,
@@ -698,7 +774,6 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
- HandleSlackTracking(context, result, maybe_map, JSGeneratorObject::kSize);
Return(result);
BIND(&runtime);
@@ -712,31 +787,93 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
- UndefinedConstant()));
+ CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* obj = args.GetOptionalArgumentValue(0);
+ Node* object = args.GetOptionalArgumentValue(0);
Node* key = args.GetOptionalArgumentValue(1);
// 1. Let obj be ? ToObject(O).
- Node* object = CallBuiltin(Builtins::kToObject, context, obj);
+ object = CallBuiltin(Builtins::kToObject, context, object);
// 2. Let key be ? ToPropertyKey(P).
- Node* name = ToName(context, key);
+ key = ToName(context, key);
// 3. Let desc be ? obj.[[GetOwnProperty]](key).
- Node* desc =
- CallRuntime(Runtime::kGetOwnPropertyDescriptor, context, object, name);
+ Label if_keyisindex(this), if_iskeyunique(this),
+ call_runtime(this, Label::kDeferred),
+ return_undefined(this, Label::kDeferred), if_notunique_name(this);
+ Node* map = LoadMap(object);
+ Node* instance_type = LoadMapInstanceType(map);
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+ &call_runtime);
+ {
+ VARIABLE(var_index, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+ VARIABLE(var_name, MachineRepresentation::kTagged);
+
+ TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_name,
+ &call_runtime, &if_notunique_name);
+
+ BIND(&if_notunique_name);
+ {
+ Label not_in_string_table(this);
+ TryInternalizeString(key, &if_keyisindex, &var_index, &if_iskeyunique,
+ &var_name, &not_in_string_table, &call_runtime);
+
+ BIND(&not_in_string_table);
+ {
+ // If the string was not found in the string table, then no regular
+ // object can have a property with that name, so return |undefined|.
+ Goto(&return_undefined);
+ }
+ }
+
+ BIND(&if_iskeyunique);
+ {
+ Label if_found_value(this), return_empty(this), if_not_found(this);
- Label return_undefined(this, Label::kDeferred);
- GotoIf(IsUndefined(desc), &return_undefined);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ VARIABLE(var_details, MachineRepresentation::kWord32);
+ VARIABLE(var_raw_value, MachineRepresentation::kTagged);
- CSA_ASSERT(this, IsFixedArray(desc));
+ TryGetOwnProperty(context, object, object, map, instance_type,
+ var_name.value(), &if_found_value, &var_value,
+ &var_details, &var_raw_value, &return_empty,
+ &if_not_found, kReturnAccessorPair);
+
+ BIND(&if_found_value);
+ // 4. Return FromPropertyDescriptor(desc).
+ Node* js_desc = FromPropertyDetails(context, var_value.value(),
+ var_details.value(), &call_runtime);
+ args.PopAndReturn(js_desc);
+
+ BIND(&return_empty);
+ var_value.Bind(UndefinedConstant());
+ args.PopAndReturn(UndefinedConstant());
+
+ BIND(&if_not_found);
+ Goto(&call_runtime);
+ }
+ }
- // 4. Return FromPropertyDescriptor(desc).
- args.PopAndReturn(FromPropertyDescriptor(context, desc));
+ BIND(&if_keyisindex);
+ Goto(&call_runtime);
+ BIND(&call_runtime);
+ {
+ Node* desc =
+ CallRuntime(Runtime::kGetOwnPropertyDescriptor, context, object, key);
+
+ GotoIf(IsUndefined(desc), &return_undefined);
+
+ CSA_ASSERT(this, IsFixedArray(desc));
+
+ // 4. Return FromPropertyDescriptor(desc).
+ Node* js_desc = FromPropertyDescriptor(context, desc);
+ args.PopAndReturn(js_desc);
+ }
BIND(&return_undefined);
args.PopAndReturn(UndefinedConstant());
}
@@ -779,54 +916,21 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
BIND(&if_accessor_desc);
{
- Node* native_context = LoadNativeContext(context);
- Node* map = LoadContextElement(
- native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX);
- Node* js_desc = AllocateJSObjectFromMap(map);
-
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSAccessorPropertyDescriptor::kGetOffset,
- LoadObjectField(desc, PropertyDescriptorObject::kGetOffset));
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSAccessorPropertyDescriptor::kSetOffset,
- LoadObjectField(desc, PropertyDescriptorObject::kSetOffset));
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSAccessorPropertyDescriptor::kEnumerableOffset,
- SelectBooleanConstant(
- IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags)));
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSAccessorPropertyDescriptor::kConfigurableOffset,
- SelectBooleanConstant(
- IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
-
- js_descriptor.Bind(js_desc);
+ js_descriptor.Bind(ConstructAccessorDescriptor(
+ context, LoadObjectField(desc, PropertyDescriptorObject::kGetOffset),
+ LoadObjectField(desc, PropertyDescriptorObject::kSetOffset),
+ IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags),
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
Goto(&return_desc);
}
BIND(&if_data_desc);
{
- Node* native_context = LoadNativeContext(context);
- Node* map = LoadContextElement(native_context,
- Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX);
- Node* js_desc = AllocateJSObjectFromMap(map);
-
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSDataPropertyDescriptor::kValueOffset,
- LoadObjectField(desc, PropertyDescriptorObject::kValueOffset));
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSDataPropertyDescriptor::kWritableOffset,
- SelectBooleanConstant(
- IsSetWord32<PropertyDescriptorObject::IsWritableBit>(flags)));
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSDataPropertyDescriptor::kEnumerableOffset,
- SelectBooleanConstant(
- IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags)));
- StoreObjectFieldNoWriteBarrier(
- js_desc, JSDataPropertyDescriptor::kConfigurableOffset,
- SelectBooleanConstant(
- IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
-
- js_descriptor.Bind(js_desc);
+ js_descriptor.Bind(ConstructDataDescriptor(
+ context, LoadObjectField(desc, PropertyDescriptorObject::kValueOffset),
+ IsSetWord32<PropertyDescriptorObject::IsWritableBit>(flags),
+ IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags),
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
Goto(&return_desc);
}
@@ -884,5 +988,60 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
BIND(&return_desc);
return js_descriptor.value();
}
+
+Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context,
+ Node* raw_value,
+ Node* details,
+ Label* if_bailout) {
+ VARIABLE(js_descriptor, MachineRepresentation::kTagged);
+
+ Label if_accessor_desc(this), if_data_desc(this), return_desc(this);
+ BranchIfAccessorPair(raw_value, &if_accessor_desc, &if_data_desc);
+
+ BIND(&if_accessor_desc);
+ {
+ Node* getter = LoadObjectField(raw_value, AccessorPair::kGetterOffset);
+ Node* setter = LoadObjectField(raw_value, AccessorPair::kSetterOffset);
+ js_descriptor.Bind(ConstructAccessorDescriptor(
+ context, GetAccessorOrUndefined(getter, if_bailout),
+ GetAccessorOrUndefined(setter, if_bailout),
+ IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
+ IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)));
+ Goto(&return_desc);
+ }
+
+ BIND(&if_data_desc);
+ {
+ js_descriptor.Bind(ConstructDataDescriptor(
+ context, raw_value,
+ IsNotSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+ IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
+ IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)));
+ Goto(&return_desc);
+ }
+
+ BIND(&return_desc);
+ return js_descriptor.value();
+}
+
+Node* ObjectBuiltinsAssembler::GetAccessorOrUndefined(Node* accessor,
+ Label* if_bailout) {
+ Label bind_undefined(this, Label::kDeferred), return_result(this);
+ VARIABLE(result, MachineRepresentation::kTagged);
+
+ GotoIf(IsNull(accessor), &bind_undefined);
+ result.Bind(accessor);
+ Node* map = LoadMap(accessor);
+ // TODO(ishell): probe template instantiations cache.
+ GotoIf(IsFunctionTemplateInfoMap(map), if_bailout);
+ Goto(&return_result);
+
+ BIND(&bind_undefined);
+ result.Bind(UndefinedConstant());
+ Goto(&return_result);
+
+ BIND(&return_result);
+ return result.value();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 3b34834e3d..36f7ebfc0a 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -52,7 +52,7 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
isolate, object, JSReceiver::ToObject(isolate, args.receiver()));
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(object, name);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return isolate->heap()->exception();
if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
@@ -86,11 +86,8 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
Handle<Object> name, Handle<Object> accessor) {
// 1. Let O be ? ToObject(this value).
Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, receiver,
- FLAG_harmony_strict_legacy_accessor_builtins
- ? Object::ToObject(isolate, object)
- : Object::ConvertReceiver(isolate, object));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
// 2. If IsCallable(getter) is false, throw a TypeError exception.
if (!accessor->IsCallable()) {
MessageTemplate::Template message =
@@ -116,10 +113,8 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
// 5. Perform ? DefinePropertyOrThrow(O, key, desc).
// To preserve legacy behavior, we ignore errors silently rather than
// throwing an exception.
- Maybe<bool> success = JSReceiver::DefineOwnProperty(
- isolate, receiver, name, &desc,
- FLAG_harmony_strict_legacy_accessor_builtins ? Object::THROW_ON_ERROR
- : Object::DONT_THROW);
+ Maybe<bool> success = JSReceiver::DefineOwnProperty(isolate, receiver, name,
+ &desc, kThrowOnError);
MAYBE_RETURN(success, isolate->heap()->exception());
if (!success.FromJust()) {
isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
@@ -130,11 +125,8 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
Handle<Object> key, AccessorComponent component) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, object,
- FLAG_harmony_strict_legacy_accessor_builtins
- ? Object::ToObject(isolate, object)
- : Object::ConvertReceiver(isolate, object));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+ Object::ToObject(isolate, object));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
Object::ToPropertyKey(isolate, key));
bool success = false;
@@ -242,7 +234,7 @@ BUILTIN(ObjectFreeze) {
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
- FROZEN, Object::THROW_ON_ERROR),
+ FROZEN, kThrowOnError),
isolate->heap()->exception());
}
return *object;
@@ -287,9 +279,8 @@ BUILTIN(ObjectSetPrototypeOf) {
// 4. Let status be ? O.[[SetPrototypeOf]](proto).
// 5. If status is false, throw a TypeError exception.
- MAYBE_RETURN(
- JSReceiver::SetPrototype(receiver, proto, true, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
+ isolate->heap()->exception());
// 6. Return O.
return *receiver;
@@ -332,9 +323,8 @@ BUILTIN(ObjectPrototypeSetProto) {
// 4. Let status be ? O.[[SetPrototypeOf]](proto).
// 5. If status is false, throw a TypeError exception.
- MAYBE_RETURN(
- JSReceiver::SetPrototype(receiver, proto, true, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
+ isolate->heap()->exception());
// Return undefined.
return isolate->heap()->undefined_value();
@@ -459,8 +449,8 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, descriptors, key, descriptors, LookupIterator::OWN);
- Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
- Object::DONT_THROW);
+ Maybe<bool> success =
+ JSReceiver::CreateDataProperty(&it, from_descriptor, kDontThrow);
CHECK(success.FromJust());
}
@@ -473,7 +463,7 @@ BUILTIN(ObjectPreventExtensions) {
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
- Object::THROW_ON_ERROR),
+ kThrowOnError),
isolate->heap()->exception());
}
return *object;
@@ -485,7 +475,7 @@ BUILTIN(ObjectSeal) {
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
- SEALED, Object::THROW_ON_ERROR),
+ SEALED, kThrowOnError),
isolate->heap()->exception());
}
return *object;
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 0d00c8bc27..67ebc85ba4 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -21,6 +21,7 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
Node* const initial_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
Node* const instance = AllocateJSObjectFromMap(initial_map);
@@ -109,7 +110,7 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* native_context = LoadNativeContext(context);
- Node* map = LoadRoot(Heap::kPromiseCapabilityMapRootIndex);
+ Node* map = LoadRoot(Heap::kTuple3MapRootIndex);
Node* capability = AllocateStruct(map);
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -220,8 +221,6 @@ Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
Node* promise, Node* debug_event, Node* native_context) {
Node* const context =
CreatePromiseContext(native_context, kPromiseContextLength);
- StoreContextElementNoWriteBarrier(context, kAlreadyVisitedSlot,
- SmiConstant(0));
StoreContextElementNoWriteBarrier(context, kPromiseSlot, promise);
StoreContextElementNoWriteBarrier(context, kDebugEventSlot, debug_event);
return context;
@@ -236,28 +235,6 @@ Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
return context;
}
-Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
- Node* context, Node* value, MessageTemplate::Template msg_template,
- const char* method_name) {
- Label out(this), throw_exception(this, Label::kDeferred);
- VARIABLE(var_value_map, MachineRepresentation::kTagged);
-
- GotoIf(TaggedIsSmi(value), &throw_exception);
-
- // Load the instance type of the {value}.
- var_value_map.Bind(LoadMap(value));
- Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
-
- Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
-
- // The {value} is not a compatible receiver for this method.
- BIND(&throw_exception);
- ThrowTypeError(context, msg_template, method_name);
-
- BIND(&out);
- return var_value_map.value();
-}
-
Node* PromiseBuiltinsAssembler::PromiseHasHandler(Node* promise) {
Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
return IsSetWord(SmiUntag(flags), 1 << JSPromise::kHasHandlerBit);
@@ -285,7 +262,7 @@ void PromiseBuiltinsAssembler::PromiseSetStatus(
Node* promise, v8::Promise::PromiseState const status) {
CSA_ASSERT(this,
IsPromiseStatus(PromiseStatus(promise), v8::Promise::kPending));
- CHECK(status != v8::Promise::kPending);
+ CHECK_NE(status, v8::Promise::kPending);
Node* mask = SmiConstant(status);
Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
@@ -323,8 +300,7 @@ Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
GetProperty(context, constructor, isolate->factory()->species_symbol());
// 6. If S is either undefined or null, return defaultConstructor.
- GotoIf(IsUndefined(species), &out);
- GotoIf(WordEqual(species, NullConstant()), &out);
+ GotoIf(IsNullOrUndefined(species), &out);
// 7. If IsConstructor(S) is true, return S.
Label throw_error(this);
@@ -351,16 +327,14 @@ void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
Node* delta = IntPtrOrSmiConstant(1, mode);
Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
- const ElementsKind kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- const CodeStubAssembler::AllocationFlags flags =
- CodeStubAssembler::kAllowLargeObjectAllocation;
int additional_offset = 0;
- Node* new_elements = AllocateFixedArray(kind, new_capacity, mode, flags);
+ ExtractFixedArrayFlags flags;
+ flags |= ExtractFixedArrayFlag::kFixedArrays;
+ Node* new_elements =
+ ExtractFixedArray(elements, nullptr, length, new_capacity, flags, mode);
- CopyFixedArrayElements(kind, elements, new_elements, length, barrier_mode,
- mode);
StoreFixedArrayElement(new_elements, length, value, barrier_mode,
additional_offset, mode);
@@ -392,6 +366,8 @@ Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged);
VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged);
+ GotoIfForceSlowPath(&promise_capability);
+
Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
&promise_capability);
@@ -641,6 +617,8 @@ void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
LoadContextElement(native_context,
Context::PROMISE_FUNCTION_INDEX)));
+ GotoIfForceSlowPath(if_ismodified);
+
Node* const map = LoadMap(promise);
Node* const initial_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
@@ -803,7 +781,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
Node* const key =
HeapConstant(isolate->factory()->promise_handled_by_symbol());
CallRuntime(Runtime::kSetProperty, context, result, key, promise,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
Goto(&enqueue);
// 12. Perform EnqueueJob("PromiseJobs",
@@ -847,12 +825,12 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
void PromiseBuiltinsAssembler::PromiseFulfill(
Node* context, Node* promise, Node* result,
v8::Promise::PromiseState status) {
- Label do_promisereset(this), debug_async_event_enqueue_recurring(this);
+ Label do_promisereset(this);
Node* const deferred_promise =
LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
- GotoIf(IsUndefined(deferred_promise), &debug_async_event_enqueue_recurring);
+ GotoIf(IsUndefined(deferred_promise), &do_promisereset);
Node* const tasks =
status == v8::Promise::kFulfilled
@@ -869,15 +847,7 @@ void PromiseBuiltinsAssembler::PromiseFulfill(
context);
CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
- Goto(&debug_async_event_enqueue_recurring);
-
- BIND(&debug_async_event_enqueue_recurring);
- {
- GotoIfNot(IsDebugActive(), &do_promisereset);
- CallRuntime(Runtime::kDebugAsyncEventEnqueueRecurring, context, promise,
- SmiConstant(status));
- Goto(&do_promisereset);
- }
+ Goto(&do_promisereset);
BIND(&do_promisereset);
{
@@ -934,7 +904,7 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
{
Branch(WordEqual(CallRuntime(Runtime::kAllowDynamicFunction, context,
promise_constructor),
- BooleanConstant(true)),
+ TrueConstant()),
&has_access, if_noaccess);
}
@@ -984,7 +954,7 @@ void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
GotoIfNot(condition, &done);
CallRuntime(Runtime::kSetProperty, context, object(),
HeapConstant(factory()->promise_forwarding_handler_symbol()),
- TrueConstant(), SmiConstant(STRICT));
+ TrueConstant(), SmiConstant(LanguageMode::kStrict));
Goto(&done);
BIND(&done);
}
@@ -998,42 +968,44 @@ void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
GotoIfNot(HasInstanceType(promise, JS_PROMISE_TYPE), &done);
CallRuntime(Runtime::kSetProperty, context, promise,
HeapConstant(factory()->promise_handled_by_symbol()),
- handled_by(), SmiConstant(STRICT));
+ handled_by(), SmiConstant(LanguageMode::kStrict));
Goto(&done);
BIND(&done);
}
-// ES#sec-promise-reject-functions
-// Promise Reject Functions
-TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
-
+void PromiseBuiltinsAssembler::PerformFulfillClosure(Node* context, Node* value,
+ bool should_resolve) {
Label out(this);
- // 3. Let alreadyResolved be F.[[AlreadyResolved]].
- int has_already_visited_slot = kAlreadyVisitedSlot;
-
- Node* const has_already_visited =
- LoadContextElement(context, has_already_visited_slot);
-
- // 4. If alreadyResolved.[[Value]] is true, return undefined.
- GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
-
- // 5.Set alreadyResolved.[[Value]] to true.
- StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
- SmiConstant(1));
-
// 2. Let promise be F.[[Promise]].
- Node* const promise =
- LoadContextElement(context, IntPtrConstant(kPromiseSlot));
- Node* const debug_event =
- LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
+ Node* const promise_slot = IntPtrConstant(kPromiseSlot);
+ Node* const promise = LoadContextElement(context, promise_slot);
+
+ // We use `undefined` as a marker to know that this callback was
+ // already called.
+ GotoIf(IsUndefined(promise), &out);
+
+ if (should_resolve) {
+ InternalResolvePromise(context, promise, value);
+ } else {
+ Node* const debug_event =
+ LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
+ InternalPromiseReject(context, promise, value, debug_event);
+ }
- InternalPromiseReject(context, promise, value, debug_event);
- Return(UndefinedConstant());
+ StoreContextElement(context, promise_slot, UndefinedConstant());
+ Goto(&out);
BIND(&out);
+}
+
+// ES#sec-promise-reject-functions
+// Promise Reject Functions
+TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ PerformFulfillClosure(context, value, false);
Return(UndefinedConstant());
}
@@ -1177,29 +1149,7 @@ TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
- Label out(this);
-
- // 3. Let alreadyResolved be F.[[AlreadyResolved]].
- int has_already_visited_slot = kAlreadyVisitedSlot;
-
- Node* const has_already_visited =
- LoadContextElement(context, has_already_visited_slot);
-
- // 4. If alreadyResolved.[[Value]] is true, return undefined.
- GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
-
- // 5.Set alreadyResolved.[[Value]] to true.
- StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
- SmiConstant(1));
-
- // 2. Let promise be F.[[Promise]].
- Node* const promise =
- LoadContextElement(context, IntPtrConstant(kPromiseSlot));
-
- InternalResolvePromise(context, promise, value);
- Return(UndefinedConstant());
-
- BIND(&out);
+ PerformFulfillClosure(context, value, true);
Return(UndefinedConstant());
}
@@ -1478,14 +1428,12 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
Node* const capability = LoadContextElement(context, kCapabilitySlot);
Label if_alreadyinvoked(this, Label::kDeferred);
- GotoIf(WordNotEqual(
- LoadObjectField(capability, PromiseCapability::kResolveOffset),
- UndefinedConstant()),
- &if_alreadyinvoked);
- GotoIf(WordNotEqual(
- LoadObjectField(capability, PromiseCapability::kRejectOffset),
- UndefinedConstant()),
- &if_alreadyinvoked);
+ GotoIfNot(IsUndefined(
+ LoadObjectField(capability, PromiseCapability::kResolveOffset)),
+ &if_alreadyinvoked);
+ GotoIfNot(IsUndefined(
+ LoadObjectField(capability, PromiseCapability::kRejectOffset)),
+ &if_alreadyinvoked);
StoreObjectField(capability, PromiseCapability::kResolveOffset, resolve);
StoreObjectField(capability, PromiseCapability::kRejectOffset, reject);
@@ -1519,6 +1467,9 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
+
+ GotoIfForceSlowPath(&if_custompromise);
+
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index c2cadecfd2..759176757f 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -16,11 +16,10 @@ typedef compiler::CodeAssemblerState CodeAssemblerState;
class PromiseBuiltinsAssembler : public CodeStubAssembler {
public:
enum PromiseResolvingFunctionContextSlot {
- // Whether the resolve/reject callback was already called.
- kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
-
- // The promise which resolve/reject callbacks fulfill.
- kPromiseSlot,
+ // The promise which resolve/reject callbacks fulfill. If this is
+ // undefined, then we've already visited this callback and it
+ // should be a no-op.
+ kPromiseSlot = Context::MIN_CONTEXT_SLOTS,
// Whether to trigger a debug event or not. Used in catch
// prediction.
@@ -112,10 +111,6 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
protected:
void PromiseInit(Node* promise);
- Node* ThrowIfNotJSReceiver(Node* context, Node* value,
- MessageTemplate::Template msg_template,
- const char* method_name = nullptr);
-
Node* SpeciesConstructor(Node* context, Node* object,
Node* default_constructor);
@@ -180,6 +175,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
const NodeGenerator& handled_by);
Node* PromiseStatus(Node* promise);
+ void PerformFulfillClosure(Node* context, Node* value, bool should_resolve);
private:
Node* IsPromiseStatus(Node* actual, v8::Promise::PromiseState expected);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 29c5a4eaeb..2d81867d51 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -67,8 +67,6 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
Heap::kEmptyPropertyDictionaryRootIndex);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
- StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHashOffset,
- UndefinedConstant());
return proxy;
}
@@ -137,6 +135,8 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
CSA_ASSERT(this, IsJSProxy(proxy));
CSA_ASSERT(this, IsCallable(proxy));
+ PerformStackCheck(context);
+
Label throw_proxy_handler_revoked(this, Label::kDeferred),
trap_undefined(this);
@@ -441,7 +441,8 @@ TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
BIND(&failure);
{
Label if_throw(this, Label::kDeferred);
- Branch(SmiEqual(language_mode, SmiConstant(STRICT)), &if_throw, &success);
+ Branch(SmiEqual(language_mode, SmiConstant(LanguageMode::kStrict)),
+ &if_throw, &success);
BIND(&if_throw);
ThrowTypeError(context, MessageTemplate::kProxyTrapReturnedFalsishFor,
@@ -456,8 +457,8 @@ TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
{
Label failure(this), throw_error(this, Label::kDeferred);
- Branch(SmiEqual(language_mode, SmiConstant(STRICT)), &throw_error,
- &failure);
+ Branch(SmiEqual(language_mode, SmiConstant(LanguageMode::kStrict)),
+ &throw_error, &failure);
BIND(&failure);
Return(UndefinedConstant());
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 67203ee4d9..2b2ac54ebe 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -16,12 +16,6 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
- Label* if_not_accessor_pair) {
- GotoIf(TaggedIsSmi(value), if_not_accessor_pair);
- Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
- }
-
// ES6 section 9.5.8 [[Get]] ( P, Receiver )
// name should not be an index.
Node* ProxyGetProperty(Node* context, Node* proxy, Node* name,
diff --git a/deps/v8/src/builtins/builtins-reflect-gen.cc b/deps/v8/src/builtins/builtins-reflect-gen.cc
new file mode 100644
index 0000000000..3ab21f975d
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-reflect-gen.cc
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// ES section #sec-reflect.has
+TF_BUILTIN(ReflectHas, CodeStubAssembler) {
+ Node* target = Parameter(Descriptor::kTarget);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotJSReceiver(context, target, MessageTemplate::kCalledOnNonObject,
+ "Reflect.has");
+
+ Return(CallBuiltin(Builtins::kHasProperty, context, key, target));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index 9b29634629..bc596e0ccb 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -41,9 +41,8 @@ BUILTIN(ReflectDefineProperty) {
return isolate->heap()->exception();
}
- Maybe<bool> result =
- JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
- name, &desc, Object::DONT_THROW);
+ Maybe<bool> result = JSReceiver::DefineOwnProperty(
+ isolate, Handle<JSReceiver>::cast(target), name, &desc, kDontThrow);
MAYBE_RETURN(result, isolate->heap()->exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -67,7 +66,7 @@ BUILTIN(ReflectDeleteProperty) {
Object::ToName(isolate, key));
Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver>::cast(target), name, SLOPPY);
+ Handle<JSReceiver>::cast(target), name, LanguageMode::kSloppy);
MAYBE_RETURN(result, isolate->heap()->exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -138,30 +137,6 @@ BUILTIN(ReflectGetPrototypeOf) {
JSReceiver::GetPrototype(isolate, receiver));
}
-// ES6 section 26.1.9 Reflect.has
-BUILTIN(ReflectHas) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at(1);
- Handle<Object> key = args.at(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.has")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- Maybe<bool> result =
- JSReceiver::HasProperty(Handle<JSReceiver>::cast(target), name);
- return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
- : isolate->heap()->exception();
-}
-
// ES6 section 26.1.10 Reflect.isExtensible
BUILTIN(ReflectIsExtensible) {
HandleScope scope(isolate);
@@ -217,7 +192,7 @@ BUILTIN(ReflectPreventExtensions) {
}
Maybe<bool> result = JSReceiver::PreventExtensions(
- Handle<JSReceiver>::cast(target), Object::DONT_THROW);
+ Handle<JSReceiver>::cast(target), kDontThrow);
MAYBE_RETURN(result, isolate->heap()->exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -244,7 +219,7 @@ BUILTIN(ReflectSet) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, receiver, name, Handle<JSReceiver>::cast(target));
Maybe<bool> result = Object::SetSuperProperty(
- &it, value, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED);
+ &it, value, LanguageMode::kSloppy, Object::MAY_BE_STORE_FROM_KEYED);
MAYBE_RETURN(result, isolate->heap()->exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -269,7 +244,7 @@ BUILTIN(ReflectSetPrototypeOf) {
}
Maybe<bool> result = JSReceiver::SetPrototype(
- Handle<JSReceiver>::cast(target), proto, true, Object::DONT_THROW);
+ Handle<JSReceiver>::cast(target), proto, true, kDontThrow);
MAYBE_RETURN(result, isolate->heap()->exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 8a760d0efa..5ce4abd557 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -11,6 +11,7 @@
#include "src/code-stub-assembler.h"
#include "src/counters.h"
#include "src/factory-inl.h"
+#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -118,7 +119,7 @@ void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
// Store through runtime.
// TODO(ishell): Use SetPropertyStub here once available.
Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
- Node* const language_mode = SmiConstant(STRICT);
+ Node* const language_mode = SmiConstant(LanguageMode::kStrict);
CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
language_mode);
}
@@ -310,7 +311,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
#ifdef V8_INTERPRETED_REGEXP
return CallRuntime(Runtime::kRegExpExec, context, regexp, string, last_index,
match_info);
-#else // V8_INTERPRETED_REGEXP
+#else // V8_INTERPRETED_REGEXP
CSA_ASSERT(this, TaggedIsNotSmi(regexp));
CSA_ASSERT(this, IsJSRegExp(regexp));
@@ -343,15 +344,15 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// must fail.
Label if_failure(this);
- Node* const smi_string_length = LoadStringLength(string);
- {
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
- Node* const last_index_is_not_smi = TaggedIsNotSmi(last_index);
- Node* const last_index_is_oob =
- SmiGreaterThan(last_index, smi_string_length);
- GotoIf(Word32Or(last_index_is_not_smi, last_index_is_oob), &if_failure);
- }
+
+ CSA_ASSERT(this, IsNumberNormalized(last_index));
+ CSA_ASSERT(this, IsNumberPositive(last_index));
+ GotoIf(TaggedIsNotSmi(last_index), &if_failure);
+
+ Node* const int_string_length = LoadStringLengthAsWord(string);
+ Node* const int_last_index = SmiUntag(last_index);
+
+ GotoIf(UintPtrGreaterThan(int_last_index, int_string_length), &if_failure);
Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
{
@@ -407,14 +408,11 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Load the irregexp code object and offsets into the subject string. Both
// depend on whether the string is one- or two-byte.
- Node* const int_last_index = SmiUntag(last_index);
-
VARIABLE(var_string_start, MachineType::PointerRepresentation());
VARIABLE(var_string_end, MachineType::PointerRepresentation());
VARIABLE(var_code, MachineRepresentation::kTagged);
{
- Node* const int_string_length = SmiUntag(smi_string_length);
Node* const direct_string_data = to_direct.PointerToData(&runtime);
Label next(this), if_isonebyte(this), if_istwobyte(this, Label::kDeferred);
@@ -653,7 +651,6 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Node* const context, Node* const regexp, Node* const string,
Label* if_didnotmatch, const bool is_fastpath) {
- Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(0);
@@ -714,14 +711,14 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Label if_isoob(this, Label::kDeferred);
GotoIfNot(TaggedIsSmi(lastindex), &if_isoob);
- Node* const string_length = LoadStringLength(string);
+ TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
GotoIfNot(SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
Goto(&run_exec);
BIND(&if_isoob);
{
StoreLastIndex(context, regexp, smi_zero, is_fastpath);
- var_result.Bind(null);
+ var_result.Bind(NullConstant());
Goto(if_didnotmatch);
}
}
@@ -749,7 +746,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// {match_indices} is either null or the RegExpMatchInfo array.
// Return early if exec failed, possibly updating last index.
- GotoIfNot(WordEqual(match_indices, null), &successful_match);
+ GotoIfNot(IsNull(match_indices), &successful_match);
GotoIfNot(should_update_last_index, if_didnotmatch);
@@ -779,8 +776,6 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
Node* const regexp,
Node* const string,
const bool is_fastpath) {
- Node* const null = NullConstant();
-
VARIABLE(var_result, MachineRepresentation::kTagged);
Label if_didnotmatch(this), out(this);
@@ -798,7 +793,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
BIND(&if_didnotmatch);
{
- var_result.Bind(null);
+ var_result.Bind(NullConstant());
Goto(&out);
}
@@ -839,6 +834,11 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
Label out(this);
VARIABLE(var_result, MachineRepresentation::kWord32);
+#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+ var_result.Bind(Int32Constant(0));
+ GotoIfForceSlowPath(&out);
+#endif
+
Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
@@ -876,6 +876,8 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
Label* const if_ismodified) {
CSA_ASSERT(this, WordEqual(LoadMap(object), map));
+ GotoIfForceSlowPath(if_ismodified);
+
// TODO(ishell): Update this check once map changes for constant field
// tracking are landing.
@@ -976,7 +978,8 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
// Callers ensure that last_index is in-bounds.
CSA_ASSERT(this,
- SmiLessThanOrEqual(last_index, LoadStringLength(subject_string)));
+ UintPtrLessThanOrEqual(SmiUntag(last_index),
+ LoadStringLengthAsWord(subject_string)));
Node* const needle_string =
LoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex);
@@ -993,12 +996,14 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
BIND(&if_success);
{
CSA_ASSERT(this, TaggedIsPositiveSmi(match_from));
- CSA_ASSERT(this, SmiLessThan(match_from, LoadStringLength(subject_string)));
+ CSA_ASSERT(this, UintPtrLessThan(SmiUntag(match_from),
+ LoadStringLengthAsWord(subject_string)));
const int kNumRegisters = 2;
STATIC_ASSERT(RegExpMatchInfo::kInitialCaptureIndices >= kNumRegisters);
- Node* const match_to = SmiAdd(match_from, LoadStringLength(needle_string));
+ Node* const match_to =
+ SmiAdd(match_from, LoadStringLengthAsSmi(needle_string));
StoreFixedArrayElement(match_info, RegExpMatchInfo::kNumberOfCapturesIndex,
SmiConstant(kNumRegisters), SKIP_WRITE_BARRIER);
@@ -1057,12 +1062,9 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
bool is_fastpath) {
Isolate* isolate = this->isolate();
- Node* const int_zero = IntPtrConstant(0);
- Node* const int_one = IntPtrConstant(1);
- VARIABLE(var_length, MachineType::PointerRepresentation(), int_zero);
- VARIABLE(var_flags, MachineType::PointerRepresentation());
-
- Node* const is_dotall_enabled = IsDotAllEnabled(isolate);
+ TNode<IntPtrT> const int_one = IntPtrConstant(1);
+ TVARIABLE(Smi, var_length, SmiConstant(0));
+ TVARIABLE(IntPtrT, var_flags);
// First, count the number of characters we will need and check which flags
// are set.
@@ -1071,28 +1073,21 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
// Refer to JSRegExp's flag property on the fast-path.
CSA_ASSERT(this, IsJSRegExp(regexp));
Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const flags_intptr = SmiUntag(flags_smi);
- var_flags.Bind(flags_intptr);
-
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(flags_intptr, FLAG), &next); \
- var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
- Goto(&next); \
- BIND(&next); \
+ var_flags = SmiUntag(flags_smi);
+
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
+ var_length = SmiAdd(var_length, SmiConstant(1)); \
+ Goto(&next); \
+ BIND(&next); \
} while (false)
CASE_FOR_FLAG(JSRegExp::kGlobal);
CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
CASE_FOR_FLAG(JSRegExp::kMultiline);
- {
- Label next(this);
- GotoIfNot(is_dotall_enabled, &next);
- CASE_FOR_FLAG(JSRegExp::kDotAll);
- Goto(&next);
- BIND(&next);
- }
+ CASE_FOR_FLAG(JSRegExp::kDotAll);
CASE_FOR_FLAG(JSRegExp::kUnicode);
CASE_FOR_FLAG(JSRegExp::kSticky);
#undef CASE_FOR_FLAG
@@ -1100,7 +1095,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
DCHECK(!is_fastpath);
// Fall back to GetProperty stub on the slow-path.
- var_flags.Bind(int_zero);
+ var_flags = IntPtrConstant(0);
#define CASE_FOR_FLAG(NAME, FLAG) \
do { \
@@ -1110,8 +1105,8 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Label if_isflagset(this); \
BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
BIND(&if_isflagset); \
- var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
- var_flags.Bind(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
+ var_length = SmiAdd(var_length, SmiConstant(1)); \
+ var_flags = Signed(WordOr(var_flags, IntPtrConstant(FLAG))); \
Goto(&next); \
BIND(&next); \
} while (false)
@@ -1119,13 +1114,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
CASE_FOR_FLAG("global", JSRegExp::kGlobal);
CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase);
CASE_FOR_FLAG("multiline", JSRegExp::kMultiline);
- {
- Label next(this);
- GotoIfNot(is_dotall_enabled, &next);
- CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll);
- Goto(&next);
- BIND(&next);
- }
+ CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll);
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
#undef CASE_FOR_FLAG
@@ -1135,8 +1124,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
// char for each set flag.
{
- Node* const result = AllocateSeqOneByteString(context, var_length.value());
- Node* const flags_intptr = var_flags.value();
+ Node* const result = AllocateSeqOneByteString(context, var_length);
VARIABLE(var_offset, MachineType::PointerRepresentation(),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
@@ -1144,7 +1132,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
#define CASE_FOR_FLAG(FLAG, CHAR) \
do { \
Label next(this); \
- GotoIfNot(IsSetWord(flags_intptr, FLAG), &next); \
+ GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
Node* const value = Int32Constant(CHAR); \
StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
var_offset.value(), value); \
@@ -1156,13 +1144,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
- {
- Label next(this);
- GotoIfNot(is_dotall_enabled, &next);
- CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
- Goto(&next);
- BIND(&next);
- }
+ CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
#undef CASE_FOR_FLAG
@@ -1639,19 +1621,11 @@ TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
"RegExp.prototype.multiline");
}
-Node* RegExpBuiltinsAssembler::IsDotAllEnabled(Isolate* isolate) {
- Node* flag_ptr = ExternalConstant(
- ExternalReference::address_of_regexp_dotall_flag(isolate));
- Node* const flag_value = Load(MachineType::Int8(), flag_ptr);
- return Word32NotEqual(flag_value, Int32Constant(0));
-}
-
// ES #sec-get-regexp.prototype.dotAll
TF_BUILTIN(RegExpPrototypeDotAllGetter, RegExpBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
static const int kNoCounter = -1;
- CSA_ASSERT(this, IsDotAllEnabled(isolate()));
FlagGetter(context, receiver, JSRegExp::kDotAll, kNoCounter,
"RegExp.prototype.dotAll");
}
@@ -1703,7 +1677,7 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
Node* const result = CallJS(call_callable, context, exec, regexp, string);
var_result.Bind(result);
- GotoIf(WordEqual(result, NullConstant()), &out);
+ GotoIf(IsNull(result), &out);
ThrowIfNotJSReceiver(context, result,
MessageTemplate::kInvalidRegExpExecResult, "");
@@ -1762,8 +1736,7 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const match_indices = RegExpExec(context, receiver, string);
// Return true iff exec matched successfully.
- Node* const result =
- SelectBooleanConstant(WordNotEqual(match_indices, NullConstant()));
+ Node* const result = SelectBooleanConstant(IsNotNull(match_indices));
Return(result);
}
}
@@ -1804,15 +1777,16 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
BIND(&if_isunicode);
{
- Node* const string_length = LoadStringLength(string);
- GotoIfNot(SmiLessThan(index_plus_one, string_length), &out);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
+ TNode<IntPtrT> untagged_plus_one = SmiUntag(index_plus_one);
+ GotoIfNot(IntPtrLessThan(untagged_plus_one, string_length), &out);
- Node* const lead = StringCharCodeAt(string, index);
+ Node* const lead = StringCharCodeAt(string, SmiUntag(index));
GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&out);
- Node* const trail = StringCharCodeAt(string, index_plus_one);
+ Node* const trail = StringCharCodeAt(string, untagged_plus_one);
GotoIfNot(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
Int32Constant(0xDC00)),
&out);
@@ -1956,18 +1930,12 @@ class GrowableFixedArray {
CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
- const ElementsKind kind = PACKED_ELEMENTS;
- const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- const CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
- const CodeStubAssembler::AllocationFlags flags =
- CodeStubAssembler::kAllowLargeObjectAllocation;
-
Node* const from_array = var_array_.value();
- Node* const to_array =
- a->AllocateFixedArray(kind, new_capacity, mode, flags);
- a->CopyFixedArrayElements(kind, from_array, kind, to_array, element_count,
- new_capacity, barrier_mode, mode);
+
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
+ Node* to_array = a->ExtractFixedArray(from_array, nullptr, element_count,
+ new_capacity, flags);
return to_array;
}
@@ -1988,7 +1956,6 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
CSA_ASSERT(this, IsString(string));
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
- Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(0);
@@ -2050,7 +2017,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const result = RegExpExec(context, regexp, string);
Label load_match(this);
- Branch(WordEqual(result, null), &if_didnotmatch, &load_match);
+ Branch(IsNull(result), &if_didnotmatch, &load_match);
BIND(&load_match);
{
@@ -2063,7 +2030,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const match = LoadFixedArrayElement(result_fixed_array, 0);
// The match is guaranteed to be a string on the fast path.
- CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(match)));
+ CSA_ASSERT(this, IsString(match));
var_match.Bind(match);
Goto(&if_didmatch);
@@ -2083,7 +2050,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// Return null if there were no matches, otherwise just exit the loop.
GotoIfNot(IntPtrEqual(array.length(), int_zero), &out);
- Return(null);
+ Return(NullConstant());
}
BIND(&if_didmatch);
@@ -2096,8 +2063,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Advance last index if the match is the empty string.
- Node* const match_length = LoadStringLength(match);
- GotoIfNot(SmiEqual(match_length, smi_zero), &loop);
+ TNode<Smi> const match_length = LoadStringLengthAsSmi(match);
+ GotoIfNot(SmiEqual(match_length, SmiConstant(0)), &loop);
Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
if (is_fastpath) {
@@ -2153,12 +2120,25 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
BIND(&fast_path);
- RegExpPrototypeMatchBody(context, receiver, string, true);
+ // TODO(pwong): Could be optimized to remove the overhead of calling the
+ // builtin (at the cost of a larger builtin).
+ Return(CallBuiltin(Builtins::kRegExpMatchFast, context, receiver, string));
BIND(&slow_path);
RegExpPrototypeMatchBody(context, receiver, string, false);
}
+// Helper that skips a few initial checks and assumes...
+// 1) receiver is a "fast" RegExp
+// 2) pattern is a string
+TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const string = Parameter(Descriptor::kPattern);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ RegExpPrototypeMatchBody(context, receiver, string, true);
+}
+
void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
Node* const context, Node* const regexp, Node* const string) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
@@ -2236,7 +2216,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
// Return -1 if no match was found.
{
Label next(this);
- GotoIfNot(WordEqual(exec_result, NullConstant()), &next);
+ GotoIfNot(IsNull(exec_result), &next);
Return(SmiConstant(-1));
BIND(&next);
}
@@ -2281,12 +2261,25 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
BIND(&fast_path);
- RegExpPrototypeSearchBodyFast(context, receiver, string);
+ // TODO(pwong): Could be optimized to remove the overhead of calling the
+ // builtin (at the cost of a larger builtin).
+ Return(CallBuiltin(Builtins::kRegExpSearchFast, context, receiver, string));
BIND(&slow_path);
RegExpPrototypeSearchBodySlow(context, receiver, string);
}
+// Helper that skips a few initial checks and assumes...
+// 1) receiver is a "fast" RegExp
+// 2) pattern is a string
+TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const string = Parameter(Descriptor::kPattern);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ RegExpPrototypeSearchBodyFast(context, receiver, string);
+}
+
// Generates the fast path for @@split. {regexp} is an unmodified, non-sticky
// JSRegExp, {string} is a String, and {limit} is a Smi.
void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
@@ -2298,10 +2291,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
CSA_ASSERT(this, TaggedIsSmi(limit));
CSA_ASSERT(this, IsString(string));
- Node* const null = NullConstant();
- Node* const smi_zero = SmiConstant(0);
- Node* const int_zero = IntPtrConstant(0);
- Node* const int_limit = SmiUntag(limit);
+ TNode<Smi> const smi_zero = SmiConstant(0);
+ TNode<IntPtrT> const int_zero = IntPtrConstant(0);
+ TNode<IntPtrT> const int_limit = SmiUntag(limit);
const ElementsKind kind = PACKED_ELEMENTS;
const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
@@ -2319,7 +2311,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&next);
}
- Node* const string_length = LoadStringLength(string);
+ TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
// If passed the empty {string}, return either an empty array or a singleton
// array depending on whether the {regexp} matches.
@@ -2336,7 +2328,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
smi_zero, last_match_info);
Label return_singleton_array(this);
- Branch(WordEqual(match_indices, null), &return_singleton_array,
+ Branch(IsNull(match_indices), &return_singleton_array,
&return_empty_array);
BIND(&return_singleton_array);
@@ -2400,7 +2392,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// We're done if no match was found.
{
Label next(this);
- Branch(WordEqual(match_indices, null), &push_suffix_and_out, &next);
+ Branch(IsNull(match_indices), &push_suffix_and_out, &next);
BIND(&next);
}
@@ -2410,7 +2402,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// We're done if the match starts beyond the string.
{
Label next(this);
- Branch(WordEqual(match_from, string_length), &push_suffix_and_out, &next);
+ Branch(SmiEqual(match_from, string_length), &push_suffix_and_out, &next);
BIND(&next);
}
@@ -2654,11 +2646,10 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Isolate* const isolate = this->isolate();
- Node* const null = NullConstant();
Node* const undefined = UndefinedConstant();
- Node* const int_zero = IntPtrConstant(0);
- Node* const int_one = IntPtrConstant(1);
- Node* const smi_zero = SmiConstant(0);
+ TNode<IntPtrT> const int_zero = IntPtrConstant(0);
+ TNode<IntPtrT> const int_one = IntPtrConstant(1);
+ TNode<Smi> const smi_zero = SmiConstant(0);
Node* const native_context = LoadNativeContext(context);
@@ -2673,8 +2664,8 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
{
ElementsKind kind = PACKED_ELEMENTS;
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
- Node* const capacity = IntPtrConstant(16);
- Node* const length = smi_zero;
+ TNode<IntPtrT> const capacity = IntPtrConstant(16);
+ TNode<Smi> const length = smi_zero;
Node* const allocation_site = nullptr;
ParameterMode capacity_mode = CodeStubAssembler::INTPTR_PARAMETERS;
@@ -2693,7 +2684,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// If no matches, return the subject string.
var_result.Bind(string);
- GotoIf(WordEqual(res, null), &out);
+ GotoIf(IsNull(res), &out);
// Reload last match info since it might have changed.
last_match_info =
@@ -2720,22 +2711,19 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// input string and some replacements that were returned from the replace
// function.
- VARIABLE(var_match_start, MachineRepresentation::kTagged);
- var_match_start.Bind(smi_zero);
+ TVARIABLE(Smi, var_match_start, smi_zero);
- Node* const end = SmiUntag(res_length);
- VARIABLE(var_i, MachineType::PointerRepresentation());
- var_i.Bind(int_zero);
+ TNode<IntPtrT> const end = SmiUntag(res_length);
+ TVARIABLE(IntPtrT, var_i, int_zero);
Variable* vars[] = {&var_i, &var_match_start};
Label loop(this, 2, vars);
Goto(&loop);
BIND(&loop);
{
- Node* const i = var_i.value();
- GotoIfNot(IntPtrLessThan(i, end), &create_result);
+ GotoIfNot(IntPtrLessThan(var_i, end), &create_result);
- Node* const elem = LoadFixedArrayElement(res_elems, i);
+ Node* const elem = LoadFixedArrayElement(res_elems, var_i);
Label if_issmi(this), if_isstring(this), loop_epilogue(this);
Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
@@ -2753,19 +2741,17 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
TNode<IntPtrT> new_match_start =
Signed(IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
WordAnd(int_elem, IntPtrConstant(0x7ff))));
- var_match_start.Bind(SmiTag(new_match_start));
+ var_match_start = SmiTag(new_match_start);
Goto(&loop_epilogue);
}
BIND(&if_isnegativeorzero);
{
- Node* const next_i = IntPtrAdd(i, int_one);
- var_i.Bind(next_i);
+ var_i = IntPtrAdd(var_i, int_one);
- Node* const next_elem = LoadFixedArrayElement(res_elems, next_i);
+ Node* const next_elem = LoadFixedArrayElement(res_elems, var_i);
- Node* const new_match_start = SmiSub(next_elem, elem);
- var_match_start.Bind(new_match_start);
+ var_match_start = SmiSub(next_elem, elem);
Goto(&loop_epilogue);
}
}
@@ -2775,24 +2761,23 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
CSA_ASSERT(this, IsString(elem));
Callable call_callable = CodeFactory::Call(isolate);
+ TNode<Smi> match_start = var_match_start;
Node* const replacement_obj =
CallJS(call_callable, context, replace_callable, undefined, elem,
- var_match_start.value(), string);
+ match_start, string);
Node* const replacement_str = ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, i, replacement_str);
+ StoreFixedArrayElement(res_elems, var_i, replacement_str);
- Node* const elem_length = LoadStringLength(elem);
- Node* const new_match_start =
- SmiAdd(var_match_start.value(), elem_length);
- var_match_start.Bind(new_match_start);
+ TNode<Smi> const elem_length = LoadStringLengthAsSmi(elem);
+ var_match_start = SmiAdd(match_start, elem_length);
Goto(&loop_epilogue);
}
BIND(&loop_epilogue);
{
- var_i.Bind(IntPtrAdd(var_i.value(), int_one));
+ var_i = IntPtrAdd(var_i, int_one);
Goto(&loop);
}
}
@@ -2863,57 +2848,43 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
- Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(0);
+ const bool kIsFastPath = true;
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, IsString(replace_string));
CSA_ASSERT(this, IsString(string));
- Label out(this);
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- // Load the last match info.
- Node* const native_context = LoadNativeContext(context);
- Node* const last_match_info =
- LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+ VARIABLE(var_match_indices, MachineRepresentation::kTagged);
+ VARIABLE(var_last_match_end, MachineRepresentation::kTagged, smi_zero);
+ VARIABLE(var_is_unicode, MachineRepresentation::kWord32, Int32Constant(0));
+ Variable* vars[] = {&var_result, &var_last_match_end};
+ Label out(this), loop(this, 2, vars), loop_end(this),
+ if_nofurthermatches(this);
// Is {regexp} global?
- Label if_isglobal(this), if_isnonglobal(this);
- Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const is_global =
- WordAnd(SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal));
- Branch(WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
+ Node* const is_global = FastFlagGetter(regexp, JSRegExp::kGlobal);
+ GotoIfNot(is_global, &loop);
- BIND(&if_isglobal);
- {
- // Hand off global regexps to runtime.
- FastStoreLastIndex(regexp, smi_zero);
- Node* const result =
- CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
- string, regexp, replace_string, last_match_info);
- var_result.Bind(result);
- Goto(&out);
- }
+ var_is_unicode.Bind(FastFlagGetter(regexp, JSRegExp::kUnicode));
+ FastStoreLastIndex(regexp, smi_zero);
+ Goto(&loop);
- BIND(&if_isnonglobal);
+ BIND(&loop);
{
- // Run exec, then manually construct the resulting string.
- Label if_didnotmatch(this);
- Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
- context, regexp, string, &if_didnotmatch, true);
+ var_match_indices.Bind(RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_nofurthermatches, kIsFastPath));
// Successful match.
{
- Node* const subject_start = smi_zero;
Node* const match_start = LoadFixedArrayElement(
- match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ var_match_indices.value(), RegExpMatchInfo::kFirstCaptureIndex);
Node* const match_end = LoadFixedArrayElement(
- match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- Node* const subject_end = LoadStringLength(string);
+ var_match_indices.value(), RegExpMatchInfo::kFirstCaptureIndex + 1);
Label if_replaceisempty(this), if_replaceisnotempty(this);
- Node* const replace_length = LoadStringLength(replace_string);
+ TNode<Smi> const replace_length = LoadStringLengthAsSmi(replace_string);
Branch(SmiEqual(replace_length, smi_zero), &if_replaceisempty,
&if_replaceisnotempty);
@@ -2921,40 +2892,52 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
{
// TODO(jgruber): We could skip many of the checks that using SubString
// here entails.
-
Node* const first_part =
- SubString(context, string, subject_start, match_start);
- Node* const second_part =
- SubString(context, string, match_end, subject_end);
+ SubString(context, string, var_last_match_end.value(), match_start);
- Node* const result = StringAdd(context, first_part, second_part);
+ Node* const result = StringAdd(context, var_result.value(), first_part);
var_result.Bind(result);
- Goto(&out);
+ Goto(&loop_end);
}
BIND(&if_replaceisnotempty);
{
Node* const first_part =
- SubString(context, string, subject_start, match_start);
- Node* const second_part = replace_string;
- Node* const third_part =
- SubString(context, string, match_end, subject_end);
-
- Node* result = StringAdd(context, first_part, second_part);
- result = StringAdd(context, result, third_part);
+ SubString(context, string, var_last_match_end.value(), match_start);
+ Node* result = StringAdd(context, var_result.value(), first_part);
+ result = StringAdd(context, result, replace_string);
var_result.Bind(result);
- Goto(&out);
+ Goto(&loop_end);
}
- }
- BIND(&if_didnotmatch);
- {
- var_result.Bind(string);
- Goto(&out);
+ BIND(&loop_end);
+ {
+ var_last_match_end.Bind(match_end);
+ // Non-global case ends here after the first replacement.
+ GotoIfNot(is_global, &if_nofurthermatches);
+
+ GotoIf(SmiNotEqual(match_end, match_start), &loop);
+ // If match is the empty string, we have to increment lastIndex.
+ Node* const this_index = FastLoadLastIndex(regexp);
+ Node* const next_index = AdvanceStringIndex(
+ string, this_index, var_is_unicode.value(), kIsFastPath);
+ FastStoreLastIndex(regexp, next_index);
+ Goto(&loop);
+ }
}
}
+ BIND(&if_nofurthermatches);
+ {
+ TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
+ Node* const last_part =
+ SubString(context, string, var_last_match_end.value(), string_length);
+ Node* const result = StringAdd(context, var_result.value(), last_part);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
BIND(&out);
return var_result.value();
}
@@ -3055,7 +3038,7 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
// if (replace.contains("$")) {
// CallRuntime(RegExpReplace)
// } else {
- // ReplaceSimpleStringFastPath() // Bails to runtime for global regexps.
+ // ReplaceSimpleStringFastPath()
// }
// }
@@ -3088,7 +3071,6 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
Node* const string = Parameter(Descriptor::kString);
Node* const context = Parameter(Descriptor::kContext);
- Node* const null = NullConstant();
Node* const smi_zero = SmiConstant(0);
CSA_ASSERT(this, IsJSRegExp(regexp));
@@ -3101,6 +3083,7 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
Node* const match_indices = RegExpExecInternal(context, regexp, string,
smi_zero, internal_match_info);
+ Node* const null = NullConstant();
Label if_matched(this), if_didnotmatch(this);
Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 89b3c59803..c8a94b7293 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -90,9 +90,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
void FlagGetter(Node* context, Node* receiver, JSRegExp::Flag flag,
int counter, const char* method_name);
- // Utility method, remove once dotall is unstaged.
- Node* IsDotAllEnabled(Isolate* isolate);
-
Node* IsRegExp(Node* const context, Node* const maybe_receiver);
Node* RegExpInitialize(Node* const context, Node* const regexp,
Node* const maybe_pattern, Node* const maybe_flags);
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index e9abd31e46..6122ff85da 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -46,9 +46,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
GotoIf(TaggedIsSmi(tagged), &invalid);
// Fail if the array's instance type is not JSTypedArray.
- GotoIf(Word32NotEqual(LoadInstanceType(tagged),
- Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &invalid);
+ GotoIfNot(InstanceTypeEqual(LoadInstanceType(tagged), JS_TYPED_ARRAY_TYPE),
+ &invalid);
// Fail if the array's JSArrayBuffer is not shared.
Node* array_buffer = LoadObjectField(tagged, JSTypedArray::kBufferOffset);
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 8d407b35e6..9d86f3105b 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -14,10 +14,9 @@
namespace v8 {
namespace internal {
-typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
typedef compiler::Node Node;
-template <class A>
-using TNode = compiler::TNode<A>;
+template <class T>
+using TNode = compiler::TNode<T>;
Node* StringBuiltinsAssembler::DirectStringData(Node* string,
Node* string_instance_type) {
@@ -163,33 +162,13 @@ void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
Node* right) {
- // Here's pseudo-code for the algorithm below:
- //
- // if (lhs->length() != rhs->length()) return false;
- // restart:
- // if (lhs == rhs) return true;
- // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
- // return false;
- // }
- // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
- // for (i = 0; i != lhs->length(); ++i) {
- // if (lhs[i] != rhs[i]) return false;
- // }
- // return true;
- // }
- // if (lhs and/or rhs are indirect strings) {
- // unwrap them and restart from the "restart:" label;
- // }
- // return %StringEqual(lhs, rhs);
-
VARIABLE(var_left, MachineRepresentation::kTagged, left);
VARIABLE(var_right, MachineRepresentation::kTagged, right);
- Variable* input_vars[2] = {&var_left, &var_right};
- Label if_equal(this), if_notequal(this), if_notbothdirectonebytestrings(this),
- restart(this, 2, input_vars);
+ Label if_equal(this), if_notequal(this), if_indirect(this, Label::kDeferred),
+ restart(this, {&var_left, &var_right});
- Node* lhs_length = LoadStringLength(left);
- Node* rhs_length = LoadStringLength(right);
+ TNode<IntPtrT> lhs_length = LoadStringLengthAsWord(left);
+ TNode<IntPtrT> rhs_length = LoadStringLengthAsWord(right);
// Strings with different lengths cannot be equal.
GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
@@ -202,16 +181,14 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
Node* lhs_instance_type = LoadInstanceType(lhs);
Node* rhs_instance_type = LoadInstanceType(rhs);
- StringEqual_Core(context, lhs, lhs_instance_type, lhs_length, rhs,
- rhs_instance_type, &if_equal, &if_notequal,
- &if_notbothdirectonebytestrings);
+ StringEqual_Core(context, lhs, lhs_instance_type, rhs, rhs_instance_type,
+ lhs_length, &if_equal, &if_notequal, &if_indirect);
- BIND(&if_notbothdirectonebytestrings);
+ BIND(&if_indirect);
{
// Try to unwrap indirect strings, restart the above attempt on success.
MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
rhs_instance_type, &restart);
- // TODO(bmeurer): Add support for two byte string equality checks.
TailCallRuntime(Runtime::kStringEqual, context, lhs, rhs);
}
@@ -224,13 +201,13 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
}
void StringBuiltinsAssembler::StringEqual_Core(
- Node* context, Node* lhs, Node* lhs_instance_type, Node* lhs_length,
- Node* rhs, Node* rhs_instance_type, Label* if_equal, Label* if_not_equal,
- Label* if_notbothdirectonebyte) {
+ Node* context, Node* lhs, Node* lhs_instance_type, Node* rhs,
+ Node* rhs_instance_type, TNode<IntPtrT> length, Label* if_equal,
+ Label* if_not_equal, Label* if_indirect) {
CSA_ASSERT(this, IsString(lhs));
CSA_ASSERT(this, IsString(rhs));
- CSA_ASSERT(this, WordEqual(LoadStringLength(lhs), lhs_length));
- CSA_ASSERT(this, WordEqual(LoadStringLength(rhs), lhs_length));
+ CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
+ CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Fast check to see if {lhs} and {rhs} refer to the same String object.
GotoIf(WordEqual(lhs, rhs), if_equal);
@@ -249,56 +226,103 @@ void StringBuiltinsAssembler::StringEqual_Core(
Int32Constant(kBothInternalizedTag)),
if_not_equal);
- // Check that both {lhs} and {rhs} are flat one-byte strings, and that
- // in case of ExternalStrings the data pointer is cached..
+ // Check if both {lhs} and {rhs} are direct strings, and that in case of
+ // ExternalStrings the data pointer is cached.
STATIC_ASSERT(kShortExternalStringTag != 0);
- int const kBothDirectOneByteStringMask =
- kStringEncodingMask | kIsIndirectStringMask | kShortExternalStringMask |
- ((kStringEncodingMask | kIsIndirectStringMask | kShortExternalStringMask)
- << 8);
- int const kBothDirectOneByteStringTag =
- kOneByteStringTag | (kOneByteStringTag << 8);
+ STATIC_ASSERT(kIsIndirectStringTag != 0);
+ int const kBothDirectStringMask =
+ kIsIndirectStringMask | kShortExternalStringMask |
+ ((kIsIndirectStringMask | kShortExternalStringMask) << 8);
GotoIfNot(Word32Equal(Word32And(both_instance_types,
- Int32Constant(kBothDirectOneByteStringMask)),
- Int32Constant(kBothDirectOneByteStringTag)),
- if_notbothdirectonebyte);
+ Int32Constant(kBothDirectStringMask)),
+ Int32Constant(0)),
+ if_indirect);
+
+ // Dispatch based on the {lhs} and {rhs} string encoding.
+ int const kBothStringEncodingMask =
+ kStringEncodingMask | (kStringEncodingMask << 8);
+ int const kOneOneByteStringTag = kOneByteStringTag | (kOneByteStringTag << 8);
+ int const kTwoTwoByteStringTag = kTwoByteStringTag | (kTwoByteStringTag << 8);
+ int const kOneTwoByteStringTag = kOneByteStringTag | (kTwoByteStringTag << 8);
+ Label if_oneonebytestring(this), if_twotwobytestring(this),
+ if_onetwobytestring(this), if_twoonebytestring(this);
+ Node* masked_instance_types =
+ Word32And(both_instance_types, Int32Constant(kBothStringEncodingMask));
+ GotoIf(
+ Word32Equal(masked_instance_types, Int32Constant(kOneOneByteStringTag)),
+ &if_oneonebytestring);
+ GotoIf(
+ Word32Equal(masked_instance_types, Int32Constant(kTwoTwoByteStringTag)),
+ &if_twotwobytestring);
+ Branch(
+ Word32Equal(masked_instance_types, Int32Constant(kOneTwoByteStringTag)),
+ &if_onetwobytestring, &if_twoonebytestring);
+
+ BIND(&if_oneonebytestring);
+ StringEqual_Loop(lhs, lhs_instance_type, MachineType::Uint8(), rhs,
+ rhs_instance_type, MachineType::Uint8(), length, if_equal,
+ if_not_equal);
+
+ BIND(&if_twotwobytestring);
+ StringEqual_Loop(lhs, lhs_instance_type, MachineType::Uint16(), rhs,
+ rhs_instance_type, MachineType::Uint16(), length, if_equal,
+ if_not_equal);
+
+ BIND(&if_onetwobytestring);
+ StringEqual_Loop(lhs, lhs_instance_type, MachineType::Uint8(), rhs,
+ rhs_instance_type, MachineType::Uint16(), length, if_equal,
+ if_not_equal);
+
+ BIND(&if_twoonebytestring);
+ StringEqual_Loop(lhs, lhs_instance_type, MachineType::Uint16(), rhs,
+ rhs_instance_type, MachineType::Uint8(), length, if_equal,
+ if_not_equal);
+}
- // At this point we know that we have two direct one-byte strings.
+void StringBuiltinsAssembler::StringEqual_Loop(
+ Node* lhs, Node* lhs_instance_type, MachineType lhs_type, Node* rhs,
+ Node* rhs_instance_type, MachineType rhs_type, TNode<IntPtrT> length,
+ Label* if_equal, Label* if_not_equal) {
+ CSA_ASSERT(this, IsString(lhs));
+ CSA_ASSERT(this, IsString(rhs));
+ CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
+ CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Compute the effective offset of the first character.
Node* lhs_data = DirectStringData(lhs, lhs_instance_type);
Node* rhs_data = DirectStringData(rhs, rhs_instance_type);
- // Compute the first offset after the string from the length.
- Node* length = SmiUntag(lhs_length);
-
// Loop over the {lhs} and {rhs} strings to see if they are equal.
- VARIABLE(var_offset, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_offset, IntPtrConstant(0));
Label loop(this, &var_offset);
- var_offset.Bind(IntPtrConstant(0));
Goto(&loop);
BIND(&loop);
{
// If {offset} equals {end}, no difference was found, so the
// strings are equal.
- Node* offset = var_offset.value();
- GotoIf(WordEqual(offset, length), if_equal);
+ GotoIf(WordEqual(var_offset, length), if_equal);
// Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs_data, offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs_data, offset);
+ Node* lhs_value =
+ Load(lhs_type, lhs_data,
+ WordShl(var_offset, ElementSizeLog2Of(lhs_type.representation())));
+ Node* rhs_value =
+ Load(rhs_type, rhs_data,
+ WordShl(var_offset, ElementSizeLog2Of(rhs_type.representation())));
// Check if the characters match.
GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
// Advance to next character.
- var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
+ var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
Goto(&loop);
}
}
-void StringBuiltinsAssembler::GenerateStringRelationalComparison(
- Node* context, Node* left, Node* right, RelationalComparisonMode mode) {
+void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
+ Node* left,
+ Node* right,
+ Operation op) {
VARIABLE(var_left, MachineRepresentation::kTagged, left);
VARIABLE(var_right, MachineRepresentation::kTagged, right);
@@ -338,36 +362,34 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
BIND(&if_bothonebyteseqstrings);
{
// Load the length of {lhs} and {rhs}.
- Node* lhs_length = LoadStringLength(lhs);
- Node* rhs_length = LoadStringLength(rhs);
+ TNode<IntPtrT> lhs_length = LoadStringLengthAsWord(lhs);
+ TNode<IntPtrT> rhs_length = LoadStringLengthAsWord(rhs);
// Determine the minimum length.
- Node* length = SmiMin(lhs_length, rhs_length);
+ TNode<IntPtrT> length = IntPtrMin(lhs_length, rhs_length);
// Compute the effective offset of the first character.
- Node* begin =
+ TNode<IntPtrT> begin =
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
// Compute the first offset after the string from the length.
- Node* end = IntPtrAdd(begin, SmiUntag(length));
+ TNode<IntPtrT> end = IntPtrAdd(begin, length);
// Loop over the {lhs} and {rhs} strings to see if they are equal.
- VARIABLE(var_offset, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_offset, begin);
Label loop(this, &var_offset);
- var_offset.Bind(begin);
Goto(&loop);
BIND(&loop);
{
// Check if {offset} equals {end}.
- Node* offset = var_offset.value();
Label if_done(this), if_notdone(this);
- Branch(WordEqual(offset, end), &if_done, &if_notdone);
+ Branch(WordEqual(var_offset, end), &if_done, &if_notdone);
BIND(&if_notdone);
{
// Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset);
// Check if the characters match.
Label if_valueissame(this), if_valueisnotsame(this);
@@ -377,7 +399,7 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
BIND(&if_valueissame);
{
// Advance to next character.
- var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
+ var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
}
Goto(&loop);
@@ -389,8 +411,8 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
{
// All characters up to the min length are equal, decide based on
// string length.
- GotoIf(SmiEqual(lhs_length, rhs_length), &if_equal);
- BranchIfSmiLessThan(lhs_length, rhs_length, &if_less, &if_greater);
+ GotoIf(IntPtrEqual(lhs_length, rhs_length), &if_equal);
+ Branch(IntPtrLessThan(lhs_length, rhs_length), &if_less, &if_greater);
}
}
}
@@ -401,59 +423,67 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
rhs_instance_type, &restart);
// TODO(bmeurer): Add support for two byte string relational comparisons.
- switch (mode) {
- case RelationalComparisonMode::kLessThan:
+ switch (op) {
+ case Operation::kLessThan:
TailCallRuntime(Runtime::kStringLessThan, context, lhs, rhs);
break;
- case RelationalComparisonMode::kLessThanOrEqual:
+ case Operation::kLessThanOrEqual:
TailCallRuntime(Runtime::kStringLessThanOrEqual, context, lhs, rhs);
break;
- case RelationalComparisonMode::kGreaterThan:
+ case Operation::kGreaterThan:
TailCallRuntime(Runtime::kStringGreaterThan, context, lhs, rhs);
break;
- case RelationalComparisonMode::kGreaterThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
TailCallRuntime(Runtime::kStringGreaterThanOrEqual, context, lhs, rhs);
break;
+ default:
+ UNREACHABLE();
}
}
BIND(&if_less);
- switch (mode) {
- case RelationalComparisonMode::kLessThan:
- case RelationalComparisonMode::kLessThanOrEqual:
- Return(BooleanConstant(true));
+ switch (op) {
+ case Operation::kLessThan:
+ case Operation::kLessThanOrEqual:
+ Return(TrueConstant());
break;
- case RelationalComparisonMode::kGreaterThan:
- case RelationalComparisonMode::kGreaterThanOrEqual:
- Return(BooleanConstant(false));
+ case Operation::kGreaterThan:
+ case Operation::kGreaterThanOrEqual:
+ Return(FalseConstant());
break;
+ default:
+ UNREACHABLE();
}
BIND(&if_equal);
- switch (mode) {
- case RelationalComparisonMode::kLessThan:
- case RelationalComparisonMode::kGreaterThan:
- Return(BooleanConstant(false));
+ switch (op) {
+ case Operation::kLessThan:
+ case Operation::kGreaterThan:
+ Return(FalseConstant());
break;
- case RelationalComparisonMode::kLessThanOrEqual:
- case RelationalComparisonMode::kGreaterThanOrEqual:
- Return(BooleanConstant(true));
+ case Operation::kLessThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
+ Return(TrueConstant());
break;
+ default:
+ UNREACHABLE();
}
BIND(&if_greater);
- switch (mode) {
- case RelationalComparisonMode::kLessThan:
- case RelationalComparisonMode::kLessThanOrEqual:
- Return(BooleanConstant(false));
+ switch (op) {
+ case Operation::kLessThan:
+ case Operation::kLessThanOrEqual:
+ Return(FalseConstant());
break;
- case RelationalComparisonMode::kGreaterThan:
- case RelationalComparisonMode::kGreaterThanOrEqual:
- Return(BooleanConstant(true));
+ case Operation::kGreaterThan:
+ case Operation::kGreaterThanOrEqual:
+ Return(TrueConstant());
break;
+ default:
+ UNREACHABLE();
}
}
@@ -469,15 +499,15 @@ TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
Node* left = Parameter(Descriptor::kLeft);
Node* right = Parameter(Descriptor::kRight);
GenerateStringRelationalComparison(context, left, right,
- RelationalComparisonMode::kLessThan);
+ Operation::kLessThan);
}
TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* left = Parameter(Descriptor::kLeft);
Node* right = Parameter(Descriptor::kRight);
- GenerateStringRelationalComparison(
- context, left, right, RelationalComparisonMode::kLessThanOrEqual);
+ GenerateStringRelationalComparison(context, left, right,
+ Operation::kLessThanOrEqual);
}
TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
@@ -485,15 +515,15 @@ TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
Node* left = Parameter(Descriptor::kLeft);
Node* right = Parameter(Descriptor::kRight);
GenerateStringRelationalComparison(context, left, right,
- RelationalComparisonMode::kGreaterThan);
+ Operation::kGreaterThan);
}
TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* left = Parameter(Descriptor::kLeft);
Node* right = Parameter(Descriptor::kRight);
- GenerateStringRelationalComparison(
- context, left, right, RelationalComparisonMode::kGreaterThanOrEqual);
+ GenerateStringRelationalComparison(context, left, right,
+ Operation::kGreaterThanOrEqual);
}
TF_BUILTIN(StringCharAt, CodeStubAssembler) {
@@ -501,7 +531,7 @@ TF_BUILTIN(StringCharAt, CodeStubAssembler) {
Node* position = Parameter(Descriptor::kPosition);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position, INTPTR_PARAMETERS);
+ Node* code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}
Node* result = StringFromCharCode(code);
@@ -513,7 +543,7 @@ TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
Node* position = Parameter(Descriptor::kPosition);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position, INTPTR_PARAMETERS);
+ Node* code = StringCharCodeAt(receiver, position);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
@@ -528,17 +558,16 @@ TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
Node* context = Parameter(BuiltinDescriptor::kContext);
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- // From now on use word-size argc value.
- argc = arguments.GetLength();
-
+ TNode<Smi> smi_argc = SmiTag(arguments.GetLength());
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
- Branch(WordEqual(argc, IntPtrConstant(1)), &if_oneargument,
+ Branch(Word32Equal(argc, Int32Constant(1)), &if_oneargument,
&if_notoneargument);
BIND(&if_oneargument);
@@ -558,16 +587,16 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
{
Label two_byte(this);
// Assume that the resulting string contains only one-byte characters.
- Node* one_byte_result = AllocateSeqOneByteString(context, argc);
+ Node* one_byte_result = AllocateSeqOneByteString(context, smi_argc);
- VARIABLE(max_index, MachineType::PointerRepresentation());
- max_index.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_max_index);
+ var_max_index = IntPtrConstant(0);
// Iterate over the incoming arguments, converting them to 8-bit character
// codes. Stop if any of the conversions generates a code that doesn't fit
// in 8 bits.
- CodeStubAssembler::VariableList vars({&max_index}, zone());
- arguments.ForEach(vars, [this, context, &two_byte, &max_index, &code16,
+ CodeStubAssembler::VariableList vars({&var_max_index}, zone());
+ arguments.ForEach(vars, [this, context, &two_byte, &var_max_index, &code16,
one_byte_result](Node* arg) {
Node* code32 = TruncateTaggedToWord32(context, arg);
code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
@@ -578,12 +607,11 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// The {code16} fits into the SeqOneByteString {one_byte_result}.
Node* offset = ElementOffsetFromIndex(
- max_index.value(), UINT8_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
+ var_max_index, UINT8_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
offset, code16);
- max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
+ var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
});
arguments.PopAndReturn(one_byte_result);
@@ -592,44 +620,42 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// At least one of the characters in the string requires a 16-bit
// representation. Allocate a SeqTwoByteString to hold the resulting
// string.
- Node* two_byte_result = AllocateSeqTwoByteString(context, argc);
+ Node* two_byte_result = AllocateSeqTwoByteString(context, smi_argc);
// Copy the characters that have already been put in the 8-bit string into
// their corresponding positions in the new 16-bit string.
- Node* zero = IntPtrConstant(0);
+ TNode<IntPtrT> zero = IntPtrConstant(0);
CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
- max_index.value(), String::ONE_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ var_max_index, String::ONE_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
// Write the character that caused the 8-bit to 16-bit fault.
- Node* max_index_offset =
- ElementOffsetFromIndex(max_index.value(), UINT16_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ Node* max_index_offset = ElementOffsetFromIndex(
+ var_max_index, UINT16_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
max_index_offset, code16);
- max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
+ var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
// Resume copying the passed-in arguments from the same place where the
// 8-bit copy stopped, but this time copying over all of the characters
// using a 16-bit representation.
arguments.ForEach(
vars,
- [this, context, two_byte_result, &max_index](Node* arg) {
+ [this, context, two_byte_result, &var_max_index](Node* arg) {
Node* code32 = TruncateTaggedToWord32(context, arg);
Node* code16 =
Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
Node* offset = ElementOffsetFromIndex(
- max_index.value(), UINT16_ELEMENTS,
+ var_max_index, UINT16_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
offset, code16);
- max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
+ var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
},
- max_index.value());
+ var_max_index);
arguments.PopAndReturn(two_byte_result);
}
@@ -653,7 +679,7 @@ TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
GotoIfNot(TaggedIsSmi(position), &return_emptystring);
// Determine the actual length of the {receiver} String.
- Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
+ TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
// Return "" if the Smi {position} is outside the bounds of the {receiver}.
Label if_positioninbounds(this);
@@ -667,7 +693,11 @@ TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
}
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position);
+ CSA_ASSERT(this, IntPtrLessThan(SmiUntag(position),
+ LoadStringLengthAsWord(receiver)));
+ CSA_ASSERT(this,
+ IntPtrGreaterThanOrEqual(SmiUntag(position), IntPtrConstant(0)));
+ Node* code = StringCharCodeAt(receiver, SmiUntag(position));
// And return the single character string with only that {code}.
Node* result = StringFromCharCode(code);
@@ -692,7 +722,7 @@ TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
GotoIfNot(TaggedIsSmi(position), &return_nan);
// Determine the actual length of the {receiver} String.
- Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
+ TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
// Return NaN if the Smi {position} is outside the bounds of the {receiver}.
Label if_positioninbounds(this);
@@ -706,7 +736,7 @@ TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
}
// Load the character at the {position} from the {receiver}.
- Node* value = StringCharCodeAt(receiver, position);
+ Node* value = StringCharCodeAt(receiver, SmiUntag(position));
Node* result = SmiFromWord32(value);
Return(result);
}
@@ -726,13 +756,15 @@ TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
position =
ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
- Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
- Branch(SmiBelow(position, receiver_length), &if_inbounds, &if_outofbounds);
+ TNode<IntPtrT> untagged_position = SmiUntag(position);
+ TNode<IntPtrT> receiver_length = LoadStringLengthAsWord(receiver);
+ Branch(UintPtrLessThan(untagged_position, receiver_length), &if_inbounds,
+ &if_outofbounds);
BIND(&if_inbounds);
{
- Node* value = LoadSurrogatePairAt(receiver, receiver_length, position,
- UnicodeEncoding::UTF32);
+ Node* value = LoadSurrogatePairAt(
+ receiver, receiver_length, untagged_position, UnicodeEncoding::UTF32);
Node* result = SmiFromWord32(value);
Return(result);
}
@@ -774,14 +806,10 @@ void StringBuiltinsAssembler::StringIndexOf(
CSA_ASSERT(this, IsString(search_string));
CSA_ASSERT(this, TaggedIsSmi(position));
- Node* const int_zero = IntPtrConstant(0);
-
- VARIABLE(var_needle_byte, MachineType::PointerRepresentation(), int_zero);
- VARIABLE(var_string_addr, MachineType::PointerRepresentation(), int_zero);
-
- Node* const search_length = SmiUntag(LoadStringLength(search_string));
- Node* const subject_length = SmiUntag(LoadStringLength(subject_string));
- Node* const start_position = IntPtrMax(SmiUntag(position), int_zero);
+ TNode<IntPtrT> const int_zero = IntPtrConstant(0);
+ TNode<IntPtrT> const search_length = LoadStringLengthAsWord(search_string);
+ TNode<IntPtrT> const subject_length = LoadStringLengthAsWord(subject_string);
+ TNode<IntPtrT> const start_position = IntPtrMax(SmiUntag(position), int_zero);
Label zero_length_needle(this), return_minus_1(this);
{
@@ -1034,10 +1062,6 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant) {
}
}
-compiler::Node* StringBuiltinsAssembler::IsNullOrUndefined(Node* const value) {
- return Word32Or(IsUndefined(value), IsNull(value));
-}
-
void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
Node* const value,
const char* method_name) {
@@ -1173,9 +1197,9 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
{
CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
- Callable substring_callable = CodeFactory::SubString(isolate());
- Node* const matched = CallStub(substring_callable, context, subject_string,
- match_start_index, match_end_index);
+ Node* const matched =
+ CallBuiltin(Builtins::kSubString, context, subject_string,
+ match_start_index, match_end_index);
Node* const replacement_string =
CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
match_start_index, replace_string, dollar_index);
@@ -1199,7 +1223,7 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
Node* const string =
ToThisString(context, receiver, "String.prototype.repeat");
Node* const is_stringempty =
- SmiEqual(LoadStringLength(string), SmiConstant(0));
+ SmiEqual(LoadStringLengthAsSmi(string), SmiConstant(0));
VARIABLE(var_count, MachineRepresentation::kTagged,
ToInteger(context, count, CodeStubAssembler::kTruncateMinusZero));
@@ -1207,20 +1231,19 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
// Verifies a valid count and takes a fast path when the result will be an
// empty string.
{
- Label next(this), if_count_isheapnumber(this, Label::kDeferred);
+ Label if_count_isheapnumber(this, Label::kDeferred);
GotoIfNot(TaggedIsSmi(var_count.value()), &if_count_isheapnumber);
// If count is a SMI, throw a RangeError if less than 0 or greater than
// the maximum string length.
- {
- GotoIf(SmiLessThan(var_count.value(), SmiConstant(0)), &invalid_count);
- GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_emptystring);
- GotoIf(is_stringempty, &return_emptystring);
- GotoIf(SmiGreaterThan(var_count.value(), SmiConstant(String::kMaxLength)),
- &invalid_string_length);
- Goto(&next);
- }
+ GotoIf(SmiLessThan(var_count.value(), SmiConstant(0)), &invalid_count);
+ GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_emptystring);
+ GotoIf(is_stringempty, &return_emptystring);
+ GotoIf(SmiGreaterThan(var_count.value(), SmiConstant(String::kMaxLength)),
+ &invalid_string_length);
+ Return(CallBuiltin(Builtins::kStringRepeat, context, string,
+ var_count.value()));
// If count is a Heap Number...
// 1) If count is Infinity, throw a RangeError exception
@@ -1236,49 +1259,6 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
&invalid_count);
Branch(is_stringempty, &return_emptystring, &invalid_string_length);
}
- BIND(&next);
- }
-
- // The receiver is repeated with the following algorithm:
- // let n = count;
- // let power_of_two_repeats = receiver;
- // let result = "";
- // while (true) {
- // if (n & 1) result += s;
- // n >>= 1;
- // if (n === 0) return result;
- // power_of_two_repeats += power_of_two_repeats;
- // }
- {
- VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
- VARIABLE(var_temp, MachineRepresentation::kTagged, string);
-
- Callable stringadd_callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
-
- Label loop(this, {&var_count, &var_result, &var_temp}), return_result(this);
- Goto(&loop);
- BIND(&loop);
- {
- {
- Label next(this);
- GotoIfNot(SmiToWord32(SmiAnd(var_count.value(), SmiConstant(1))),
- &next);
- var_result.Bind(CallStub(stringadd_callable, context,
- var_result.value(), var_temp.value()));
- Goto(&next);
- BIND(&next);
- }
-
- var_count.Bind(SmiShr(var_count.value(), 1));
- GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_result);
- var_temp.Bind(CallStub(stringadd_callable, context, var_temp.value(),
- var_temp.value()));
- Goto(&loop);
- }
-
- BIND(&return_result);
- Return(var_result.value());
}
BIND(&return_emptystring);
@@ -1298,6 +1278,58 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
}
}
+// Helper with less checks
+TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const string = Parameter(Descriptor::kString);
+ Node* const count = Parameter(Descriptor::kCount);
+
+ CSA_ASSERT(this, IsString(string));
+ CSA_ASSERT(this, Word32BinaryNot(IsEmptyString(string)));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(count));
+ CSA_ASSERT(this, SmiLessThanOrEqual(count, SmiConstant(String::kMaxLength)));
+
+ // The string is repeated with the following algorithm:
+ // let n = count;
+ // let power_of_two_repeats = string;
+ // let result = "";
+ // while (true) {
+ // if (n & 1) result += s;
+ // n >>= 1;
+ // if (n === 0) return result;
+ // power_of_two_repeats += power_of_two_repeats;
+ // }
+ VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+ VARIABLE(var_temp, MachineRepresentation::kTagged, string);
+ VARIABLE(var_count, MachineRepresentation::kTagged, count);
+
+ Callable stringadd_callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+
+ Label loop(this, {&var_count, &var_result, &var_temp}), return_result(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ {
+ Label next(this);
+ GotoIfNot(SmiToWord32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
+ var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
+ var_temp.value()));
+ Goto(&next);
+ BIND(&next);
+ }
+
+ var_count.Bind(SmiShr(var_count.value(), 1));
+ GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_result);
+ var_temp.Bind(CallStub(stringadd_callable, context, var_temp.value(),
+ var_temp.value()));
+ Goto(&loop);
+ }
+
+ BIND(&return_result);
+ Return(var_result.value());
+}
+
// ES6 #sec-string.prototype.replace
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
@@ -1326,11 +1358,11 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// Convert {receiver} and {search} to strings.
- Node* const subject_string = ToString_Inline(context, receiver);
- Node* const search_string = ToString_Inline(context, search);
+ TNode<String> const subject_string = ToString_Inline(context, receiver);
+ TNode<String> const search_string = ToString_Inline(context, search);
- Node* const subject_length = LoadStringLength(subject_string);
- Node* const search_length = LoadStringLength(search_string);
+ TNode<Smi> const subject_length = LoadStringLengthAsSmi(subject_string);
+ TNode<Smi> const search_length = LoadStringLengthAsSmi(search_string);
// Fast-path single-char {search}, long cons {receiver}, and simple string
// {replace}.
@@ -1393,7 +1425,6 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Node* const match_end_index = SmiAdd(match_start_index, search_length);
- Callable substring_callable = CodeFactory::SubString(isolate());
Callable stringadd_callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
@@ -1404,8 +1435,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label next(this);
GotoIf(SmiEqual(match_start_index, smi_zero), &next);
- Node* const prefix = CallStub(substring_callable, context, subject_string,
- smi_zero, match_start_index);
+ Node* const prefix =
+ CallBuiltin(Builtins::kSubString, context, subject_string, smi_zero,
+ match_start_index);
var_result.Bind(prefix);
Goto(&next);
@@ -1444,14 +1476,230 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&out);
{
- Node* const suffix = CallStub(substring_callable, context, subject_string,
- match_end_index, subject_length);
+ Node* const suffix =
+ CallBuiltin(Builtins::kSubString, context, subject_string,
+ match_end_index, subject_length);
Node* const result =
CallStub(stringadd_callable, context, var_result.value(), suffix);
Return(result);
}
}
+class StringMatchSearchAssembler : public StringBuiltinsAssembler {
+ public:
+ explicit StringMatchSearchAssembler(compiler::CodeAssemblerState* state)
+ : StringBuiltinsAssembler(state) {}
+
+ protected:
+ enum Variant { kMatch, kSearch };
+
+ void Generate(Variant variant, const char* method_name, Node* const receiver,
+ Node* maybe_regexp, Node* const context) {
+ Label call_regexp_match_search(this);
+
+ Builtins::Name builtin;
+ Handle<Symbol> symbol;
+ if (variant == kMatch) {
+ builtin = Builtins::kRegExpMatchFast;
+ symbol = isolate()->factory()->match_symbol();
+ } else {
+ builtin = Builtins::kRegExpSearchFast;
+ symbol = isolate()->factory()->search_symbol();
+ }
+
+ RequireObjectCoercible(context, receiver, method_name);
+
+ MaybeCallFunctionAtSymbol(
+ context, maybe_regexp, receiver, symbol,
+ [=] { return CallBuiltin(builtin, context, maybe_regexp, receiver); },
+ [=](Node* fn) {
+ Callable call_callable = CodeFactory::Call(isolate());
+ return CallJS(call_callable, context, fn, maybe_regexp, receiver);
+ });
+
+ // maybe_regexp is not a RegExp nor has [@@match / @@search] property.
+ {
+ RegExpBuiltinsAssembler regexp_asm(state());
+
+ Node* const receiver_string = ToString_Inline(context, receiver);
+ Node* const pattern = Select(
+ IsUndefined(maybe_regexp), [=] { return EmptyStringConstant(); },
+ [=] { return ToString_Inline(context, maybe_regexp); },
+ MachineRepresentation::kTagged);
+
+ // Create RegExp
+ // TODO(pwong): This could be factored out as a helper (RegExpCreate) that
+ // also does the "is fast" checks.
+ Node* const native_context = LoadNativeContext(context);
+ Node* const regexp_function =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map = LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const regexp = CallRuntime(
+ Runtime::kRegExpInitializeAndCompile, context,
+ AllocateJSObjectFromMap(initial_map), pattern, EmptyStringConstant());
+
+ Label fast_path(this), slow_path(this);
+ regexp_asm.BranchIfFastRegExp(context, regexp, initial_map, &fast_path,
+ &slow_path);
+
+ BIND(&fast_path);
+ Return(CallBuiltin(builtin, context, regexp, receiver_string));
+
+ BIND(&slow_path);
+ {
+ Node* const maybe_func = GetProperty(context, regexp, symbol);
+ Callable call_callable = CodeFactory::Call(isolate());
+ Return(CallJS(call_callable, context, maybe_func, regexp,
+ receiver_string));
+ }
+ }
+ }
+};
+
+// ES6 #sec-string.prototype.match
+TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const maybe_regexp = Parameter(Descriptor::kRegexp);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Generate(kMatch, "String.prototype.match", receiver, maybe_regexp, context);
+}
+
+class StringPadAssembler : public StringBuiltinsAssembler {
+ public:
+ explicit StringPadAssembler(compiler::CodeAssemblerState* state)
+ : StringBuiltinsAssembler(state) {}
+
+ protected:
+ enum Variant { kStart, kEnd };
+
+ void Generate(Variant variant, const char* method_name) {
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments arguments(this, argc);
+ Node* const receiver = arguments.GetReceiver();
+ Node* const receiver_string = ToThisString(context, receiver, method_name);
+ TNode<Smi> const string_length = LoadStringLengthAsSmi(receiver_string);
+
+ TVARIABLE(String, var_fill_string, StringConstant(" "));
+ TVARIABLE(IntPtrT, var_fill_length, IntPtrConstant(1));
+
+ Label argc_2(this), dont_pad(this), invalid_string_length(this), pad(this);
+
+ // If no max_length was provided, return the string.
+ GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &dont_pad);
+
+ Node* const max_length = ToLength_Inline(context, arguments.AtIndex(0));
+ CSA_ASSERT(this, IsNumberNormalized(max_length));
+
+ // Throw if max_length is not a smi or greater than the max string length.
+ GotoIfNot(Word32And(TaggedIsSmi(max_length),
+ SmiLessThanOrEqual(max_length,
+ SmiConstant(String::kMaxLength))),
+ &invalid_string_length);
+
+ // If the max_length is less than length of the string, return the string.
+ CSA_ASSERT(this, TaggedIsPositiveSmi(max_length));
+ GotoIf(SmiLessThanOrEqual(max_length, string_length), &dont_pad);
+
+ Branch(IntPtrEqual(argc, IntPtrConstant(1)), &pad, &argc_2);
+ BIND(&argc_2);
+ {
+ Node* const fill = arguments.AtIndex(1);
+ GotoIf(IsUndefined(fill), &pad);
+
+ var_fill_string = ToString_Inline(context, fill);
+ var_fill_length = LoadStringLengthAsWord(var_fill_string);
+
+ Branch(IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)), &pad,
+ &dont_pad);
+ }
+ BIND(&pad);
+ {
+ CSA_ASSERT(this, IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)));
+ CSA_ASSERT(this, SmiGreaterThan(max_length, string_length));
+
+ Callable stringadd_callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ TNode<Smi> const pad_length = SmiSub(max_length, string_length);
+
+ VARIABLE(var_pad, MachineRepresentation::kTagged);
+
+ Label single_char_fill(this), multi_char_fill(this), return_result(this);
+ Branch(IntPtrEqual(var_fill_length, IntPtrConstant(1)), &single_char_fill,
+ &multi_char_fill);
+
+ // Fast path for a single character fill. No need to calculate number of
+ // repetitions or remainder.
+ BIND(&single_char_fill);
+ {
+ var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
+ static_cast<Node*>(var_fill_string),
+ pad_length));
+ Goto(&return_result);
+ }
+ BIND(&multi_char_fill);
+ {
+ TNode<Int32T> const fill_length_word32 =
+ TruncateWordToWord32(var_fill_length);
+ TNode<Int32T> const pad_length_word32 = SmiToWord32(pad_length);
+ TNode<Int32T> const repetitions_word32 =
+ Int32Div(pad_length_word32, fill_length_word32);
+ TNode<Int32T> const remaining_word32 =
+ Int32Mod(pad_length_word32, fill_length_word32);
+
+ var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
+ static_cast<Node*>(var_fill_string),
+ SmiFromWord32(repetitions_word32)));
+
+ GotoIfNot(remaining_word32, &return_result);
+ {
+ Node* const remainder_string =
+ CallBuiltin(Builtins::kSubString, context,
+ static_cast<Node*>(var_fill_string), SmiConstant(0),
+ SmiFromWord32(remaining_word32));
+ var_pad.Bind(CallStub(stringadd_callable, context, var_pad.value(),
+ remainder_string));
+ Goto(&return_result);
+ }
+ }
+ BIND(&return_result);
+ CSA_ASSERT(this,
+ SmiEqual(pad_length, LoadStringLengthAsSmi(var_pad.value())));
+ arguments.PopAndReturn(variant == kStart
+ ? CallStub(stringadd_callable, context,
+ var_pad.value(), receiver_string)
+ : CallStub(stringadd_callable, context,
+ receiver_string, var_pad.value()));
+ }
+ BIND(&dont_pad);
+ arguments.PopAndReturn(receiver_string);
+ BIND(&invalid_string_length);
+ {
+ CallRuntime(Runtime::kThrowInvalidStringLength, context);
+ Unreachable();
+ }
+ }
+};
+
+TF_BUILTIN(StringPrototypePadEnd, StringPadAssembler) {
+ Generate(kEnd, "String.prototype.padEnd");
+}
+
+TF_BUILTIN(StringPrototypePadStart, StringPadAssembler) {
+ Generate(kStart, "String.prototype.padStart");
+}
+
+// ES6 #sec-string.prototype.search
+TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const maybe_regexp = Parameter(Descriptor::kRegexp);
+ Node* const context = Parameter(Descriptor::kContext);
+ Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
+}
+
// ES6 section 21.1.3.18 String.prototype.slice ( start, end )
TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
Label out(this);
@@ -1478,14 +1726,14 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
CallBuiltin(Builtins::kToString, context, receiver);
// 3. Let len be the number of elements in S.
- Node* const length = LoadStringLength(subject_string);
+ TNode<Smi> const length = LoadStringLengthAsSmi(subject_string);
// Conversions and bounds-checks for {start}.
ConvertAndBoundsCheckStartArgument(context, &var_start, start, length);
// 5. If end is undefined, let intEnd be len;
var_end.Bind(length);
- GotoIf(WordEqual(end, UndefinedConstant()), &out);
+ GotoIf(IsUndefined(end), &out);
// else let intEnd be ? ToInteger(end).
Node* const end_int =
@@ -1618,7 +1866,8 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
// If the separator string is empty then return the elements in the subject.
{
Label next(this);
- GotoIfNot(SmiEqual(LoadStringLength(separator_string), smi_zero), &next);
+ GotoIfNot(SmiEqual(LoadStringLengthAsSmi(separator_string), SmiConstant(0)),
+ &next);
Node* const result = CallRuntime(Runtime::kStringToArray, context,
subject_string, limit_number);
@@ -1649,16 +1898,16 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Label out(this);
- VARIABLE(var_start, MachineRepresentation::kTagged);
- VARIABLE(var_length, MachineRepresentation::kTagged);
+ TVARIABLE(Smi, var_start);
+ TVARIABLE(Number, var_length);
- Node* const zero = SmiConstant(0);
+ TNode<Smi> const zero = SmiConstant(0);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
ToThisString(context, receiver, "String.prototype.substr");
- Node* const string_length = LoadStringLength(string);
+ TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
// Conversions and bounds-checks for {start}.
ConvertAndBoundsCheckStartArgument(context, &var_start, start, string_length);
@@ -1669,29 +1918,29 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// Default to {string_length} if {length} is undefined.
{
Label if_isundefined(this, Label::kDeferred), if_isnotundefined(this);
- Branch(WordEqual(length, UndefinedConstant()), &if_isundefined,
- &if_isnotundefined);
+ Branch(IsUndefined(length), &if_isundefined, &if_isnotundefined);
BIND(&if_isundefined);
- var_length.Bind(string_length);
+ var_length = string_length;
Goto(&if_issmi);
BIND(&if_isnotundefined);
- var_length.Bind(
- ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+ var_length =
+ ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
}
- Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+ TVARIABLE(Smi, var_result_length);
+
+ Branch(TaggedIsSmi(var_length), &if_issmi, &if_isheapnumber);
// Set {length} to min(max({length}, 0), {string_length} - {start}
BIND(&if_issmi);
{
- Node* const positive_length = SmiMax(var_length.value(), zero);
+ TNode<Smi> const positive_length = SmiMax(CAST(var_length), zero);
+ TNode<Smi> const minimal_length = SmiSub(string_length, var_start);
+ var_result_length = SmiMin(positive_length, minimal_length);
- Node* const minimal_length = SmiSub(string_length, var_start.value());
- var_length.Bind(SmiMin(positive_length, minimal_length));
-
- GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
@@ -1701,11 +1950,11 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(this, IsHeapNumber(var_length.value()));
+ CSA_ASSERT(this, IsHeapNumber(var_length));
Label if_isnegative(this), if_ispositive(this);
- Node* const float_zero = Float64Constant(0.);
- Node* const length_float = LoadHeapNumberValue(var_length.value());
+ TNode<Float64T> const float_zero = Float64Constant(0.);
+ TNode<Float64T> const length_float = LoadHeapNumberValue(CAST(var_length));
Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
&if_ispositive);
@@ -1714,16 +1963,16 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
BIND(&if_ispositive);
{
- var_length.Bind(SmiSub(string_length, var_start.value()));
- GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ var_result_length = SmiSub(string_length, var_start);
+ GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
}
BIND(&out);
{
- Node* const end = SmiAdd(var_start.value(), var_length.value());
- Node* const result = SubString(context, string, var_start.value(), end);
+ TNode<Smi> const end = SmiAdd(var_start, var_result_length);
+ Node* const result = SubString(context, string, var_start, end);
args.PopAndReturn(result);
}
}
@@ -1777,6 +2026,15 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
return var_result;
}
+TF_BUILTIN(SubString, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* string = Parameter(Descriptor::kString);
+ Node* from = Parameter(Descriptor::kFrom);
+ Node* to = Parameter(Descriptor::kTo);
+
+ Return(SubString(context, string, from, to));
+}
+
// ES6 #sec-string.prototype.substring
TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
const int kStartArg = 0;
@@ -1800,7 +2058,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
Node* const string =
ToThisString(context, receiver, "String.prototype.substring");
- Node* const length = LoadStringLength(string);
+ Node* const length = LoadStringLengthAsSmi(string);
// Conversion and bounds-checks for {start}.
var_start.Bind(ToSmiBetweenZeroAnd(context, start, length));
@@ -1808,7 +2066,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
// Conversion and bounds-checks for {end}.
{
var_end.Bind(length);
- GotoIf(WordEqual(end, UndefinedConstant()), &out);
+ GotoIf(IsUndefined(end), &out);
var_end.Bind(ToSmiBetweenZeroAnd(context, end, length));
@@ -1859,7 +2117,7 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string = ToThisString(context, receiver, method_name);
- Node* const string_length = SmiUntag(LoadStringLength(string));
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
ToDirectStringAssembler to_direct(state(), string);
to_direct.TryToDirect(&if_runtime);
@@ -1868,9 +2126,8 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
Node* const is_stringonebyte = IsOneByteStringInstanceType(instance_type);
Node* const string_data_offset = to_direct.offset();
- VARIABLE(var_start, MachineType::PointerRepresentation(), IntPtrConstant(0));
- VARIABLE(var_end, MachineType::PointerRepresentation(),
- IntPtrSub(string_length, IntPtrConstant(1)));
+ TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
if (mode == String::kTrimLeft || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
@@ -1883,14 +2140,13 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
IntPtrConstant(-1), -1, &return_emptystring);
}
- arguments.PopAndReturn(
- SubString(context, string, SmiTag(var_start.value()),
- SmiAdd(SmiTag(var_end.value()), SmiConstant(1)),
- SubStringFlags::FROM_TO_ARE_BOUNDED));
+ arguments.PopAndReturn(SubString(context, string, SmiTag(var_start),
+ SmiAdd(SmiTag(var_end), SmiConstant(1)),
+ SubStringFlags::FROM_TO_ARE_BOUNDED));
BIND(&if_runtime);
- arguments.PopAndReturn(CallRuntime(Runtime::kStringTrim, context, string,
- SmiConstant(static_cast<int>(mode))));
+ arguments.PopAndReturn(
+ CallRuntime(Runtime::kStringTrim, context, string, SmiConstant(mode)));
BIND(&return_emptystring);
arguments.PopAndReturn(EmptyStringConstant());
@@ -2041,8 +2297,8 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
- SloppyTNode<String> string, SloppyTNode<Smi> length, SloppyTNode<Smi> index,
- UnicodeEncoding encoding) {
+ SloppyTNode<String> string, SloppyTNode<IntPtrT> length,
+ SloppyTNode<IntPtrT> index, UnicodeEncoding encoding) {
Label handle_surrogate_pair(this), return_result(this);
TVARIABLE(Uint32T, var_result);
TVARIABLE(Uint32T, var_trail);
@@ -2052,9 +2308,9 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&return_result);
- TNode<Smi> next_index = SmiAdd(index, SmiConstant(1));
+ TNode<IntPtrT> next_index = IntPtrAdd(index, IntPtrConstant(1));
- GotoIfNot(SmiLessThan(next_index, length), &return_result);
+ GotoIfNot(IntPtrLessThan(next_index, length), &return_result);
var_trail = StringCharCodeAt(string, next_index);
Branch(Word32Equal(Word32And(var_trail, Int32Constant(0xFC00)),
Int32Constant(0xDC00)),
@@ -2109,7 +2365,7 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
VARIABLE(var_done, MachineRepresentation::kTagged);
var_value.Bind(UndefinedConstant());
- var_done.Bind(BooleanConstant(true));
+ var_done.Bind(TrueConstant());
Label throw_bad_receiver(this), next_codepoint(this), return_result(this);
@@ -2117,16 +2373,16 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
Node* iterator = Parameter(Descriptor::kReceiver);
GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
- GotoIfNot(Word32Equal(LoadInstanceType(iterator),
- Int32Constant(JS_STRING_ITERATOR_TYPE)),
- &throw_bad_receiver);
+ GotoIfNot(
+ InstanceTypeEqual(LoadInstanceType(iterator), JS_STRING_ITERATOR_TYPE),
+ &throw_bad_receiver);
Node* string = LoadObjectField(iterator, JSStringIterator::kStringOffset);
- Node* position =
- LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
- Node* length = LoadObjectField(string, String::kLengthOffset);
+ TNode<IntPtrT> position = SmiUntag(
+ CAST(LoadObjectField(iterator, JSStringIterator::kNextIndexOffset)));
+ TNode<IntPtrT> length = LoadStringLengthAsWord(string);
- Branch(SmiLessThan(position, length), &next_codepoint, &return_result);
+ Branch(IntPtrLessThan(position, length), &next_codepoint, &return_result);
BIND(&next_codepoint);
{
@@ -2134,10 +2390,10 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
Node* value = StringFromCodePoint(ch, encoding);
var_value.Bind(value);
- Node* length = LoadObjectField(value, String::kLengthOffset);
+ TNode<IntPtrT> length = LoadStringLengthAsWord(value);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
- SmiAdd(position, length));
- var_done.Bind(BooleanConstant(false));
+ SmiTag(Signed(IntPtrAdd(position, length))));
+ var_done.Bind(FalseConstant());
Goto(&return_result);
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index c9af380270..f1111b3465 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -20,11 +20,16 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* match_start_index, Node* match_end_index,
Node* replace_string);
void StringEqual_Core(Node* context, Node* lhs, Node* lhs_instance_type,
- Node* lhs_length, Node* rhs, Node* rhs_instance_type,
- Label* if_equal, Label* if_not_equal,
- Label* if_notbothdirectonebyte);
+ Node* rhs, Node* rhs_instance_type,
+ TNode<IntPtrT> length, Label* if_equal,
+ Label* if_not_equal, Label* if_indirect);
protected:
+ void StringEqual_Loop(Node* lhs, Node* lhs_instance_type,
+ MachineType lhs_type, Node* rhs,
+ Node* rhs_instance_type, MachineType rhs_type,
+ TNode<IntPtrT> length, Label* if_equal,
+ Label* if_not_equal);
Node* DirectStringData(Node* string, Node* string_instance_type);
void DispatchOnStringEncodings(Node* const lhs_instance_type,
@@ -46,16 +51,15 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
void GenerateStringEqual(Node* context, Node* left, Node* right);
void GenerateStringRelationalComparison(Node* context, Node* left,
- Node* right,
- RelationalComparisonMode mode);
+ Node* right, Operation op);
TNode<Smi> ToSmiBetweenZeroAnd(SloppyTNode<Context> context,
SloppyTNode<Object> value,
SloppyTNode<Smi> limit);
TNode<Uint32T> LoadSurrogatePairAt(SloppyTNode<String> string,
- SloppyTNode<Smi> length,
- SloppyTNode<Smi> index,
+ SloppyTNode<IntPtrT> length,
+ SloppyTNode<IntPtrT> index,
UnicodeEncoding encoding);
void StringIndexOf(Node* const subject_string, Node* const search_string,
@@ -63,7 +67,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* IndexOfDollarChar(Node* const context, Node* const string);
- Node* IsNullOrUndefined(Node* const value);
void RequireObjectCoercible(Node* const context, Node* const value,
const char* method_name);
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 4e3058c220..14a74afb6d 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -8,6 +8,7 @@
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-utils.h"
+#include "src/string-builder.h"
#include "src/string-case.h"
#include "src/unicode-inl.h"
#include "src/unicode.h"
@@ -511,5 +512,65 @@ BUILTIN(StringPrototypeToUpperCase) {
}
#endif // !V8_INTL_SUPPORT
+// ES6 #sec-string.prototype.raw
+BUILTIN(StringRaw) {
+ HandleScope scope(isolate);
+ Handle<Object> templ = args.atOrUndefined(isolate, 1);
+ const uint32_t argc = args.length();
+ Handle<String> raw_string =
+ isolate->factory()->NewStringFromAsciiChecked("raw");
+
+ Handle<Object> cooked;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, cooked,
+ Object::ToObject(isolate, templ));
+
+ Handle<Object> raw;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, raw,
+ Object::GetProperty(cooked, raw_string));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, raw,
+ Object::ToObject(isolate, raw));
+ Handle<Object> raw_len;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_len,
+ Object::GetProperty(raw, isolate->factory()->length_string()));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, raw_len,
+ Object::ToLength(isolate, raw_len));
+
+ IncrementalStringBuilder result_builder(isolate);
+ const uint32_t length = static_cast<uint32_t>(raw_len->Number());
+ if (length > 0) {
+ Handle<Object> first_element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, first_element,
+ Object::GetElement(isolate, raw, 0));
+
+ Handle<String> first_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, first_string, Object::ToString(isolate, first_element));
+ result_builder.AppendString(first_string);
+
+ for (uint32_t i = 1, arg_i = 2; i < length; i++, arg_i++) {
+ if (arg_i < argc) {
+ Handle<String> argument_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, argument_string,
+ Object::ToString(isolate, args.at(arg_i)));
+ result_builder.AppendString(argument_string);
+ }
+
+ Handle<Object> element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element,
+ Object::GetElement(isolate, raw, i));
+
+ Handle<String> element_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_string,
+ Object::ToString(isolate, element));
+ result_builder.AppendString(element_string);
+ }
+ }
+
+ RETURN_RESULT_OR_FAILURE(isolate, result_builder.Finish());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index 86ec0e7bd9..df89d1ced3 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -46,6 +46,9 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
Node* LoadDataPtr(Node* typed_array);
Node* ByteLengthIsValid(Node* byte_length);
+ // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
+ TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
+
// Loads the element kind of TypedArray instance.
TNode<Word32T> LoadElementsKind(TNode<Object> typed_array);
@@ -130,7 +133,8 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
// need to convert the float heap number to an intptr.
Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
Node* byte_offset) {
- return IntPtrAdd(backing_store, ChangeNumberToIntPtr(byte_offset));
+ return IntPtrAdd(backing_store,
+ ChangeNonnegativeNumberToUintPtr(byte_offset));
}
// Setup the TypedArray which is under construction.
@@ -388,7 +392,7 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- Node* initialize = BooleanConstant(true);
+ Node* initialize = TrueConstant();
Label invalid_length(this);
@@ -432,7 +436,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
invalid_offset_error(this, Label::kDeferred);
Label offset_is_smi(this), offset_not_smi(this, Label::kDeferred),
check_length(this), call_init(this), invalid_length(this),
- length_undefined(this), length_defined(this);
+ length_undefined(this), length_defined(this), detached_error(this);
GotoIf(IsUndefined(byte_offset), &check_length);
@@ -463,11 +467,11 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
}
BIND(&check_length);
- // TODO(petermarshall): Throw on detached typedArray.
Branch(IsUndefined(length), &length_undefined, &length_defined);
BIND(&length_undefined);
{
+ GotoIf(IsDetachedBuffer(buffer), &detached_error);
Node* buffer_byte_length =
LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
@@ -489,6 +493,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&length_defined);
{
Node* new_length = ToSmiIndex(length, context, &invalid_length);
+ GotoIf(IsDetachedBuffer(buffer), &detached_error);
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
// could cause the buffer to become detached.
@@ -548,6 +553,9 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
Unreachable();
}
+
+ BIND(&detached_error);
+ { ThrowTypeError(context, MessageTemplate::kDetachedOperation, "Construct"); }
}
Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
@@ -590,7 +598,7 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
CSA_ASSERT(this, TaggedIsSmi(element_size));
Node* context = Parameter(Descriptor::kContext);
- Node* initialize = BooleanConstant(false);
+ Node* initialize = FalseConstant();
Label invalid_length(this), fill(this), fast_copy(this);
@@ -626,7 +634,7 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
Node* byte_length = SmiMul(length, element_size);
CSA_ASSERT(this, ByteLengthIsValid(byte_length));
- Node* byte_length_intptr = ChangeNumberToIntPtr(byte_length);
+ Node* byte_length_intptr = ChangeNonnegativeNumberToUintPtr(byte_length);
CSA_ASSERT(this, UintPtrLessThanOrEqual(
byte_length_intptr,
IntPtrConstant(FixedTypedArrayBase::kMaxByteLength)));
@@ -705,11 +713,16 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
JSTypedArray::kLengthOffset);
}
+TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
+ TNode<Word32T> kind) {
+ return Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
+}
+
TNode<Word32T> TypedArrayBuiltinsAssembler::LoadElementsKind(
TNode<Object> typed_array) {
CSA_ASSERT(this, IsJSTypedArray(typed_array));
- return Int32Sub(LoadMapElementsKind(LoadMap(CAST(typed_array))),
- Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
+ return LoadMapElementsKind(LoadMap(CAST(typed_array)));
}
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
@@ -722,8 +735,7 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
1;
int32_t elements_kinds[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- TYPE##_ELEMENTS - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
@@ -802,8 +814,12 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
UintPtrGreaterThanOrEqual(source_byte_length, IntPtrConstant(0)));
Label call_memmove(this), fast_c_call(this), out(this);
- Branch(Word32Equal(source_el_kind, target_el_kind), &call_memmove,
- &fast_c_call);
+
+  // A fast memmove call can be used when the source and target types are
+  // the same or are either Uint8 or Uint8Clamped.
+ GotoIf(Word32Equal(source_el_kind, target_el_kind), &call_memmove);
+ GotoIfNot(IsUint8ElementsKind(source_el_kind), &fast_c_call);
+ Branch(IsUint8ElementsKind(target_el_kind), &call_memmove, &fast_c_call);
BIND(&call_memmove);
{
@@ -847,6 +863,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
void TypedArrayBuiltinsAssembler::SetJSArraySource(
TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> target,
TNode<IntPtrT> offset, Label* call_runtime, Label* if_source_too_large) {
+ CSA_ASSERT(this, IsFastJSArray(source, context));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0)));
CSA_ASSERT(this,
IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
@@ -942,7 +959,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
// Normalize offset argument (using ToInteger) and handle heap number cases.
TNode<Object> offset = args.GetOptionalArgumentValue(1, SmiConstant(0));
- TNode<Object> offset_num = ToInteger(context, offset, kTruncateMinusZero);
+ TNode<Number> offset_num = ToInteger(context, offset, kTruncateMinusZero);
CSA_ASSERT(this, IsNumberNormalized(offset_num));
// Since ToInteger always returns a Smi if the given value is within Smi
@@ -1061,8 +1078,8 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(map);
- GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &throw_bad_receiver);
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE),
+ &throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 176a79965b..18625c8d90 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -311,12 +311,6 @@ BUILTIN(TypedArrayPrototypeSlice) {
isolate, result_array,
TypedArraySpeciesCreateByLength(isolate, array, method, count));
- // TODO(cwhan.tunz): neutering check of the result_array should be done in
- // TypedArraySpeciesCreate, but currently ValidateTypedArray does not throw
- // for neutered buffer, so this is a temporary neutering check for the result
- // array
- if (V8_UNLIKELY(result_array->WasNeutered())) return *result_array;
-
// TODO(cwhan.tunz): should throw.
if (V8_UNLIKELY(array->WasNeutered())) return *result_array;
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index b2f268e29f..27199c8462 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -46,8 +46,10 @@ class BuiltinArguments : public Arguments {
static const int kNewTargetOffset = 0;
static const int kTargetOffset = 1;
static const int kArgcOffset = 2;
- static const int kNumExtraArgs = 3;
- static const int kNumExtraArgsWithReceiver = 4;
+ static const int kPaddingOffset = 3;
+
+ static const int kNumExtraArgs = 4;
+ static const int kNumExtraArgsWithReceiver = 5;
Handle<JSFunction> target() {
return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index cb110bea95..87fe14743a 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -19,7 +19,7 @@ TF_BUILTIN(WasmStackGuard, CodeStubAssembler) {
#define DECLARE_ENUM(name) \
TF_BUILTIN(ThrowWasm##name, CodeStubAssembler) { \
int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf, NoContextConstant(), \
+ TailCallRuntime(Runtime::kThrowWasmError, NoContextConstant(), \
SmiConstant(message_id)); \
}
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 9cd6821907..55fc1c8cd8 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -98,7 +98,7 @@ const char* Builtins::Lookup(byte* pc) {
if (entry->contains(pc)) return name(i);
}
}
- return NULL;
+ return nullptr;
}
Handle<Code> Builtins::NewFunctionContext(ScopeType scope_type) {
@@ -170,33 +170,15 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN)
#undef CASE_OTHER
- case kConsoleAssert: {
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kConsoleAssert:
return Callable(code, BuiltinDescriptor(isolate));
- }
- case kArrayForEach: {
- Handle<Code> code = BUILTIN_CODE(isolate, ArrayForEach);
- return Callable(code, BuiltinDescriptor(isolate));
- }
- case kArrayForEachLoopEagerDeoptContinuation: {
- Handle<Code> code =
- BUILTIN_CODE(isolate, ArrayForEachLoopEagerDeoptContinuation);
- return Callable(code, BuiltinDescriptor(isolate));
- }
- case kArrayForEachLoopLazyDeoptContinuation: {
- Handle<Code> code =
- BUILTIN_CODE(isolate, ArrayForEachLoopLazyDeoptContinuation);
- return Callable(code, BuiltinDescriptor(isolate));
- }
- case kArrayMapLoopEagerDeoptContinuation: {
- Handle<Code> code =
- BUILTIN_CODE(isolate, ArrayMapLoopEagerDeoptContinuation);
- return Callable(code, BuiltinDescriptor(isolate));
- }
- case kArrayMapLoopLazyDeoptContinuation: {
- Handle<Code> code =
- BUILTIN_CODE(isolate, ArrayMapLoopLazyDeoptContinuation);
- return Callable(code, BuiltinDescriptor(isolate));
- }
default:
UNREACHABLE();
}
@@ -235,6 +217,8 @@ bool Builtins::IsLazy(int index) {
case kArrayForEachLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayMapLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayMapLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kCheckOptimizationMarker:
case kCompileLazy:
case kDeserializeLazy:
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index e28feb7efe..d9090dc67e 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -48,6 +48,8 @@ class Builtins {
builtin_count
};
+ static const int32_t kNoBuiltinId = -1;
+
static bool IsBuiltinId(int maybe_id) {
return 0 <= maybe_id && maybe_id < builtin_count;
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index a689c3131d..7635bada49 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -5,7 +5,6 @@
#if V8_TARGET_ARCH_IA32
#include "src/code-factory.h"
-#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
@@ -55,12 +54,12 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects eax to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ add(eax, Immediate(num_extra_args + 1));
+ __ add(eax, Immediate(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
// Insert extra arguments.
__ PopReturnAddressTo(ecx);
__ SmiTag(eax);
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ Push(eax);
__ SmiUntag(eax);
__ Push(edi);
@@ -396,37 +395,30 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
-
-// Clobbers ecx, edx, edi; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- IsTagged eax_is_tagged) {
- // eax : the number of items to be pushed to the stack
- //
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow,
+ bool include_receiver = false) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- Label okay;
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
+ __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
+ // Make scratch2 the space we have left. The stack might already be overflowed
+ // here which will cause scratch2 to become negative.
+ __ mov(scratch2, esp);
+ __ sub(scratch2, scratch1);
+ // Make scratch1 the space we need for the array when it is unrolled onto the
// stack.
- __ mov(edx, eax);
- int smi_tag = eax_is_tagged == kEaxIsSmiTagged ? kSmiTagSize : 0;
- __ shl(edx, kPointerSizeLog2 - smi_tag);
+ __ mov(scratch1, num_args);
+ if (include_receiver) {
+ __ add(scratch1, Immediate(1));
+ }
+ __ shl(scratch1, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow);
-
- __ bind(&okay);
+ __ cmp(scratch2, scratch1);
+ __ j(less_equal, stack_overflow); // Signed comparison.
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
@@ -453,8 +445,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
// Check if we have enough stack space to push all arguments.
- // Expects argument count in eax. Clobbers ecx, edx, edi.
- Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
+ // Argument count in eax. Clobbers ecx and edx.
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, eax, ecx, edx, &stack_overflow);
+ __ jmp(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+
+ __ bind(&enough_stack_space);
// Copy arguments to the stack in a loop.
Label loop, entry;
@@ -500,22 +501,18 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the value to pass to the generator
- // -- ebx : the JSGeneratorObject to resume
- // -- edx : the resume mode (tagged)
+ // -- edx : the JSGeneratorObject to resume
// -- esp[0] : return address
// -----------------------------------
- __ AssertGeneratorObject(ebx);
+ __ AssertGeneratorObject(edx);
// Store input value into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
- __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
+ __ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+ __ RecordWriteField(edx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
-
// Load suspended function and context.
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Flood function if we are stepping.
@@ -529,20 +526,25 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+ __ cmp(edx, Operand::StaticVariable(debug_suspended_generator));
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ j(below, &stack_overflow);
+
// Pop return address.
__ PopReturnAddressTo(eax);
// Push receiver.
- __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
// ----------- S t a t e -------------
// -- eax : return address
- // -- ebx : the JSGeneratorObject to resume
- // -- edx : the resume mode (tagged)
+ // -- edx : the JSGeneratorObject to resume
// -- edi : generator function
// -- esi : generator context
// -- esp[0] : generator receiver
@@ -582,7 +584,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
- __ mov(edx, ebx);
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -591,27 +592,30 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
__ Push(edx);
__ Push(edi);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(edx);
- __ Pop(ebx);
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
__ bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
__ Push(edx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(edx);
- __ Pop(ebx);
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ int3(); // This should be unreachable.
+ }
}
static void ReplaceClosureCodeWithOptimizedCode(
@@ -717,18 +721,20 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ mov(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ push(eax);
+ __ push(edx);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
Label found_deoptimized_code;
- __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ __ mov(eax,
+ FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
- __ push(eax);
- __ push(edx);
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
@@ -741,6 +747,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
+ __ pop(edx);
+ __ pop(eax);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
@@ -962,32 +970,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow,
- bool include_receiver = false) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
- // Make scratch2 the space we have left. The stack might already be overflowed
- // here which will cause scratch2 to become negative.
- __ mov(scratch2, esp);
- __ sub(scratch2, scratch1);
- // Make scratch1 the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(scratch1, num_args);
- if (include_receiver) {
- __ add(scratch1, Immediate(1));
- }
- __ shl(scratch1, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch2, scratch1);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register array_limit,
Register start_address) {
@@ -1542,20 +1524,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ jmp(ecx);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ push(eax);
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ pop(eax);
- // Tear down internal frame.
- }
-
- __ Ret(); // Return to ContinueToBuiltin stub still on stack.
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1848,7 +1816,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
__ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
@@ -1877,7 +1845,7 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
__ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
@@ -2658,7 +2626,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
+ DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
__ SmiUntag(ebx);
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 4835fb0b1b..167bc1b829 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -55,10 +55,10 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects a0 to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ Addu(a0, a0, num_extra_args + 1);
+ __ Addu(a0, a0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
// Insert extra arguments.
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ SmiTag(a0);
__ Push(a0, a1, a3);
__ SmiUntag(a0);
@@ -242,7 +242,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ lw(a1, MemOperand(sp));
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -454,11 +454,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers a2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
- IsTagged argc_is_tagged) {
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -468,12 +465,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause a2 to become negative.
__ Subu(a2, sp, a2);
// Check if the arguments will overflow the stack.
- if (argc_is_tagged == kArgcIsSmiTagged) {
- __ sll(t3, argc, kPointerSizeLog2 - kSmiTagSize);
- } else {
- DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
- __ sll(t3, argc, kPointerSizeLog2);
- }
+ __ sll(t3, argc, kPointerSizeLog2);
// Signed comparison.
__ Branch(&okay, gt, a2, Operand(t3));
@@ -511,7 +503,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Clobbers a2.
- Generate_CheckStackOverflow(masm, a3, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, a3);
// Remember new.target.
__ mov(t1, a0);
@@ -572,7 +564,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- v0 : the value to pass to the generator
// -- a1 : the JSGeneratorObject to resume
- // -- a2 : the resume mode (tagged)
// -- ra : return address
// -----------------------------------
@@ -583,9 +574,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
-
// Load suspended function and context.
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
@@ -607,13 +595,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&stack_overflow, lo, sp, Operand(at));
+
// Push receiver.
__ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(t1);
// ----------- S t a t e -------------
// -- a1 : the JSGeneratorObject to resume
- // -- a2 : the resume mode (tagged)
// -- t0 : generator function
// -- cp : generator context
// -- ra : return address
@@ -662,9 +655,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a2, t0);
+ __ Push(a1, t0);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(a1, a2);
+ __ Pop(a1);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -672,12 +665,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a2);
+ __ Push(a1);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(a1, a2);
+ __ Pop(a1);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC); // This should be unreachable.
+ }
}
static void ReplaceClosureCodeWithOptimizedCode(
@@ -783,7 +783,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
Label found_deoptimized_code;
__ lw(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
+ Code::kCodeDataContainerOffset));
+ __ lw(scratch2, FieldMemOperand(
+ scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
@@ -1471,19 +1473,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ Push(v0);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ Pop(v0);
- }
-
- __ Jump(ra); // Jump to the ContinueToBuiltin stub
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1580,7 +1569,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
+ DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
__ SmiUntag(a1);
@@ -2326,8 +2315,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_constructor);
// Dispatch based on instance type.
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ GetObjectType(a1, t1, t2);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
@@ -2549,9 +2537,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- const RegList gp_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- const RegList fp_regs = f2.bit() | f4.bit() | f6.bit() | f8.bit() |
- f10.bit() | f12.bit() | f14.bit();
+ constexpr RegList gp_regs = Register::ListOf<a0, a1, a2, a3>();
+ constexpr RegList fp_regs =
+ DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 2584444f1f..811ae637ad 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -55,10 +55,10 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects a0 to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ Daddu(a0, a0, num_extra_args + 1);
+ __ Daddu(a0, a0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
// Insert extra arguments.
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ SmiTag(a0);
__ Push(a0, a1, a3);
__ SmiUntag(a0);
@@ -243,7 +243,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ Ld(a1, MemOperand(sp));
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -456,7 +456,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- v0 : the value to pass to the generator
// -- a1 : the JSGeneratorObject to resume
- // -- a2 : the resume mode (tagged)
// -- ra : return address
// -----------------------------------
__ AssertGeneratorObject(a1);
@@ -466,9 +465,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ Sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
-
// Load suspended function and context.
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
@@ -490,13 +486,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&stack_overflow, lo, sp, Operand(at));
+
// Push receiver.
__ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(a5);
// ----------- S t a t e -------------
// -- a1 : the JSGeneratorObject to resume
- // -- a2 : the resume mode (tagged)
// -- a4 : generator function
// -- cp : generator context
// -- ra : return address
@@ -546,9 +547,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a2, a4);
+ __ Push(a1, a4);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(a1, a2);
+ __ Pop(a1);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -556,12 +557,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a2);
+ __ Push(a1);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(a1, a2);
+ __ Pop(a1);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC); // This should be unreachable.
+ }
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -570,11 +578,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers a2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
- IsTagged argc_is_tagged) {
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -584,12 +589,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause r2 to become negative.
__ dsubu(a2, sp, a2);
// Check if the arguments will overflow the stack.
- if (argc_is_tagged == kArgcIsSmiTagged) {
- __ SmiScale(a7, v0, kPointerSizeLog2);
- } else {
- DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
- __ dsll(a7, argc, kPointerSizeLog2);
- }
+ __ dsll(a7, argc, kPointerSizeLog2);
__ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
// Out of stack space.
@@ -626,7 +626,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Clobbers a2.
- Generate_CheckStackOverflow(masm, a3, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, a3);
// Remember new.target.
__ mov(a5, a0);
@@ -782,8 +782,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ Lw(a5, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
+ __ Ld(a5, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
@@ -1475,19 +1476,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(t0);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ push(v0);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ pop(v0);
- }
-
- __ Jump(ra); // Jump to the ContinueToBuiltin stub
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1583,10 +1571,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ Lw(a1,
- UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
+ __ Lw(a1, UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -2347,8 +2334,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_constructor);
// Dispatch based on instance type.
- __ Ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ GetObjectType(a1, t1, t2);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
@@ -2574,10 +2560,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- const RegList gp_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit() |
- a4.bit() | a5.bit() | a6.bit() | a7.bit();
- const RegList fp_regs = f2.bit() | f4.bit() | f6.bit() | f8.bit() |
- f10.bit() | f12.bit() | f14.bit();
+ constexpr RegList gp_regs =
+ Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7>();
+ constexpr RegList fp_regs =
+ DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
diff --git a/deps/v8/src/builtins/ppc/OWNERS b/deps/v8/src/builtins/ppc/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/builtins/ppc/OWNERS
+++ b/deps/v8/src/builtins/ppc/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index c242be5cf8..e0db87cc0c 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -5,7 +5,7 @@
#if V8_TARGET_ARCH_PPC
#include "src/assembler-inl.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
@@ -54,10 +54,11 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects r3 to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ addi(r3, r3, Operand(num_extra_args + 1));
+ __ addi(r3, r3,
+ Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
// Insert extra arguments.
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ SmiTag(r3);
__ Push(r3, r4, r6);
__ SmiUntag(r3);
@@ -467,7 +468,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the value to pass to the generator
// -- r4 : the JSGeneratorObject to resume
- // -- r5 : the resume mode (tagged)
// -- lr : return address
// -----------------------------------
__ AssertGeneratorObject(r4);
@@ -478,9 +478,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
-
// Load suspended function and context.
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
__ LoadP(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
@@ -507,13 +504,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ blt(&stack_overflow);
+
// Push receiver.
__ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ Push(ip);
// ----------- S t a t e -------------
// -- r4 : the JSGeneratorObject to resume
- // -- r5 : the resume mode (tagged)
// -- r7 : generator function
// -- cp : generator context
// -- lr : return address
@@ -561,9 +563,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r5, r7);
+ __ Push(r4, r7);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(r4, r5);
+ __ Pop(r4);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -571,12 +573,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_suspended_generator);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r5);
+ __ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(r4, r5);
+ __ Pop(r4);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0); // This should be unreachable.
+ }
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -585,11 +594,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers r5; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
- IsTagged argc_is_tagged) {
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -599,12 +605,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause r5 to become negative.
__ sub(r5, sp, r5);
// Check if the arguments will overflow the stack.
- if (argc_is_tagged == kArgcIsSmiTagged) {
- __ SmiToPtrArrayOffset(r0, argc);
- } else {
- DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
- __ ShiftLeftImm(r0, argc, Operand(kPointerSizeLog2));
- }
+ __ ShiftLeftImm(r0, argc, Operand(kPointerSizeLog2));
__ cmp(r5, r0);
__ bgt(&okay); // Signed comparison.
@@ -642,7 +643,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Clobbers r5.
- Generate_CheckStackOverflow(masm, r6, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, r6);
// Copy arguments to the stack in a loop.
// r4: function
@@ -803,9 +804,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
+ __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
__ LoadWordArith(
scratch2,
- FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+ FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
__ bne(&found_deoptimized_code, cr0);
@@ -1440,7 +1443,8 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
__ addi(target_builtin, target_builtin,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target_builtin);
+ __ mr(ip, target_builtin);
+ __ Jump(ip);
}
__ bind(&deserialize_in_runtime);
@@ -1512,19 +1516,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ push(r3);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ pop(r3);
- }
-
- __ blr(); // Jump to ContinueToBuiltin stub
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1634,9 +1625,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ LoadP(r4, FieldMemOperand(
- r4, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ LoadP(r4,
+ FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
__ SmiUntag(r4);
// Compute the target address = code start + osr_offset
@@ -2579,10 +2570,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- const RegList gp_regs = r3.bit() | r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit();
- const RegList fp_regs = d1.bit() | d2.bit() | d3.bit() | d4.bit() |
- d5.bit() | d6.bit() | d7.bit() | d8.bit();
+ constexpr RegList gp_regs =
+ Register::ListOf<r3, r4, r5, r6, r7, r8, r9, r10>();
+ constexpr RegList fp_regs =
+ DoubleRegister::ListOf<d1, d2, d3, d4, d5, d6, d7, d8>();
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
diff --git a/deps/v8/src/builtins/s390/OWNERS b/deps/v8/src/builtins/s390/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/builtins/s390/OWNERS
+++ b/deps/v8/src/builtins/s390/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index aa9e62f217..42c478bd42 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -5,7 +5,7 @@
#if V8_TARGET_ARCH_S390
#include "src/assembler-inl.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
@@ -54,10 +54,11 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects r2 to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ AddP(r2, r2, Operand(num_extra_args + 1));
+ __ AddP(r2, r2,
+ Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
// Insert extra arguments.
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ SmiTag(r2);
__ Push(r2, r3, r5);
__ SmiUntag(r2);
@@ -458,7 +459,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the value to pass to the generator
// -- r3 : the JSGeneratorObject to resume
- // -- r4 : the resume mode (tagged)
// -- lr : return address
// -----------------------------------
__ AssertGeneratorObject(r3);
@@ -469,9 +469,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
-
// Load suspended function and context.
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
__ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
@@ -497,13 +494,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ blt(&stack_overflow);
+
// Push receiver.
__ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(ip);
// ----------- S t a t e -------------
// -- r3 : the JSGeneratorObject to resume
- // -- r4 : the resume mode (tagged)
// -- r6 : generator function
// -- cp : generator context
// -- lr : return address
@@ -556,9 +558,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r4, r6);
+ __ Push(r3, r6);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(r3, r4);
+ __ Pop(r3);
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -566,12 +568,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_suspended_generator);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r4);
+ __ Push(r3);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(r3, r4);
+ __ Pop(r3);
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0); // This should be unreachable.
+ }
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -580,11 +589,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers r4; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
- IsTagged argc_is_tagged) {
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -594,12 +600,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause r4 to become negative.
__ SubP(r4, sp, r4);
// Check if the arguments will overflow the stack.
- if (argc_is_tagged == kArgcIsSmiTagged) {
- __ SmiToPtrArrayOffset(r0, argc);
- } else {
- DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
- __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
- }
+ __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
__ CmpP(r4, r0);
__ bgt(&okay); // Signed comparison.
@@ -638,7 +639,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Clobbers r4.
- Generate_CheckStackOverflow(masm, r5, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, r5);
// Copy arguments to the stack in a loop from argv to sp.
// The arguments are actually placed in reverse order on sp
@@ -806,8 +807,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ LoadW(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
+ __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ LoadW(
+ scratch2,
+ FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
__ bne(&found_deoptimized_code);
@@ -1436,7 +1440,8 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
__ AddP(target_builtin, target_builtin,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target_builtin);
+ __ LoadRR(ip, target_builtin);
+ __ Jump(ip);
}
__ bind(&deserialize_in_runtime);
@@ -1508,19 +1513,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ push(r2);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ pop(r2);
- }
-
- __ Ret(); // Jump to ContinueToBuiltin stub
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1621,9 +1613,8 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ LoadP(
- r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ LoadP(r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
__ SmiUntag(r3);
// Compute the target address = code_obj + header_size + osr_offset
@@ -2577,12 +2568,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- const RegList gp_regs =
- r2.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit();
+ constexpr RegList gp_regs = Register::ListOf<r2, r3, r4, r5, r6>();
#if V8_TARGET_ARCH_S390X
- const RegList fp_regs = d0.bit() | d2.bit() | d4.bit() | d6.bit();
+ constexpr RegList fp_regs = DoubleRegister::ListOf<d0, d2, d4, d6>();
#else
- const RegList fp_regs = d0.bit() | d2.bit();
+ constexpr RegList fp_regs = DoubleRegister::ListOf<d0, d2>();
#endif
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index b21e3f5b99..b9073e1f13 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -42,7 +42,7 @@ void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
-Handle<Code> BuildPlaceholder(Isolate* isolate) {
+Handle<Code> BuildPlaceholder(Isolate* isolate, int32_t builtin_index) {
HandleScope scope(isolate);
const size_t buffer_size = 1 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
@@ -54,12 +54,12 @@ Handle<Code> BuildPlaceholder(Isolate* isolate) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::BUILTIN, masm.CodeObject(), builtin_index);
return scope.CloseAndEscape(code);
}
-Code* BuildWithMacroAssembler(Isolate* isolate,
+Code* BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
MacroAssemblerGenerator generator,
const char* s_name) {
HandleScope scope(isolate);
@@ -73,13 +73,14 @@ Code* BuildWithMacroAssembler(Isolate* isolate,
generator(&masm);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::BUILTIN, masm.CodeObject(), builtin_index);
PostBuildProfileAndTracing(isolate, *code, s_name);
return *code;
}
-Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
+Code* BuildAdaptor(Isolate* isolate, int32_t builtin_index,
+ Address builtin_address,
Builtins::ExitFrameType exit_frame_type, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
@@ -92,14 +93,14 @@ Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::BUILTIN, masm.CodeObject(), builtin_index);
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
// Builder for builtins implemented in TurboFan with JS linkage.
-Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
+Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
CodeAssemblerGenerator generator, int argc,
const char* name) {
HandleScope scope(isolate);
@@ -110,7 +111,7 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
- Code::BUILTIN, name);
+ Code::BUILTIN, name, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -118,7 +119,7 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
}
// Builder for builtins implemented in TurboFan with CallStub linkage.
-Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
+Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
CodeAssemblerGenerator generator,
CallDescriptors::Key interface_descriptor,
const char* name, int result_size) {
@@ -133,7 +134,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(isolate, &zone, descriptor, Code::BUILTIN,
- name, result_size);
+ name, result_size, 0, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -143,8 +144,8 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, int index,
Code* code) {
+ DCHECK_EQ(index, code->builtin_index());
builtins->builtins_[index] = code;
- code->set_builtin_index(index);
}
void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
@@ -153,10 +154,9 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
// support circular references between builtins.
Builtins* builtins = isolate->builtins();
HandleScope scope(isolate);
- Handle<Code> placeholder = BuildPlaceholder(isolate);
- AddBuiltin(builtins, 0, *placeholder);
- for (int i = 1; i < Builtins::builtin_count; i++) {
- AddBuiltin(builtins, i, *isolate->factory()->CopyCode(placeholder));
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Handle<Code> placeholder = BuildPlaceholder(isolate, i);
+ AddBuiltin(builtins, i, *placeholder);
}
}
@@ -164,6 +164,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
// Replace references from all code objects to placeholders.
Builtins* builtins = isolate->builtins();
DisallowHeapAllocation no_gc;
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
static const int kRelocMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
HeapIterator iterator(isolate->heap());
@@ -211,38 +212,40 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
int index = 0;
Code* code;
-#define BUILD_CPP(Name) \
- code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), \
- Builtins::BUILTIN_EXIT, #Name); \
+#define BUILD_CPP(Name) \
+ code = BuildAdaptor(isolate, index, FUNCTION_ADDR(Builtin_##Name), \
+ Builtins::BUILTIN_EXIT, #Name); \
AddBuiltin(builtins, index++, code);
-#define BUILD_API(Name) \
- code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), Builtins::EXIT, \
- #Name); \
+#define BUILD_API(Name) \
+ code = BuildAdaptor(isolate, index, FUNCTION_ADDR(Builtin_##Name), \
+ Builtins::EXIT, #Name); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFJ(Name, Argc, ...) \
- code = BuildWithCodeStubAssemblerJS(isolate, &Builtins::Generate_##Name, \
- Argc, #Name); \
+#define BUILD_TFJ(Name, Argc, ...) \
+ code = BuildWithCodeStubAssemblerJS( \
+ isolate, index, &Builtins::Generate_##Name, Argc, #Name); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFC(Name, InterfaceDescriptor, result_size) \
- { InterfaceDescriptor##Descriptor descriptor(isolate); } \
- code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
- CallDescriptors::InterfaceDescriptor, \
- #Name, result_size); \
+#define BUILD_TFC(Name, InterfaceDescriptor, result_size) \
+ { InterfaceDescriptor##Descriptor descriptor(isolate); } \
+ code = BuildWithCodeStubAssemblerCS( \
+ isolate, index, &Builtins::Generate_##Name, \
+ CallDescriptors::InterfaceDescriptor, #Name, result_size); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFS(Name, ...) \
- /* Return size for generic TF builtins (stub linkage) is always 1. */ \
- code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
- CallDescriptors::Name, #Name, 1); \
+#define BUILD_TFS(Name, ...) \
+ /* Return size for generic TF builtins (stub linkage) is always 1. */ \
+ code = \
+ BuildWithCodeStubAssemblerCS(isolate, index, &Builtins::Generate_##Name, \
+ CallDescriptors::Name, #Name, 1); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFH(Name, InterfaceDescriptor) \
- { InterfaceDescriptor##Descriptor descriptor(isolate); } \
- /* Return size for IC builtins/handlers is always 1. */ \
- code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
- CallDescriptors::InterfaceDescriptor, \
- #Name, 1); \
+#define BUILD_TFH(Name, InterfaceDescriptor) \
+ { InterfaceDescriptor##Descriptor descriptor(isolate); } \
+ /* Return size for IC builtins/handlers is always 1. */ \
+ code = BuildWithCodeStubAssemblerCS( \
+ isolate, index, &Builtins::Generate_##Name, \
+ CallDescriptors::InterfaceDescriptor, #Name, 1); \
AddBuiltin(builtins, index++, code);
-#define BUILD_ASM(Name) \
- code = BuildWithMacroAssembler(isolate, Builtins::Generate_##Name, #Name); \
+#define BUILD_ASM(Name) \
+ code = BuildWithMacroAssembler(isolate, index, Builtins::Generate_##Name, \
+ #Name); \
AddBuiltin(builtins, index++, code);
BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
@@ -273,6 +276,10 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(SET_EXCEPTION_CAUGHT_PREDICTION)
#undef SET_EXCEPTION_CAUGHT_PREDICTION
+ // TODO(mstarzinger,6792): This code-space modification section should be
+ // moved into {Heap} eventually and a safe wrapper be provided.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+
#define SET_CODE_NON_TAGGED_PARAMS(Name) \
Code::cast(builtins->builtins_[Builtins::k##Name]) \
->set_has_tagged_params(false);
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 81c92681d5..f2820fa410 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -5,7 +5,6 @@
#if V8_TARGET_ARCH_X64
#include "src/code-factory.h"
-#include "src/codegen.h"
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
@@ -59,13 +58,13 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// CEntryStub expects rax to contain the number of arguments including the
// receiver and the extra arguments.
- const int num_extra_args = 3;
- __ addp(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
// Unconditionally insert argc, target and new target as extra arguments. They
// will be used by stack frame iterators when constructing the stack trace.
__ PopReturnAddressTo(kScratchRegister);
__ Integer32ToSmi(rax, rax);
+ __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ Push(rax);
__ SmiToInteger32(rax, rax);
__ Push(rdi);
@@ -401,39 +400,23 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-enum IsTagged { kRaxIsSmiTagged, kRaxIsUntaggedInt };
-
-// Clobbers rcx, r11, kScratchRegister; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- IsTagged rax_is_tagged) {
- // rax : the number of items to be pushed to the stack
- //
+static void Generate_StackOverflowCheck(
+ MacroAssembler* masm, Register num_args, Register scratch,
+ Label* stack_overflow,
+ Label::Distance stack_overflow_distance = Label::kFar) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- Label okay;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, kScratchRegister);
- // Make r11 the space we need for the array when it is unrolled onto the
- // stack.
- if (rax_is_tagged == kRaxIsSmiTagged) {
- __ PositiveSmiTimesPowerOfTwoToInteger64(r11, rax, kPointerSizeLog2);
- } else {
- DCHECK(rax_is_tagged == kRaxIsUntaggedInt);
- __ movp(r11, rax);
- __ shlq(r11, Immediate(kPointerSizeLog2));
- }
+ __ movp(scratch, rsp);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ subp(scratch, kScratchRegister);
+ __ sarp(scratch, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(rcx, r11);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow);
-
- __ bind(&okay);
+ __ cmpp(scratch, num_args);
+ // Signed comparison.
+ __ j(less_equal, stack_overflow, stack_overflow_distance);
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
@@ -533,8 +516,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rdx : new.target
// Check if we have enough stack space to push all arguments.
- // Expects argument count in rax. Clobbers rcx, r11.
- Generate_CheckStackOverflow(masm, kRaxIsUntaggedInt);
+ // Argument count in rax. Clobbers rcx.
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+ __ jmp(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+
+ __ bind(&enough_stack_space);
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
@@ -576,22 +568,18 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the value to pass to the generator
- // -- rbx : the JSGeneratorObject to resume
- // -- rdx : the resume mode (tagged)
+ // -- rdx : the JSGeneratorObject to resume
// -- rsp[0] : return address
// -----------------------------------
- __ AssertGeneratorObject(rbx);
+ __ AssertGeneratorObject(rdx);
// Store input value into generator object.
- __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
- __ RecordWriteField(rbx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
+ __ movp(FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
+ __ RecordWriteField(rdx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
kDontSaveFPRegs);
- // Store resume mode into generator object.
- __ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
-
// Load suspended function and context.
- __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Flood function if we are stepping.
@@ -608,20 +596,25 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference::debug_suspended_generator_address(masm->isolate());
Operand debug_suspended_generator_operand =
masm->ExternalOperand(debug_suspended_generator);
- __ cmpp(rbx, debug_suspended_generator_operand);
+ __ cmpp(rdx, debug_suspended_generator_operand);
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex);
+ __ j(below, &stack_overflow);
+
// Pop return address.
__ PopReturnAddressTo(rax);
// Push receiver.
- __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset));
// ----------- S t a t e -------------
// -- rax : return address
- // -- rbx : the JSGeneratorObject to resume
- // -- rdx : the resume mode (tagged)
+ // -- rdx : the JSGeneratorObject to resume
// -- rdi : generator function
// -- rsi : generator context
// -- rsp[0] : generator receiver
@@ -661,7 +654,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
- __ movp(rdx, rbx);
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
@@ -670,27 +662,30 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rbx);
__ Push(rdx);
__ Push(rdi);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
- __ Pop(rbx);
- __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
__ bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rbx);
__ Push(rdx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
- __ Pop(rbx);
- __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ int3(); // This should be unreachable.
+ }
}
// TODO(juliana): if we remove the code below then we don't need all
@@ -803,8 +798,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
+ __ movp(scratch2,
+ FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ testl(
- FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
@@ -1042,25 +1039,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&bytecode_array_loaded);
}
-static void Generate_StackOverflowCheck(
- MacroAssembler* masm, Register num_args, Register scratch,
- Label* stack_overflow,
- Label::Distance stack_overflow_distance = Label::kFar) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(scratch, rsp);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ subp(scratch, kScratchRegister);
- __ sarp(scratch, Immediate(kPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpp(scratch, num_args);
- // Signed comparison.
- __ j(less_equal, stack_overflow, stack_overflow_distance);
-}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args,
Register start_address,
@@ -1522,20 +1500,6 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ jmp(rcx);
}
-void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve possible return result from lazy deopt.
- __ pushq(rax);
- __ CallRuntime(Runtime::kNotifyStubFailure, false);
- __ popq(rax);
- // Tear down internal frame.
- }
-
- __ ret(0); // Return to ContinueToBuiltin stub still on stack.
-}
-
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1839,7 +1803,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
__ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
@@ -1868,7 +1832,7 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
__ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
@@ -2620,10 +2584,10 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
- __ SmiToInteger32(
- rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
+ __ SmiToInteger32(rbx,
+ Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
__ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index 52fff7e145..b160c11bed 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -146,7 +146,7 @@ void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
DiyFp* power,
int* found_exponent) {
- DCHECK(kMinDecimalExponent <= requested_exponent);
+ DCHECK_LE(kMinDecimalExponent, requested_exponent);
DCHECK(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
int index =
(requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 122d907881..4199ec3bbe 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -64,8 +64,6 @@ class CodeEventListener {
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source,
int line, int column) = 0;
- virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- int args_count) = 0;
virtual void CallbackEvent(Name* name, Address entry_point) = 0;
virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
@@ -116,10 +114,6 @@ class CodeEventDispatcher {
CODE_EVENT_DISPATCH(
CodeCreateEvent(tag, code, shared, source, line, column));
}
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- int args_count) {
- CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, args_count));
- }
void CallbackEvent(Name* name, Address entry_point) {
CODE_EVENT_DISPATCH(CallbackEvent(name, entry_point));
}
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index dfb6bda9e1..245f2334f6 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -46,6 +46,12 @@ Callable CodeFactory::ApiGetter(Isolate* isolate) {
}
// static
+Callable CodeFactory::CallApiCallback(Isolate* isolate, int argc) {
+ CallApiCallbackStub stub(isolate, argc);
+ return make_callable(stub);
+}
+
+// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
return Callable(
typeof_mode == NOT_INSIDE_TYPEOF
@@ -94,29 +100,29 @@ Callable CodeFactory::StoreGlobalICInOptimizedCode(Isolate* isolate,
}
// static
-Callable CodeFactory::BinaryOperation(Isolate* isolate, Token::Value op) {
+Callable CodeFactory::BinaryOperation(Isolate* isolate, Operation op) {
switch (op) {
- case Token::SAR:
+ case Operation::kShiftRight:
return Builtins::CallableFor(isolate, Builtins::kShiftRight);
- case Token::SHL:
+ case Operation::kShiftLeft:
return Builtins::CallableFor(isolate, Builtins::kShiftLeft);
- case Token::SHR:
+ case Operation::kShiftRightLogical:
return Builtins::CallableFor(isolate, Builtins::kShiftRightLogical);
- case Token::ADD:
+ case Operation::kAdd:
return Builtins::CallableFor(isolate, Builtins::kAdd);
- case Token::SUB:
+ case Operation::kSubtract:
return Builtins::CallableFor(isolate, Builtins::kSubtract);
- case Token::MUL:
+ case Operation::kMultiply:
return Builtins::CallableFor(isolate, Builtins::kMultiply);
- case Token::DIV:
+ case Operation::kDivide:
return Builtins::CallableFor(isolate, Builtins::kDivide);
- case Token::MOD:
+ case Operation::kModulus:
return Builtins::CallableFor(isolate, Builtins::kModulus);
- case Token::BIT_OR:
+ case Operation::kBitwiseOr:
return Builtins::CallableFor(isolate, Builtins::kBitwiseOr);
- case Token::BIT_AND:
+ case Operation::kBitwiseAnd:
return Builtins::CallableFor(isolate, Builtins::kBitwiseAnd);
- case Token::BIT_XOR:
+ case Operation::kBitwiseXor:
return Builtins::CallableFor(isolate, Builtins::kBitwiseXor);
default:
break;
@@ -145,12 +151,6 @@ Callable CodeFactory::OrdinaryToPrimitive(Isolate* isolate,
}
// static
-Callable CodeFactory::NumberToString(Isolate* isolate) {
- NumberToStringStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
@@ -158,33 +158,6 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
}
// static
-Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
- switch (token) {
- case Token::EQ:
- case Token::EQ_STRICT:
- return Builtins::CallableFor(isolate, Builtins::kStringEqual);
- case Token::LT:
- return Builtins::CallableFor(isolate, Builtins::kStringLessThan);
- case Token::GT:
- return Builtins::CallableFor(isolate, Builtins::kStringGreaterThan);
- case Token::LTE:
- return Builtins::CallableFor(isolate, Builtins::kStringLessThanOrEqual);
- case Token::GTE:
- return Builtins::CallableFor(isolate,
- Builtins::kStringGreaterThanOrEqual);
- default:
- break;
- }
- UNREACHABLE();
-}
-
-// static
-Callable CodeFactory::SubString(Isolate* isolate) {
- SubStringStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-}
-
-// static
Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ResumeGeneratorTrampoline),
ResumeGeneratorDescriptor(isolate));
@@ -341,6 +314,18 @@ Callable CodeFactory::ArrayShift(Isolate* isolate) {
}
// static
+Callable CodeFactory::ExtractFastJSArray(Isolate* isolate) {
+ return Callable(BUILTIN_CODE(isolate, ExtractFastJSArray),
+ ExtractFastJSArrayDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::CloneFastJSArray(Isolate* isolate) {
+ return Callable(BUILTIN_CODE(isolate, CloneFastJSArray),
+ CloneFastJSArrayDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::ArrayPush(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ArrayPush), BuiltinDescriptor(isolate));
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 1719cb549f..d85ca5f073 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -8,10 +8,9 @@
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/globals.h"
#include "src/interface-descriptors.h"
-#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -40,9 +39,10 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable FrameDropperTrampoline(Isolate* isolate);
static Callable HandleDebuggerStatement(Isolate* isolate);
- static Callable BinaryOperation(Isolate* isolate, Token::Value op);
+ static Callable BinaryOperation(Isolate* isolate, Operation op);
static Callable ApiGetter(Isolate* isolate);
+ static Callable CallApiCallback(Isolate* isolate, int argc);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
@@ -52,13 +52,10 @@ class V8_EXPORT_PRIVATE CodeFactory final {
Isolate* isolate, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
static Callable OrdinaryToPrimitive(Isolate* isolate,
OrdinaryToPrimitiveHint hint);
- static Callable NumberToString(Isolate* isolate);
static Callable StringAdd(Isolate* isolate,
StringAddFlags flags = STRING_ADD_CHECK_NONE,
PretenureFlag pretenure_flag = NOT_TENURED);
- static Callable StringCompare(Isolate* isolate, Token::Value token);
- static Callable SubString(Isolate* isolate);
static Callable FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type);
@@ -92,6 +89,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArrayPop(Isolate* isolate);
static Callable ArrayPush(Isolate* isolate);
static Callable ArrayShift(Isolate* isolate);
+ static Callable ExtractFastJSArray(Isolate* isolate);
+ static Callable CloneFastJSArray(Isolate* isolate);
static Callable FunctionPrototypeBind(Isolate* isolate);
static Callable TransitionElementsKind(Isolate* isolate, ElementsKind from,
ElementsKind to, bool is_jsarray);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 0c64d011d4..e36a5cc796 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -10,8 +10,8 @@ namespace v8 {
namespace internal {
using compiler::Node;
-template <class A>
-using TNode = compiler::TNode<A>;
+template <class T>
+using TNode = compiler::TNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state) {
@@ -217,27 +217,39 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test) {
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
+ ParameterMode mode) {
int32_t constant_test;
Smi* smi_test;
- if ((ToInt32Constant(test, constant_test) && constant_test == 0) ||
- (ToSmiConstant(test, smi_test) && smi_test->value() == 0)) {
- return true;
+ if (mode == INTPTR_PARAMETERS) {
+ if (ToInt32Constant(test, constant_test) && constant_test == 0) {
+ return true;
+ }
+ } else {
+ DCHECK_EQ(mode, SMI_PARAMETERS);
+ if (ToSmiConstant(test, smi_test) && smi_test->value() == 0) {
+ return true;
+ }
}
return false;
}
bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
- int* value) {
+ int* value,
+ ParameterMode mode) {
int32_t int32_constant;
- if (ToInt32Constant(maybe_constant, int32_constant)) {
- *value = int32_constant;
- return true;
- }
- Smi* smi_constant;
- if (ToSmiConstant(maybe_constant, smi_constant)) {
- *value = Smi::ToInt(smi_constant);
- return true;
+ if (mode == INTPTR_PARAMETERS) {
+ if (ToInt32Constant(maybe_constant, int32_constant)) {
+ *value = int32_constant;
+ return true;
+ }
+ } else {
+ DCHECK_EQ(mode, SMI_PARAMETERS);
+ Smi* smi_constant;
+ if (ToSmiConstant(maybe_constant, smi_constant)) {
+ *value = Smi::ToInt(smi_constant);
+ return true;
+ }
}
return false;
}
@@ -534,8 +546,8 @@ TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
- GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
- GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ GotoIfNumericGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumericGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
@@ -553,8 +565,8 @@ TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
- GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
- GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ GotoIfNumericGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumericGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
@@ -741,9 +753,9 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
TNode<Int32T> CodeStubAssembler::TruncateWordToWord32(
SloppyTNode<IntPtrT> value) {
if (Is64()) {
- return TruncateInt64ToInt32(UncheckedCast<Int64T>(value));
+ return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
}
- return UncheckedCast<Int32T>(value);
+ return ReinterpretCast<Int32T>(value);
}
TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) {
@@ -791,20 +803,37 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
{
Node* map = var_map.value();
Node* prototype = LoadMapPrototype(map);
- GotoIf(WordEqual(prototype, NullConstant()), definitely_no_elements);
+ GotoIf(IsNull(prototype), definitely_no_elements);
Node* prototype_map = LoadMap(prototype);
+ Node* prototype_instance_type = LoadMapInstanceType(prototype_map);
+
// Pessimistically assume elements if a Proxy, Special API Object,
// or JSValue wrapper is found on the prototype chain. After this
// instance type check, it's not necessary to check for interceptors or
// access checks.
- GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(prototype_map),
+ Label if_custom(this, Label::kDeferred), if_notcustom(this);
+ Branch(Int32LessThanOrEqual(prototype_instance_type,
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
- possibly_elements);
- Node* prototype_elements = LoadElements(prototype);
- var_map.Bind(prototype_map);
- GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body);
- Branch(WordEqual(prototype_elements, empty_slow_element_dictionary),
- &loop_body, possibly_elements);
+ &if_custom, &if_notcustom);
+
+ BIND(&if_custom);
+ {
+ // For string JSValue wrappers we still support the checks as long
+ // as they wrap the empty string.
+ GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE),
+ possibly_elements);
+ Node* prototype_value = LoadJSValueValue(prototype);
+ Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
+ }
+
+ BIND(&if_notcustom);
+ {
+ Node* prototype_elements = LoadElements(prototype);
+ var_map.Bind(prototype_map);
+ GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body);
+ Branch(WordEqual(prototype_elements, empty_slow_element_dictionary),
+ &loop_body, possibly_elements);
+ }
}
}
@@ -812,22 +841,39 @@ void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
- Int32Constant(FIRST_JS_RECEIVER_TYPE)),
- if_true, if_false);
+ Branch(IsJSReceiver(object), if_true, if_false);
}
void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
- Int32Constant(FIRST_JS_OBJECT_TYPE)),
- if_true, if_false);
+ Branch(IsJSObject(object), if_true, if_false);
+}
+
+TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
+ SloppyTNode<Context> context) {
+ Label if_true(this), if_false(this, Label::kDeferred), exit(this);
+ BranchIfFastJSArray(object, context, &if_true, &if_false);
+ TVARIABLE(BoolT, var_result);
+ BIND(&if_true);
+ {
+ var_result = ReinterpretCast<BoolT>(Int32Constant(1));
+ Goto(&exit);
+ }
+ BIND(&if_false);
+ {
+ var_result = ReinterpretCast<BoolT>(Int32Constant(0));
+ Goto(&exit);
+ }
+ BIND(&exit);
+ return var_result;
}
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
Label* if_true, Label* if_false) {
+ GotoIfForceSlowPath(if_false);
+
// Bailout if receiver is a Smi.
GotoIf(TaggedIsSmi(object), if_false);
@@ -841,7 +887,7 @@ void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
// Check prototype chain if receiver does not have packed elements
GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false);
- Branch(IsArrayProtectorCellInvalid(), if_false, if_true);
+ Branch(IsNoElementsProtectorCellInvalid(), if_false, if_true);
}
void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
@@ -851,6 +897,16 @@ void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
BranchIfFastJSArray(object, context, if_true, if_false);
}
+void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
+#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+ Node* const force_slow_path_addr =
+ ExternalConstant(ExternalReference::force_slow_path(isolate()));
+ Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr);
+
+ GotoIf(force_slow, if_true);
+#endif
+}
+
Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
@@ -954,7 +1010,7 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
- DCHECK((flags & kDoubleAlignment) == 0);
+ DCHECK_EQ(flags & kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
@@ -1038,7 +1094,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred),
if_bigint(this, Label::kDeferred);
// Rule out false {value}.
- GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
+ GotoIf(WordEqual(value, FalseConstant()), if_false);
// Check if {value} is a Smi or a HeapObject.
Branch(TaggedIsSmi(value), &if_smi, &if_notsmi);
@@ -1082,7 +1138,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Node* result =
CallRuntime(Runtime::kBigIntToBoolean, NoContextConstant(), value);
CSA_ASSERT(this, IsBoolean(result));
- Branch(WordEqual(result, BooleanConstant(true)), if_true, if_false);
+ Branch(WordEqual(result, TrueConstant()), if_true, if_false);
}
}
}
@@ -1204,7 +1260,7 @@ TNode<Int32T> CodeStubAssembler::LoadInstanceType(
Node* CodeStubAssembler::HasInstanceType(Node* object,
InstanceType instance_type) {
- return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
+ return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
@@ -1221,63 +1277,6 @@ Node* CodeStubAssembler::TaggedDoesntHaveInstanceType(Node* any_tagged,
MachineRepresentation::kBit);
}
-TNode<Int32T> CodeStubAssembler::LoadHashForJSObject(
- SloppyTNode<JSObject> jsobject, SloppyTNode<Int32T> instance_type) {
- VARIABLE(var_hash, MachineRepresentation::kWord32);
- Label if_global_proxy(this, Label::kDeferred);
- GotoIf(IsJSGlobalProxyInstanceType(instance_type), &if_global_proxy);
-
- Node* properties_or_hash =
- LoadObjectField(jsobject, JSObject::kPropertiesOrHashOffset);
-
- Label if_smi(this);
- GotoIf(TaggedIsSmi(properties_or_hash), &if_smi);
-
- Node* type = LoadInstanceType(properties_or_hash);
- Label if_property_array(this), if_property_dictionary(this), done(this);
- GotoIf(Word32Equal(type, Int32Constant(PROPERTY_ARRAY_TYPE)),
- &if_property_array);
- GotoIf(Word32Equal(type, Int32Constant(HASH_TABLE_TYPE)),
- &if_property_dictionary);
-
- var_hash.Bind(Int32Constant(PropertyArray::kNoHashSentinel));
- Goto(&done);
-
- BIND(&if_smi);
- {
- var_hash.Bind(SmiToWord32(properties_or_hash));
- Goto(&done);
- }
-
- BIND(&if_property_array);
- {
- Node* length_and_hash_int32 = LoadAndUntagToWord32ObjectField(
- properties_or_hash, PropertyArray::kLengthAndHashOffset);
- var_hash.Bind(
- DecodeWord32<PropertyArray::HashField>(length_and_hash_int32));
- Goto(&done);
- }
-
- BIND(&if_property_dictionary);
- {
- var_hash.Bind(SmiToWord32(LoadFixedArrayElement(
- properties_or_hash, NameDictionary::kObjectHashIndex)));
- Goto(&done);
- }
-
- BIND(&if_global_proxy);
- {
- Node* hash = LoadObjectField(jsobject, JSGlobalProxy::kHashOffset);
- var_hash.Bind(SelectConstant(TaggedIsSmi(hash), SmiToWord32(hash),
- Int32Constant(PropertyArray::kNoHashSentinel),
- MachineRepresentation::kWord32));
- Goto(&done);
- }
-
- BIND(&done);
- return UncheckedCast<Int32T>(var_hash.value());
-}
-
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, Word32Not(IsDictionaryMap(LoadMap(object))));
@@ -1343,7 +1342,7 @@ TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
TNode<Int32T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
return UncheckedCast<Int32T>(
- LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8()));
+ LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16()));
}
TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
@@ -1375,22 +1374,20 @@ TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
return CAST(prototype_info);
}
-TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSize(SloppyTNode<Map> map) {
+TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
+ SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- return ChangeInt32ToIntPtr(
- LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+ return ChangeInt32ToIntPtr(LoadObjectField(
+ map, Map::kInstanceSizeInWordsOffset, MachineType::Uint8()));
}
-TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectProperties(
+TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- // See Map::GetInObjectProperties() for details.
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- CSA_ASSERT(this,
- Int32GreaterThanOrEqual(LoadMapInstanceType(map),
- Int32Constant(FIRST_JS_OBJECT_TYPE)));
+ // See Map::GetInObjectPropertiesStartInWords() for details.
+ CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField(
- map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
@@ -1398,11 +1395,9 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
- STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- CSA_ASSERT(this, Int32LessThanOrEqual(LoadMapInstanceType(map),
- Int32Constant(LAST_PRIMITIVE_TYPE)));
+ CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField(
- map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
@@ -1417,7 +1412,7 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
{
GotoIf(TaggedIsSmi(result), &done);
Node* is_map_type =
- Word32Equal(LoadInstanceType(CAST(result)), Int32Constant(MAP_TYPE));
+ InstanceTypeEqual(LoadInstanceType(CAST(result)), MAP_TYPE);
GotoIfNot(is_map_type, &done);
result =
LoadObjectField(CAST(result), Map::kConstructorOrBackPointerOffset);
@@ -1433,23 +1428,95 @@ Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
}
-Node* CodeStubAssembler::LoadNameHashField(Node* name) {
+Node* CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
+ Node* object = LoadObjectField(map, Map::kConstructorOrBackPointerOffset);
+ return Select(IsMap(object), [=] { return object; },
+ [=] { return UndefinedConstant(); },
+ MachineRepresentation::kTagged);
+}
+
+TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
+ SloppyTNode<Object> receiver, Label* if_no_hash) {
+ TVARIABLE(IntPtrT, var_hash);
+ Label done(this), if_smi(this), if_property_array(this),
+ if_property_dictionary(this), if_fixed_array(this);
+
+ TNode<Object> properties_or_hash =
+ LoadObjectField(TNode<HeapObject>::UncheckedCast(receiver),
+ JSReceiver::kPropertiesOrHashOffset);
+ GotoIf(TaggedIsSmi(properties_or_hash), &if_smi);
+
+ TNode<HeapObject> properties =
+ TNode<HeapObject>::UncheckedCast(properties_or_hash);
+ TNode<Int32T> properties_instance_type = LoadInstanceType(properties);
+
+ GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
+ &if_property_array);
+ Branch(InstanceTypeEqual(properties_instance_type, HASH_TABLE_TYPE),
+ &if_property_dictionary, &if_fixed_array);
+
+ BIND(&if_fixed_array);
+ {
+ var_hash = IntPtrConstant(PropertyArray::kNoHashSentinel);
+ Goto(&done);
+ }
+
+ BIND(&if_smi);
+ {
+ var_hash = SmiUntag(TNode<Smi>::UncheckedCast(properties_or_hash));
+ Goto(&done);
+ }
+
+ BIND(&if_property_array);
+ {
+ TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
+ properties, PropertyArray::kLengthAndHashOffset);
+ var_hash = TNode<IntPtrT>::UncheckedCast(
+ DecodeWord<PropertyArray::HashField>(length_and_hash));
+ Goto(&done);
+ }
+
+ BIND(&if_property_dictionary);
+ {
+ var_hash = SmiUntag(
+ LoadFixedArrayElement(properties, NameDictionary::kObjectHashIndex));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ if (if_no_hash != nullptr) {
+ GotoIf(
+ IntPtrEqual(var_hash, IntPtrConstant(PropertyArray::kNoHashSentinel)),
+ if_no_hash);
+ }
+ return var_hash;
+}
+
+TNode<Uint32T> CodeStubAssembler::LoadNameHashField(SloppyTNode<Name> name) {
CSA_ASSERT(this, IsName(name));
- return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
+ return LoadObjectField<Uint32T>(name, Name::kHashFieldOffset);
}
-Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
- Node* hash_field = LoadNameHashField(name);
+TNode<Uint32T> CodeStubAssembler::LoadNameHash(SloppyTNode<Name> name,
+ Label* if_hash_not_computed) {
+ TNode<Uint32T> hash_field = LoadNameHashField(name);
if (if_hash_not_computed != nullptr) {
GotoIf(IsSetWord32(hash_field, Name::kHashNotComputedMask),
if_hash_not_computed);
}
- return Word32Shr(hash_field, Int32Constant(Name::kHashShift));
+ return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
+}
+
+TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(
+ SloppyTNode<String> object) {
+ return SmiUntag(LoadStringLengthAsSmi(object));
}
-Node* CodeStubAssembler::LoadStringLength(Node* object) {
+TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(
+ SloppyTNode<String> object) {
CSA_ASSERT(this, IsString(object));
- return LoadObjectField(object, String::kLengthOffset);
+ return CAST(LoadObjectField(object, String::kLengthOffset,
+ MachineType::TaggedPointer()));
}
Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
@@ -1573,7 +1640,7 @@ Node* CodeStubAssembler::LoadFeedbackVectorSlot(Node* object,
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, Word32Or(IsFixedArray(object), IsHashTable(object)));
+ CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
@@ -1629,63 +1696,98 @@ Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
return Load(machine_type, base, offset);
}
-Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
+TNode<Object> CodeStubAssembler::LoadContextElement(
+ SloppyTNode<Context> context, int slot_index) {
int offset = Context::SlotOffset(slot_index);
- return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
+ return UncheckedCast<Object>(
+ Load(MachineType::AnyTagged(), context, IntPtrConstant(offset)));
}
-Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
+TNode<Object> CodeStubAssembler::LoadContextElement(
+ SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
Node* offset =
IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Load(MachineType::AnyTagged(), context, offset);
+ return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
-Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
- Node* value) {
+void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
+ int slot_index,
+ SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
- return Store(context, IntPtrConstant(offset), value);
+ Store(context, IntPtrConstant(offset), value);
}
-Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
- Node* value) {
+void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
+ SloppyTNode<IntPtrT> slot_index,
+ SloppyTNode<Object> value) {
Node* offset =
IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Store(context, offset, value);
+ Store(context, offset, value);
}
-Node* CodeStubAssembler::StoreContextElementNoWriteBarrier(Node* context,
- int slot_index,
- Node* value) {
+void CodeStubAssembler::StoreContextElementNoWriteBarrier(
+ SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
- IntPtrConstant(offset), value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
+ IntPtrConstant(offset), value);
}
-Node* CodeStubAssembler::LoadNativeContext(Node* context) {
- return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
+TNode<Context> CodeStubAssembler::LoadNativeContext(
+ SloppyTNode<Context> context) {
+ return UncheckedCast<Context>(
+ LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX));
}
-Node* CodeStubAssembler::LoadJSArrayElementsMap(Node* kind,
- Node* native_context) {
+TNode<Context> CodeStubAssembler::LoadModuleContext(
+ SloppyTNode<Context> context) {
+ Node* module_map = LoadRoot(Heap::kModuleContextMapRootIndex);
+ Variable cur_context(this, MachineRepresentation::kTaggedPointer);
+ cur_context.Bind(context);
+
+ Label context_found(this);
+
+ Variable* context_search_loop_variables[1] = {&cur_context};
+ Label context_search(this, 1, context_search_loop_variables);
+
+ // Loop until cur_context->map() is module_map.
+ Goto(&context_search);
+ BIND(&context_search);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(IsNativeContext(cur_context.value())));
+ GotoIf(WordEqual(LoadMap(cur_context.value()), module_map), &context_found);
+
+ cur_context.Bind(
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
+ Goto(&context_search);
+ }
+
+ BIND(&context_found);
+ return UncheckedCast<Context>(cur_context.value());
+}
+
+TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
+ SloppyTNode<Int32T> kind, SloppyTNode<Context> native_context) {
CSA_ASSERT(this, IsFastElementsKind(kind));
CSA_ASSERT(this, IsNativeContext(native_context));
Node* offset = IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
ChangeInt32ToIntPtr(kind));
- return LoadContextElement(native_context, offset);
+ return UncheckedCast<Map>(LoadContextElement(native_context, offset));
}
-Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
- Node* native_context) {
+TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
+ ElementsKind kind, SloppyTNode<Context> native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
- return LoadContextElement(native_context, Context::ArrayMapIndex(kind));
+ return UncheckedCast<Map>(
+ LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
}
Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
Label* if_bailout) {
CSA_ASSERT(this, TaggedIsNotSmi(function));
CSA_ASSERT(this, IsJSFunction(function));
+ CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
CSA_ASSERT(this, IsClearWord32(LoadMapBitField(LoadMap(function)),
1 << Map::kHasNonInstancePrototype));
Node* proto_or_map =
@@ -1703,9 +1805,10 @@ Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
return var_result.value();
}
-Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
- return StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
- MachineRepresentation::kFloat64);
+void CodeStubAssembler::StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
+ SloppyTNode<Float64T> value) {
+ StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
+ MachineRepresentation::kFloat64);
}
Node* CodeStubAssembler::StoreObjectField(
@@ -1997,18 +2100,18 @@ Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
}
}
-Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
+TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
Heap::RootListIndex heap_map_index =
mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
: Heap::kMutableHeapNumberMapRootIndex;
StoreMapNoWriteBarrier(result, heap_map_index);
- return result;
+ return UncheckedCast<HeapNumber>(result);
}
-Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
- MutableMode mode) {
- Node* result = AllocateHeapNumber(mode);
+TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
+ SloppyTNode<Float64T> value, MutableMode mode) {
+ TNode<HeapNumber> result = AllocateHeapNumber(mode);
StoreHeapNumberValue(result, value);
return result;
}
@@ -2023,8 +2126,8 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- SmiConstant(length));
- // Initialize both used and unused parts of hash field slot at once.
+ SmiConstant(length),
+ MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
@@ -2045,21 +2148,20 @@ Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
- ParameterMode mode,
+Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context,
+ TNode<Smi> length,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(length, mode));
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
- GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero);
Node* raw_size = GetArrayAllocationSize(
- length, UINT8_ELEMENTS, mode,
+ SmiUntag(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
@@ -2072,8 +2174,7 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- ParameterToTagged(length, mode));
- // Initialize both used and unused parts of hash field slot at once.
+ length, MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
@@ -2084,8 +2185,8 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
- ParameterToTagged(length, mode));
+ Node* result =
+ CallRuntime(Runtime::kAllocateSeqOneByteString, context, length);
var_result.Bind(result);
Goto(&if_join);
}
@@ -2110,29 +2211,28 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
- SmiConstant(length));
- // Initialize both used and unused parts of hash field slot at once.
+ SmiConstant(Smi::FromInt(length)),
+ MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
return result;
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
- ParameterMode mode,
+Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context,
+ TNode<Smi> length,
AllocationFlags flags) {
CSA_SLOW_ASSERT(this, IsFixedArray(context));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(length, mode));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
- GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero);
Node* raw_size = GetArrayAllocationSize(
- length, UINT16_ELEMENTS, mode,
+ SmiUntag(length), UINT16_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
@@ -2144,10 +2244,8 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
Node* result = AllocateInNewSpace(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
- StoreObjectFieldNoWriteBarrier(
- result, SeqTwoByteString::kLengthOffset,
- mode == SMI_PARAMETERS ? length : SmiFromWord(length));
- // Initialize both used and unused parts of hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
+ length, MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
@@ -2159,8 +2257,7 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
{
// We might need to allocate in large object space, go to the runtime.
Node* result =
- CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
- mode == SMI_PARAMETERS ? length : SmiFromWord(length));
+ CallRuntime(Runtime::kAllocateSeqTwoByteString, context, length);
var_result.Bind(result);
Goto(&if_join);
}
@@ -2176,17 +2273,15 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
}
Node* CodeStubAssembler::AllocateSlicedString(
- Heap::RootListIndex map_root_index, Node* length, Node* parent,
+ Heap::RootListIndex map_root_index, TNode<Smi> length, Node* parent,
Node* offset) {
CSA_ASSERT(this, IsString(parent));
- CSA_ASSERT(this, TaggedIsSmi(length));
CSA_ASSERT(this, TaggedIsSmi(offset));
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
MachineRepresentation::kTagged);
- // Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
@@ -2197,31 +2292,31 @@ Node* CodeStubAssembler::AllocateSlicedString(
return result;
}
-Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
+Node* CodeStubAssembler::AllocateSlicedOneByteString(TNode<Smi> length,
+ Node* parent,
Node* offset) {
return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
parent, offset);
}
-Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
+Node* CodeStubAssembler::AllocateSlicedTwoByteString(TNode<Smi> length,
+ Node* parent,
Node* offset) {
return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
offset);
}
Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
- Node* length, Node* first,
+ TNode<Smi> length, Node* first,
Node* second,
AllocationFlags flags) {
CSA_ASSERT(this, IsString(first));
CSA_ASSERT(this, IsString(second));
- CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kTagged);
- // Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
@@ -2238,26 +2333,26 @@ Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
return result;
}
-Node* CodeStubAssembler::AllocateOneByteConsString(Node* length, Node* first,
- Node* second,
+Node* CodeStubAssembler::AllocateOneByteConsString(TNode<Smi> length,
+ Node* first, Node* second,
AllocationFlags flags) {
return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
second, flags);
}
-Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first,
- Node* second,
+Node* CodeStubAssembler::AllocateTwoByteConsString(TNode<Smi> length,
+ Node* first, Node* second,
AllocationFlags flags) {
return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
second, flags);
}
-Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
- Node* right, AllocationFlags flags) {
+Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
+ Node* left, Node* right,
+ AllocationFlags flags) {
CSA_ASSERT(this, IsFixedArray(context));
CSA_ASSERT(this, IsString(left));
CSA_ASSERT(this, IsString(right));
- CSA_ASSERT(this, TaggedIsSmi(length));
// Added string can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
@@ -2330,8 +2425,8 @@ Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
Node* result = AllocateInNewSpace(store_size);
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kHashTableMapRootIndex));
- StoreMapNoWriteBarrier(result, Heap::kHashTableMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex));
+ StoreMapNoWriteBarrier(result, Heap::kNameDictionaryMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
SmiFromWord(length));
// Initialized HashTable fields.
@@ -2382,7 +2477,7 @@ Node* CodeStubAssembler::CopyNameDictionary(Node* dictionary,
Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
Comment("AllocateStruct");
CSA_ASSERT(this, IsMap(map));
- Node* size = TimesPointerSize(LoadMapInstanceSize(map));
+ Node* size = TimesPointerSize(LoadMapInstanceSizeInWords(map));
Node* object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeStructBody(object, map, size, Struct::kHeaderSize);
@@ -2403,20 +2498,24 @@ void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
-Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
- Node* elements,
- AllocationFlags flags) {
+Node* CodeStubAssembler::AllocateJSObjectFromMap(
+ Node* map, Node* properties, Node* elements, AllocationFlags flags,
+ SlackTrackingMode slack_tracking_mode) {
CSA_ASSERT(this, IsMap(map));
- Node* size = TimesPointerSize(LoadMapInstanceSize(map));
- Node* object = AllocateInNewSpace(size, flags);
+ CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
+ CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
+ JS_GLOBAL_OBJECT_TYPE)));
+ Node* instance_size = TimesPointerSize(LoadMapInstanceSizeInWords(map));
+ Node* object = AllocateInNewSpace(instance_size, flags);
StoreMapNoWriteBarrier(object, map);
- InitializeJSObjectFromMap(object, map, size, properties, elements);
+ InitializeJSObjectFromMap(object, map, instance_size, properties, elements,
+ slack_tracking_mode);
return object;
}
-void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
- Node* size, Node* properties,
- Node* elements) {
+void CodeStubAssembler::InitializeJSObjectFromMap(
+ Node* object, Node* map, Node* instance_size, Node* properties,
+ Node* elements, SlackTrackingMode slack_tracking_mode) {
CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
@@ -2438,22 +2537,79 @@ void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
CSA_ASSERT(this, IsFixedArray(elements));
StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
}
- InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize);
+ if (slack_tracking_mode == kNoSlackTracking) {
+ InitializeJSObjectBodyNoSlackTracking(object, map, instance_size);
+ } else {
+ DCHECK_EQ(slack_tracking_mode, kWithSlackTracking);
+ InitializeJSObjectBodyWithSlackTracking(object, map, instance_size);
+ }
+}
+
+void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
+ Node* object, Node* map, Node* instance_size, int start_offset) {
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ CSA_ASSERT(this,
+ IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map)));
+ InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
+ Heap::kUndefinedValueRootIndex);
}
-void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
- Node* size, int start_offset) {
+void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
+ Node* object, Node* map, Node* instance_size) {
CSA_SLOW_ASSERT(this, IsMap(map));
- // TODO(cbruni): activate in-object slack tracking machinery.
- Comment("InitializeJSObjectBody");
- Node* filler = UndefinedConstant();
- // Calculate the untagged field addresses.
- object = BitcastTaggedToWord(object);
- Node* start_address =
- IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
- Node* end_address =
- IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
- StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+  Comment("InitializeJSObjectBodyWithSlackTracking");
+
+ // Perform in-object slack tracking if requested.
+ int start_offset = JSObject::kHeaderSize;
+ Node* bit_field3 = LoadMapBitField3(map);
+ Label end(this), slack_tracking(this), complete(this, Label::kDeferred);
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+ Comment("No slack tracking");
+ InitializeJSObjectBodyNoSlackTracking(object, map, instance_size);
+ Goto(&end);
+
+ BIND(&slack_tracking);
+ {
+ Comment("Decrease construction counter");
+ // Slack tracking is only done on initial maps.
+ CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ Node* new_bit_field3 = Int32Sub(
+ bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+ StoreObjectFieldNoWriteBarrier(map, Map::kBitField3Offset, new_bit_field3,
+ MachineRepresentation::kWord32);
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+
+    // The object still has in-object slack therefore the |used_or_unused|
+    // field contains the "used" value.
+ Node* used_size = TimesPointerSize(ChangeUint32ToWord(
+ LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
+ MachineType::Uint8())));
+
+    Comment("Initialize filler fields");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("Initialize undefined fields");
+ InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
+ Heap::kUndefinedValueRootIndex);
+
+ GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &complete);
+ Goto(&end);
+ }
+
+ // Finalize the instance size.
+ BIND(&complete);
+ {
+    // CompleteInobjectSlackTracking doesn't allocate and thus doesn't need a
+    // context.
+ CallRuntime(Runtime::kCompleteInobjectSlackTrackingForMap,
+ NoContextConstant(), map);
+ Goto(&end);
+ }
+
+ BIND(&end);
}
void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
@@ -2559,14 +2715,15 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
int capacity_as_constant;
Node *array = nullptr, *elements = nullptr;
- if (IsIntPtrOrSmiConstantZero(capacity)) {
+ if (IsIntPtrOrSmiConstantZero(capacity, capacity_mode)) {
// Array is empty. Use the shared empty fixed array instead of allocating a
// new one.
array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
allocation_site);
StoreObjectFieldRoot(array, JSArray::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
- } else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant) &&
+ } else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant,
+ capacity_mode) &&
capacity_as_constant > 0) {
// Allocate both array and elements object, and initialize the JSArray.
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
@@ -2614,10 +2771,52 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
return array;
}
+Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
+ Node* begin, Node* count,
+ ParameterMode mode, Node* capacity,
+ Node* allocation_site) {
+ Node* original_array_map = LoadMap(array);
+ Node* elements_kind = LoadMapElementsKind(original_array_map);
+
+  // Use the canonical map for the Array's ElementsKind
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+
+ Node* new_elements =
+ ExtractFixedArray(LoadElements(array), begin, count, capacity,
+ ExtractFixedArrayFlag::kAllFixedArrays, mode);
+
+ Node* result = AllocateUninitializedJSArrayWithoutElements(
+ array_map, ParameterToTagged(count, mode), allocation_site);
+ StoreObjectField(result, JSObject::kElementsOffset, new_elements);
+ return result;
+}
+
+Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
+ ParameterMode mode,
+ Node* allocation_site) {
+ Node* length = LoadJSArrayLength(array);
+ Node* elements = LoadElements(array);
+
+ Node* original_array_map = LoadMap(array);
+ Node* elements_kind = LoadMapElementsKind(original_array_map);
+
+ Node* new_elements = CloneFixedArray(elements);
+
+  // Use the canonical map for the Array's ElementsKind
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+ Node* result = AllocateUninitializedJSArrayWithoutElements(array_map, length,
+ allocation_site);
+ StoreObjectField(result, JSObject::kElementsOffset, new_elements);
+ return result;
+}
+
Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
- AllocationFlags flags) {
+ AllocationFlags flags,
+ Node* fixed_array_map) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
@@ -2626,16 +2825,177 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
// Allocate both array and elements object, and initialize the JSArray.
Node* array = Allocate(total_size, flags);
- Heap::RootListIndex map_index = IsDoubleElementsKind(kind)
- ? Heap::kFixedDoubleArrayMapRootIndex
- : Heap::kFixedArrayMapRootIndex;
- DCHECK(Heap::RootIsImmortalImmovable(map_index));
- StoreMapNoWriteBarrier(array, map_index);
+ if (fixed_array_map != nullptr) {
+ // Conservatively only skip the write barrier if there are no allocation
+ // flags, this ensures that the object hasn't ended up in LOS. Note that the
+ // fixed array map is currently always immortal and technically wouldn't
+ // need the write barrier even in LOS, but it's better to not take chances
+ // in case this invariant changes later, since it's difficult to enforce
+ // locally here.
+ if (flags == CodeStubAssembler::kNone) {
+ StoreMapNoWriteBarrier(array, fixed_array_map);
+ } else {
+ StoreMap(array, fixed_array_map);
+ }
+ } else {
+ Heap::RootListIndex map_index = IsDoubleElementsKind(kind)
+ ? Heap::kFixedDoubleArrayMapRootIndex
+ : Heap::kFixedArrayMapRootIndex;
+ DCHECK(Heap::RootIsImmortalImmovable(map_index));
+ StoreMapNoWriteBarrier(array, map_index);
+ }
StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
ParameterToTagged(capacity_node, mode));
return array;
}
+Node* CodeStubAssembler::ExtractFixedArray(Node* fixed_array, Node* first,
+ Node* count, Node* capacity,
+ ExtractFixedArrayFlags extract_flags,
+ ParameterMode parameter_mode) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ VARIABLE(var_fixed_array_map, MachineRepresentation::kTagged);
+ const AllocationFlags flags =
+ (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
+ ? CodeStubAssembler::kNone
+ : CodeStubAssembler::kAllowLargeObjectAllocation;
+ if (first == nullptr) {
+ first = IntPtrOrSmiConstant(0, parameter_mode);
+ }
+ if (count == nullptr) {
+ count =
+ IntPtrOrSmiSub(TaggedToParameter(LoadFixedArrayBaseLength(fixed_array),
+ parameter_mode),
+ first, parameter_mode);
+
+ CSA_ASSERT(
+ this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode),
+ count, parameter_mode));
+ }
+ if (capacity == nullptr) {
+ capacity = count;
+ } else {
+ CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
+ IntPtrOrSmiAdd(first, count, parameter_mode), capacity,
+ parameter_mode)));
+ }
+
+ Label if_fixed_double_array(this), empty(this), cow(this),
+ done(this, {&var_result, &var_fixed_array_map});
+ var_fixed_array_map.Bind(LoadMap(fixed_array));
+ GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), count), &empty);
+
+ if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
+ if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
+ GotoIf(IsFixedDoubleArrayMap(var_fixed_array_map.value()),
+ &if_fixed_double_array);
+ } else {
+ CSA_ASSERT(this, IsFixedDoubleArrayMap(var_fixed_array_map.value()));
+ }
+ } else {
+ DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsFixedDoubleArrayMap(var_fixed_array_map.value())));
+ }
+
+ if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
+ Label new_space_check(this, {&var_fixed_array_map});
+ Branch(WordEqual(var_fixed_array_map.value(),
+ LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ &cow, &new_space_check);
+
+ BIND(&new_space_check);
+
+ bool handle_old_space = true;
+ if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) {
+ handle_old_space = false;
+ CSA_ASSERT(this, Word32BinaryNot(FixedArraySizeDoesntFitInNewSpace(
+ count, FixedArray::kHeaderSize, parameter_mode)));
+ } else {
+ int constant_count;
+ handle_old_space =
+ !TryGetIntPtrOrSmiConstantValue(count, &constant_count,
+ parameter_mode) ||
+ (constant_count >
+ FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
+ }
+
+ Label old_space(this, Label::kDeferred);
+ if (handle_old_space) {
+ GotoIfFixedArraySizeDoesntFitInNewSpace(
+ capacity, &old_space, FixedArray::kHeaderSize, parameter_mode);
+ }
+
+ Comment("Copy PACKED_ELEMENTS new space");
+
+ ElementsKind kind = PACKED_ELEMENTS;
+ Node* to_elements =
+ AllocateFixedArray(kind, capacity, parameter_mode,
+ AllocationFlag::kNone, var_fixed_array_map.value());
+ var_result.Bind(to_elements);
+ CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, count,
+ capacity, SKIP_WRITE_BARRIER, parameter_mode);
+ Goto(&done);
+
+ if (handle_old_space) {
+ BIND(&old_space);
+ {
+ Comment("Copy PACKED_ELEMENTS old space");
+
+ to_elements = AllocateFixedArray(kind, capacity, parameter_mode, flags,
+ var_fixed_array_map.value());
+ var_result.Bind(to_elements);
+ CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first,
+ count, capacity, UPDATE_WRITE_BARRIER,
+ parameter_mode);
+ Goto(&done);
+ }
+ }
+
+ BIND(&cow);
+ {
+ if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
+ GotoIf(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first),
+ &new_space_check);
+
+ var_result.Bind(fixed_array);
+ Goto(&done);
+ } else {
+ var_fixed_array_map.Bind(LoadRoot(Heap::kFixedArrayMapRootIndex));
+ Goto(&new_space_check);
+ }
+ }
+ } else {
+ Goto(&if_fixed_double_array);
+ }
+
+ if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
+ BIND(&if_fixed_double_array);
+
+ Comment("Copy PACKED_DOUBLE_ELEMENTS");
+
+ ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
+ Node* to_elements = AllocateFixedArray(kind, capacity, parameter_mode,
+ flags, var_fixed_array_map.value());
+ var_result.Bind(to_elements);
+ CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, count,
+ capacity, SKIP_WRITE_BARRIER, parameter_mode);
+
+ Goto(&done);
+ }
+
+ BIND(&empty);
+ {
+ Comment("Copy empty array");
+
+ var_result.Bind(EmptyFixedArrayConstant());
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
void CodeStubAssembler::InitializePropertyArrayLength(Node* property_array,
Node* length,
ParameterMode mode) {
@@ -2675,7 +3035,6 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
CSA_SLOW_ASSERT(this, IsPropertyArray(array));
- STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
ElementsKind kind = PACKED_ELEMENTS;
Node* value = UndefinedConstant();
BuildFastFixedArrayForEach(array, kind, from_node, to_node,
@@ -2693,38 +3052,22 @@ void CodeStubAssembler::FillFixedArrayWithValue(
CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, kind));
- bool is_double = IsDoubleElementsKind(kind);
DCHECK(value_root_index == Heap::kTheHoleValueRootIndex ||
value_root_index == Heap::kUndefinedValueRootIndex);
- DCHECK_IMPLIES(is_double, value_root_index == Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
- Node* double_hole =
- Is64() ? UncheckedCast<UintPtrT>(Int64Constant(kHoleNanInt64))
- : UncheckedCast<UintPtrT>(Int32Constant(kHoleNanLower32));
+
+ // Determine the value to initialize the {array} based
+ // on the {value_root_index} and the elements {kind}.
Node* value = LoadRoot(value_root_index);
+ if (IsDoubleElementsKind(kind)) {
+ value = LoadHeapNumberValue(value);
+ }
BuildFastFixedArrayForEach(
array, kind, from_node, to_node,
- [this, value, is_double, double_hole](Node* array, Node* offset) {
- if (is_double) {
- // Don't use doubles to store the hole double, since manipulating the
- // signaling NaN used for the hole in C++, e.g. with bit_cast, will
- // change its value on ia32 (the x87 stack is used to return values
- // and stores to the stack silently clear the signalling bit).
- //
- // TODO(danno): When we have a Float32/Float64 wrapper class that
- // preserves double bits during manipulation, remove this code/change
- // this to an indexed Float64 store.
- if (Is64()) {
- StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
- double_hole);
- } else {
- StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
- double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
- double_hole);
- }
+ [this, value, kind](Node* array, Node* offset) {
+ if (IsDoubleElementsKind(kind)) {
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, array, offset,
+ value);
} else {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
@@ -2735,7 +3078,7 @@ void CodeStubAssembler::FillFixedArrayWithValue(
void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
- Node* to_array, Node* element_count, Node* capacity,
+ Node* to_array, Node* first_element, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode, ParameterMode mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(element_count, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
@@ -2752,16 +3095,17 @@ void CodeStubAssembler::CopyFixedArrayElements(
Label done(this);
bool from_double_elements = IsDoubleElementsKind(from_kind);
bool to_double_elements = IsDoubleElementsKind(to_kind);
- bool element_size_matches = Is64() || IsDoubleElementsKind(from_kind) ==
- IsDoubleElementsKind(to_kind);
bool doubles_to_objects_conversion =
IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind);
bool needs_write_barrier =
doubles_to_objects_conversion ||
(barrier_mode == UPDATE_WRITE_BARRIER && IsObjectElementsKind(to_kind));
+ bool element_offset_matches =
+ !needs_write_barrier && (Is64() || IsDoubleElementsKind(from_kind) ==
+ IsDoubleElementsKind(to_kind));
Node* double_hole =
- Is64() ? UncheckedCast<UintPtrT>(Int64Constant(kHoleNanInt64))
- : UncheckedCast<UintPtrT>(Int32Constant(kHoleNanLower32));
+ Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
+ : ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
if (doubles_to_objects_conversion) {
// If the copy might trigger a GC, make sure that the FixedArray is
@@ -2774,15 +3118,18 @@ void CodeStubAssembler::CopyFixedArrayElements(
Heap::kTheHoleValueRootIndex, mode);
}
- Node* limit_offset = ElementOffsetFromIndex(
- IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset);
- VARIABLE(var_from_offset, MachineType::PointerRepresentation(),
- ElementOffsetFromIndex(element_count, from_kind, mode,
- first_element_offset));
+ Node* first_from_element_offset =
+ ElementOffsetFromIndex(first_element, from_kind, mode, 0);
+ Node* limit_offset = IntPtrAdd(first_from_element_offset,
+ IntPtrConstant(first_element_offset));
+ VARIABLE(
+ var_from_offset, MachineType::PointerRepresentation(),
+ ElementOffsetFromIndex(IntPtrOrSmiAdd(first_element, element_count, mode),
+ from_kind, mode, first_element_offset));
// This second variable is used only when the element sizes of source and
// destination arrays do not match.
VARIABLE(var_to_offset, MachineType::PointerRepresentation());
- if (element_size_matches) {
+ if (element_offset_matches) {
var_to_offset.Bind(var_from_offset.value());
} else {
var_to_offset.Bind(ElementOffsetFromIndex(element_count, to_kind, mode,
@@ -2792,6 +3139,11 @@ void CodeStubAssembler::CopyFixedArrayElements(
Variable* vars[] = {&var_from_offset, &var_to_offset};
Label decrement(this, 2, vars);
+ Node* to_array_adjusted =
+ element_offset_matches
+ ? IntPtrSub(BitcastTaggedToWord(to_array), first_from_element_offset)
+ : to_array;
+
Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
BIND(&decrement);
@@ -2802,7 +3154,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
var_from_offset.Bind(from_offset);
Node* to_offset;
- if (element_size_matches) {
+ if (element_offset_matches) {
to_offset = from_offset;
} else {
to_offset = IntPtrSub(
@@ -2828,13 +3180,14 @@ void CodeStubAssembler::CopyFixedArrayElements(
from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
if (needs_write_barrier) {
- Store(to_array, to_offset, value);
+ CHECK_EQ(to_array, to_array_adjusted);
+ Store(to_array_adjusted, to_offset, value);
} else if (to_double_elements) {
- StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
- value);
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
+ to_offset, value);
} else {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, to_offset,
- value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array_adjusted,
+ to_offset, value);
}
Goto(&next_iter);
@@ -2849,12 +3202,12 @@ void CodeStubAssembler::CopyFixedArrayElements(
// preserves double bits during manipulation, remove this code/change
// this to an indexed Float64 store.
if (Is64()) {
- StoreNoWriteBarrier(MachineRepresentation::kWord64, to_array, to_offset,
- double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, to_array_adjusted,
+ to_offset, double_hole);
} else {
- StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array, to_offset,
- double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array,
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array_adjusted,
+ to_offset, double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array_adjusted,
IntPtrAdd(to_offset, IntPtrConstant(kPointerSize)),
double_hole);
}
@@ -2901,17 +3254,14 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
}
void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
- Node* from_index, Node* to_index,
- Node* character_count,
+ TNode<IntPtrT> from_index,
+ TNode<IntPtrT> to_index,
+ TNode<IntPtrT> character_count,
String::Encoding from_encoding,
- String::Encoding to_encoding,
- ParameterMode mode) {
+ String::Encoding to_encoding) {
// Cannot assert IsString(from_string) and IsString(to_string) here because
// CSA::SubString can pass in faked sequential strings when handling external
// subject strings.
- CSA_SLOW_ASSERT(this, MatchesParameterMode(character_count, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(from_index, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(to_index, mode));
bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
DCHECK_IMPLIES(to_one_byte, from_one_byte);
@@ -2923,11 +3273,12 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- Node* from_offset =
- ElementOffsetFromIndex(from_index, from_kind, mode, header_size);
+ Node* from_offset = ElementOffsetFromIndex(from_index, from_kind,
+ INTPTR_PARAMETERS, header_size);
Node* to_offset =
- ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
- Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
+ ElementOffsetFromIndex(to_index, to_kind, INTPTR_PARAMETERS, header_size);
+ Node* byte_count =
+ ElementOffsetFromIndex(character_count, from_kind, INTPTR_PARAMETERS);
Node* limit_offset = IntPtrAdd(from_offset, byte_count);
// Prepare the fast loop
@@ -2941,16 +3292,11 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
VARIABLE(current_to_offset, MachineType::PointerRepresentation(), to_offset);
VariableList vars({&current_to_offset}, zone());
int to_index_constant = 0, from_index_constant = 0;
- Smi* to_index_smi = nullptr;
- Smi* from_index_smi = nullptr;
bool index_same = (from_encoding == to_encoding) &&
(from_index == to_index ||
(ToInt32Constant(from_index, from_index_constant) &&
ToInt32Constant(to_index, to_index_constant) &&
- from_index_constant == to_index_constant) ||
- (ToSmiConstant(from_index, from_index_smi) &&
- ToSmiConstant(to_index, to_index_smi) &&
- to_index_smi == from_index_smi));
+ from_index_constant == to_index_constant));
BuildFastLoop(vars, from_offset, limit_offset,
[this, from_string, to_string, &current_to_offset, to_increment,
type, rep, index_same](Node* offset) {
@@ -3157,53 +3503,131 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
}
Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
- // We might need to loop once due to ToNumber conversion.
- VARIABLE(var_value, MachineRepresentation::kTagged, value);
VARIABLE(var_result, MachineRepresentation::kWord32);
- Label loop(this, &var_value), done_loop(this, &var_result);
+ Label done(this);
+ TaggedToWord32OrBigIntImpl<Feedback::kNone, Object::Conversion::kToNumber>(
+ context, value, &done, &var_result);
+ BIND(&done);
+ return var_result.value();
+}
+
+// Truncate {value} to word32 and jump to {if_number} if it is a Number,
+// or find that it is a BigInt and jump to {if_bigint}.
+void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
+ Label* if_number,
+ Variable* var_word32,
+ Label* if_bigint,
+ Variable* var_bigint) {
+ TaggedToWord32OrBigIntImpl<Feedback::kNone, Object::Conversion::kToNumeric>(
+ context, value, if_number, var_word32, if_bigint, var_bigint);
+}
+
+// Truncate {value} to word32 and jump to {if_number} if it is a Number,
+// or find that it is a BigInt and jump to {if_bigint}. In either case,
+// store the type feedback in {var_feedback}.
+void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
+ Node* context, Node* value, Label* if_number, Variable* var_word32,
+ Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
+ TaggedToWord32OrBigIntImpl<Feedback::kCollect,
+ Object::Conversion::kToNumeric>(
+ context, value, if_number, var_word32, if_bigint, var_bigint,
+ var_feedback);
+}
+
+template <CodeStubAssembler::Feedback feedback, Object::Conversion conversion>
+void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
+ Node* context, Node* value, Label* if_number, Variable* var_word32,
+ Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
+ DCHECK(var_word32->rep() == MachineRepresentation::kWord32);
+ DCHECK(var_bigint == nullptr ||
+ var_bigint->rep() == MachineRepresentation::kTagged);
+ DCHECK(var_feedback == nullptr ||
+ var_feedback->rep() == MachineRepresentation::kTaggedSigned);
+
+ // We might need to loop after conversion.
+ VARIABLE(var_value, MachineRepresentation::kTagged, value);
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
+ } else {
+ DCHECK(var_feedback == nullptr);
+ }
+ Variable* loop_vars[] = {&var_value, var_feedback};
+ int num_vars = feedback == Feedback::kCollect ? arraysize(loop_vars)
+ : arraysize(loop_vars) - 1;
+ Label loop(this, num_vars, loop_vars);
Goto(&loop);
BIND(&loop);
{
- // Load the current {value}.
value = var_value.value();
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- BIND(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToWord32(value));
- Goto(&done_loop);
+ Label not_smi(this), is_heap_number(this), is_oddball(this),
+ is_bigint(this);
+ GotoIf(TaggedIsNotSmi(value), &not_smi);
+
+ // {value} is a Smi.
+ var_word32->Bind(SmiToWord32(value));
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(
+ SmiOr(var_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+ }
+ Goto(if_number);
+
+ BIND(&not_smi);
+ Node* map = LoadMap(value);
+ GotoIf(IsHeapNumberMap(map), &is_heap_number);
+ Node* instance_type = LoadMapInstanceType(map);
+ if (conversion == Object::Conversion::kToNumeric) {
+ GotoIf(IsBigIntInstanceType(instance_type), &is_bigint);
}
- BIND(&if_valueisnotsmi);
+ // Not HeapNumber (or BigInt if conversion == kToNumeric).
{
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumber(value), &if_valueisheapnumber,
- &if_valueisnotheapnumber);
+ if (feedback == Feedback::kCollect) {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a Numeric, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this, SmiEqual(var_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
+ }
+ GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball);
+ // Not an oddball either -> convert.
+ auto builtin = conversion == Object::Conversion::kToNumeric
+ ? Builtins::kNonNumberToNumeric
+ : Builtins::kNonNumberToNumber;
+ var_value.Bind(CallBuiltin(builtin, context, value));
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ }
+ Goto(&loop);
- BIND(&if_valueisheapnumber);
- {
- // Truncate the floating point value.
- var_result.Bind(TruncateHeapNumberValueToWord32(value));
- Goto(&done_loop);
+ BIND(&is_oddball);
+ var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
}
+ Goto(&loop);
+ }
- BIND(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- var_value.Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, value));
- Goto(&loop);
+ BIND(&is_heap_number);
+ var_word32->Bind(TruncateHeapNumberValueToWord32(value));
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiOr(var_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kNumber)));
+ }
+ Goto(if_number);
+
+ if (conversion == Object::Conversion::kToNumeric) {
+ BIND(&is_bigint);
+ var_bigint->Bind(value);
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(
+ SmiOr(var_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kBigInt)));
}
+ Goto(if_bigint);
}
}
- BIND(&done_loop);
- return var_result.value();
}
Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
@@ -3211,9 +3635,10 @@ Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
return TruncateFloat64ToWord32(value);
}
-Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
- Node* value32 = RoundFloat64ToInt32(value);
- Node* value64 = ChangeInt32ToFloat64(value32);
+TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
+ SloppyTNode<Float64T> value) {
+ TNode<Int32T> value32 = RoundFloat64ToInt32(value);
+ TNode<Float64T> value64 = ChangeInt32ToFloat64(value32);
Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
@@ -3229,70 +3654,71 @@ Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
BIND(&if_valueisnotequal);
Goto(&if_valueisheapnumber);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Number, var_result);
BIND(&if_valueisint32);
{
if (Is64()) {
- Node* result =
- SmiTag(UncheckedCast<IntPtrT>(ChangeInt32ToInt64(value32)));
- var_result.Bind(result);
+ TNode<Smi> result = SmiTag(ChangeInt32ToIntPtr(value32));
+ var_result = result;
Goto(&if_join);
} else {
- Node* pair = Int32AddWithOverflow(value32, value32);
- Node* overflow = Projection(1, pair);
+ TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32);
+ TNode<BoolT> overflow = Projection<1>(pair);
Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_overflow);
Goto(&if_valueisheapnumber);
BIND(&if_notoverflow);
{
- Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
- var_result.Bind(result);
+ TNode<IntPtrT> result = ChangeInt32ToIntPtr(Projection<0>(pair));
+ var_result = BitcastWordToTaggedSigned(result);
Goto(&if_join);
}
}
}
BIND(&if_valueisheapnumber);
{
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
+ var_result = AllocateHeapNumberWithValue(value);
Goto(&if_join);
}
BIND(&if_join);
- return var_result.value();
+ return var_result;
}
-Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
+ SloppyTNode<Int32T> value) {
if (Is64()) {
- return SmiTag(UncheckedCast<IntPtrT>(ChangeInt32ToInt64(value)));
+ return SmiTag(ChangeInt32ToIntPtr(value));
}
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Node* pair = Int32AddWithOverflow(value, value);
- Node* overflow = Projection(1, pair);
+ TVARIABLE(Number, var_result);
+ TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value, value);
+ TNode<BoolT> overflow = Projection<1>(pair);
Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
if_join(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_overflow);
{
- Node* value64 = ChangeInt32ToFloat64(value);
- Node* result = AllocateHeapNumberWithValue(value64);
- var_result.Bind(result);
+ TNode<Float64T> value64 = ChangeInt32ToFloat64(value);
+ TNode<HeapNumber> result = AllocateHeapNumberWithValue(value64);
+ var_result = result;
}
Goto(&if_join);
BIND(&if_notoverflow);
{
- Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
- var_result.Bind(result);
+ TNode<Smi> result =
+ BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
+ var_result = result;
}
Goto(&if_join);
BIND(&if_join);
- return var_result.value();
+ return var_result;
}
-Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
+TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
+ SloppyTNode<Uint32T> value) {
Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
if_join(this);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Number, var_result);
// If {value} > 2^31 - 1, we need to store it in a HeapNumber.
Branch(Uint32LessThan(Int32Constant(Smi::kMaxValue), value), &if_overflow,
&if_not_overflow);
@@ -3300,34 +3726,37 @@ Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
BIND(&if_not_overflow);
{
if (Is64()) {
- var_result.Bind(
- SmiTag(UncheckedCast<IntPtrT>(ChangeUint32ToUint64(value))));
+ var_result =
+ SmiTag(ReinterpretCast<IntPtrT>(ChangeUint32ToUint64(value)));
} else {
// If tagging {value} results in an overflow, we need to use a HeapNumber
// to represent it.
- Node* pair = Int32AddWithOverflow(value, value);
- Node* overflow = Projection(1, pair);
+ // TODO(tebbi): This overflow can never happen.
+ TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(
+ UncheckedCast<Int32T>(value), UncheckedCast<Int32T>(value));
+ TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, &if_overflow);
- Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
- var_result.Bind(result);
+ TNode<Smi> result =
+ BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
+ var_result = result;
}
}
Goto(&if_join);
BIND(&if_overflow);
{
- Node* float64_value = ChangeUint32ToFloat64(value);
- var_result.Bind(AllocateHeapNumberWithValue(float64_value));
+ TNode<Float64T> float64_value = ChangeUint32ToFloat64(value);
+ var_result = AllocateHeapNumberWithValue(float64_value);
}
Goto(&if_join);
BIND(&if_join);
- return var_result.value();
+ return var_result;
}
-Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
- char const* method_name) {
+TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
+ char const* method_name) {
VARIABLE(var_value, MachineRepresentation::kTagged, value);
// Check if the {value} is a Smi or a HeapObject.
@@ -3346,23 +3775,11 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
BIND(&if_valueisnotstring);
{
// Check if the {value} is null.
- Label if_valueisnullorundefined(this, Label::kDeferred),
- if_valueisnotnullorundefined(this, Label::kDeferred),
- if_valueisnotnull(this, Label::kDeferred);
- Branch(WordEqual(value, NullConstant()), &if_valueisnullorundefined,
- &if_valueisnotnull);
- BIND(&if_valueisnotnull);
- {
- // Check if the {value} is undefined.
- Branch(WordEqual(value, UndefinedConstant()),
- &if_valueisnullorundefined, &if_valueisnotnullorundefined);
- BIND(&if_valueisnotnullorundefined);
- {
- // Convert the {value} to a String.
- var_value.Bind(CallBuiltin(Builtins::kToString, context, value));
- Goto(&if_valueisstring);
- }
- }
+ Label if_valueisnullorundefined(this, Label::kDeferred);
+ GotoIf(IsNullOrUndefined(value), &if_valueisnullorundefined);
+ // Convert the {value} to a String.
+ var_value.Bind(CallBuiltin(Builtins::kToString, context, value));
+ Goto(&if_valueisstring);
BIND(&if_valueisnullorundefined);
{
@@ -3376,50 +3793,54 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
BIND(&if_valueissmi);
{
// The {value} is a Smi, convert it to a String.
- Callable callable = CodeFactory::NumberToString(isolate());
- var_value.Bind(CallStub(callable, context, value));
+ var_value.Bind(CallBuiltin(Builtins::kNumberToString, context, value));
Goto(&if_valueisstring);
}
BIND(&if_valueisstring);
- return var_value.value();
+ return CAST(var_value.value());
}
-Node* CodeStubAssembler::ChangeNumberToFloat64(Node* value) {
+TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
+ SloppyTNode<Number> value) {
+ // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
CSA_SLOW_ASSERT(this, IsNumber(value));
- VARIABLE(result, MachineRepresentation::kFloat64);
+ TVARIABLE(Float64T, result);
Label smi(this);
Label done(this, &result);
GotoIf(TaggedIsSmi(value), &smi);
- result.Bind(
- LoadObjectField(value, HeapNumber::kValueOffset, MachineType::Float64()));
+ result = LoadHeapNumberValue(CAST(value));
Goto(&done);
BIND(&smi);
{
- result.Bind(SmiToFloat64(value));
+ result = SmiToFloat64(CAST(value));
Goto(&done);
}
BIND(&done);
- return result.value();
+ return result;
}
-Node* CodeStubAssembler::ChangeNumberToIntPtr(Node* value) {
+TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
+ SloppyTNode<Number> value) {
+ // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
CSA_SLOW_ASSERT(this, IsNumber(value));
- VARIABLE(result, MachineType::PointerRepresentation());
+ TVARIABLE(UintPtrT, result);
Label smi(this), done(this, &result);
GotoIf(TaggedIsSmi(value), &smi);
- CSA_ASSERT(this, IsHeapNumber(value));
- result.Bind(ChangeFloat64ToUintPtr(LoadHeapNumberValue(value)));
+ TNode<HeapNumber> value_hn = CAST(value);
+ result = ChangeFloat64ToUintPtr(LoadHeapNumberValue(value_hn));
Goto(&done);
BIND(&smi);
- result.Bind(SmiToWord(value));
+ TNode<Smi> value_smi = CAST(value);
+ CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
+ result = UncheckedCast<UintPtrT>(SmiToWord(value_smi));
Goto(&done);
BIND(&done);
- return result.value();
+ return result;
}
Node* CodeStubAssembler::TimesPointerSize(Node* value) {
@@ -3452,7 +3873,7 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
// Check if {value} is a JSValue.
Label if_valueisvalue(this, Label::kDeferred), if_valueisnotvalue(this);
- Branch(Word32Equal(value_instance_type, Int32Constant(JS_VALUE_TYPE)),
+ Branch(InstanceTypeEqual(value_instance_type, JS_VALUE_TYPE),
&if_valueisvalue, &if_valueisnotvalue);
BIND(&if_valueisvalue);
@@ -3541,6 +3962,28 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
return var_value_map.value();
}
+Node* CodeStubAssembler::ThrowIfNotJSReceiver(
+ Node* context, Node* value, MessageTemplate::Template msg_template,
+ const char* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ VARIABLE(var_value_map, MachineRepresentation::kTagged);
+
+ GotoIf(TaggedIsSmi(value), &throw_exception);
+
+ // Load the instance type of the {value}.
+ var_value_map.Bind(LoadMap(value));
+ Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+ Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
+
+ // The {value} is not a compatible receiver for this method.
+ BIND(&throw_exception);
+ ThrowTypeError(context, msg_template, method_name);
+
+ BIND(&out);
+ return var_value_map.value();
+}
+
void CodeStubAssembler::ThrowRangeError(Node* context,
MessageTemplate::Template message,
Node* arg0, Node* arg1, Node* arg2) {
@@ -3628,9 +4071,9 @@ Node* CodeStubAssembler::IsUndetectableMap(Node* map) {
return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsUndetectable);
}
-Node* CodeStubAssembler::IsArrayProtectorCellInvalid() {
+Node* CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ Node* cell = LoadRoot(Heap::kNoElementsProtectorRootIndex);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
@@ -3668,6 +4111,11 @@ Node* CodeStubAssembler::IsConstructor(Node* object) {
return IsConstructorMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsFunctionWithPrototypeSlotMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return IsSetWord32(LoadMapBitField(map), 1 << Map::kHasPrototypeSlot);
+}
+
Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
return Int32LessThanOrEqual(instance_type,
@@ -3727,6 +4175,12 @@ Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
+Node* CodeStubAssembler::IsArrayIteratorInstanceType(Node* instance_type) {
+ return Uint32LessThan(
+ Int32Constant(LAST_ARRAY_ITERATOR_TYPE - FIRST_ARRAY_ITERATOR_TYPE),
+ Int32Sub(instance_type, Int32Constant(FIRST_ARRAY_ITERATOR_TYPE)));
+}
+
Node* CodeStubAssembler::IsJSReceiverMap(Node* map) {
return IsJSReceiverInstanceType(LoadMapInstanceType(map));
}
@@ -3739,8 +4193,12 @@ Node* CodeStubAssembler::IsNullOrJSReceiver(Node* object) {
return Word32Or(IsJSReceiver(object), IsNull(object));
}
+Node* CodeStubAssembler::IsNullOrUndefined(Node* const value) {
+ return Word32Or(IsUndefined(value), IsNull(value));
+}
+
Node* CodeStubAssembler::IsJSGlobalProxyInstanceType(Node* instance_type) {
- return Word32Equal(instance_type, Int32Constant(JS_GLOBAL_PROXY_TYPE));
+ return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE);
}
Node* CodeStubAssembler::IsJSObjectInstanceType(Node* instance_type) {
@@ -3759,21 +4217,17 @@ Node* CodeStubAssembler::IsJSObject(Node* object) {
}
Node* CodeStubAssembler::IsJSProxy(Node* object) {
- Node* object_map = LoadMap(object);
- Node* object_instance_type = LoadMapInstanceType(object_map);
-
- return InstanceTypeEqual(object_instance_type, JS_PROXY_TYPE);
+ return HasInstanceType(object, JS_PROXY_TYPE);
}
Node* CodeStubAssembler::IsJSGlobalProxy(Node* object) {
- return Word32Equal(LoadInstanceType(object),
- Int32Constant(JS_GLOBAL_PROXY_TYPE));
+ return HasInstanceType(object, JS_GLOBAL_PROXY_TYPE);
}
Node* CodeStubAssembler::IsMap(Node* map) { return IsMetaMap(LoadMap(map)); }
Node* CodeStubAssembler::IsJSValueInstanceType(Node* instance_type) {
- return Word32Equal(instance_type, Int32Constant(JS_VALUE_TYPE));
+ return InstanceTypeEqual(instance_type, JS_VALUE_TYPE);
}
Node* CodeStubAssembler::IsJSValue(Node* object) {
@@ -3785,7 +4239,7 @@ Node* CodeStubAssembler::IsJSValueMap(Node* map) {
}
Node* CodeStubAssembler::IsJSArrayInstanceType(Node* instance_type) {
- return Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+ return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
}
Node* CodeStubAssembler::IsJSArray(Node* object) {
@@ -3800,6 +4254,14 @@ Node* CodeStubAssembler::IsFixedArray(Node* object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
+Node* CodeStubAssembler::IsFixedArraySubclass(Node* object) {
+ Node* instance_type = LoadInstanceType(object);
+ return Word32And(Int32GreaterThanOrEqual(
+ instance_type, Int32Constant(FIRST_FIXED_ARRAY_TYPE)),
+ Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_FIXED_ARRAY_TYPE)));
+}
+
Node* CodeStubAssembler::IsPropertyArray(Node* object) {
return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
}
@@ -3835,7 +4297,7 @@ Node* CodeStubAssembler::IsFixedArrayWithKind(Node* object, ElementsKind kind) {
return IsFixedDoubleArray(object);
} else {
DCHECK(IsSmiOrObjectElementsKind(kind));
- return Word32Or(IsFixedArray(object), IsHashTable(object));
+ return IsFixedArraySubclass(object);
}
}
@@ -3889,7 +4351,7 @@ Node* CodeStubAssembler::IsString(Node* object) {
}
Node* CodeStubAssembler::IsSymbolInstanceType(Node* instance_type) {
- return Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE));
+ return InstanceTypeEqual(instance_type, SYMBOL_TYPE);
}
Node* CodeStubAssembler::IsSymbol(Node* object) {
@@ -3897,7 +4359,7 @@ Node* CodeStubAssembler::IsSymbol(Node* object) {
}
Node* CodeStubAssembler::IsBigIntInstanceType(Node* instance_type) {
- return Word32Equal(instance_type, Int32Constant(BIGINT_TYPE));
+ return InstanceTypeEqual(instance_type, BIGINT_TYPE);
}
Node* CodeStubAssembler::IsBigInt(Node* object) {
@@ -3913,10 +4375,10 @@ Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
return Select(
IsSymbol(object),
[=] {
- Node* const flags =
- SmiToWord32(CAST(LoadObjectField(object, Symbol::kFlagsOffset)));
- const int kPrivateMask = 1 << Symbol::kPrivateBit;
- return IsSetWord32(flags, kPrivateMask);
+ TNode<Symbol> symbol = CAST(object);
+ TNode<Int32T> flags =
+ SmiToWord32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
+ return IsSetWord32(flags, 1 << Symbol::kPrivateBit);
},
[=] { return Int32Constant(0); }, MachineRepresentation::kWord32);
}
@@ -3934,16 +4396,16 @@ Node* CodeStubAssembler::IsHashTable(Node* object) {
}
Node* CodeStubAssembler::IsDictionary(Node* object) {
- return Word32Or(IsHashTable(object), IsUnseededNumberDictionary(object));
+ return Word32Or(IsHashTable(object), IsNumberDictionary(object));
}
-Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
+Node* CodeStubAssembler::IsNumberDictionary(Node* object) {
return WordEqual(LoadMap(object),
- LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
+ LoadRoot(Heap::kNumberDictionaryMapRootIndex));
}
Node* CodeStubAssembler::IsJSFunctionInstanceType(Node* instance_type) {
- return Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
Node* CodeStubAssembler::IsJSFunction(Node* object) {
@@ -3975,12 +4437,28 @@ Node* CodeStubAssembler::IsJSRegExp(Node* object) {
return HasInstanceType(object, JS_REGEXP_TYPE);
}
+Node* CodeStubAssembler::IsNumeric(Node* object) {
+ return Select(
+ TaggedIsSmi(object), [=] { return Int32Constant(1); },
+ [=] { return Word32Or(IsHeapNumber(object), IsBigInt(object)); },
+ MachineRepresentation::kWord32);
+}
+
Node* CodeStubAssembler::IsNumber(Node* object) {
return Select(TaggedIsSmi(object), [=] { return Int32Constant(1); },
[=] { return IsHeapNumber(object); },
MachineRepresentation::kWord32);
}
+Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
+ int base_size,
+ ParameterMode mode) {
+ int max_newspace_elements =
+ (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
+ return IntPtrOrSmiGreaterThan(
+ element_count, IntPtrOrSmiConstant(max_newspace_elements, mode), mode);
+}
+
Node* CodeStubAssembler::IsNumberNormalized(Node* number) {
CSA_ASSERT(this, IsNumber(number));
@@ -4022,12 +4500,13 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
Label check_upper_bound(this), check_is_integer(this), out(this),
return_false(this);
- GotoIfNumberGreaterThanOrEqual(number, NumberConstant(0), &check_upper_bound);
+ GotoIfNumericGreaterThanOrEqual(number, NumberConstant(0),
+ &check_upper_bound);
Goto(&return_false);
BIND(&check_upper_bound);
- GotoIfNumberGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
- &return_false);
+ GotoIfNumericGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
+ &return_false);
Goto(&check_is_integer);
BIND(&check_is_integer);
@@ -4046,15 +4525,12 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
return var_result.value();
}
-TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(
- SloppyTNode<String> string, Node* index, ParameterMode parameter_mode) {
- CSA_ASSERT(this, MatchesParameterMode(index, parameter_mode));
+TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> index) {
CSA_ASSERT(this, IsString(string));
- // Translate the {index} into a Word.
- index = ParameterToWord(index, parameter_mode);
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(index, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrLessThan(index, SmiUntag(LoadStringLength(string))));
+ CSA_ASSERT(this, IntPtrLessThan(index, LoadStringLengthAsWord(string)));
VARIABLE(var_result, MachineRepresentation::kWord32);
@@ -4116,8 +4592,7 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
Label if_entryisundefined(this, Label::kDeferred),
if_entryisnotundefined(this);
Node* entry = LoadFixedArrayElement(cache, code_index);
- Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
- &if_entryisnotundefined);
+ Branch(IsUndefined(entry), &if_entryisundefined, &if_entryisnotundefined);
BIND(&if_entryisundefined);
{
@@ -4158,29 +4633,24 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
// A wrapper around CopyStringCharacters which determines the correct string
// encoding, allocates a corresponding sequential string, and then copies the
// given character range using CopyStringCharacters.
-// |from_string| must be a sequential string. |from_index| and
-// |character_count| must be Smis s.t.
+// |from_string| must be a sequential string.
// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
-Node* CodeStubAssembler::AllocAndCopyStringCharacters(Node* context, Node* from,
- Node* from_instance_type,
- Node* from_index,
- Node* character_count) {
+Node* CodeStubAssembler::AllocAndCopyStringCharacters(
+ Node* context, Node* from, Node* from_instance_type,
+ TNode<IntPtrT> from_index, TNode<Smi> character_count) {
Label end(this), one_byte_sequential(this), two_byte_sequential(this);
Variable var_result(this, MachineRepresentation::kTagged);
- Node* const smi_zero = SmiConstant(0);
-
Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
&two_byte_sequential);
// The subject string is a sequential one-byte string.
BIND(&one_byte_sequential);
{
- Node* result =
- AllocateSeqOneByteString(context, SmiToWord(character_count));
- CopyStringCharacters(from, result, from_index, smi_zero, character_count,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* result = AllocateSeqOneByteString(context, character_count);
+ CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
+ SmiUntag(character_count), String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING);
var_result.Bind(result);
Goto(&end);
@@ -4189,11 +4659,10 @@ Node* CodeStubAssembler::AllocAndCopyStringCharacters(Node* context, Node* from,
// The subject string is a sequential two-byte string.
BIND(&two_byte_sequential);
{
- Node* result =
- AllocateSeqTwoByteString(context, SmiToWord(character_count));
- CopyStringCharacters(from, result, from_index, smi_zero, character_count,
- String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* result = AllocateSeqTwoByteString(context, character_count);
+ CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
+ SmiUntag(character_count), String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
var_result.Bind(result);
Goto(&end);
@@ -4226,8 +4695,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
CSA_ASSERT(this, TaggedIsPositiveSmi(to));
}
- Node* const substr_length = SmiSub(to, from);
- Node* const string_length = LoadStringLength(string);
+ TNode<Smi> const substr_length = SmiSub(to, from);
+ TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
// Begin dispatching based on substring length.
@@ -4253,44 +4722,48 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// encoding at this point.
Label external_string(this);
{
- Label next(this);
+ if (FLAG_string_slices) {
+ Label next(this);
- // Short slice. Copy instead of slicing.
- GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
- &next);
+ // Short slice. Copy instead of slicing.
+ GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
+ &next);
- // Allocate new sliced string.
+ // Allocate new sliced string.
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
- Label one_byte_slice(this), two_byte_slice(this);
- Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
- &one_byte_slice, &two_byte_slice);
+ Label one_byte_slice(this), two_byte_slice(this);
+ Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
+ &one_byte_slice, &two_byte_slice);
- BIND(&one_byte_slice);
- {
- var_result.Bind(
- AllocateSlicedOneByteString(substr_length, direct_string, offset));
- Goto(&end);
- }
+ BIND(&one_byte_slice);
+ {
+ var_result.Bind(
+ AllocateSlicedOneByteString(substr_length, direct_string, offset));
+ Goto(&end);
+ }
- BIND(&two_byte_slice);
- {
- var_result.Bind(
- AllocateSlicedTwoByteString(substr_length, direct_string, offset));
- Goto(&end);
- }
+ BIND(&two_byte_slice);
+ {
+ var_result.Bind(
+ AllocateSlicedTwoByteString(substr_length, direct_string, offset));
+ Goto(&end);
+ }
- BIND(&next);
+ BIND(&next);
+ }
// The subject string can only be external or sequential string of either
// encoding at this point.
GotoIf(to_direct.is_external(), &external_string);
- var_result.Bind(AllocAndCopyStringCharacters(
- context, direct_string, instance_type, offset, substr_length));
+ var_result.Bind(
+ AllocAndCopyStringCharacters(context, direct_string, instance_type,
+ SmiUntag(offset), substr_length));
+ Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
Goto(&end);
@@ -4302,7 +4775,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Node* const fake_sequential_string = to_direct.PointerToString(&runtime);
var_result.Bind(AllocAndCopyStringCharacters(
- context, fake_sequential_string, instance_type, offset, substr_length));
+ context, fake_sequential_string, instance_type, SmiUntag(offset),
+ substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -4313,7 +4787,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Substrings of length 1 are generated through CharCodeAt and FromCharCode.
BIND(&single_char);
{
- Node* char_code = StringCharCodeAt(string, from);
+ Node* char_code = StringCharCodeAt(string, SmiUntag(from));
var_result.Bind(StringFromCharCode(char_code));
Goto(&end);
}
@@ -4418,7 +4892,7 @@ Node* ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
// Sliced string. Fetch parent and correct start index by offset.
BIND(&if_issliced);
{
- if (flags_ & kDontUnpackSlicedStrings) {
+ if (!FLAG_string_slices || (flags_ & kDontUnpackSlicedStrings)) {
Goto(if_bailout);
} else {
Node* const string = var_string_.value();
@@ -4579,27 +5053,25 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
done(this, &result), done_native(this, &result);
Counters* counters = isolate()->counters();
- Node* left_length = LoadStringLength(left);
- GotoIf(WordNotEqual(IntPtrConstant(0), left_length), &check_right);
+ TNode<Smi> left_length = LoadStringLengthAsSmi(left);
+ GotoIf(SmiNotEqual(SmiConstant(0), left_length), &check_right);
result.Bind(right);
Goto(&done_native);
BIND(&check_right);
- Node* right_length = LoadStringLength(right);
- GotoIf(WordNotEqual(IntPtrConstant(0), right_length), &cons);
+ TNode<Smi> right_length = LoadStringLengthAsSmi(right);
+ GotoIf(SmiNotEqual(SmiConstant(0), right_length), &cons);
result.Bind(left);
Goto(&done_native);
BIND(&cons);
{
- CSA_ASSERT(this, TaggedIsSmi(left_length));
- CSA_ASSERT(this, TaggedIsSmi(right_length));
- Node* new_length = SmiAdd(left_length, right_length);
+ TNode<Smi> new_length = SmiAdd(left_length, right_length);
// If new length is greater than String::kMaxLength, goto runtime to
// throw. Note: we also need to invalidate the string length protector, so
// can't just throw here directly.
- GotoIf(SmiAboveOrEqual(new_length, SmiConstant(String::kMaxLength)),
+ GotoIf(SmiGreaterThan(new_length, SmiConstant(String::kMaxLength)),
&runtime);
VARIABLE(var_left, MachineRepresentation::kTagged, left);
@@ -4630,35 +5102,37 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime);
GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow);
+ TNode<IntPtrT> word_left_length = SmiUntag(left_length);
+ TNode<IntPtrT> word_right_length = SmiUntag(right_length);
+
Label two_byte(this);
GotoIf(Word32Equal(Word32And(ored_instance_types,
Int32Constant(kStringEncodingMask)),
Int32Constant(kTwoByteStringTag)),
&two_byte);
// One-byte sequential string case
- Node* new_string =
- AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
- CopyStringCharacters(var_left.value(), new_string, SmiConstant(0),
- SmiConstant(0), left_length, String::ONE_BYTE_ENCODING,
- String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
- CopyStringCharacters(var_right.value(), new_string, SmiConstant(0),
- left_length, right_length, String::ONE_BYTE_ENCODING,
- String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
+ Node* new_string = AllocateSeqOneByteString(context, new_length);
+ CopyStringCharacters(var_left.value(), new_string, IntPtrConstant(0),
+ IntPtrConstant(0), word_left_length,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
+ CopyStringCharacters(var_right.value(), new_string, IntPtrConstant(0),
+ word_left_length, word_right_length,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
result.Bind(new_string);
Goto(&done_native);
BIND(&two_byte);
{
// Two-byte sequential string case
- new_string =
- AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
- CopyStringCharacters(var_left.value(), new_string, SmiConstant(0),
- SmiConstant(0), left_length,
- String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
- SMI_PARAMETERS);
- CopyStringCharacters(var_right.value(), new_string, SmiConstant(0),
- left_length, right_length, String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+ new_string = AllocateSeqTwoByteString(context, new_length);
+ CopyStringCharacters(var_left.value(), new_string, IntPtrConstant(0),
+ IntPtrConstant(0), word_left_length,
+ String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
+ CopyStringCharacters(var_right.value(), new_string, IntPtrConstant(0),
+ word_left_length, word_right_length,
+ String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING);
result.Bind(new_string);
Goto(&done_native);
}
@@ -4739,30 +5213,30 @@ Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
return var_result.value();
}
-Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
+TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
+ SloppyTNode<String> input) {
CSA_SLOW_ASSERT(this, IsString(input));
Label runtime(this, Label::kDeferred);
Label end(this);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Number, var_result);
// Check if string has a cached array index.
- Node* hash = LoadNameHashField(input);
+ TNode<Uint32T> hash = LoadNameHashField(input);
GotoIf(IsSetWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
&runtime);
- var_result.Bind(
- SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
+ var_result = SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash));
Goto(&end);
BIND(&runtime);
{
- var_result.Bind(CallRuntime(Runtime::kStringToNumber, context, input));
+ var_result = CAST(CallRuntime(Runtime::kStringToNumber, context, input));
Goto(&end);
}
BIND(&end);
- return var_result.value();
+ return var_result;
}
Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
@@ -4859,19 +5333,18 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
BIND(&is_number);
{
- Callable callable = CodeFactory::NumberToString(isolate());
- var_result.Bind(CallStub(callable, context, value));
+ var_result.Bind(CallBuiltin(Builtins::kNumberToString, context, value));
Goto(&end);
}
BIND(&not_name);
{
- GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ GotoIf(InstanceTypeEqual(value_instance_type, HEAP_NUMBER_TYPE),
&is_number);
Label not_oddball(this);
- GotoIf(Word32NotEqual(value_instance_type, Int32Constant(ODDBALL_TYPE)),
- &not_oddball);
+ GotoIfNot(InstanceTypeEqual(value_instance_type, ODDBALL_TYPE),
+ &not_oddball);
var_result.Bind(LoadObjectField(value, Oddball::kToStringOffset));
Goto(&end);
@@ -4888,8 +5361,9 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
return var_result.value();
}
-Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
- // Assert input is a HeapObject (not smi or heap number)
+Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
+ Node* context, Node* input, Object::Conversion mode,
+ BigIntHandling bigint_handling) {
CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
@@ -4907,10 +5381,11 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Dispatch on the {input} instance type.
Node* input_instance_type = LoadInstanceType(input);
Label if_inputisstring(this), if_inputisoddball(this),
- if_inputisreceiver(this, Label::kDeferred),
+ if_inputisbigint(this), if_inputisreceiver(this, Label::kDeferred),
if_inputisother(this, Label::kDeferred);
GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
- GotoIf(Word32Equal(input_instance_type, Int32Constant(ODDBALL_TYPE)),
+ GotoIf(IsBigIntInstanceType(input_instance_type), &if_inputisbigint);
+ GotoIf(InstanceTypeEqual(input_instance_type, ODDBALL_TYPE),
&if_inputisoddball);
Branch(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver,
&if_inputisother);
@@ -4922,6 +5397,21 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
Goto(&end);
}
+ BIND(&if_inputisbigint);
+ if (mode == Object::Conversion::kToNumeric) {
+ var_result.Bind(input);
+ Goto(&end);
+ } else {
+ DCHECK_EQ(mode, Object::Conversion::kToNumber);
+ if (bigint_handling == BigIntHandling::kThrow) {
+ Goto(&if_inputisother);
+ } else {
+ DCHECK_EQ(bigint_handling, BigIntHandling::kConvertToNumber);
+ var_result.Bind(CallRuntime(Runtime::kBigIntToNumber, context, input));
+ Goto(&end);
+ }
+ }
+
BIND(&if_inputisoddball);
{
// The {input} is an Oddball, we just need to load the Number value of it.
@@ -4937,21 +5427,23 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
isolate(), ToPrimitiveHint::kNumber);
Node* result = CallStub(callable, context, input);
- // Check if the {result} is already a Number.
- Label if_resultisnumber(this), if_resultisnotnumber(this);
- GotoIf(TaggedIsSmi(result), &if_resultisnumber);
- Branch(IsHeapNumber(result), &if_resultisnumber, &if_resultisnotnumber);
+ // Check if the {result} is already a Number/Numeric.
+ Label if_done(this), if_notdone(this);
+ Branch(mode == Object::Conversion::kToNumber ? IsNumber(result)
+ : IsNumeric(result),
+ &if_done, &if_notdone);
- BIND(&if_resultisnumber);
+ BIND(&if_done);
{
- // The ToPrimitive conversion already gave us a Number, so we're done.
+ // The ToPrimitive conversion already gave us a Number/Numeric, so we're
+ // done.
var_result.Bind(result);
Goto(&end);
}
- BIND(&if_resultisnotnumber);
+ BIND(&if_notdone);
{
- // We now have a Primitive {result}, but it's not yet a Number.
+ // We now have a Primitive {result}, but it's not yet a Number/Numeric.
var_input.Bind(result);
Goto(&loop);
}
@@ -4965,47 +5457,133 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// trampolines also use this code currently, and they declare all
// outgoing parameters as untagged, while we would push a tagged
// object here.
- var_result.Bind(CallRuntime(Runtime::kToNumber, context, input));
+ auto function_id = mode == Object::Conversion::kToNumber
+ ? Runtime::kToNumber
+ : Runtime::kToNumeric;
+ var_result.Bind(CallRuntime(function_id, context, input));
Goto(&end);
}
}
BIND(&end);
- CSA_ASSERT(this, IsNumber(var_result.value()));
+ if (mode == Object::Conversion::kToNumeric) {
+ CSA_ASSERT(this, IsNumeric(var_result.value()));
+ } else {
+ DCHECK_EQ(mode, Object::Conversion::kToNumber);
+ CSA_ASSERT(this, IsNumber(var_result.value()));
+ }
return var_result.value();
}
-Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Number> CodeStubAssembler::NonNumberToNumber(
+ SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
+ BigIntHandling bigint_handling) {
+ return CAST(NonNumberToNumberOrNumeric(
+ context, input, Object::Conversion::kToNumber, bigint_handling));
+}
+
+TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(
+ SloppyTNode<Context> context, SloppyTNode<HeapObject> input) {
+ Node* result = NonNumberToNumberOrNumeric(context, input,
+ Object::Conversion::kToNumeric);
+ CSA_SLOW_ASSERT(this, IsNumeric(result));
+ return UncheckedCast<Numeric>(result);
+}
+
+TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
+ SloppyTNode<Object> input,
+ BigIntHandling bigint_handling) {
+ TVARIABLE(Number, var_result);
Label end(this);
Label not_smi(this, Label::kDeferred);
GotoIfNot(TaggedIsSmi(input), &not_smi);
- var_result.Bind(input);
+ TNode<Smi> input_smi = CAST(input);
+ var_result = input_smi;
Goto(&end);
BIND(&not_smi);
{
Label not_heap_number(this, Label::kDeferred);
- GotoIfNot(IsHeapNumber(input), &not_heap_number);
+ TNode<HeapObject> input_ho = CAST(input);
+ GotoIfNot(IsHeapNumber(input_ho), &not_heap_number);
- var_result.Bind(input);
+ TNode<HeapNumber> input_hn = CAST(input_ho);
+ var_result = input_hn;
Goto(&end);
BIND(&not_heap_number);
{
- var_result.Bind(NonNumberToNumber(context, input));
+ var_result = NonNumberToNumber(context, input_ho, bigint_handling);
Goto(&end);
}
}
BIND(&end);
- CSA_ASSERT(this, IsNumber(var_result.value()));
- return var_result.value();
+ return var_result;
+}
+
+void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
+ Variable* var_numeric) {
+ TaggedToNumeric<Feedback::kNone>(context, value, done, var_numeric);
+}
+
+void CodeStubAssembler::TaggedToNumericWithFeedback(Node* context, Node* value,
+ Label* done,
+ Variable* var_numeric,
+ Variable* var_feedback) {
+ TaggedToNumeric<Feedback::kCollect>(context, value, done, var_numeric,
+ var_feedback);
+}
+
+template <CodeStubAssembler::Feedback feedback>
+void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
+ Variable* var_numeric,
+ Variable* var_feedback) {
+ var_numeric->Bind(value);
+ Label if_smi(this), if_heapnumber(this), if_bigint(this), if_oddball(this);
+ GotoIf(TaggedIsSmi(value), &if_smi);
+ Node* map = LoadMap(value);
+ GotoIf(IsHeapNumberMap(map), &if_heapnumber);
+ Node* instance_type = LoadMapInstanceType(map);
+ GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
+
+ // {value} is not a Numeric yet.
+ GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
+ var_numeric->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, value));
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ }
+ Goto(done);
+
+ BIND(&if_smi);
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ }
+ Goto(done);
+
+ BIND(&if_heapnumber);
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ }
+ Goto(done);
+
+ BIND(&if_bigint);
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ }
+ Goto(done);
+
+ BIND(&if_oddball);
+ var_numeric->Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ if (feedback == Feedback::kCollect) {
+ var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ }
+ Goto(done);
}
// ES#sec-touint32
-TNode<Object> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
SloppyTNode<Object> input) {
Node* const float_zero = Float64Constant(0.0);
Node* const float_two_32 = Float64Constant(static_cast<double>(1ULL << 32));
@@ -5104,8 +5682,7 @@ TNode<Object> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
}
BIND(&out);
- CSA_ASSERT(this, IsNumber(var_result.value()));
- return UncheckedCast<Object>(var_result.value());
+ return CAST(var_result.value());
}
TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
@@ -5130,8 +5707,7 @@ TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
BIND(&not_heap_number);
{
- GotoIf(Word32NotEqual(input_instance_type, Int32Constant(ODDBALL_TYPE)),
- &runtime);
+ GotoIfNot(InstanceTypeEqual(input_instance_type, ODDBALL_TYPE), &runtime);
result.Bind(LoadObjectField(CAST(input), Oddball::kToStringOffset));
Goto(&done);
}
@@ -5146,8 +5722,8 @@ TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
return CAST(result.value());
}
-Node* CodeStubAssembler::ToString_Inline(Node* const context,
- Node* const input) {
+TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
VARIABLE(var_result, MachineRepresentation::kTagged, input);
Label stub_call(this, Label::kDeferred), out(this);
@@ -5159,7 +5735,7 @@ Node* CodeStubAssembler::ToString_Inline(Node* const context,
Goto(&out);
BIND(&out);
- return var_result.value();
+ return CAST(var_result.value());
}
Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
@@ -5252,11 +5828,11 @@ Node* CodeStubAssembler::ToLength_Inline(Node* const context,
MachineRepresentation::kTagged);
}
-TNode<Object> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode) {
// We might need to loop once for ToNumber conversion.
- VARIABLE(var_arg, MachineRepresentation::kTagged, input);
+ TVARIABLE(Object, var_arg, input);
Label loop(this, &var_arg), out(this);
Goto(&loop);
BIND(&loop);
@@ -5265,7 +5841,7 @@ TNode<Object> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
Label return_zero(this, Label::kDeferred);
// Load the current {arg} value.
- Node* arg = var_arg.value();
+ TNode<Object> arg = var_arg;
// Check if {arg} is a Smi.
GotoIf(TaggedIsSmi(arg), &out);
@@ -5277,39 +5853,40 @@ TNode<Object> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
BIND(&if_argisheapnumber);
{
+ TNode<HeapNumber> arg_hn = CAST(arg);
// Load the floating-point value of {arg}.
- Node* arg_value = LoadHeapNumberValue(arg);
+ Node* arg_value = LoadHeapNumberValue(arg_hn);
// Check if {arg} is NaN.
GotoIfNot(Float64Equal(arg_value, arg_value), &return_zero);
// Truncate {arg} towards zero.
- Node* value = Float64Trunc(arg_value);
+ TNode<Float64T> value = Float64Trunc(arg_value);
if (mode == kTruncateMinusZero) {
// Truncate -0.0 to 0.
GotoIf(Float64Equal(value, Float64Constant(0.0)), &return_zero);
}
- var_arg.Bind(ChangeFloat64ToTagged(value));
+ var_arg = ChangeFloat64ToTagged(value);
Goto(&out);
}
BIND(&if_argisnotheapnumber);
{
// Need to convert {arg} to a Number first.
- var_arg.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, arg));
+ var_arg = UncheckedCast<Object>(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, arg));
Goto(&loop);
}
BIND(&return_zero);
- var_arg.Bind(SmiConstant(0));
+ var_arg = SmiConstant(0);
Goto(&out);
}
BIND(&out);
- CSA_SLOW_ASSERT(this, IsNumber(var_arg.value()));
- return UncheckedCast<Object>(var_arg.value());
+ return CAST(var_arg);
}
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
@@ -5341,7 +5918,7 @@ void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
}
void CodeStubAssembler::IncrementCounter(StatsCounter* counter, int delta) {
- DCHECK(delta > 0);
+ DCHECK_GT(delta, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Node* counter_address = ExternalConstant(ExternalReference(counter));
Node* value = Load(MachineType::Int32(), counter_address);
@@ -5351,7 +5928,7 @@ void CodeStubAssembler::IncrementCounter(StatsCounter* counter, int delta) {
}
void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
- DCHECK(delta > 0);
+ DCHECK_GT(delta, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Node* counter_address = ExternalConstant(ExternalReference(counter));
Node* value = Load(MachineType::Int32(), counter_address);
@@ -5407,11 +5984,10 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
// then it must be an uncacheable index. Handle this case in the runtime.
GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
// Check if we have a ThinString.
- GotoIf(Word32Equal(key_instance_type, Int32Constant(THIN_STRING_TYPE)),
+ GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE),
+ &if_thinstring);
+ GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE),
&if_thinstring);
- GotoIf(
- Word32Equal(key_instance_type, Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
- &if_thinstring);
// Finally, check if |key| is internalized.
STATIC_ASSERT(kNotInternalizedTag != 0);
GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask),
@@ -5436,7 +6012,7 @@ void CodeStubAssembler::TryInternalizeString(
Node* string, Label* if_index, Variable* var_index, Label* if_internalized,
Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) {
DCHECK(var_index->rep() == MachineType::PointerRepresentation());
- DCHECK(var_internalized->rep() == MachineRepresentation::kTagged);
+ DCHECK_EQ(var_internalized->rep(), MachineRepresentation::kTagged);
CSA_SLOW_ASSERT(this, IsString(string));
Node* function = ExternalConstant(
ExternalReference::try_internalize_string_function(isolate()));
@@ -5466,14 +6042,13 @@ Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
template Node* CodeStubAssembler::EntryToIndex<NameDictionary>(Node*, int);
template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
-template Node* CodeStubAssembler::EntryToIndex<SeededNumberDictionary>(Node*,
- int);
+template Node* CodeStubAssembler::EntryToIndex<NumberDictionary>(Node*, int);
// This must be kept in sync with HashTableBase::ComputeCapacity().
TNode<IntPtrT> CodeStubAssembler::HashTableComputeCapacity(
SloppyTNode<IntPtrT> at_least_space_for) {
- Node* capacity = IntPtrRoundUpToPowerOfTwo32(IntPtrAdd(
- at_least_space_for, WordShr(at_least_space_for, IntPtrConstant(1))));
+ Node* capacity = IntPtrRoundUpToPowerOfTwo32(
+ IntPtrAdd(at_least_space_for, WordShr(at_least_space_for, 1)));
return IntPtrMax(capacity, IntPtrConstant(HashTableBase::kMinCapacity));
}
@@ -5622,22 +6197,19 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
return Word32And(hash, Int32Constant(0x3fffffff));
}
-template <typename Dictionary>
void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Node* intptr_index,
Label* if_found,
Variable* var_entry,
Label* if_not_found) {
- CSA_ASSERT(this, IsDictionary(dictionary));
+ CSA_ASSERT(this, IsNumberDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
- Node* capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
+ Node* capacity = SmiUntag(GetCapacity<NumberDictionary>(dictionary));
Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
- Node* int32_seed = std::is_same<Dictionary, SeededNumberDictionary>::value
- ? HashSeed()
- : Int32Constant(kZeroHashSeed);
+ Node* int32_seed = HashSeed();
Node* hash = ChangeUint32ToWord(ComputeIntegerHash(intptr_index, int32_seed));
Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
@@ -5657,7 +6229,7 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
{
Node* entry = var_entry->value();
- Node* index = EntryToIndex<Dictionary>(entry);
+ Node* index = EntryToIndex<NumberDictionary>(entry);
Node* current = LoadFixedArrayElement(dictionary, index);
GotoIf(WordEqual(current, undefined), if_not_found);
Label next_probe(this);
@@ -5732,10 +6304,7 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(Node* dictionary,
// Private names must be marked non-enumerable.
Label not_private(this, &var_details);
- GotoIfNot(IsSymbolMap(LoadMap(name)), &not_private);
- Node* flags = SmiToWord32(CAST(LoadObjectField(name, Symbol::kFlagsOffset)));
- const int kPrivateMask = 1 << Symbol::kPrivateBit;
- GotoIfNot(IsSetWord32(flags, kPrivateMask), &not_private);
+ GotoIfNot(IsPrivateSymbol(name), &not_private);
Node* dont_enum =
SmiShl(SmiConstant(DONT_ENUM), PropertyDetails::AttributesField::kShift);
var_details.Bind(SmiOr(var_details.value(), dont_enum));
@@ -5989,7 +6558,7 @@ void CodeStubAssembler::TryLookupProperty(
BIND(&if_objectisspecial);
{
// Handle global object here and bailout for other special objects.
- GotoIfNot(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_GLOBAL_OBJECT_TYPE),
if_bailout);
// Handle interceptors and access checks in runtime.
@@ -6067,19 +6636,19 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Node* representation =
DecodeWord32<PropertyDetails::RepresentationField>(details);
- Node* inobject_properties = LoadMapInobjectProperties(map);
+ field_index =
+ IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(map));
+ Node* instance_size_in_words = LoadMapInstanceSizeInWords(map);
Label if_inobject(this), if_backing_store(this);
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
- Branch(UintPtrLessThan(field_index, inobject_properties), &if_inobject,
+ Branch(UintPtrLessThan(field_index, instance_size_in_words), &if_inobject,
&if_backing_store);
BIND(&if_inobject);
{
Comment("if_inobject");
- Node* field_offset = TimesPointerSize(
- IntPtrAdd(IntPtrSub(LoadMapInstanceSize(map), inobject_properties),
- field_index));
+ Node* field_offset = TimesPointerSize(field_index);
Label if_double(this), if_tagged(this);
Branch(Word32NotEqual(representation,
@@ -6106,7 +6675,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
{
Comment("if_backing_store");
Node* properties = LoadFastProperties(object);
- field_index = IntPtrSub(field_index, inobject_properties);
+ field_index = IntPtrSub(field_index, instance_size_in_words);
Node* value = LoadFixedArrayElement(properties, field_index);
Label if_double(this), if_tagged(this);
@@ -6205,8 +6774,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
Node* getter_map = LoadMap(getter);
Node* instance_type = LoadMapInstanceType(getter_map);
// FunctionTemplateInfo getters are not supported yet.
- GotoIf(Word32Equal(instance_type,
- Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
+ GotoIf(InstanceTypeEqual(instance_type, FUNCTION_TEMPLATE_INFO_TYPE),
if_bailout);
// Return undefined if the {getter} is not callable.
@@ -6255,9 +6823,17 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
GotoIfNot(IsPrototypeString(
LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
if_bailout);
- GotoIf(IsSetWord32(LoadMapBitField(receiver_map),
- 1 << Map::kHasNonInstancePrototype),
- if_bailout);
+
+ // if (!(has_prototype_slot() && !has_non_instance_prototype())) use
+ // generic property loading mechanism.
+ int has_prototype_slot_mask = 1 << Map::kHasPrototypeSlot;
+ int has_non_instance_prototype_mask = 1 << Map::kHasNonInstancePrototype;
+ GotoIfNot(
+ Word32Equal(Word32And(LoadMapBitField(receiver_map),
+ Int32Constant(has_prototype_slot_mask |
+ has_non_instance_prototype_mask)),
+ Int32Constant(has_prototype_slot_mask)),
+ if_bailout);
var_value.Bind(LoadJSFunctionPrototype(receiver, if_bailout));
Goto(&done);
}
@@ -6272,7 +6848,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
Node* receiver_value = LoadJSValueValue(receiver);
GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout);
GotoIfNot(IsString(receiver_value), if_bailout);
- var_value.Bind(LoadStringLength(receiver_value));
+ var_value.Bind(LoadStringLengthAsSmi(receiver_value));
Goto(&done);
}
}
@@ -6341,6 +6917,7 @@ void CodeStubAssembler::TryGetOwnProperty(
// Here we have details and value which could be an accessor.
BIND(&if_found);
{
+ // TODO(ishell): Execute C++ accessor in case of accessor info
if (var_raw_value) {
var_raw_value->Bind(var_value->value());
}
@@ -6445,25 +7022,25 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
VARIABLE(var_entry, MachineType::PointerRepresentation());
Node* elements = LoadElements(object);
- NumberDictionaryLookup<SeededNumberDictionary>(
- elements, intptr_index, if_found, &var_entry, if_not_found);
+ NumberDictionaryLookup(elements, intptr_index, if_found, &var_entry,
+ if_not_found);
}
BIND(&if_isfaststringwrapper);
{
CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
Node* string = LoadJSValueValue(object);
- CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
- Node* length = LoadStringLength(string);
- GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
+ CSA_ASSERT(this, IsString(string));
+ Node* length = LoadStringLengthAsWord(string);
+ GotoIf(UintPtrLessThan(intptr_index, length), if_found);
Goto(&if_isobjectorsmi);
}
BIND(&if_isslowstringwrapper);
{
CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
Node* string = LoadJSValueValue(object);
- CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
- Node* length = LoadStringLength(string);
- GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
+ CSA_ASSERT(this, IsString(string));
+ Node* length = LoadStringLengthAsWord(string);
+ GotoIf(UintPtrLessThan(intptr_index, length), if_found);
Goto(&if_isdictionary);
}
BIND(&if_typedarray);
@@ -6484,12 +7061,6 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
}
-// Instantiate template methods to workaround GCC compilation issue.
-template void CodeStubAssembler::NumberDictionaryLookup<SeededNumberDictionary>(
- Node*, Node*, Label*, Variable*, Label*);
-template void CodeStubAssembler::NumberDictionaryLookup<
- UnseededNumberDictionary>(Node*, Node*, Label*, Variable*, Label*);
-
void CodeStubAssembler::TryPrototypeChainLookup(
Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder, Label* if_end,
@@ -6505,14 +7076,12 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Label if_objectisreceiver(this);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
STATIC_ASSERT(FIRST_JS_RECEIVER_TYPE == JS_PROXY_TYPE);
- Branch(Int32GreaterThanOrEqual(instance_type,
- Int32Constant(FIRST_JS_RECEIVER_TYPE)),
- &if_objectisreceiver, if_bailout);
+ Branch(IsJSReceiverInstanceType(instance_type), &if_objectisreceiver,
+ if_bailout);
BIND(&if_objectisreceiver);
if (if_proxy) {
- GotoIf(Word32Equal(instance_type, Int32Constant(JS_PROXY_TYPE)),
- if_proxy);
+ GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
}
}
@@ -6546,15 +7115,12 @@ void CodeStubAssembler::TryPrototypeChainLookup(
BIND(&next_proto);
// Bailout if it can be an integer indexed exotic case.
- GotoIf(
- Word32Equal(holder_instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
- if_bailout);
+ GotoIf(InstanceTypeEqual(holder_instance_type, JS_TYPED_ARRAY_TYPE),
+ if_bailout);
Node* proto = LoadMapPrototype(holder_map);
- Label if_not_null(this);
- Branch(WordEqual(proto, NullConstant()), if_end, &if_not_null);
- BIND(&if_not_null);
+ GotoIf(IsNull(proto), if_end);
Node* map = LoadMap(proto);
Node* instance_type = LoadMapInstanceType(map);
@@ -6587,9 +7153,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Node* proto = LoadMapPrototype(var_holder_map.value());
- Label if_not_null(this);
- Branch(WordEqual(proto, NullConstant()), if_end, &if_not_null);
- BIND(&if_not_null);
+ GotoIf(IsNull(proto), if_end);
Node* map = LoadMap(proto);
Node* instance_type = LoadMapInstanceType(map);
@@ -6682,9 +7246,8 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// Goto runtime if {callable} is not a JSFunction.
Node* callable_instance_type = LoadMapInstanceType(callable_map);
- GotoIfNot(
- Word32Equal(callable_instance_type, Int32Constant(JS_FUNCTION_TYPE)),
- &return_runtime);
+ GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
+ &return_runtime);
// Goto runtime if {callable} is not a constructor or has
// a non-instance "prototype".
@@ -6710,9 +7273,8 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// created so far and hence we should return false.
Node* callable_prototype_instance_type =
LoadInstanceType(callable_prototype);
- GotoIfNot(
- Word32Equal(callable_prototype_instance_type, Int32Constant(MAP_TYPE)),
- &callable_prototype_valid);
+ GotoIfNot(InstanceTypeEqual(callable_prototype_instance_type, MAP_TYPE),
+ &callable_prototype_valid);
var_callable_prototype.Bind(
LoadObjectField(callable_prototype, Map::kPrototypeOffset));
Goto(&callable_prototype_valid);
@@ -6792,15 +7354,29 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
{
StoreFeedbackVectorSlot(feedback_vector, slot_id, combined_feedback,
SKIP_WRITE_BARRIER);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(
- feedback_vector, FeedbackVector::kProfilerTicksOffset, SmiConstant(0));
+ ReportFeedbackUpdate(feedback_vector, slot_id, "UpdateFeedback");
Goto(&end);
}
BIND(&end);
}
+void CodeStubAssembler::ReportFeedbackUpdate(
+ SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot_id,
+ const char* reason) {
+ // Reset profiler ticks.
+ StoreObjectFieldNoWriteBarrier(
+ feedback_vector, FeedbackVector::kProfilerTicksOffset, Int32Constant(0),
+ MachineRepresentation::kWord32);
+
+#ifdef V8_TRACE_FEEDBACK_UPDATES
+ // Trace the update.
+ CallRuntime(Runtime::kInterpreterTraceUpdateFeedback, NoContextConstant(),
+ LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset),
+ SmiTag(slot_id), StringConstant(reason));
+#endif // V8_TRACE_FEEDBACK_UPDATES
+}
+
void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
Node* feedback) {
existing_feedback->Bind(SmiOr(existing_feedback->value(), feedback));
@@ -7177,7 +7753,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
IsDoubleElementsKind(elements_kind));
- Node* length = is_jsarray ? LoadObjectField(object, JSArray::kLengthOffset)
+ Node* length = is_jsarray ? LoadJSArrayLength(object)
: LoadFixedArrayBaseLength(elements);
length = TaggedToParameter(length, parameter_mode);
@@ -7439,83 +8015,10 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
// Store the WeakCell in the feedback vector.
StoreFeedbackVectorSlot(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER, 0,
- CodeStubAssembler::SMI_PARAMETERS);
+ CodeStubAssembler::INTPTR_PARAMETERS);
return cell;
}
-void CodeStubAssembler::HandleSlackTracking(Node* context, Node* object,
- Node* initial_map,
- int start_offset) {
- Node* instance_size_words = ChangeUint32ToWord(LoadObjectField(
- initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
- Node* instance_size = TimesPointerSize(instance_size_words);
-
- // Perform in-object slack tracking if requested.
- Node* bit_field3 = LoadMapBitField3(initial_map);
- Label end(this), slack_tracking(this), finalize(this, Label::kDeferred);
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
-
- // Initialize remaining fields.
- {
- Comment("no slack tracking");
- InitializeFieldsWithRoot(object, IntPtrConstant(start_offset),
- instance_size, Heap::kUndefinedValueRootIndex);
- Goto(&end);
- }
-
- {
- BIND(&slack_tracking);
-
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- Comment("update allocation count");
- Node* new_bit_field3 = Int32Sub(
- bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
- StoreObjectFieldNoWriteBarrier(initial_map, Map::kBitField3Offset,
- new_bit_field3,
- MachineRepresentation::kWord32);
- GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &finalize);
-
- Node* unused_fields = LoadObjectField(
- initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
- Node* used_size = IntPtrSub(
- instance_size, TimesPointerSize(ChangeUint32ToWord(unused_fields)));
-
- Comment("initialize filler fields (no finalize)");
- InitializeFieldsWithRoot(object, used_size, instance_size,
- Heap::kOnePointerFillerMapRootIndex);
-
- Comment("initialize undefined fields (no finalize)");
- InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
- Heap::kUndefinedValueRootIndex);
- Goto(&end);
- }
-
- {
- // Finalize the instance size.
- BIND(&finalize);
-
- Node* unused_fields = LoadObjectField(
- initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
- Node* used_size = IntPtrSub(
- instance_size, TimesPointerSize(ChangeUint32ToWord(unused_fields)));
-
- Comment("initialize filler fields (finalize)");
- InitializeFieldsWithRoot(object, used_size, instance_size,
- Heap::kOnePointerFillerMapRootIndex);
-
- Comment("initialize undefined fields (finalize)");
- InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
- Heap::kUndefinedValueRootIndex);
-
- CallRuntime(Runtime::kFinalizeInstanceSize, context, initial_map);
- Goto(&end);
- }
-
- BIND(&end);
-}
-
Node* CodeStubAssembler::BuildFastLoop(
const CodeStubAssembler::VariableList& vars, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
@@ -7569,7 +8072,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
bool constent_last = ToInt32Constant(last_element_exclusive, last_val);
if (constant_first && constent_last) {
int delta = last_val - first_val;
- DCHECK(delta >= 0);
+ DCHECK_GE(delta, 0);
if (delta <= kElementLoopUnrollThreshold) {
if (direction == ForEachDirection::kForward) {
for (int i = first_val; i < last_val; ++i) {
@@ -7612,11 +8115,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
Node* element_count, Label* doesnt_fit, int base_size, ParameterMode mode) {
- int max_newspace_parameters =
- (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
- GotoIf(IntPtrOrSmiGreaterThan(
- element_count, IntPtrOrSmiConstant(max_newspace_parameters, mode),
- mode),
+ GotoIf(FixedArraySizeDoesntFitInNewSpace(element_count, base_size, mode),
doesnt_fit);
}
@@ -7637,8 +8136,7 @@ void CodeStubAssembler::InitializeFieldsWithRoot(
}
void CodeStubAssembler::BranchIfNumericRelationalComparison(
- RelationalComparisonMode mode, Node* lhs, Node* rhs, Label* if_true,
- Label* if_false) {
+ Operation op, Node* lhs, Node* rhs, Label* if_true, Label* if_false) {
CSA_SLOW_ASSERT(this, IsNumber(lhs));
CSA_SLOW_ASSERT(this, IsNumber(rhs));
@@ -7663,19 +8161,21 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
BIND(&if_rhsissmi);
{
// Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
- switch (mode) {
- case kLessThan:
+ switch (op) {
+ case Operation::kLessThan:
BranchIfSmiLessThan(lhs, rhs, if_true, if_false);
break;
- case kLessThanOrEqual:
+ case Operation::kLessThanOrEqual:
BranchIfSmiLessThanOrEqual(lhs, rhs, if_true, if_false);
break;
- case kGreaterThan:
+ case Operation::kGreaterThan:
BranchIfSmiLessThan(rhs, lhs, if_true, if_false);
break;
- case kGreaterThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
BranchIfSmiLessThanOrEqual(rhs, lhs, if_true, if_false);
break;
+ default:
+ UNREACHABLE();
}
}
@@ -7726,34 +8226,53 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
Node* rhs = var_fcmp_rhs.value();
// Perform a fast floating point comparison.
- switch (mode) {
- case kLessThan:
+ switch (op) {
+ case Operation::kLessThan:
Branch(Float64LessThan(lhs, rhs), if_true, if_false);
break;
- case kLessThanOrEqual:
+ case Operation::kLessThanOrEqual:
Branch(Float64LessThanOrEqual(lhs, rhs), if_true, if_false);
break;
- case kGreaterThan:
+ case Operation::kGreaterThan:
Branch(Float64GreaterThan(lhs, rhs), if_true, if_false);
break;
- case kGreaterThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
Branch(Float64GreaterThanOrEqual(lhs, rhs), if_true, if_false);
break;
+ default:
+ UNREACHABLE();
}
}
}
-void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(Node* lhs, Node* rhs,
- Label* if_true) {
+void CodeStubAssembler::GotoIfNumericGreaterThanOrEqual(Node* lhs, Node* rhs,
+ Label* if_true) {
Label if_false(this);
- BranchIfNumericRelationalComparison(kGreaterThanOrEqual, lhs, rhs, if_true,
- &if_false);
+ BranchIfNumericRelationalComparison(Operation::kGreaterThanOrEqual, lhs, rhs,
+ if_true, &if_false);
BIND(&if_false);
}
-Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
- Node* lhs, Node* rhs,
- Node* context,
+namespace {
+Operation Reverse(Operation op) {
+ switch (op) {
+ case Operation::kLessThan:
+ return Operation::kGreaterThan;
+ case Operation::kLessThanOrEqual:
+ return Operation::kGreaterThanOrEqual;
+ case Operation::kGreaterThan:
+ return Operation::kLessThan;
+ case Operation::kGreaterThanOrEqual:
+ return Operation::kLessThanOrEqual;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+} // anonymous namespace
+
+Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
+ Node* rhs, Node* context,
Variable* var_type_feedback) {
Label return_true(this), return_false(this), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
@@ -7763,7 +8282,7 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
VARIABLE(var_fcmp_lhs, MachineRepresentation::kFloat64);
VARIABLE(var_fcmp_rhs, MachineRepresentation::kFloat64);
- // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // We might need to loop several times due to ToPrimitive and/or ToNumeric
// conversions.
VARIABLE(var_lhs, MachineRepresentation::kTagged, lhs);
VARIABLE(var_rhs, MachineRepresentation::kTagged, rhs);
@@ -7788,9 +8307,15 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_lhsissmi);
{
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisheapnumber(this),
+ if_rhsisbigint(this, Label::kDeferred),
+ if_rhsisnotnumeric(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(rhs), &if_rhsissmi);
+ Node* rhs_map = LoadMap(rhs);
+ GotoIf(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
+ &if_rhsisnotnumeric);
BIND(&if_rhsissmi);
{
@@ -7799,61 +8324,66 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
CombineFeedback(var_type_feedback,
SmiConstant(CompareOperationFeedback::kSignedSmall));
}
- switch (mode) {
- case kLessThan:
+ switch (op) {
+ case Operation::kLessThan:
BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
break;
- case kLessThanOrEqual:
+ case Operation::kLessThanOrEqual:
BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true, &return_false);
break;
- case kGreaterThan:
+ case Operation::kGreaterThan:
BranchIfSmiLessThan(rhs, lhs, &return_true, &return_false);
break;
- case kGreaterThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true, &return_false);
break;
+ default:
+ UNREACHABLE();
}
}
- BIND(&if_rhsisnotsmi);
+ BIND(&if_rhsisheapnumber);
{
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumber(rhs), &if_rhsisnumber, &if_rhsisnotnumber);
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
- BIND(&if_rhsisnumber);
- {
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ BIND(&if_rhsisbigint);
+ {
+ // The {lhs} is a Smi and {rhs} is a BigInt.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
}
+ result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(Reverse(op)),
+ rhs, lhs));
+ Goto(&end);
+ }
- BIND(&if_rhsisnotnumber);
- {
- // The {rhs} is not a HeapNumber and {lhs} is an Smi.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {rhs} to a Number; we don't need to perform the
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumber(rhs) will by itself already invoke ToPrimitive with
- // a Number hint.
- var_rhs.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, rhs));
- Goto(&loop);
+ BIND(&if_rhsisnotnumeric);
+ {
+ // The {lhs} is a Smi and {rhs} is not a Numeric.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
}
+ // Convert the {rhs} to a Numeric; we don't need to perform the
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ var_rhs.Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ Goto(&loop);
}
}
BIND(&if_lhsisnotsmi);
{
- // Load the map of {lhs}.
Node* lhs_map = LoadMap(lhs);
// Check if {rhs} is a Smi or a HeapObject.
@@ -7862,11 +8392,14 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsissmi);
{
- // Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
-
- BIND(&if_lhsisnumber);
+ Label if_lhsisheapnumber(this), if_lhsisbigint(this, Label::kDeferred),
+ if_lhsisnotnumeric(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+ Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
+ &if_lhsisnotnumeric);
+
+ BIND(&if_lhsisheapnumber);
{
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
@@ -7879,18 +8412,31 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
Goto(&do_fcmp);
}
- BIND(&if_lhsisnotnumber);
+ BIND(&if_lhsisbigint);
{
- // The {lhs} is not a HeapNumber and {rhs} is an Smi.
if (var_type_feedback != nullptr) {
var_type_feedback->Bind(
SmiConstant(CompareOperationFeedback::kAny));
}
- // Convert the {lhs} to a Number; we don't need to perform the
+ result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(op), lhs,
+ rhs));
+ Goto(&end);
+ }
+
+ BIND(&if_lhsisnotnumeric);
+ {
+ // The {lhs} is not a Numeric and {rhs} is an Smi.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+ // Convert the {lhs} to a Numeric; we don't need to perform the
// dedicated ToPrimitive(lhs, hint Number) operation, as the
- // ToNumber(lhs) will by itself already invoke ToPrimitive with
+ // ToNumeric(lhs) will by itself already invoke ToPrimitive with
// a Number hint.
- var_lhs.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, lhs));
+ var_lhs.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
Goto(&loop);
}
}
@@ -7900,18 +8446,27 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
// Load the map of {rhs}.
Node* rhs_map = LoadMap(rhs);
- // Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this);
- Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
+ // Further analyze {lhs}.
+ Label if_lhsisheapnumber(this), if_lhsisbigint(this, Label::kDeferred),
+ if_lhsisstring(this), if_lhsisother(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+ GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisother);
- BIND(&if_lhsisnumber);
+ BIND(&if_lhsisheapnumber);
{
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(WordEqual(lhs_map, rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ // Further inspect {rhs}.
+ Label if_rhsisheapnumber(this),
+ if_rhsisbigint(this, Label::kDeferred),
+ if_rhsisnotnumeric(this, Label::kDeferred);
+ GotoIf(WordEqual(rhs_map, lhs_map), &if_rhsisheapnumber);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
+ &if_rhsisnotnumeric);
- BIND(&if_rhsisnumber);
+ BIND(&if_rhsisheapnumber);
{
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
@@ -7924,175 +8479,218 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
Goto(&do_fcmp);
}
- BIND(&if_rhsisnotnumber);
+ BIND(&if_rhsisbigint);
+ {
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+ result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(),
+ SmiConstant(Reverse(op)), rhs, lhs));
+ Goto(&end);
+ }
+
+ BIND(&if_rhsisnotnumeric);
{
- // The {rhs} is not a HeapNumber and {lhs} is a HeapNumber.
+ // The {lhs} is a HeapNumber and {rhs} is not a Numeric.
if (var_type_feedback != nullptr) {
var_type_feedback->Bind(
SmiConstant(CompareOperationFeedback::kAny));
}
- // Convert the {rhs} to a Number; we don't need to perform
+ // Convert the {rhs} to a Numeric; we don't need to perform
// dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // ToNumeric(rhs) will by itself already invoke ToPrimitive with
// a Number hint.
var_rhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, rhs));
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
Goto(&loop);
}
}
- BIND(&if_lhsisnotnumber);
+ BIND(&if_lhsisbigint);
{
- // Load the instance type of {lhs}.
- Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
- // Check if {lhs} is a String.
- Label if_lhsisstring(this), if_lhsisnotstring(this, Label::kDeferred);
- Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
- &if_lhsisnotstring);
+ Label if_rhsisheapnumber(this), if_rhsisbigint(this),
+ if_rhsisnotnumeric(this);
+ GotoIf(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
+ &if_rhsisnotnumeric);
- BIND(&if_lhsisstring);
+ BIND(&if_rhsisheapnumber);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(op), lhs,
+ rhs));
+ Goto(&end);
+ }
- // Check if {rhs} is also a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this, Label::kDeferred);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
+ BIND(&if_rhsisbigint);
+ {
+ result.Bind(CallRuntime(Runtime::kBigIntCompareToBigInt,
+ NoContextConstant(), SmiConstant(op), lhs,
+ rhs));
+ Goto(&end);
+ }
- BIND(&if_rhsisstring);
- {
- // Both {lhs} and {rhs} are strings.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kString));
- }
- switch (mode) {
- case kLessThan:
- result.Bind(CallBuiltin(Builtins::kStringLessThan, context,
- lhs, rhs));
- Goto(&end);
- break;
- case kLessThanOrEqual:
- result.Bind(CallBuiltin(Builtins::kStringLessThanOrEqual,
- context, lhs, rhs));
- Goto(&end);
- break;
- case kGreaterThan:
- result.Bind(CallBuiltin(Builtins::kStringGreaterThan, context,
- lhs, rhs));
- Goto(&end);
- break;
- case kGreaterThanOrEqual:
- result.Bind(CallBuiltin(Builtins::kStringGreaterThanOrEqual,
- context, lhs, rhs));
- Goto(&end);
- break;
- }
- }
+ BIND(&if_rhsisnotnumeric);
+ {
+ // Convert the {rhs} to a Numeric; we don't need to perform
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ var_rhs.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ Goto(&loop);
+ }
+ }
- BIND(&if_rhsisnotstring);
- {
- // The {lhs} is a String and {rhs} is not a String.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // The {lhs} is a String, while {rhs} is neither a Number nor a
- // String, so we need to call ToPrimitive(rhs, hint Number) if
- // {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
- // other cases.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing Number hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- isolate(), ToPrimitiveHint::kNumber);
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
+ BIND(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- BIND(&if_rhsisnotreceiver);
- {
- // Convert both {lhs} and {rhs} to Number.
- var_lhs.Bind(CallBuiltin(Builtins::kToNumber, context, lhs));
- var_rhs.Bind(CallBuiltin(Builtins::kToNumber, context, rhs));
- Goto(&loop);
- }
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ BIND(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are strings.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kString));
+ }
+ switch (op) {
+ case Operation::kLessThan:
+ result.Bind(
+ CallBuiltin(Builtins::kStringLessThan, context, lhs, rhs));
+ Goto(&end);
+ break;
+ case Operation::kLessThanOrEqual:
+ result.Bind(CallBuiltin(Builtins::kStringLessThanOrEqual,
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case Operation::kGreaterThan:
+ result.Bind(CallBuiltin(Builtins::kStringGreaterThan, context,
+ lhs, rhs));
+ Goto(&end);
+ break;
+ case Operation::kGreaterThanOrEqual:
+ result.Bind(CallBuiltin(Builtins::kStringGreaterThanOrEqual,
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ default:
+ UNREACHABLE();
}
}
- BIND(&if_lhsisnotstring);
+ BIND(&if_rhsisnotstring);
{
+ // The {lhs} is a String and {rhs} is not a String.
if (var_type_feedback != nullptr) {
- // The {lhs} is not an Smi, HeapNumber or String and {rhs} is not
- // an Smi: collect NumberOrOddball feedback if {lhs} is an Oddball
- // and {rhs} is either a HeapNumber or Oddball.
- Label collect_any_feedback(this), collect_oddball_feedback(this),
- collect_feedback_done(this);
- GotoIfNot(
- Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE)),
- &collect_any_feedback);
-
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- GotoIf(Word32Equal(rhs_instance_type,
- Int32Constant(HEAP_NUMBER_TYPE)),
- &collect_oddball_feedback);
- Branch(
- Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE)),
- &collect_oddball_feedback, &collect_any_feedback);
-
- BIND(&collect_oddball_feedback);
- {
- CombineFeedback(
- var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumberOrOddball));
- Goto(&collect_feedback_done);
- }
-
- BIND(&collect_any_feedback);
- {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- Goto(&collect_feedback_done);
- }
-
- BIND(&collect_feedback_done);
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
}
- // The {lhs} is neither a Number nor a String, so we need to call
- // ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
- // ToNumber(lhs) and ToNumber(rhs) in the other cases.
+ // The {lhs} is a String, while {rhs} isn't. So we call
+ // ToPrimitive(rhs, hint Number) if {rhs} is a receiver, or
+ // ToNumeric(lhs) and then ToNumeric(rhs) in the other cases.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- BIND(&if_lhsisreceiver);
+ BIND(&if_rhsisreceiver);
{
- // Convert {lhs} to a primitive first passing Number hint.
+ // Convert {rhs} to a primitive first passing Number hint.
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_lhs.Bind(CallStub(callable, context, lhs));
+ var_rhs.Bind(CallStub(callable, context, rhs));
Goto(&loop);
}
- BIND(&if_lhsisnotreceiver);
+ BIND(&if_rhsisnotreceiver);
{
- // Convert both {lhs} and {rhs} to Number.
- var_lhs.Bind(CallBuiltin(Builtins::kToNumber, context, lhs));
- var_rhs.Bind(CallBuiltin(Builtins::kToNumber, context, rhs));
+ // Convert both {lhs} and {rhs} to Numeric.
+ var_lhs.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
+ var_rhs.Bind(CallBuiltin(Builtins::kToNumeric, context, rhs));
Goto(&loop);
}
}
}
+
+ BIND(&if_lhsisother);
+ {
+ // The {lhs} is neither a Numeric nor a String, and {rhs} is not
+ // an Smi.
+ if (var_type_feedback != nullptr) {
+ // Collect NumberOrOddball feedback if {lhs} is an Oddball
+ // and {rhs} is either a HeapNumber or Oddball. Otherwise collect
+ // Any feedback.
+ Label collect_any_feedback(this), collect_oddball_feedback(this),
+ collect_feedback_done(this);
+ GotoIfNot(InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE),
+ &collect_any_feedback);
+
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ GotoIf(InstanceTypeEqual(rhs_instance_type, HEAP_NUMBER_TYPE),
+ &collect_oddball_feedback);
+ Branch(InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE),
+ &collect_oddball_feedback, &collect_any_feedback);
+
+ BIND(&collect_oddball_feedback);
+ {
+ CombineFeedback(
+ var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+ Goto(&collect_feedback_done);
+ }
+
+ BIND(&collect_any_feedback);
+ {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ Goto(&collect_feedback_done);
+ }
+
+ BIND(&collect_feedback_done);
+ }
+
+ // If {lhs} is a receiver, we must call ToPrimitive(lhs, hint Number).
+ // Otherwise we must call ToNumeric(lhs) and then ToNumeric(rhs).
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type), &if_lhsisreceiver,
+ &if_lhsisnotreceiver);
+
+ BIND(&if_lhsisreceiver);
+ {
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+
+ BIND(&if_lhsisnotreceiver);
+ {
+ var_lhs.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
+ var_rhs.Bind(CallBuiltin(Builtins::kToNumeric, context, rhs));
+ Goto(&loop);
+ }
+ }
}
}
}
@@ -8104,32 +8702,34 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
Node* rhs = var_fcmp_rhs.value();
// Perform a fast floating point comparison.
- switch (mode) {
- case kLessThan:
+ switch (op) {
+ case Operation::kLessThan:
Branch(Float64LessThan(lhs, rhs), &return_true, &return_false);
break;
- case kLessThanOrEqual:
+ case Operation::kLessThanOrEqual:
Branch(Float64LessThanOrEqual(lhs, rhs), &return_true, &return_false);
break;
- case kGreaterThan:
+ case Operation::kGreaterThan:
Branch(Float64GreaterThan(lhs, rhs), &return_true, &return_false);
break;
- case kGreaterThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
Branch(Float64GreaterThanOrEqual(lhs, rhs), &return_true,
&return_false);
break;
+ default:
+ UNREACHABLE();
}
}
BIND(&return_true);
{
- result.Bind(BooleanConstant(true));
+ result.Bind(TrueConstant());
Goto(&end);
}
BIND(&return_false);
{
- result.Bind(BooleanConstant(false));
+ result.Bind(FalseConstant());
Goto(&end);
}
@@ -8154,107 +8754,98 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
// for NaN values because they are not considered equal, even if both the
// left and the right hand side reference exactly the same value.
- // Check if {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- BIND(&if_valueisnotsmi);
- {
- // Load the map of {value}.
- Node* value_map = LoadMap(value);
+ Label if_smi(this), if_heapnumber(this);
+ GotoIf(TaggedIsSmi(value), &if_smi);
- // Check if {value} (and therefore {rhs}) is a HeapNumber.
- Label if_valueisnumber(this), if_valueisnotnumber(this);
- Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valueisnotnumber);
+ Node* value_map = LoadMap(value);
+ GotoIf(IsHeapNumberMap(value_map), &if_heapnumber);
- BIND(&if_valueisnumber);
- {
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ // For non-HeapNumbers, all we do is collect type feedback.
+ if (var_type_feedback != nullptr) {
+ Node* instance_type = LoadMapInstanceType(value_map);
- // Convert {value} (and therefore {rhs}) to floating point value.
- Node* value_value = LoadHeapNumberValue(value);
+ Label if_string(this), if_receiver(this), if_symbol(this),
+ if_other(this, Label::kDeferred);
+ GotoIf(IsStringInstanceType(instance_type), &if_string);
+ GotoIf(IsJSReceiverInstanceType(instance_type), &if_receiver);
+ Branch(IsSymbolInstanceType(instance_type), &if_symbol, &if_other);
- // Check if the HeapNumber value is a NaN.
- BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+ BIND(&if_string);
+ {
+ CombineFeedback(var_type_feedback,
+ CollectFeedbackForString(instance_type));
+ Goto(if_equal);
}
- BIND(&if_valueisnotnumber);
- if (var_type_feedback != nullptr) {
- // Collect type feedback.
- Node* instance_type = LoadMapInstanceType(value_map);
-
- Label if_valueisstring(this), if_valueisreceiver(this),
- if_valueissymbol(this), if_valueisother(this, Label::kDeferred);
- GotoIf(IsStringInstanceType(instance_type), &if_valueisstring);
- GotoIf(IsJSReceiverInstanceType(instance_type), &if_valueisreceiver);
- Branch(IsSymbolInstanceType(instance_type), &if_valueissymbol,
- &if_valueisother);
-
- BIND(&if_valueisstring);
- {
- CombineFeedback(var_type_feedback,
- CollectFeedbackForString(instance_type));
- Goto(if_equal);
- }
+ BIND(&if_symbol);
+ {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSymbol));
+ Goto(if_equal);
+ }
- BIND(&if_valueissymbol);
- {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSymbol));
- Goto(if_equal);
- }
+ BIND(&if_receiver);
+ {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kReceiver));
+ Goto(if_equal);
+ }
- BIND(&if_valueisreceiver);
- {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kReceiver));
- Goto(if_equal);
- }
+ // TODO(neis): Introduce BigInt CompareOperationFeedback and collect here
+ // and elsewhere?
- BIND(&if_valueisother);
- {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kAny));
- Goto(if_equal);
- }
- } else {
+ BIND(&if_other);
+ {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kAny));
Goto(if_equal);
}
+ } else {
+ Goto(if_equal);
}
- BIND(&if_valueissmi);
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
+ BIND(&if_heapnumber);
+ {
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+ Node* number_value = LoadHeapNumberValue(value);
+ BranchIfFloat64IsNaN(number_value, if_notequal, if_equal);
+ }
+
+ BIND(&if_smi);
+ {
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSignedSmall));
+ }
+ Goto(if_equal);
}
- Goto(if_equal);
}
// ES6 section 7.2.12 Abstract Equality Comparison
-Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
+Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
Variable* var_type_feedback) {
- // This is a slightly optimized version of Object::Equals represented as
- // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
+ // This is a slightly optimized version of Object::Equals. Whenever you
// change something functionality wise in here, remember to update the
// Object::Equals method as well.
- Label if_equal(this), if_notequal(this),
- do_rhsstringtonumber(this, Label::kDeferred), end(this);
+ Label if_equal(this), if_notequal(this), do_float_comparison(this),
+ do_right_stringtonumber(this, Label::kDeferred), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Float64T, var_left_float);
+ TVARIABLE(Float64T, var_right_float);
- // Shared entry for floating point comparison.
- Label do_fcmp(this);
- VARIABLE(var_fcmp_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fcmp_rhs, MachineRepresentation::kFloat64);
+ // We can avoid code duplication by exploiting the fact that abstract equality
+ // is symmetric.
+ Label use_symmetry(this);
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged, lhs);
- VARIABLE(var_rhs, MachineRepresentation::kTagged, rhs);
- VariableList loop_variable_list({&var_lhs, &var_rhs}, zone());
+ VARIABLE(var_left, MachineRepresentation::kTagged, left);
+ VARIABLE(var_right, MachineRepresentation::kTagged, right);
+ VariableList loop_variable_list({&var_left, &var_right}, zone());
if (var_type_feedback != nullptr) {
// Initialize the type feedback to None. The current feedback is combined
// with the previous feedback.
@@ -8265,472 +8856,303 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
Goto(&loop);
BIND(&loop);
{
- // Load the current {lhs} and {rhs} values.
- lhs = var_lhs.value();
- rhs = var_rhs.value();
+ left = var_left.value();
+ right = var_right.value();
- // Check if {lhs} and {rhs} refer to the same object.
- Label if_same(this), if_notsame(this);
- Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
-
- BIND(&if_same);
+ Label if_notsame(this);
+ GotoIf(WordNotEqual(left, right), &if_notsame);
{
- // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // {left} and {right} reference the exact same value, yet we need special
// treatment for HeapNumber, as NaN is not equal to NaN.
- GenerateEqual_Same(lhs, &if_equal, &if_notequal, var_type_feedback);
+ GenerateEqual_Same(left, &if_equal, &if_notequal, var_type_feedback);
}
BIND(&if_notsame);
+ Label if_left_smi(this), if_left_not_smi(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_not_smi);
+
+ BIND(&if_left_smi);
{
- // Check if {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_right_smi(this), if_right_not_smi(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
- BIND(&if_lhsissmi);
+ BIND(&if_right_smi);
{
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- // We have already checked for {lhs} and {rhs} being the same value, so
- // if both are Smis when we get here they must not be equal.
+ // We have already checked for {left} and {right} being the same value,
+ // so when we get here they must be different Smis.
if (var_type_feedback != nullptr) {
CombineFeedback(var_type_feedback,
SmiConstant(CompareOperationFeedback::kSignedSmall));
}
Goto(&if_notequal);
+ }
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
-
- BIND(&if_rhsisnumber);
- {
- // Convert {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- Goto(&do_fcmp);
- }
-
- BIND(&if_rhsisnotnumber);
- {
- // The {lhs} is Smi and {rhs} is not HeapNumber or Smi.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
-
- // Load the instance type of the {rhs}.
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ BIND(&if_right_not_smi);
+ Node* right_map = LoadMap(right);
+ Label if_right_heapnumber(this), if_right_boolean(this),
+ if_right_bigint(this, Label::kDeferred),
+ if_right_receiver(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
+ // {left} is Smi and {right} is not HeapNumber or Smi.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ }
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ Node* right_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
+ GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
+ Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
+ &if_notequal);
+
+ BIND(&if_right_heapnumber);
+ {
+ var_left_float = SmiToFloat64(left);
+ var_right_float = LoadHeapNumberValue(right);
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+ Goto(&do_float_comparison);
+ }
- // Check if the {rhs} is a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
+ BIND(&if_right_boolean);
+ {
+ var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
- BIND(&if_rhsisstring);
- {
- // The {rhs} is a String and the {lhs} is a Smi; we need
- // to convert the {rhs} to a Number and compare the output to
- // the Number on the {lhs}.
- Goto(&do_rhsstringtonumber);
- }
+ BIND(&if_right_bigint);
+ {
+ result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), right, left));
+ Goto(&end);
+ }
- BIND(&if_rhsisnotstring);
- {
- // Check if the {rhs} is a Boolean.
- Label if_rhsisboolean(this), if_rhsisnotboolean(this);
- Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
- &if_rhsisnotboolean);
-
- BIND(&if_rhsisboolean);
- {
- // The {rhs} is a Boolean, load its number value.
- var_rhs.Bind(LoadObjectField(rhs, Oddball::kToNumberOffset));
- Goto(&loop);
- }
+ BIND(&if_right_receiver);
+ {
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_right.Bind(CallStub(callable, context, right));
+ Goto(&loop);
+ }
+ }
- BIND(&if_rhsisnotboolean);
- {
- // Check if the {rhs} is a Receiver.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first (passing no hint).
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- Goto(&if_notequal);
- }
- }
- }
+ BIND(&if_left_not_smi);
+ {
+ GotoIf(TaggedIsSmi(right), &use_symmetry);
+
+ Label if_left_symbol(this), if_left_number(this), if_left_string(this),
+ if_left_bigint(this, Label::kDeferred), if_left_oddball(this),
+ if_left_receiver(this);
+
+ Node* left_map = LoadMap(left);
+ Node* right_map = LoadMap(right);
+ Node* left_type = LoadMapInstanceType(left_map);
+ Node* right_type = LoadMapInstanceType(right_map);
+
+ GotoIf(Int32LessThan(left_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_left_string);
+ GotoIf(InstanceTypeEqual(left_type, SYMBOL_TYPE), &if_left_symbol);
+ GotoIf(InstanceTypeEqual(left_type, HEAP_NUMBER_TYPE), &if_left_number);
+ GotoIf(InstanceTypeEqual(left_type, ODDBALL_TYPE), &if_left_oddball);
+ GotoIf(InstanceTypeEqual(left_type, BIGINT_TYPE), &if_left_bigint);
+ Goto(&if_left_receiver);
+
+ BIND(&if_left_string);
+ {
+ GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
+ result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right));
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiOr(CollectFeedbackForString(left_type),
+ CollectFeedbackForString(right_type)));
}
+ Goto(&end);
}
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_number);
{
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_not_number(this);
+ GotoIf(Word32NotEqual(left_type, right_type), &if_right_not_number);
- BIND(&if_rhsissmi);
- {
- // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
- // and {rhs} is not observable and doesn't matter for the result, so
- // we can just swap them and use the Smi handling above (for {lhs}
- // being a Smi).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
+ var_left_float = LoadHeapNumberValue(left);
+ var_right_float = LoadHeapNumberValue(right);
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
}
+ Goto(&do_float_comparison);
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_not_number);
{
- Label if_lhsisstring(this), if_lhsisnumber(this),
- if_lhsissymbol(this), if_lhsisoddball(this),
- if_lhsisreceiver(this);
-
- // Both {lhs} and {rhs} are HeapObjects, load their maps
- // and their instance types.
- Node* lhs_map = LoadMap(lhs);
- Node* rhs_map = LoadMap(rhs);
-
- // Load the instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
-
- // Dispatch based on the instance type of {lhs}.
- size_t const kNumCases = FIRST_NONSTRING_TYPE + 3;
- Label* case_labels[kNumCases];
- int32_t case_values[kNumCases];
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- case_labels[i] = new Label(this);
- case_values[i] = i;
- }
- case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
- case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
- case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsisoddball;
- case_values[FIRST_NONSTRING_TYPE + 2] = ODDBALL_TYPE;
- Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
- arraysize(case_values));
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- BIND(case_labels[i]);
- Goto(&if_lhsisstring);
- delete case_labels[i];
+ Label if_right_boolean(this);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
}
+ GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ GotoIf(IsBigIntInstanceType(right_type), &use_symmetry);
+ Branch(IsJSReceiverInstanceType(right_type), &use_symmetry,
+ &if_notequal);
- BIND(&if_lhsisstring);
+ BIND(&if_right_boolean);
{
- // Check if {rhs} is also a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
-
- BIND(&if_rhsisstring);
- {
- // Both {lhs} and {rhs} are of type String, just do the
- // string comparison then.
- result.Bind(
- CallBuiltin(Builtins::kStringEqual, context, lhs, rhs));
- if (var_type_feedback != nullptr) {
- Node* lhs_feedback =
- CollectFeedbackForString(lhs_instance_type);
- Node* rhs_feedback =
- CollectFeedbackForString(rhs_instance_type);
- CombineFeedback(var_type_feedback,
- SmiOr(lhs_feedback, rhs_feedback));
- }
- Goto(&end);
- }
-
- BIND(&if_rhsisnotstring);
- {
- // The {lhs} is a String and the {rhs} is some other HeapObject.
- // Swapping {lhs} and {rhs} is not observable and doesn't matter
- // for the result, so we can just swap them and use the String
- // handling below (for {rhs} being a String).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- Goto(&loop);
- }
+ var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
+ Goto(&loop);
}
+ }
+ }
- BIND(&if_lhsisnumber);
- {
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this);
- Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- BIND(&if_rhsisnumber);
- {
- // Convert {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- Goto(&do_fcmp);
- }
+ BIND(&if_left_bigint);
+ {
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ }
- BIND(&if_rhsisnotnumber);
- {
- // The {lhs} is a Number, the {rhs} is some other HeapObject.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this);
+ Label if_right_heapnumber(this), if_right_bigint(this),
+ if_right_string(this), if_right_boolean(this);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
+ GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
+ GotoIf(IsStringInstanceType(right_type), &if_right_string);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ Branch(IsJSReceiverInstanceType(right_type), &use_symmetry,
+ &if_notequal);
- if (var_type_feedback != nullptr) {
- // The {lhs} is number and {rhs} is not Smi or HeapNumber.
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
-
- BIND(&if_rhsisstring);
- {
- // The {rhs} is a String and the {lhs} is a HeapNumber; we need
- // to convert the {rhs} to a Number and compare the output to
- // the Number on the {lhs}.
- Goto(&do_rhsstringtonumber);
- }
+ BIND(&if_right_heapnumber);
+ {
+ result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), left, right));
+ Goto(&end);
+ }
- BIND(&if_rhsisnotstring);
- {
- // Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
- // Swapping {lhs} and {rhs} is not observable and doesn't
- // matter for the result, so we can just swap them and use
- // the JSReceiver handling below (for {lhs} being a
- // JSReceiver).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Check if {rhs} is a Boolean.
- Label if_rhsisboolean(this), if_rhsisnotboolean(this);
- Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
- &if_rhsisnotboolean);
-
- BIND(&if_rhsisboolean);
- {
- // The {rhs} is a Boolean, convert it to a Smi first.
- var_rhs.Bind(
- LoadObjectField(rhs, Oddball::kToNumberOffset));
- Goto(&loop);
- }
+ BIND(&if_right_bigint);
+ {
+ result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), left, right));
+ Goto(&end);
+ }
- BIND(&if_rhsisnotboolean);
- Goto(&if_notequal);
- }
- }
- }
- }
+ BIND(&if_right_string);
+ {
+ result.Bind(CallRuntime(Runtime::kBigIntEqualToString,
+ NoContextConstant(), left, right));
+ Goto(&end);
+ }
- BIND(&if_lhsisoddball);
- {
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
+ BIND(&if_right_boolean);
+ {
+ var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+ }
- // The {lhs} is an Oddball and {rhs} is some other HeapObject.
- Label if_lhsisboolean(this), if_lhsisnotboolean(this);
- Node* boolean_map = BooleanMapConstant();
- Branch(WordEqual(lhs_map, boolean_map), &if_lhsisboolean,
- &if_lhsisnotboolean);
+ BIND(&if_left_oddball);
+ {
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ }
- BIND(&if_lhsisboolean);
- {
- // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
- Label if_rhsisboolean(this), if_rhsisnotboolean(this);
- Branch(WordEqual(rhs_map, boolean_map), &if_rhsisboolean,
- &if_rhsisnotboolean);
-
- BIND(&if_rhsisboolean);
- {
- // Both {lhs} and {rhs} are distinct Boolean values.
- Goto(&if_notequal);
- }
+ Label if_left_boolean(this);
+ GotoIf(IsBooleanMap(left_map), &if_left_boolean);
+ // {left} is either Null or Undefined. Check if {right} is
+ // undetectable (which includes Null and Undefined).
+ Branch(IsUndetectableMap(right_map), &if_equal, &if_notequal);
- BIND(&if_rhsisnotboolean);
- {
- // Convert the {lhs} to a Number first.
- var_lhs.Bind(LoadObjectField(lhs, Oddball::kToNumberOffset));
- Goto(&loop);
- }
- }
+ BIND(&if_left_boolean);
+ {
+ // If {right} is a Boolean too, it must be a different Boolean.
+ GotoIf(WordEqual(right_map, left_map), &if_notequal);
+ // Otherwise, convert {left} to number and try again.
+ var_left.Bind(LoadObjectField(left, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+ }
- BIND(&if_lhsisnotboolean);
- {
- // The {lhs} is either Null or Undefined; check if the {rhs} is
- // undetectable (i.e. either also Null or Undefined or some
- // undetectable JSReceiver).
- Branch(IsUndetectableMap(rhs_map), &if_equal, &if_notequal);
- }
- }
+ BIND(&if_left_symbol);
+ {
+ Label if_right_receiver(this);
+ GotoIf(IsJSReceiverInstanceType(right_type), &if_right_receiver);
+ // {right} is not a JSReceiver and also not the same Symbol as {left},
+ // so the result is "not equal".
+ if (var_type_feedback != nullptr) {
+ Label if_right_symbol(this);
+ GotoIf(IsSymbolInstanceType(right_type), &if_right_symbol);
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ Goto(&if_notequal);
- BIND(&if_lhsissymbol);
+ BIND(&if_right_symbol);
{
- // Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
- // Swapping {lhs} and {rhs} is not observable and doesn't
- // matter for the result, so we can just swap them and use
- // the JSReceiver handling below (for {lhs} being a JSReceiver).
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // The {rhs} is not a JSReceiver and also not the same Symbol
- // as the {lhs}, so this is equality check is considered false.
- if (var_type_feedback != nullptr) {
- Label if_rhsissymbol(this), if_rhsisnotsymbol(this);
- Branch(IsSymbolInstanceType(rhs_instance_type), &if_rhsissymbol,
- &if_rhsisnotsymbol);
-
- BIND(&if_rhsissymbol);
- {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kSymbol));
- Goto(&if_notequal);
- }
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSymbol));
+ Goto(&if_notequal);
+ }
+ } else {
+ Goto(&if_notequal);
+ }
- BIND(&if_rhsisnotsymbol);
- {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- Goto(&if_notequal);
- }
- } else {
- Goto(&if_notequal);
- }
- }
+ BIND(&if_right_receiver);
+ {
+ // {left} is a Primitive and {right} is a JSReceiver, so swapping
+ // the order is not observable.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
}
+ Goto(&use_symmetry);
+ }
+ }
- BIND(&if_lhsisreceiver);
- {
- CSA_ASSERT(this, IsJSReceiverInstanceType(lhs_instance_type));
- // Check if the {rhs} is also a JSReceiver.
- Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ BIND(&if_left_receiver);
+ {
+ CSA_ASSERT(this, IsJSReceiverInstanceType(left_type));
+ Label if_right_not_receiver(this);
+ GotoIfNot(IsJSReceiverInstanceType(right_type), &if_right_not_receiver);
- BIND(&if_rhsisreceiver);
- {
- if (var_type_feedback != nullptr) {
- // The {lhs} and {rhs} are receivers.
- CombineFeedback(
- var_type_feedback,
- SmiConstant(CompareOperationFeedback::kReceiver));
- }
+ // {left} and {right} are different JSReceiver references.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kReceiver));
+ }
+ Goto(&if_notequal);
- // Both {lhs} and {rhs} are different JSReceiver references, so
- // this cannot be considered equal.
- Goto(&if_notequal);
- }
+ BIND(&if_right_not_receiver);
+ {
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+ Label if_right_null_or_undefined(this);
+ GotoIf(IsUndetectableMap(right_map), &if_right_null_or_undefined);
- BIND(&if_rhsisnotreceiver);
- {
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
+ // {right} is a Primitive; convert {left} to Primitive too.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_left.Bind(CallStub(callable, context, left));
+ Goto(&loop);
- // Check if {rhs} is Null or Undefined (an undetectable check
- // is sufficient here, since we already know that {rhs} is not
- // a JSReceiver).
- Label if_rhsisundetectable(this),
- if_rhsisnotundetectable(this, Label::kDeferred);
- Branch(IsUndetectableMap(rhs_map), &if_rhsisundetectable,
- &if_rhsisnotundetectable);
-
- BIND(&if_rhsisundetectable);
- Branch(IsUndetectableMap(lhs_map), &if_equal, &if_notequal);
-
- BIND(&if_rhsisnotundetectable);
- {
- // The {rhs} is some Primitive different from Null and
- // Undefined, need to convert {lhs} to Primitive first.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
- }
+ BIND(&if_right_null_or_undefined);
+ Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal);
}
}
}
- BIND(&do_rhsstringtonumber);
+ BIND(&do_right_stringtonumber);
+ {
+ var_right.Bind(CallBuiltin(Builtins::kStringToNumber, context, right));
+ Goto(&loop);
+ }
+
+ BIND(&use_symmetry);
{
- var_rhs.Bind(CallBuiltin(Builtins::kStringToNumber, context, rhs));
+ var_left.Bind(right);
+ var_right.Bind(left);
Goto(&loop);
}
}
- BIND(&do_fcmp);
+ BIND(&do_float_comparison);
{
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
- Branch(Float64Equal(lhs, rhs), &if_equal, &if_notequal);
+ Branch(Float64Equal(var_left_float, var_right_float), &if_equal,
+ &if_notequal);
}
BIND(&if_equal);
@@ -8779,7 +9201,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// }
// } else if (lhs->IsBigInt()) {
// if (rhs->IsBigInt()) {
- // return %BigIntEqual(lhs, rhs);
+ // return %BigIntEqualToBigInt(lhs, rhs);
// } else {
// return false;
// }
@@ -8963,7 +9385,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
WordEqual(var_type_feedback->value(),
SmiConstant(CompareOperationFeedback::kAny)));
}
- result.Bind(CallRuntime(Runtime::kBigIntEqual,
+ result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -9115,14 +9537,16 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
BIND(&if_rhsisheapobject);
{
- // Now this can only yield true if either both {lhs} and {rhs}
- // are HeapNumbers with the same value or both {lhs} and {rhs}
- // are Strings with the same character sequence.
- Label if_lhsisheapnumber(this), if_lhsisstring(this);
+ // Now this can only yield true if either both {lhs} and {rhs} are
+ // HeapNumbers with the same value, or both are Strings with the same
+ // character sequence, or both are BigInts with the same value.
+ Label if_lhsisheapnumber(this), if_lhsisstring(this),
+ if_lhsisbigint(this);
Node* const lhs_map = LoadMap(lhs);
GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
- Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ GotoIf(IsStringInstanceType(lhs_instance_type), &if_lhsisstring);
+ Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
if_false);
BIND(&if_lhsisheapnumber);
@@ -9142,6 +9566,14 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
CallBuiltin(Builtins::kStringEqual, NoContextConstant(), lhs, rhs);
Branch(IsTrue(result), if_true, if_false);
}
+
+ BIND(&if_lhsisbigint);
+ {
+ GotoIfNot(IsBigInt(rhs), if_false);
+ Node* const result = CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), lhs, rhs);
+ Branch(IsTrue(result), if_true, if_false);
+ }
}
}
@@ -9222,13 +9654,13 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
BIND(&return_true);
{
- result.Bind(BooleanConstant(true));
+ result.Bind(TrueConstant());
Goto(&end);
}
BIND(&return_false);
{
- result.Bind(BooleanConstant(false));
+ result.Bind(FalseConstant());
Goto(&end);
}
@@ -9274,9 +9706,7 @@ Node* CodeStubAssembler::ClassOf(Node* value) {
// Check if {value} is a primitive HeapObject.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- GotoIf(Uint32LessThan(value_instance_type,
- Int32Constant(FIRST_JS_RECEIVER_TYPE)),
- &if_primitive);
+ GotoIfNot(IsJSReceiverInstanceType(value_instance_type), &if_primitive);
// Load the {value}s constructor, and check that it's a JSFunction.
Node* constructor = LoadMapConstructor(value_map);
@@ -9335,7 +9765,7 @@ Node* CodeStubAssembler::Typeof(Node* value) {
Node* instance_type = LoadMapInstanceType(map);
- GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
+ GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &if_oddball);
Node* callable_or_undetectable_mask = Word32And(
LoadMapBitField(map),
@@ -9354,7 +9784,7 @@ Node* CodeStubAssembler::Typeof(Node* value) {
GotoIf(IsBigIntInstanceType(instance_type), &return_bigint);
- CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+ CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
Goto(&return_result);
@@ -9698,6 +10128,36 @@ void CodeStubAssembler::GotoIfNumber(Node* input, Label* is_number) {
GotoIf(IsHeapNumber(input), is_number);
}
+Node* CodeStubAssembler::BitwiseOp(Node* left32, Node* right32,
+ Operation bitwise_op) {
+ switch (bitwise_op) {
+ case Operation::kBitwiseAnd:
+ return ChangeInt32ToTagged(Signed(Word32And(left32, right32)));
+ case Operation::kBitwiseOr:
+ return ChangeInt32ToTagged(Signed(Word32Or(left32, right32)));
+ case Operation::kBitwiseXor:
+ return ChangeInt32ToTagged(Signed(Word32Xor(left32, right32)));
+ case Operation::kShiftLeft:
+ if (!Word32ShiftIsSafe()) {
+ right32 = Word32And(right32, Int32Constant(0x1f));
+ }
+ return ChangeInt32ToTagged(Signed(Word32Shl(left32, right32)));
+ case Operation::kShiftRight:
+ if (!Word32ShiftIsSafe()) {
+ right32 = Word32And(right32, Int32Constant(0x1f));
+ }
+ return ChangeInt32ToTagged(Signed(Word32Sar(left32, right32)));
+ case Operation::kShiftRightLogical:
+ if (!Word32ShiftIsSafe()) {
+ right32 = Word32And(right32, Int32Constant(0x1f));
+ }
+ return ChangeUint32ToTagged(Unsigned(Word32Shr(left32, right32)));
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
Node* array_type, Node* context,
IterationKind mode) {
@@ -9748,8 +10208,8 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
Label if_istypedarray(this), if_isgeneric(this);
- Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &if_istypedarray, &if_isgeneric);
+ Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
+ &if_isgeneric);
BIND(&if_isgeneric);
{
@@ -9782,8 +10242,8 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
}
} else {
Label if_istypedarray(this), if_isgeneric(this);
- Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &if_istypedarray, &if_isgeneric);
+ Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
+ &if_isgeneric);
BIND(&if_isgeneric);
{
@@ -9804,7 +10264,7 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
// its initial state (because the protector cell is only tracked for
// initial the Array and Object prototypes). Check these conditions
// here, and take the slow path if any fail.
- GotoIf(IsArrayProtectorCellInvalid(), &if_isslow);
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_isslow);
Node* native_context = LoadNativeContext(context);
@@ -10077,6 +10537,11 @@ Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
Int32Constant(LAST_FAST_ELEMENTS_KIND));
}
+Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
+ return Uint32LessThanOrEqual(elements_kind,
+ Int32Constant(TERMINAL_FAST_ELEMENTS_KIND));
+}
+
Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
CSA_ASSERT(this, IsFastElementsKind(elements_kind));
@@ -10115,7 +10580,12 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Node* const code =
LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset);
- Node* const fun = Allocate(JSFunction::kSize);
+ // TODO(ishell): All the callers of this function pass map loaded from
+ // Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
+ // map parameter.
+ CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
+ CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
+ Node* const fun = Allocate(JSFunction::kSizeWithoutPrototype);
StoreMapNoWriteBarrier(fun, map);
StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -10123,8 +10593,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(fun, JSFunction::kFeedbackVectorOffset,
Heap::kUndefinedCellRootIndex);
- StoreObjectFieldRoot(fun, JSFunction::kPrototypeOrInitialMapOffset,
- Heap::kTheHoleValueRootIndex);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
@@ -10192,9 +10660,8 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
// It might still be an empty JSArray.
GotoIfNot(IsJSArrayMap(object_map), if_slow);
- Node* object_length = LoadObjectField(object, JSArray::kLengthOffset);
- Branch(WordEqual(object_length, SmiConstant(Smi::kZero)), &if_no_elements,
- if_slow);
+ Node* object_length = LoadJSArrayLength(object);
+ Branch(WordEqual(object_length, SmiConstant(0)), &if_no_elements, if_slow);
// Continue with the {object}s prototype.
BIND(&if_no_elements);
@@ -10229,7 +10696,7 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
Node* properties = LoadSlowProperties(receiver);
Node* length = LoadFixedArrayElement(
properties, NameDictionary::kNumberOfElementsIndex);
- GotoIfNot(WordEqual(length, SmiConstant(Smi::kZero)), if_runtime);
+ GotoIfNot(WordEqual(length, SmiConstant(0)), if_runtime);
// Check that there are no elements on the {receiver} and its prototype
// chain. Given that we do not create an EnumCache for dict-mode objects,
// directly jump to {if_empty} if there are no elements and no properties
@@ -10265,5 +10732,25 @@ void CodeStubAssembler::Print(const char* prefix, Node* tagged_value) {
CallRuntime(Runtime::kDebugPrint, NoContextConstant(), tagged_value);
}
+void CodeStubAssembler::PerformStackCheck(Node* context) {
+ Label ok(this), stack_check_interrupt(this, Label::kDeferred);
+
+ Node* sp = LoadStackPointer();
+ Node* stack_limit = Load(
+ MachineType::Pointer(),
+ ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+ Node* interrupt = UintPtrLessThan(sp, stack_limit);
+
+ Branch(interrupt, &stack_check_interrupt, &ok);
+
+ BIND(&stack_check_interrupt);
+ {
+ CallRuntime(Runtime::kStackGuard, context);
+ Goto(&ok);
+ }
+
+ BIND(&ok);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index a2d5e80015..44becb3981 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -73,10 +73,10 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
public:
using Node = compiler::Node;
- template <class A>
- using TNode = compiler::TNode<A>;
- template <class A>
- using SloppyTNode = compiler::SloppyTNode<A>;
+ template <class T>
+ using TNode = compiler::TNode<T>;
+ template <class T>
+ using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler(compiler::CodeAssemblerState* state);
@@ -87,6 +87,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
kAllowLargeObjectAllocation = 1 << 2,
};
+ enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking };
+
typedef base::Flags<AllocationFlag> AllocationFlags;
enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
@@ -177,8 +179,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- bool IsIntPtrOrSmiConstantZero(Node* test);
- bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value);
+ bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
+ bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
+ ParameterMode mode);
// Round the 32bits payload of the provided word up to the next power of two.
Node* IntPtrRoundUpToPowerOfTwo32(Node* value);
@@ -284,6 +287,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void GotoIfNotNumber(Node* value, Label* is_not_number);
void GotoIfNumber(Node* value, Label* is_number);
+ Node* BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);
+
// Allocate an object of the given size.
Node* AllocateInNewSpace(Node* size, AllocationFlags flags = kNone);
Node* AllocateInNewSpace(int size, AllocationFlags flags = kNone);
@@ -408,6 +413,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfFastJSArrayForCopy(Node* object, Node* context, Label* if_true,
Label* if_false);
+ // Branches to {if_true} when --force-slow-path flag has been passed.
+ // It's used for testing to ensure that slow path implementation behave
+ // equivalent to corresponding fast paths (where applicable).
+ //
+ // Works only in DEBUG mode or with ENABLE_FASTSLOW_SWITCH compile time flag.
+ // Nop otherwise.
+ void GotoIfForceSlowPath(Label* if_true);
+
// Load value from current frame by given offset in bytes.
Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
// Load value from current parent frame by given offset in bytes.
@@ -420,6 +433,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load a field from an object on the heap.
Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
MachineType rep);
+ template <class T, typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<Object>>::value,
+ int>::type = 0>
+ TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
+ return CAST(LoadObjectField(object, offset, MachineTypeOf<T>::value));
+ }
+ template <class T, typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
+ int>::type = 0>
+ TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
+ return UncheckedCast<T>(
+ LoadObjectField(object, offset, MachineTypeOf<T>::value));
+ }
TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object, int offset) {
return UncheckedCast<Object>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
@@ -457,9 +483,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load the properties backing store of a JSObject.
TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
- // Load the hash from the backing store of a JSObject.
- TNode<Int32T> LoadHashForJSObject(SloppyTNode<JSObject> jsobject,
- SloppyTNode<Int32T> instance_type);
// Load the elements backing store of a JSObject.
TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object);
// Load the length of a JSArray instance.
@@ -490,15 +513,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<PrototypeInfo> LoadMapPrototypeInfo(SloppyTNode<Map> map,
Label* if_has_no_proto_info);
// Load the instance size of a Map.
- TNode<IntPtrT> LoadMapInstanceSize(SloppyTNode<Map> map);
- // Load the inobject properties count of a Map (valid only for JSObjects).
- TNode<IntPtrT> LoadMapInobjectProperties(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapInstanceSizeInWords(SloppyTNode<Map> map);
+ // Load the inobject properties start of a Map (valid only for JSObjects).
+ TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(SloppyTNode<Map> map);
// Load the constructor function index of a Map (only for primitive maps).
TNode<IntPtrT> LoadMapConstructorFunctionIndex(SloppyTNode<Map> map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
// Load the EnumLength of a Map.
Node* LoadMapEnumLength(SloppyTNode<Map> map);
+ // Load the back-pointer of a Map.
+ Node* LoadMapBackPointer(SloppyTNode<Map> map);
+ // Load the identity hash of a JSRececiver.
+ TNode<IntPtrT> LoadJSReceiverIdentityHash(SloppyTNode<Object> receiver,
+ Label* if_no_hash = nullptr);
// This is only used on a newly allocated PropertyArray which
// doesn't have an existing hash.
@@ -509,14 +537,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsDictionaryMap(SloppyTNode<Map> map);
// Load the hash field of a name as an uint32 value.
- Node* LoadNameHashField(Node* name);
+ TNode<Uint32T> LoadNameHashField(SloppyTNode<Name> name);
// Load the hash value of a name as an uint32 value.
// If {if_hash_not_computed} label is specified then it also checks if
// hash is actually computed.
- Node* LoadNameHash(Node* name, Label* if_hash_not_computed = nullptr);
+ TNode<Uint32T> LoadNameHash(SloppyTNode<Name> name,
+ Label* if_hash_not_computed = nullptr);
- // Load length field of a String object.
- Node* LoadStringLength(Node* object);
+ // Load length field of a String object as intptr_t value.
+ TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> object);
+ // Load length field of a String object as Smi value.
+ TNode<Smi> LoadStringLengthAsSmi(SloppyTNode<String> object);
// Loads a pointer to the sequential String char array.
Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSValue object.
@@ -564,22 +595,39 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Context manipulation
- Node* LoadContextElement(Node* context, int slot_index);
- Node* LoadContextElement(Node* context, Node* slot_index);
- Node* StoreContextElement(Node* context, int slot_index, Node* value);
- Node* StoreContextElement(Node* context, Node* slot_index, Node* value);
- Node* StoreContextElementNoWriteBarrier(Node* context, int slot_index,
- Node* value);
- Node* LoadNativeContext(Node* context);
-
- Node* LoadJSArrayElementsMap(ElementsKind kind, Node* native_context);
- Node* LoadJSArrayElementsMap(Node* kind, Node* native_context);
+ TNode<Object> LoadContextElement(SloppyTNode<Context> context,
+ int slot_index);
+ TNode<Object> LoadContextElement(SloppyTNode<Context> context,
+ SloppyTNode<IntPtrT> slot_index);
+ void StoreContextElement(SloppyTNode<Context> context, int slot_index,
+ SloppyTNode<Object> value);
+ void StoreContextElement(SloppyTNode<Context> context,
+ SloppyTNode<IntPtrT> slot_index,
+ SloppyTNode<Object> value);
+ void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
+ int slot_index,
+ SloppyTNode<Object> value);
+ TNode<Context> LoadNativeContext(SloppyTNode<Context> context);
+ // Calling this is only valid if there's a module context in the chain.
+ TNode<Context> LoadModuleContext(SloppyTNode<Context> context);
+
+ void GotoIfContextElementEqual(Node* value, Node* native_context,
+ int slot_index, Label* if_equal) {
+ GotoIf(WordEqual(value, LoadContextElement(native_context, slot_index)),
+ if_equal);
+ }
+
+ TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
+ SloppyTNode<Context> native_context);
+ TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
+ SloppyTNode<Context> native_context);
// Load the "prototype" property of a JSFunction.
Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
// Store the floating point value of a HeapNumber.
- Node* StoreHeapNumberValue(Node* object, Node* value);
+ void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
+ SloppyTNode<Float64T> value);
// Store a field to an object on the heap.
Node* StoreObjectField(Node* object, int offset, Node* value);
Node* StoreObjectField(Node* object, Node* offset, Node* value);
@@ -655,41 +703,42 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Allocate a HeapNumber without initializing its value.
- Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
+ TNode<HeapNumber> AllocateHeapNumber(MutableMode mode = IMMUTABLE);
// Allocate a HeapNumber with a specific value.
- Node* AllocateHeapNumberWithValue(Node* value, MutableMode mode = IMMUTABLE);
+ TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value,
+ MutableMode mode = IMMUTABLE);
// Allocate a SeqOneByteString with the given length.
Node* AllocateSeqOneByteString(int length, AllocationFlags flags = kNone);
- Node* AllocateSeqOneByteString(Node* context, Node* length,
- ParameterMode mode = INTPTR_PARAMETERS,
+ Node* AllocateSeqOneByteString(Node* context, TNode<Smi> length,
AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
Node* AllocateSeqTwoByteString(int length, AllocationFlags flags = kNone);
- Node* AllocateSeqTwoByteString(Node* context, Node* length,
- ParameterMode mode = INTPTR_PARAMETERS,
+ Node* AllocateSeqTwoByteString(Node* context, TNode<Smi> length,
AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- Node* AllocateSlicedOneByteString(Node* length, Node* parent, Node* offset);
+ Node* AllocateSlicedOneByteString(TNode<Smi> length, Node* parent,
+ Node* offset);
// Allocate a SlicedTwoByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- Node* AllocateSlicedTwoByteString(Node* length, Node* parent, Node* offset);
+ Node* AllocateSlicedTwoByteString(TNode<Smi> length, Node* parent,
+ Node* offset);
// Allocate a one-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be one-byte strings.
- Node* AllocateOneByteConsString(Node* length, Node* first, Node* second,
+ Node* AllocateOneByteConsString(TNode<Smi> length, Node* first, Node* second,
AllocationFlags flags = kNone);
// Allocate a two-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be two-byte strings.
- Node* AllocateTwoByteConsString(Node* length, Node* first, Node* second,
+ Node* AllocateTwoByteConsString(TNode<Smi> length, Node* first, Node* second,
AllocationFlags flags = kNone);
// Allocate an appropriate one- or two-byte ConsString with the first and
- // second parts specified by |first| and |second|.
- Node* NewConsString(Node* context, Node* length, Node* left, Node* right,
+ // second parts specified by |left| and |right|.
+ Node* NewConsString(Node* context, TNode<Smi> length, Node* left, Node* right,
AllocationFlags flags = kNone);
Node* AllocateNameDictionary(int at_least_space_for);
@@ -700,16 +749,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
void InitializeStructBody(Node* object, Node* map, Node* size,
int start_offset = Struct::kHeaderSize);
- Node* AllocateJSObjectFromMap(Node* map, Node* properties = nullptr,
- Node* elements = nullptr,
- AllocationFlags flags = kNone);
- void InitializeJSObjectFromMap(Node* object, Node* map, Node* size,
- Node* properties = nullptr,
- Node* elements = nullptr);
+ Node* AllocateJSObjectFromMap(
+ Node* map, Node* properties = nullptr, Node* elements = nullptr,
+ AllocationFlags flags = kNone,
+ SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
+
+ void InitializeJSObjectFromMap(
+ Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
+ Node* elements = nullptr,
+ SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
- void InitializeJSObjectBody(Node* object, Node* map, Node* size,
- int start_offset = JSObject::kHeaderSize);
+ void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
+ Node* instance_size);
+ void InitializeJSObjectBodyNoSlackTracking(
+ Node* object, Node* map, Node* instance_size,
+ int start_offset = JSObject::kHeaderSize);
// Allocate a JSArray without elements and initialize the header fields.
Node* AllocateUninitializedJSArrayWithoutElements(Node* array_map,
@@ -727,9 +782,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* length, Node* allocation_site = nullptr,
ParameterMode capacity_mode = INTPTR_PARAMETERS);
+ Node* CloneFastJSArray(Node* context, Node* array,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ Node* allocation_site = nullptr);
+
+ Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ Node* capacity = nullptr,
+ Node* allocation_site = nullptr);
+
Node* AllocateFixedArray(ElementsKind kind, Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
- AllocationFlags flags = kNone);
+ AllocationFlags flags = kNone,
+ Node* fixed_array_map = nullptr);
Node* AllocatePropertyArray(Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
@@ -765,31 +830,103 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ElementsKind kind, Node* from_array, Node* to_array, Node* length,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS) {
- CopyFixedArrayElements(kind, from_array, kind, to_array, length, length,
+ CopyFixedArrayElements(kind, from_array, kind, to_array,
+ IntPtrOrSmiConstant(0, mode), length, length,
barrier_mode, mode);
}
- // Copies |element_count| elements from |from_array| to |to_array| of
- // |capacity| size respecting both array's elements kinds.
+ // Copies |element_count| elements from |from_array| starting from element
+ // zero to |to_array| of |capacity| size respecting both array's elements
+ // kinds.
void CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ ParameterMode mode = INTPTR_PARAMETERS) {
+ CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
+ IntPtrOrSmiConstant(0, mode), element_count,
+ capacity, barrier_mode, mode);
+ }
+
+ // Copies |element_count| elements from |from_array| starting from element
+ // |first_element| to |to_array| of |capacity| size respecting both array's
+ // elements kinds.
+ void CopyFixedArrayElements(
+ ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
+ Node* to_array, Node* first_element, Node* element_count, Node* capacity,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS);
+ enum class ExtractFixedArrayFlag {
+ kFixedArrays = 1,
+ kFixedDoubleArrays = 2,
+ kDontCopyCOW = 4,
+ kNewSpaceAllocationOnly = 8,
+ kAllFixedArrays = kFixedArrays | kFixedDoubleArrays,
+ kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW
+ };
+
+ typedef base::Flags<ExtractFixedArrayFlag> ExtractFixedArrayFlags;
+
+ // Copy a portion of an existing FixedArray or FixedDoubleArray into a new
+ // FixedArray, including special appropriate handling for empty arrays and COW
+ // arrays.
+ //
+ // * |source| is either a FixedArray or FixedDoubleArray from which to copy
+ // elements.
+ // * |first| is the starting element index to copy from, if nullptr is passed
+ // then index zero is used by default.
+ // * |count| is the number of elements to copy out of the source array
+ // starting from and including the element indexed by |start|. If |count| is
+ // nullptr, then all of the elements from |start| to the end of |source| are
+ // copied.
+ // * |capacity| determines the size of the allocated result array, with
+ // |capacity| >= |count|. If |capacity| is nullptr, then |count| is used as
+ // the destination array's capacity.
+ // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both
+ // are detected and copied. Although it's always correct to pass
+ // kAllFixedArrays, the generated code is more compact and efficient if the
+ // caller can specify whether only FixedArrays or FixedDoubleArrays will be
+ // passed as the |source| parameter.
+ // * |parameter_mode| determines the parameter mode of |first|, |count| and
+ // |capacity|.
+ Node* ExtractFixedArray(Node* source, Node* first, Node* count = nullptr,
+ Node* capacity = nullptr,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
+ // Copy the entire contents of a FixedArray or FixedDoubleArray to a new
+ // array, including special appropriate handling for empty arrays and COW
+ // arrays.
+ //
+ // * |source| is either a FixedArray or FixedDoubleArray from which to copy
+ // elements.
+ // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both
+ // are detected and copied. Although it's always correct to pass
+ // kAllFixedArrays, the generated code is more compact and efficient if the
+ // caller can specify whether only FixedArrays or FixedDoubleArrays will be
+ // passed as the |source| parameter.
+ Node* CloneFixedArray(Node* source,
+ ExtractFixedArrayFlags flags =
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
+ ParameterMode mode = OptimalParameterMode();
+ return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr,
+ nullptr, flags, mode);
+ }
+
// Copies |character_count| elements from |from_string| to |to_string|
// starting at the |from_index|'th character. |from_string| and |to_string|
// can either be one-byte strings or two-byte strings, although if
// |from_string| is two-byte, then |to_string| must be two-byte.
- // |from_index|, |to_index| and |character_count| must be either Smis or
- // intptr_ts depending on |mode| s.t. 0 <= |from_index| <= |from_index| +
- // |character_count| <= from_string.length and 0 <= |to_index| <= |to_index| +
- // |character_count| <= to_string.length.
+ // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <=
+ // |from_index| <= |from_index| + |character_count| <= from_string.length and
+ // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length.
void CopyStringCharacters(Node* from_string, Node* to_string,
- Node* from_index, Node* to_index,
- Node* character_count,
+ TNode<IntPtrT> from_index, TNode<IntPtrT> to_index,
+ TNode<IntPtrT> character_count,
String::Encoding from_encoding,
- String::Encoding to_encoding, ParameterMode mode);
+ String::Encoding to_encoding);
// Loads an element from |array| of |from_kind| elements by given |offset|
// (NOTE: not index!), does a hole check if |if_hole| is provided and
@@ -836,22 +973,36 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
Node* TruncateTaggedToFloat64(Node* context, Node* value);
Node* TruncateTaggedToWord32(Node* context, Node* value);
+ void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number,
+ Variable* var_word32, Label* if_bigint,
+ Variable* var_bigint);
+ void TaggedToWord32OrBigIntWithFeedback(
+ Node* context, Node* value, Label* if_number, Variable* var_word32,
+ Label* if_bigint, Variable* var_bigint, Variable* var_feedback);
+
// Truncate the floating point value of a HeapNumber to an Int32.
Node* TruncateHeapNumberValueToWord32(Node* object);
// Conversions.
- Node* ChangeFloat64ToTagged(Node* value);
- Node* ChangeInt32ToTagged(Node* value);
- Node* ChangeUint32ToTagged(Node* value);
- Node* ChangeNumberToFloat64(Node* value);
- Node* ChangeNumberToIntPtr(Node* value);
+ TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
+ TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
+ TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
+ TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
+ TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(SloppyTNode<Number> value);
+
+ void TaggedToNumeric(Node* context, Node* value, Label* done,
+ Variable* var_numeric);
+ void TaggedToNumericWithFeedback(Node* context, Node* value, Label* done,
+ Variable* var_numeric,
+ Variable* var_feedback);
Node* TimesPointerSize(Node* value);
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
// or returns the {value} converted to a String otherwise.
- Node* ToThisString(Node* context, Node* value, char const* method_name);
+ TNode<String> ToThisString(Node* context, Node* value,
+ char const* method_name);
// Throws a TypeError for {method_name} if {value} is neither of the given
// {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
// returns the {value} (or wrapped value) otherwise.
@@ -867,6 +1018,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name);
+ // Throws a TypeError for {method_name} if {value} is not a JSReceiver.
+ // Returns the {value}'s map.
+ Node* ThrowIfNotJSReceiver(Node* context, Node* value,
+ MessageTemplate::Template msg_template,
+ const char* method_name = nullptr);
void ThrowRangeError(Node* context, MessageTemplate::Template message,
Node* arg0 = nullptr, Node* arg1 = nullptr,
@@ -884,8 +1040,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsAccessorPair(Node* object);
Node* IsAllocationSite(Node* object);
Node* IsAnyHeapNumber(Node* object);
+ Node* IsArrayIteratorInstanceType(Node* instance_type);
+ Node* IsNoElementsProtectorCellInvalid();
+ Node* IsBigIntInstanceType(Node* instance_type);
+ Node* IsBigInt(Node* object);
Node* IsBoolean(Node* object);
- Node* IsExtensibleMap(Node* map);
Node* IsCallableMap(Node* map);
Node* IsCallable(Node* object);
Node* IsCell(Node* object);
@@ -894,14 +1053,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsConstructor(Node* object);
Node* IsDeprecatedMap(Node* map);
Node* IsDictionary(Node* object);
+ Node* IsExtensibleMap(Node* map);
Node* IsExternalStringInstanceType(Node* instance_type);
+ TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
+ SloppyTNode<Context> context);
Node* IsFeedbackVector(Node* object);
Node* IsFixedArray(Node* object);
+ Node* IsFixedArraySubclass(Node* object);
Node* IsFixedArrayWithKind(Node* object, ElementsKind kind);
Node* IsFixedArrayWithKindOrEmpty(Node* object, ElementsKind kind);
Node* IsFixedDoubleArray(Node* object);
Node* IsFixedTypedArray(Node* object);
- Node* IsZeroOrFixedArray(Node* object);
+ Node* IsFunctionWithPrototypeSlotMap(Node* map);
Node* IsHashTable(Node* object);
Node* IsHeapNumber(Node* object);
Node* IsIndirectStringInstanceType(Node* instance_type);
@@ -912,16 +1075,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsJSFunctionInstanceType(Node* instance_type);
Node* IsJSFunctionMap(Node* object);
Node* IsJSFunction(Node* object);
+ Node* IsJSGlobalProxyInstanceType(Node* instance_type);
Node* IsJSGlobalProxy(Node* object);
Node* IsJSObjectInstanceType(Node* instance_type);
Node* IsJSObjectMap(Node* map);
Node* IsJSObject(Node* object);
- Node* IsJSGlobalProxyInstanceType(Node* instance_type);
Node* IsJSProxy(Node* object);
Node* IsJSReceiverInstanceType(Node* instance_type);
Node* IsJSReceiverMap(Node* map);
Node* IsJSReceiver(Node* object);
- Node* IsNullOrJSReceiver(Node* object);
Node* IsJSRegExp(Node* object);
Node* IsJSTypedArray(Node* object);
Node* IsJSValueInstanceType(Node* instance_type);
@@ -931,33 +1093,36 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsMutableHeapNumber(Node* object);
Node* IsName(Node* object);
Node* IsNativeContext(Node* object);
+ Node* IsNullOrJSReceiver(Node* object);
+ Node* IsNullOrUndefined(Node* object);
+ Node* IsNumberDictionary(Node* object);
Node* IsOneByteStringInstanceType(Node* instance_type);
Node* IsPrimitiveInstanceType(Node* instance_type);
Node* IsPrivateSymbol(Node* object);
Node* IsPropertyArray(Node* object);
Node* IsPropertyCell(Node* object);
+ Node* IsPrototypeInitialArrayPrototype(Node* context, Node* map);
Node* IsSequentialStringInstanceType(Node* instance_type);
- inline Node* IsSharedFunctionInfo(Node* object) {
- return IsSharedFunctionInfoMap(LoadMap(object));
- }
Node* IsShortExternalStringInstanceType(Node* instance_type);
Node* IsSpecialReceiverInstanceType(Node* instance_type);
Node* IsSpecialReceiverMap(Node* map);
+ Node* IsSpeciesProtectorCellInvalid();
Node* IsStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
Node* IsSymbolInstanceType(Node* instance_type);
Node* IsSymbol(Node* object);
- Node* IsBigIntInstanceType(Node* instance_type);
- Node* IsBigInt(Node* object);
- Node* IsUnseededNumberDictionary(Node* object);
- Node* IsWeakCell(Node* object);
Node* IsUndetectableMap(Node* map);
- Node* IsArrayProtectorCellInvalid();
- Node* IsSpeciesProtectorCellInvalid();
- Node* IsPrototypeInitialArrayPrototype(Node* context, Node* map);
+ Node* IsWeakCell(Node* object);
+ Node* IsZeroOrFixedArray(Node* object);
+
+ inline Node* IsSharedFunctionInfo(Node* object) {
+ return IsSharedFunctionInfoMap(LoadMap(object));
+ }
// True iff |object| is a Smi or a HeapNumber.
Node* IsNumber(Node* object);
+ // True iff |object| is a Smi or a HeapNumber or a BigInt.
+ Node* IsNumeric(Node* object);
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
@@ -969,15 +1134,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// ElementsKind helpers:
Node* IsFastElementsKind(Node* elements_kind);
+ Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
Node* IsHoleyFastElementsKind(Node* elements_kind);
Node* IsElementsKindGreaterThan(Node* target_kind,
ElementsKind reference_kind);
+ Node* FixedArraySizeDoesntFitInNewSpace(
+ Node* element_count, int base_size = FixedArray::kHeaderSize,
+ ParameterMode mode = INTPTR_PARAMETERS);
+
// String helpers.
// Load a character from a String (might flatten a ConsString).
- TNode<Uint32T> StringCharCodeAt(
- SloppyTNode<String> string, Node* index,
- ParameterMode parameter_mode = SMI_PARAMETERS);
+ TNode<Uint32T> StringCharCodeAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> index);
// Return the single character string with only {code}.
Node* StringFromCharCode(Node* code);
@@ -1013,26 +1182,39 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* StringFromCodePoint(Node* codepoint, UnicodeEncoding encoding);
// Type conversion helpers.
+ enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
- Node* StringToNumber(Node* context, Node* input);
+ TNode<Number> StringToNumber(SloppyTNode<Context> context,
+ SloppyTNode<String> input);
Node* NumberToString(Node* context, Node* input);
// Convert an object to a name.
Node* ToName(Node* context, Node* input);
// Convert a Non-Number object to a Number.
- Node* NonNumberToNumber(Node* context, Node* input);
+ TNode<Number> NonNumberToNumber(
+ SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
+ BigIntHandling bigint_handling = BigIntHandling::kThrow);
+ // Convert a Non-Number object to a Numeric.
+ TNode<Numeric> NonNumberToNumeric(SloppyTNode<Context> context,
+ SloppyTNode<HeapObject> input);
// Convert any object to a Number.
- Node* ToNumber(Node* context, Node* input);
+ // Conforms to ES#sec-tonumber if {bigint_handling} == kThrow.
+ // With {bigint_handling} == kConvertToNumber, matches behavior of
+ // tc39.github.io/proposal-bigint/#sec-number-constructor-number-value.
+ TNode<Number> ToNumber(
+ SloppyTNode<Context> context, SloppyTNode<Object> input,
+ BigIntHandling bigint_handling = BigIntHandling::kThrow);
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
// ES#sec-touint32
- TNode<Object> ToUint32(SloppyTNode<Context> context,
+ TNode<Number> ToUint32(SloppyTNode<Context> context,
SloppyTNode<Object> input);
// Convert any object to a String.
TNode<String> ToString(SloppyTNode<Context> context,
SloppyTNode<Object> input);
- Node* ToString_Inline(Node* const context, Node* const input);
+ TNode<String> ToString_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
// Convert any object to a Primitive.
Node* JSReceiverToPrimitive(Node* context, Node* input);
@@ -1052,7 +1234,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ToLength_Inline(Node* const context, Node* const input);
// Convert any object to an Integer.
- TNode<Object> ToInteger(SloppyTNode<Context> context,
+ TNode<Number> ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode = kNoTruncation);
@@ -1113,6 +1295,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Int32Constant(0));
}
+ // Returns true if none of the mask's bits in given |word32| are set.
+ TNode<BoolT> IsNotSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ return Word32Equal(Word32And(word32, Int32Constant(mask)),
+ Int32Constant(0));
+ }
+
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
Node* IsSetWord(Node* word) {
@@ -1305,7 +1493,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ComputeIntegerHash(Node* key);
Node* ComputeIntegerHash(Node* key, Node* seed);
- template <typename Dictionary>
void NumberDictionaryLookup(Node* dictionary, Node* intptr_index,
Label* if_found, Variable* var_entry,
Label* if_not_found);
@@ -1356,11 +1543,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <class... TArgs>
Node* CallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
+ !Builtins::IsLazy(id));
return CallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
template <class... TArgs>
Node* TailCallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
+ !Builtins::IsLazy(id));
return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
@@ -1439,6 +1630,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Update the type feedback vector.
void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
+ // Report that there was a feedback update, performing any tasks that should
+ // be done after a feedback update.
+ void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
+ SloppyTNode<IntPtrT> slot_id, const char* reason);
+
// Combine the new feedback with the existing_feedback.
void CombineFeedback(Variable* existing_feedback, Node* feedback);
@@ -1500,11 +1696,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Create a new AllocationSite and install it into a feedback vector.
Node* CreateAllocationSiteInFeedbackVector(Node* feedback_vector, Node* slot);
- // Given a recently allocated object {object}, with map {initial_map},
- // initialize remaining fields appropriately to comply with slack tracking.
- void HandleSlackTracking(Node* context, Node* object, Node* initial_map,
- int start_offset);
-
enum class IndexAdvanceMode { kPre, kPost };
typedef std::function<void(Node* index)> FastLoopBody;
@@ -1568,22 +1759,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, Heap::RootListIndex root);
- enum RelationalComparisonMode {
- kLessThan,
- kLessThanOrEqual,
- kGreaterThan,
- kGreaterThanOrEqual
- };
-
- Node* RelationalComparison(RelationalComparisonMode mode, Node* lhs,
- Node* rhs, Node* context,
+ Node* RelationalComparison(Operation op, Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
- void BranchIfNumericRelationalComparison(RelationalComparisonMode mode,
- Node* lhs, Node* rhs, Label* if_true,
- Label* if_false);
+ void BranchIfNumericRelationalComparison(Operation op, Node* lhs, Node* rhs,
+ Label* if_true, Label* if_false);
+
+ void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
+ Label* if_not_accessor_pair) {
+ GotoIf(TaggedIsSmi(value), if_not_accessor_pair);
+ Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
+ }
- void GotoIfNumberGreaterThanOrEqual(Node* lhs, Node* rhs, Label* if_false);
+ void GotoIfNumericGreaterThanOrEqual(Node* lhs, Node* rhs, Label* if_false);
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
@@ -1643,7 +1831,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Support for printf-style debugging
void Print(const char* s);
void Print(const char* prefix, Node* tagged_value);
- inline void Print(Node* tagged_value) { return Print(nullptr, tagged_value); }
+ inline void Print(SloppyTNode<Object> tagged_value) {
+ return Print(nullptr, tagged_value);
+ }
template <class... TArgs>
Node* MakeTypeError(MessageTemplate::Template message, Node* context,
@@ -1660,6 +1850,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Unreachable();
}
+ void PerformStackCheck(Node* context);
+
protected:
void DescriptorLookup(Node* unique_name, Node* descriptors, Node* bitfield3,
Label* if_found, Variable* var_name_index,
@@ -1710,11 +1902,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout);
- Node* AllocateSlicedString(Heap::RootListIndex map_root_index, Node* length,
- Node* parent, Node* offset);
+ Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
+ TNode<Smi> length, Node* parent, Node* offset);
- Node* AllocateConsString(Heap::RootListIndex map_root_index, Node* length,
- Node* first, Node* second, AllocationFlags flags);
+ Node* AllocateConsString(Heap::RootListIndex map_root_index,
+ TNode<Smi> length, Node* first, Node* second,
+ AllocationFlags flags);
// Implements DescriptorArray::number_of_entries.
// Returns an untagged int32.
@@ -1728,19 +1921,37 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
Variable* var_type_feedback = nullptr);
Node* AllocAndCopyStringCharacters(Node* context, Node* from,
- Node* from_instance_type, Node* from_index,
- Node* character_count);
+ Node* from_instance_type,
+ TNode<IntPtrT> from_index,
+ TNode<Smi> character_count);
static const int kElementLoopUnrollThreshold = 8;
+
+ // {convert_bigint} is only meaningful when {mode} == kToNumber.
+ Node* NonNumberToNumberOrNumeric(
+ Node* context, Node* input, Object::Conversion mode,
+ BigIntHandling bigint_handling = BigIntHandling::kThrow);
+
+ enum class Feedback { kCollect, kNone };
+ template <Feedback feedback>
+ void TaggedToNumeric(Node* context, Node* value, Label* done,
+ Variable* var_numeric, Variable* var_feedback = nullptr);
+
+ template <Feedback feedback, Object::Conversion conversion>
+ void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number,
+ Variable* var_word32,
+ Label* if_bigint = nullptr,
+ Variable* var_bigint = nullptr,
+ Variable* var_feedback = nullptr);
};
class CodeStubArguments {
public:
typedef compiler::Node Node;
- template <class A>
- using TNode = compiler::TNode<A>;
- template <class A>
- using SloppyTNode = compiler::SloppyTNode<A>;
+ template <class T>
+ using TNode = compiler::TNode<T>;
+ template <class T>
+ using SloppyTNode = compiler::SloppyTNode<T>;
enum ReceiverMode { kHasReceiver, kNoReceiver };
// |argc| is an intptr value which specifies the number of arguments passed
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 4721642d4a..4b2bd1eaf4 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -34,7 +34,7 @@ CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
- deoptimization_handler_(NULL),
+ deoptimization_handler_(nullptr),
miss_handler_(),
has_miss_handler_(false) {
stub->InitializeDescriptor(this);
@@ -45,7 +45,7 @@ CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
- deoptimization_handler_(NULL),
+ deoptimization_handler_(nullptr),
miss_handler_(),
has_miss_handler_(false) {
CodeStub::InitializeDescriptor(isolate, stub_key, this);
@@ -71,9 +71,9 @@ void CodeStubDescriptor::Initialize(Register stack_parameter_count,
bool CodeStub::FindCodeInCache(Code** code_out) {
- UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
+ NumberDictionary* stubs = isolate()->heap()->code_stubs();
int index = stubs->FindEntry(isolate(), GetKey());
- if (index != UnseededNumberDictionary::kNotFound) {
+ if (index != NumberDictionary::kNotFound) {
*code_out = Code::cast(stubs->ValueAt(index));
return true;
}
@@ -97,10 +97,10 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
void CodeStub::DeleteStubFromCacheForTesting() {
Heap* heap = isolate_->heap();
- Handle<UnseededNumberDictionary> dict(heap->code_stubs());
+ Handle<NumberDictionary> dict(heap->code_stubs());
int entry = dict->FindEntry(GetKey());
- DCHECK_NE(UnseededNumberDictionary::kNotFound, entry);
- dict = UnseededNumberDictionary::DeleteEntry(dict, entry);
+ DCHECK_NE(NumberDictionary::kNotFound, entry);
+ dict = NumberDictionary::DeleteEntry(dict, entry);
heap->SetRootCodeStubs(*dict);
}
@@ -108,7 +108,7 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate(), nullptr, 256, CodeObjectRequired::kYes);
{
// Update the static counter each time a new code stub is generated.
@@ -121,12 +121,17 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Generate(&masm);
}
+ // Allocate the handler table.
+ Handle<HandlerTable> table = GenerateHandlerTable();
+
// Create the code object.
CodeDesc desc;
masm.GetCode(isolate(), &desc);
// Copy the generated code into a heap object.
Handle<Code> new_object = factory->NewCode(
- desc, Code::STUB, masm.CodeObject(), NeedsImmovableCode());
+ desc, Code::STUB, masm.CodeObject(), Builtins::kNoBuiltinId, table,
+ MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate()),
+ NeedsImmovableCode(), GetKey());
return new_object;
}
@@ -146,8 +151,7 @@ Handle<Code> CodeStub::GetCode() {
CanonicalHandleScope canonical(isolate());
Handle<Code> new_object = GenerateCode();
- new_object->set_stub_key(GetKey());
- FinishCode(new_object);
+ DCHECK_EQ(GetKey(), new_object->stub_key());
RecordCodeGeneration(new_object);
#ifdef ENABLE_DISASSEMBLER
@@ -162,15 +166,14 @@ Handle<Code> CodeStub::GetCode() {
#endif
// Update the dictionary and the root in Heap.
- Handle<UnseededNumberDictionary> dict = UnseededNumberDictionary::Set(
- handle(heap->code_stubs()), GetKey(), new_object);
+ Handle<NumberDictionary> dict =
+ NumberDictionary::Set(handle(heap->code_stubs()), GetKey(), new_object);
heap->SetRootCodeStubs(*dict);
code = *new_object;
}
Activate(code);
- DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code) ||
- heap->code_space()->FirstPage()->Contains(code->address()));
+ DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code));
return Handle<Code>(code, isolate());
}
@@ -188,7 +191,7 @@ const char* CodeStub::MajorName(CodeStub::Major major_key) {
case NUMBER_OF_IDS:
UNREACHABLE();
}
- return NULL;
+ return nullptr;
}
@@ -222,6 +225,9 @@ void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
}
}
+Handle<HandlerTable> PlatformCodeStub::GenerateHandlerTable() {
+ return HandlerTable::Empty(isolate());
+}
static void InitializeDescriptorDispatchedCall(CodeStub* stub,
void** value_out) {
@@ -267,13 +273,13 @@ TF_STUB(StringAddStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
if ((flags & STRING_ADD_CHECK_LEFT) != 0) {
- DCHECK((flags & STRING_ADD_CONVERT) != 0);
+ DCHECK_NE(flags & STRING_ADD_CONVERT, 0);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
left = ToString(context, JSReceiverToPrimitive(context, left));
}
if ((flags & STRING_ADD_CHECK_RIGHT) != 0) {
- DCHECK((flags & STRING_ADD_CONVERT) != 0);
+ DCHECK_NE(flags & STRING_ADD_CONVERT, 0);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
right = ToString(context, JSReceiverToPrimitive(context, right));
@@ -296,7 +302,7 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
compiler::CodeAssemblerState state(isolate(), &zone, descriptor, Code::STUB,
- name);
+ name, 1, GetKey());
GenerateAssembly(&state);
return compiler::CodeAssembler::GenerateCode(&state);
}
@@ -355,23 +361,6 @@ TF_STUB(TransitionElementsKindStub, CodeStubAssembler) {
}
}
-// TODO(ishell): move to builtins.
-TF_STUB(NumberToStringStub, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* argument = Parameter(Descriptor::kArgument);
- Return(NumberToString(context, argument));
-}
-
-// TODO(ishell): move to builtins.
-TF_STUB(SubStringStub, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* string = Parameter(Descriptor::kString);
- Node* from = Parameter(Descriptor::kFrom);
- Node* to = Parameter(Descriptor::kTo);
-
- Return(SubString(context, string, from, to));
-}
-
// TODO(ishell): move to builtins-handler-gen.
TF_STUB(KeyedLoadSloppyArgumentsStub, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -469,11 +458,11 @@ TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
vector);
}
-void JSEntryStub::FinishCode(Handle<Code> code) {
+Handle<HandlerTable> JSEntryStub::GenerateHandlerTable() {
Handle<FixedArray> handler_table =
- code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
+ isolate()->factory()->NewFixedArray(1, TENURED);
handler_table->set(0, Smi::FromInt(handler_offset_));
- code->set_handler_table(*handler_table);
+ return Handle<HandlerTable>::cast(handler_table);
}
@@ -591,7 +580,7 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer,
Isolate* isolate) {
FunctionEntryHook entry_hook = isolate->function_entry_hook();
- DCHECK(entry_hook != NULL);
+ DCHECK_NOT_NULL(entry_hook);
entry_hook(function, stack_pointer);
}
@@ -726,16 +715,7 @@ TF_STUB(GrowArrayElementsStub, CodeStubAssembler) {
Return(new_elements);
BIND(&runtime);
- // TODO(danno): Make this a tail call when the stub is only used from TurboFan
- // code. This musn't be a tail call for now, since the caller site in lithium
- // creates a safepoint. This safepoint musn't have a different number of
- // arguments on the stack in the case that a GC happens from the slow-case
- // allocation path (zero, since all the stubs inputs are in registers) and
- // when the call happens (it would be two in the tail call case due to the
- // tail call pushing the arguments on the stack for the runtime call). By not
- // tail-calling, the runtime call case also has zero arguments on the stack
- // for the stub frame.
- Return(CallRuntime(Runtime::kGrowArrayElements, context, object, key));
+ TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 6e23fb9a9d..76057ffcc2 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/assembler.h"
-#include "src/codegen.h"
#include "src/factory.h"
#include "src/globals.h"
#include "src/interface-descriptors.h"
@@ -38,11 +37,7 @@ class Node;
V(JSEntry) \
V(MathPow) \
V(ProfileEntryHook) \
- V(RecordWrite) \
- V(StoreBufferOverflow) \
V(StoreSlowElement) \
- V(SubString) \
- V(NameDictionaryLookup) \
/* --- TurboFanCodeStubs --- */ \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
@@ -54,7 +49,6 @@ class Node;
V(KeyedStoreSloppyArguments) \
V(LoadScriptContextField) \
V(StoreScriptContextField) \
- V(NumberToString) \
V(StringAdd) \
V(GetProperty) \
V(StoreFastElement) \
@@ -81,35 +75,23 @@ class Node;
// List of code stubs only used on PPC platforms.
#ifdef V8_TARGET_ARCH_PPC
-#define CODE_STUB_LIST_PPC(V) \
- V(DirectCEntry) \
- V(StoreRegistersState) \
- V(RestoreRegistersState)
+#define CODE_STUB_LIST_PPC(V) V(DirectCEntry)
#else
#define CODE_STUB_LIST_PPC(V)
#endif
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V) \
- V(DirectCEntry) \
- V(RestoreRegistersState) \
- V(StoreRegistersState)
+#define CODE_STUB_LIST_MIPS(V) V(DirectCEntry)
#elif V8_TARGET_ARCH_MIPS64
-#define CODE_STUB_LIST_MIPS(V) \
- V(DirectCEntry) \
- V(RestoreRegistersState) \
- V(StoreRegistersState)
+#define CODE_STUB_LIST_MIPS(V) V(DirectCEntry)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
// List of code stubs only used on S390 platforms.
#ifdef V8_TARGET_ARCH_S390
-#define CODE_STUB_LIST_S390(V) \
- V(DirectCEntry) \
- V(StoreRegistersState) \
- V(RestoreRegistersState)
+#define CODE_STUB_LIST_S390(V) V(DirectCEntry)
#else
#define CODE_STUB_LIST_S390(V)
#endif
@@ -211,7 +193,7 @@ class CodeStub : public ZoneObject {
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
+ virtual Movability NeedsImmovableCode() { return kMovable; }
virtual void PrintName(std::ostream& os) const; // NOLINT
virtual void PrintBaseName(std::ostream& os) const; // NOLINT
@@ -230,9 +212,6 @@ class CodeStub : public ZoneObject {
// initially generated.
void RecordCodeGeneration(Handle<Code> code);
- // Finish the code object after it has been generated.
- virtual void FinishCode(Handle<Code> code) { }
-
// Activate newly generated stub. Is called after
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
@@ -282,11 +261,6 @@ class CodeStub : public ZoneObject {
void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
- public: \
- Handle<Code> GenerateCode() override; \
- DEFINE_CODE_STUB(NAME, SUPER)
-
#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
public: \
typedef NAME##Descriptor Descriptor; \
@@ -316,6 +290,9 @@ class PlatformCodeStub : public CodeStub {
// Generates the assembler code for the stub.
virtual void Generate(MacroAssembler* masm) = 0;
+ // Generates the exception handler table for the stub.
+ virtual Handle<HandlerTable> GenerateHandlerTable();
+
DEFINE_CODE_STUB_BASE(PlatformCodeStub, CodeStub);
};
@@ -329,11 +306,11 @@ class CodeStubDescriptor {
CodeStubDescriptor(Isolate* isolate, uint32_t stub_key);
- void Initialize(Address deoptimization_handler = NULL,
+ void Initialize(Address deoptimization_handler = nullptr,
int hint_stack_parameter_count = -1,
StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
void Initialize(Register stack_parameter_count,
- Address deoptimization_handler = NULL,
+ Address deoptimization_handler = nullptr,
int hint_stack_parameter_count = -1,
StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
@@ -439,9 +416,7 @@ class TurboFanCodeStub : public CodeStub {
} // namespace v8
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "src/x64/code-stubs-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
@@ -518,14 +493,6 @@ class GetPropertyStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
};
-class NumberToStringStub final : public TurboFanCodeStub {
- public:
- explicit NumberToStringStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
-};
-
class GrowArrayElementsStub : public TurboFanCodeStub {
public:
GrowArrayElementsStub(Isolate* isolate, ElementsKind kind)
@@ -638,30 +605,17 @@ class CallApiCallbackStub : public PlatformCodeStub {
static const int kArgBits = 3;
static const int kArgMax = (1 << kArgBits) - 1;
- // CallApiCallbackStub for regular setters and getters.
- CallApiCallbackStub(Isolate* isolate, bool is_store, bool is_lazy)
- : CallApiCallbackStub(isolate, is_store ? 1 : 0, is_store, is_lazy) {}
-
- // CallApiCallbackStub for callback functions.
- CallApiCallbackStub(Isolate* isolate, int argc, bool is_lazy)
- : CallApiCallbackStub(isolate, argc, false, is_lazy) {}
-
- private:
- CallApiCallbackStub(Isolate* isolate, int argc, bool is_store, bool is_lazy)
+ CallApiCallbackStub(Isolate* isolate, int argc)
: PlatformCodeStub(isolate) {
- CHECK(0 <= argc && argc <= kArgMax);
- minor_key_ = IsStoreBits::encode(is_store) |
- ArgumentBits::encode(argc) |
- IsLazyAccessorBits::encode(is_lazy);
+ CHECK_LE(0, argc);
+ CHECK_LE(argc, kArgMax);
+ minor_key_ = ArgumentBits::encode(argc);
}
- bool is_store() const { return IsStoreBits::decode(minor_key_); }
- bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
+ private:
int argc() const { return ArgumentBits::decode(minor_key_); }
- class IsStoreBits: public BitField<bool, 0, 1> {};
- class IsLazyAccessorBits : public BitField<bool, 1, 1> {};
- class ArgumentBits : public BitField<int, 2, kArgBits> {};
+ class ArgumentBits : public BitField<int, 0, kArgBits> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiCallback);
DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
@@ -730,7 +684,7 @@ class CEntryStub : public PlatformCodeStub {
bool is_builtin_exit() const { return FrameTypeBits::decode(minor_key_); }
int result_size() const { return ResultSizeBits::decode(minor_key_); }
- bool NeedsImmovableCode() override;
+ Movability NeedsImmovableCode() override;
class SaveDoublesBits : public BitField<bool, 0, 1> {};
class ArgvMode : public BitField<bool, 1, 1> {};
@@ -741,7 +695,6 @@ class CEntryStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(CEntry, PlatformCodeStub);
};
-
class JSEntryStub : public PlatformCodeStub {
public:
JSEntryStub(Isolate* isolate, StackFrame::Type type)
@@ -751,7 +704,7 @@ class JSEntryStub : public PlatformCodeStub {
}
private:
- void FinishCode(Handle<Code> code) override;
+ Handle<HandlerTable> GenerateHandlerTable() override;
void PrintName(std::ostream& os) const override { // NOLINT
os << (type() == StackFrame::ENTRY ? "JSEntryStub"
@@ -790,45 +743,24 @@ enum EmbedMode {
class DoubleToIStub : public PlatformCodeStub {
public:
- DoubleToIStub(Isolate* isolate, Register source, Register destination,
- int offset, bool is_truncating, bool skip_fastpath = false)
+ DoubleToIStub(Isolate* isolate, Register destination)
: PlatformCodeStub(isolate) {
- minor_key_ = SourceRegisterBits::encode(source.code()) |
- DestinationRegisterBits::encode(destination.code()) |
- OffsetBits::encode(offset) |
- IsTruncatingBits::encode(is_truncating) |
- SkipFastPathBits::encode(skip_fastpath) |
+ minor_key_ = DestinationRegisterBits::encode(destination.code()) |
SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
}
bool SometimesSetsUpAFrame() override { return false; }
private:
- Register source() const {
- return Register::from_code(SourceRegisterBits::decode(minor_key_));
- }
Register destination() const {
return Register::from_code(DestinationRegisterBits::decode(minor_key_));
}
- bool is_truncating() const { return IsTruncatingBits::decode(minor_key_); }
- bool skip_fastpath() const { return SkipFastPathBits::decode(minor_key_); }
- int offset() const { return OffsetBits::decode(minor_key_); }
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
- class SourceRegisterBits:
- public BitField<int, 0, kBitsPerRegisterNumber> {}; // NOLINT
- class DestinationRegisterBits:
- public BitField<int, kBitsPerRegisterNumber,
- kBitsPerRegisterNumber> {}; // NOLINT
- class IsTruncatingBits:
- public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
- class OffsetBits:
- public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
- class SkipFastPathBits:
- public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
- class SSE3Bits:
- public BitField<int, 2 * kBitsPerRegisterNumber + 5, 1> {}; // NOLINT
+ class DestinationRegisterBits
+ : public BitField<int, 0, kBitsPerRegisterNumber> {};
+ class SSE3Bits : public BitField<int, kBitsPerRegisterNumber, 1> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
@@ -1095,37 +1027,8 @@ class ProfileEntryHookStub : public PlatformCodeStub {
};
-class StoreBufferOverflowStub : public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate) {
- minor_key_ = SaveDoublesBits::encode(save_fp == kSaveFPRegs);
- }
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
-
- class SaveDoublesBits : public BitField<bool, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StoreBufferOverflow, PlatformCodeStub);
-};
-
-class SubStringStub : public TurboFanCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(SubString);
- DEFINE_TURBOFAN_CODE_STUB(SubString, TurboFanCodeStub);
-};
-
-
#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
#undef DEFINE_PLATFORM_CODE_STUB
-#undef DEFINE_HANDLER_CODE_STUB
#undef DEFINE_CODE_STUB
#undef DEFINE_CODE_STUB_BASE
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index c313a1139a..10dfdbbd4a 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -4,51 +4,14 @@
#include "src/codegen.h"
-#if defined(V8_OS_AIX)
-#include <fenv.h> // NOLINT(build/c++11)
-#endif
-
+#include <cmath>
#include <memory>
-#include "src/bootstrapper.h"
-#include "src/compilation-info.h"
-#include "src/counters.h"
-#include "src/debug/debug.h"
-#include "src/eh-frame.h"
-#include "src/objects-inl.h"
-#include "src/runtime/runtime.h"
+#include "src/flags.h"
namespace v8 {
namespace internal {
-
-#if defined(V8_OS_WIN)
-double modulo(double x, double y) {
- // Workaround MS fmod bugs. ECMA-262 says:
- // dividend is finite and divisor is an infinity => result equals dividend
- // dividend is a zero and divisor is nonzero finite => result equals dividend
- if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
- !(x == 0 && (y != 0 && std::isfinite(y)))) {
- x = fmod(x, y);
- }
- return x;
-}
-#else // POSIX
-
-double modulo(double x, double y) {
-#if defined(V8_OS_AIX)
- // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE)
- feclearexcept(FE_ALL_EXCEPT);
- double result = std::fmod(x, y);
- int exception = fetestexcept(FE_UNDERFLOW);
- return (exception ? x : result);
-#else
- return std::fmod(x, y);
-#endif
-}
-#endif // defined(V8_OS_WIN)
-
-
#define UNARY_MATH_FUNCTION(name, generator) \
static UnaryMathFunctionWithIsolate fast_##name##_function = nullptr; \
double std_##name(double x, Isolate* isolate) { return std::name(x); } \
@@ -67,169 +30,5 @@ UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
#undef UNARY_MATH_FUNCTION
-Handle<Code> CodeGenerator::MakeCodeEpilogue(TurboAssembler* tasm,
- EhFrameWriter* eh_frame_writer,
- CompilationInfo* info,
- Handle<Object> self_reference) {
- Isolate* isolate = info->isolate();
-
- // Allocate and install the code.
- CodeDesc desc;
- tasm->GetCode(isolate, &desc);
- if (eh_frame_writer) eh_frame_writer->GetEhFrame(&desc);
-
- Handle<Code> code = isolate->factory()->NewCode(desc, info->code_kind(),
- self_reference, false);
- isolate->counters()->total_compiled_code_size()->Increment(
- code->instruction_size());
- return code;
-}
-
-// Print function's source if it was not printed before.
-// Return a sequential id under which this function was printed.
-static int PrintFunctionSource(CompilationInfo* info,
- std::vector<Handle<SharedFunctionInfo>>* printed,
- int inlining_id,
- Handle<SharedFunctionInfo> shared) {
- // Outermost function has source id -1 and inlined functions take
- // source ids starting from 0.
- int source_id = -1;
- if (inlining_id != SourcePosition::kNotInlined) {
- for (unsigned i = 0; i < printed->size(); i++) {
- if (printed->at(i).is_identical_to(shared)) {
- return i;
- }
- }
- source_id = static_cast<int>(printed->size());
- printed->push_back(shared);
- }
-
- Isolate* isolate = info->isolate();
- if (!shared->script()->IsUndefined(isolate)) {
- Handle<Script> script(Script::cast(shared->script()), isolate);
-
- if (!script->source()->IsUndefined(isolate)) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- Object* source_name = script->name();
- OFStream os(tracing_scope.file());
- os << "--- FUNCTION SOURCE (";
- if (source_name->IsString()) {
- os << String::cast(source_name)->ToCString().get() << ":";
- }
- os << shared->DebugName()->ToCString().get() << ") id{";
- os << info->optimization_id() << "," << source_id << "} start{";
- os << shared->start_position() << "} ---\n";
- {
- DisallowHeapAllocation no_allocation;
- int start = shared->start_position();
- int len = shared->end_position() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
- for (const auto& c : source) {
- os << AsReversiblyEscapedUC16(c);
- }
- }
-
- os << "\n--- END ---\n";
- }
- }
-
- return source_id;
-}
-
-// Print information for the given inlining: which function was inlined and
-// where the inlining occurred.
-static void PrintInlinedFunctionInfo(
- CompilationInfo* info, int source_id, int inlining_id,
- const CompilationInfo::InlinedFunctionHolder& h) {
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
- << info->optimization_id() << "," << source_id << "} AS " << inlining_id
- << " AT ";
- const SourcePosition position = h.position.position;
- if (position.IsKnown()) {
- os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
- } else {
- os << "<?>";
- }
- os << std::endl;
-}
-
-// Print the source of all functions that participated in this optimizing
-// compilation. For inlined functions print source position of their inlining.
-static void DumpParticipatingSource(CompilationInfo* info) {
- AllowDeferredHandleDereference allow_deference_for_print_code;
-
- std::vector<Handle<SharedFunctionInfo>> printed;
- printed.reserve(info->inlined_functions().size());
-
- PrintFunctionSource(info, &printed, SourcePosition::kNotInlined,
- info->shared_info());
- const auto& inlined = info->inlined_functions();
- for (unsigned id = 0; id < inlined.size(); id++) {
- const int source_id =
- PrintFunctionSource(info, &printed, id, inlined[id].shared_info);
- PrintInlinedFunctionInfo(info, source_id, id, inlined[id]);
- }
-}
-
-void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
- if (FLAG_print_opt_source && info->IsOptimizing()) {
- DumpParticipatingSource(info);
- }
-
-#ifdef ENABLE_DISASSEMBLER
- AllowDeferredHandleDereference allow_deference_for_print_code;
- Isolate* isolate = info->isolate();
- bool print_code =
- isolate->bootstrapper()->IsActive()
- ? FLAG_print_builtin_code
- : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
- (info->IsOptimizing() && FLAG_print_opt_code &&
- info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
- (info->IsWasm() && FLAG_print_wasm_code));
- if (print_code) {
- std::unique_ptr<char[]> debug_name = info->GetDebugName();
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
-
- // Print the source code if available.
- bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
- if (print_source) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- if (shared->script()->IsScript() &&
- !Script::cast(shared->script())->source()->IsUndefined(isolate)) {
- os << "--- Raw source ---\n";
- StringCharacterStream stream(
- String::cast(Script::cast(shared->script())->source()),
- shared->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len = shared->end_position() - shared->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.HasMore()) {
- os << AsReversiblyEscapedUC16(stream.GetNext());
- }
- }
- os << "\n\n";
- }
- }
- if (info->IsOptimizing()) {
- os << "--- Optimized code ---\n"
- << "optimization_id = " << info->optimization_id() << "\n";
- } else {
- os << "--- Code ---\n";
- }
- if (print_source) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- os << "source_position = " << shared->start_position() << "\n";
- }
- code->Disassemble(debug_name.get(), os);
- os << "--- End code ---\n";
- }
-#endif // ENABLE_DISASSEMBLER
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 712d283c6b..1b57c74447 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -5,84 +5,11 @@
#ifndef V8_CODEGEN_H_
#define V8_CODEGEN_H_
-#include "src/code-stubs.h"
#include "src/globals.h"
-#include "src/runtime/runtime.h"
-
-// Include the declaration of the architecture defined class CodeGenerator.
-// The contract to the shared code is that the the CodeGenerator is a subclass
-// of Visitor and that the following methods are available publicly:
-// MakeCode
-// MakeCodeEpilogue
-// masm
-// frame
-// script
-// has_valid_frame
-// SetFrame
-// DeleteFrame
-// allocator
-// AddDeferred
-// in_spilled_code
-// set_in_spilled_code
-// RecordPositions
-//
-// These methods are either used privately by the shared code or implemented as
-// shared code:
-// CodeGenerator
-// ~CodeGenerator
-// Generate
-// ComputeLazyCompile
-// ProcessDeclarations
-// DeclareGlobals
-// CheckForInlineRuntimeCall
-// AnalyzeCondition
-// CodeForFunctionPosition
-// CodeForReturnPosition
-// CodeForStatementPosition
-// CodeForDoWhileConditionPosition
-// CodeForSourcePosition
-
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/codegen-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/codegen-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/codegen-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/codegen-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/codegen-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/codegen-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/codegen-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/codegen-s390.h" // NOLINT
-#else
-#error Unsupported target architecture.
-#endif
namespace v8 {
namespace internal {
-class CompilationInfo;
-class EhFrameWriter;
-
-class CodeGenerator {
- public:
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(TurboAssembler* tasm,
- EhFrameWriter* unwinding,
- CompilationInfo* info,
- Handle<Object> self_reference);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
@@ -90,8 +17,6 @@ typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
-V8_EXPORT_PRIVATE double modulo(double x, double y);
-
// Custom implementation of math functions.
double fast_sqrt(double input, Isolate* isolate);
void lazily_initialize_fast_sqrt(Isolate* isolate);
diff --git a/deps/v8/src/collector.h b/deps/v8/src/collector.h
index 307e8b886f..a3e940663f 100644
--- a/deps/v8/src/collector.h
+++ b/deps/v8/src/collector.h
@@ -53,7 +53,7 @@ class Collector {
// A basic Collector will keep this vector valid as long as the Collector
// is alive.
inline Vector<T> AddBlock(int size, T initial_value) {
- DCHECK(size > 0);
+ DCHECK_GT(size, 0);
if (size > current_chunk_.length() - index_) {
Grow(size);
}
@@ -131,7 +131,7 @@ class Collector {
// Creates a new current chunk, and stores the old chunk in the chunks_ list.
void Grow(int min_capacity) {
- DCHECK(growth_factor > 1);
+ DCHECK_GT(growth_factor, 1);
int new_capacity;
int current_length = current_chunk_.length();
if (current_length < kMinCapacity) {
@@ -187,12 +187,12 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
virtual ~SequenceCollector() {}
void StartSequence() {
- DCHECK(sequence_start_ == kNoSequence);
+ DCHECK_EQ(sequence_start_, kNoSequence);
sequence_start_ = this->index_;
}
Vector<T> EndSequence() {
- DCHECK(sequence_start_ != kNoSequence);
+ DCHECK_NE(sequence_start_, kNoSequence);
int sequence_start = sequence_start_;
sequence_start_ = kNoSequence;
if (sequence_start == this->index_) return Vector<T>();
@@ -201,7 +201,7 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
// Drops the currently added sequence, and all collected elements in it.
void DropSequence() {
- DCHECK(sequence_start_ != kNoSequence);
+ DCHECK_NE(sequence_start_, kNoSequence);
int sequence_length = this->index_ - sequence_start_;
this->index_ = sequence_start_;
this->size_ -= sequence_length;
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 3e1bd04664..927e09a940 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -133,7 +133,7 @@ InfoVectorPair CompilationCacheScript::Lookup(
// into the caller's handle scope.
{ HandleScope scope(isolate());
const int generation = 0;
- DCHECK(generations() == 1);
+ DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
InfoVectorPair probe = table->LookupScript(source, context, language_mode);
if (probe.has_shared()) {
@@ -195,7 +195,7 @@ InfoVectorPair CompilationCacheEval::Lookup(
// having cleared the cache.
InfoVectorPair result;
const int generation = 0;
- DCHECK(generations() == 1);
+ DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupEval(source, outer_info, native_context, language_mode,
position);
@@ -284,7 +284,7 @@ InfoVectorPair CompilationCache::LookupEval(
result = eval_global_.Lookup(source, outer_info, context, language_mode,
position);
} else {
- DCHECK(position != kNoSourcePosition);
+ DCHECK_NE(position, kNoSourcePosition);
Handle<Context> native_context(context->native_context(), isolate());
result = eval_contextual_.Lookup(source, outer_info, native_context,
language_mode, position);
@@ -321,7 +321,7 @@ void CompilationCache::PutEval(Handle<String> source,
eval_global_.Put(source, outer_info, function_info, context, literals,
position);
} else {
- DCHECK(position != kNoSourcePosition);
+ DCHECK_NE(position, kNoSourcePosition);
Handle<Context> native_context(context->native_context(), isolate());
eval_contextual_.Put(source, outer_info, function_info, native_context,
literals, position);
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 50c3dea59e..3c9751ac2f 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -42,7 +42,7 @@ class CompilationSubCache {
return GetTable(kFirstGeneration);
}
void SetFirstTable(Handle<CompilationCacheTable> value) {
- DCHECK(kFirstGeneration < generations_);
+ DCHECK_LT(kFirstGeneration, generations_);
tables_[kFirstGeneration] = *value;
}
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index 1a9cd7d9f5..5913f3a5a6 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -42,9 +42,10 @@ void CompilationDependencies::Set(Handle<Object> object,
void CompilationDependencies::Insert(DependentCode::DependencyGroup group,
Handle<HeapObject> object) {
if (groups_[group] == nullptr) {
- groups_[group] = new (zone_) ZoneList<Handle<HeapObject>>(2, zone_);
+ groups_[group] = new (zone_->New(sizeof(ZoneVector<Handle<HeapObject>>)))
+ ZoneVector<Handle<HeapObject>>(zone_);
}
- groups_[group]->Add(object, zone_);
+ groups_[group]->push_back(object);
if (object_wrapper_.is_null()) {
// Allocate the wrapper if necessary.
@@ -73,11 +74,11 @@ void CompilationDependencies::Commit(Handle<Code> code) {
Handle<WeakCell> cell = Code::WeakCellFor(code);
AllowDeferredHandleDereference get_wrapper;
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneList<Handle<HeapObject>>* group_objects = groups_[i];
+ ZoneVector<Handle<HeapObject>>* group_objects = groups_[i];
if (group_objects == nullptr) continue;
DependentCode::DependencyGroup group =
static_cast<DependentCode::DependencyGroup>(i);
- for (int j = 0; j < group_objects->length(); j++) {
+ for (size_t j = 0; j < group_objects->size(); j++) {
DependentCode* dependent_code = Get(group_objects->at(j));
dependent_code->UpdateToFinishedCode(group, *object_wrapper_, *cell);
}
@@ -92,11 +93,11 @@ void CompilationDependencies::Rollback() {
AllowDeferredHandleDereference get_wrapper;
// Unregister from all dependent maps if not yet committed.
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneList<Handle<HeapObject>>* group_objects = groups_[i];
+ ZoneVector<Handle<HeapObject>>* group_objects = groups_[i];
if (group_objects == nullptr) continue;
DependentCode::DependencyGroup group =
static_cast<DependentCode::DependencyGroup>(i);
- for (int j = 0; j < group_objects->length(); j++) {
+ for (size_t j = 0; j < group_objects->size(); j++) {
DependentCode* dependent_code = Get(group_objects->at(j));
dependent_code->RemoveCompilationDependencies(group, *object_wrapper_);
}
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index d0d9b7647f..990d536e38 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -8,6 +8,7 @@
#include "src/handles.h"
#include "src/objects.h"
#include "src/objects/map.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -62,7 +63,7 @@ class CompilationDependencies {
Zone* zone_;
Handle<Foreign> object_wrapper_;
bool aborted_;
- ZoneList<Handle<HeapObject> >* groups_[DependentCode::kGroupCount];
+ ZoneVector<Handle<HeapObject> >* groups_[DependentCode::kGroupCount];
DependentCode* Get(Handle<Object> object) const;
void Set(Handle<Object> object, Handle<DependentCode> dep);
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index cf3ca63642..b722cc4e5c 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -19,10 +19,9 @@ namespace internal {
// TODO(mvstanton): the Code::OPTIMIZED_FUNCTION constant below is
// bogus, it's just that I've eliminated Code::FUNCTION and there isn't
// a "better" value to put in this place.
-CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
- ParseInfo* parse_info,
+CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
FunctionLiteral* literal)
- : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, BASE, isolate, zone) {
+ : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, BASE, zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this CompilationInfo. As such,
@@ -34,16 +33,17 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
if (parse_info->is_eval()) MarkAsEval();
if (parse_info->is_native()) MarkAsNative();
- if (parse_info->will_serialize()) MarkAsSerializing();
+ if (parse_info->collect_type_profile()) MarkAsCollectTypeProfile();
}
CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure)
- : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, OPTIMIZE, isolate, zone) {
+ : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, OPTIMIZE, zone) {
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
+ dependencies_.reset(new CompilationDependencies(isolate, zone));
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
@@ -51,39 +51,41 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
- if (isolate_->NeedsSourcePositionsForProfiling()) {
+ if (isolate->NeedsSourcePositionsForProfiling()) {
MarkAsSourcePositionsEnabled();
}
}
-CompilationInfo::CompilationInfo(Vector<const char> debug_name,
- Isolate* isolate, Zone* zone,
+CompilationInfo::CompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind)
- : CompilationInfo(debug_name, code_kind, STUB, isolate, zone) {}
+ : CompilationInfo(debug_name, code_kind, STUB, zone) {}
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
- Code::Kind code_kind, Mode mode,
- Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- literal_(nullptr),
- flags_(0),
+ Code::Kind code_kind, Mode mode, Zone* zone)
+ : literal_(nullptr),
+ source_range_map_(nullptr),
+ flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
code_kind_(code_kind),
+ stub_key_(0),
+ builtin_index_(Builtins::kNoBuiltinId),
mode_(mode),
osr_offset_(BailoutId::None()),
+ feedback_vector_spec_(zone),
zone_(zone),
deferred_handles_(nullptr),
- dependencies_(isolate, zone),
+ dependencies_(nullptr),
bailout_reason_(kNoReason),
parameter_count_(0),
optimization_id_(-1),
- osr_expr_stack_height_(-1),
debug_name_(debug_name) {}
CompilationInfo::~CompilationInfo() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
- dependencies()->Rollback();
+ if (dependencies()) {
+ dependencies()->Rollback();
+ }
}
DeclarationScope* CompilationInfo::scope() const {
@@ -103,12 +105,12 @@ bool CompilationInfo::is_this_defined() const { return !IsStub(); }
void CompilationInfo::set_deferred_handles(
std::shared_ptr<DeferredHandles> deferred_handles) {
- DCHECK(deferred_handles_.get() == nullptr);
+ DCHECK_NULL(deferred_handles_);
deferred_handles_.swap(deferred_handles);
}
void CompilationInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
- DCHECK(deferred_handles_.get() == nullptr);
+ DCHECK_NULL(deferred_handles_);
deferred_handles_.reset(deferred_handles);
}
@@ -127,8 +129,7 @@ bool CompilationInfo::has_simple_parameters() {
std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
if (literal()) {
- AllowHandleDereference allow_deref;
- return literal()->debug_name()->ToCString();
+ return literal()->GetDebugName();
}
if (!shared_info().is_null()) {
return shared_info()->DebugName()->ToCString();
@@ -151,6 +152,8 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
return StackFrame::WASM_COMPILED;
case Code::JS_TO_WASM_FUNCTION:
return StackFrame::JS_TO_WASM;
+ case Code::WASM_TO_WASM_FUNCTION:
+ return StackFrame::WASM_TO_WASM;
case Code::WASM_TO_JS_FUNCTION:
return StackFrame::WASM_TO_JS;
case Code::WASM_INTERPRETER_ENTRY:
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index ab4fda5223..e0f5c73a9c 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -8,6 +8,7 @@
#include <memory>
#include "src/compilation-dependencies.h"
+#include "src/feedback-vector.h"
#include "src/frames.h"
#include "src/globals.h"
#include "src/handles.h"
@@ -31,6 +32,8 @@ class Zone;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
+// TODO(rmcilroy): Split CompilationInfo into two classes, one for unoptimized
+// compilation and one for optimized compilation, since they don't share much.
class V8_EXPORT_PRIVATE CompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
@@ -38,7 +41,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
enum Flag {
kIsEval = 1 << 0,
kIsNative = 1 << 1,
- kSerializing = 1 << 2,
+ kCollectTypeProfile = 1 << 2,
kAccessorInliningEnabled = 1 << 3,
kFunctionContextSpecializing = 1 << 4,
kInliningEnabled = 1 << 5,
@@ -47,17 +50,29 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
kSourcePositionsEnabled = 1 << 8,
kBailoutOnUninitialized = 1 << 9,
kLoopPeelingEnabled = 1 << 10,
+ kUntrustedCodeMitigations = 1 << 11,
+ };
+
+ // TODO(mtrofin): investigate if this might be generalized outside wasm, with
+ // the goal of better separating the compiler from where compilation lands. At
+ // that point, the Handle<Code> member of CompilationInfo would also be
+ // removed.
+ struct WasmCodeDesc {
+ CodeDesc code_desc;
+ size_t safepoint_table_offset = 0;
+ uint32_t frame_slot_count = 0;
+ Handle<ByteArray> source_positions_table;
+ MaybeHandle<HandlerTable> handler_table;
};
// Construct a compilation info for unoptimized compilation.
- CompilationInfo(Zone* zone, Isolate* isolate, ParseInfo* parse_info,
- FunctionLiteral* literal);
+ CompilationInfo(Zone* zone, ParseInfo* parse_info, FunctionLiteral* literal);
// Construct a compilation info for optimized compilation.
CompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure);
// Construct a compilation info for stub compilation (or testing).
- CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
+ CompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind);
~CompilationInfo();
@@ -75,7 +90,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
DeclarationScope* scope() const;
- Isolate* isolate() const { return isolate_; }
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_offset_.IsNone(); }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -86,6 +100,10 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
Code::Kind code_kind() const { return code_kind_; }
+ uint32_t stub_key() const { return stub_key_; }
+ void set_stub_key(uint32_t stub_key) { stub_key_ = stub_key; }
+ int32_t builtin_index() const { return builtin_index_; }
+ void set_builtin_index(int32_t index) { builtin_index_ = index; }
BailoutId osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
int num_parameters() const;
@@ -105,15 +123,15 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// Flags used by unoptimized compilation.
- void MarkAsSerializing() { SetFlag(kSerializing); }
- bool will_serialize() const { return GetFlag(kSerializing); }
-
void MarkAsEval() { SetFlag(kIsEval); }
bool is_eval() const { return GetFlag(kIsEval); }
void MarkAsNative() { SetFlag(kIsNative); }
bool is_native() const { return GetFlag(kIsNative); }
+ void MarkAsCollectTypeProfile() { SetFlag(kCollectTypeProfile); }
+ bool collect_type_profile() const { return GetFlag(kCollectTypeProfile); }
+
// Flags used by optimized compilation.
void MarkAsFunctionContextSpecializing() {
@@ -147,6 +165,10 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
+ bool has_untrusted_code_mitigations() const {
+ return GetFlag(kUntrustedCodeMitigations);
+ }
+
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -159,6 +181,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
asm_wasm_data_ = asm_wasm_data;
}
+ FeedbackVectorSpec* feedback_vector_spec() { return &feedback_vector_spec_; }
+
bool has_context() const;
Context* context() const;
@@ -187,36 +211,26 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void ReopenHandlesInNewHandleScope();
void AbortOptimization(BailoutReason reason) {
- DCHECK(reason != kNoReason);
+ DCHECK_NE(reason, kNoReason);
if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
SetFlag(kDisableFutureOptimization);
}
void RetryOptimization(BailoutReason reason) {
- DCHECK(reason != kNoReason);
+ DCHECK_NE(reason, kNoReason);
if (GetFlag(kDisableFutureOptimization)) return;
bailout_reason_ = reason;
}
BailoutReason bailout_reason() const { return bailout_reason_; }
- CompilationDependencies* dependencies() { return &dependencies_; }
+ CompilationDependencies* dependencies() { return dependencies_.get(); }
int optimization_id() const {
DCHECK(IsOptimizing());
return optimization_id_;
}
- int osr_expr_stack_height() {
- DCHECK_GE(osr_expr_stack_height_, 0);
- return osr_expr_stack_height_;
- }
- void set_osr_expr_stack_height(int height) {
- DCHECK_EQ(osr_expr_stack_height_, -1);
- osr_expr_stack_height_ = height;
- DCHECK_GE(osr_expr_stack_height_, 0);
- }
-
bool has_simple_parameters();
struct InlinedFunctionHolder {
@@ -229,7 +243,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
: shared_info(inlined_shared_info) {
position.position = pos;
// initialized when generating the deoptimization literals
- position.inlined_function_id = DeoptimizationInputData::kNotInlinedIndex;
+ position.inlined_function_id = DeoptimizationData::kNotInlinedIndex;
}
void RegisterInlinedFunctionId(size_t inlined_function_id) {
@@ -258,6 +272,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
coverage_info_ = coverage_info;
}
+ WasmCodeDesc* wasm_code_desc() { return &wasm_code_desc_; }
+
private:
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
@@ -265,7 +281,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
enum Mode { BASE, OPTIMIZE, STUB };
CompilationInfo(Vector<const char> debug_name, Code::Kind code_kind,
- Mode mode, Isolate* isolate, Zone* zone);
+ Mode mode, Zone* zone);
void SetMode(Mode mode) { mode_ = mode; }
@@ -277,13 +293,14 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
- Isolate* isolate_;
FunctionLiteral* literal_;
SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
unsigned flags_;
Code::Kind code_kind_;
+ uint32_t stub_key_;
+ int32_t builtin_index_;
Handle<SharedFunctionInfo> shared_info_;
@@ -291,6 +308,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// The compiled code.
Handle<Code> code_;
+ WasmCodeDesc wasm_code_desc_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
@@ -304,6 +322,9 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// Holds the asm_wasm array generated by the asmjs compiler.
Handle<FixedArray> asm_wasm_data_;
+ // Holds the feedback vector spec generated during compilation
+ FeedbackVectorSpec feedback_vector_spec_;
+
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
@@ -311,7 +332,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
std::shared_ptr<DeferredHandles> deferred_handles_;
// Dependencies for this compilation, e.g. stable maps.
- CompilationDependencies dependencies_;
+ std::unique_ptr<CompilationDependencies> dependencies_;
BailoutReason bailout_reason_;
@@ -322,8 +343,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
int optimization_id_;
- int osr_expr_stack_height_;
-
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index 3d87b600db..9e46556fae 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -11,7 +11,7 @@ namespace internal {
const UnoptimizedCompileJob* CompilerDispatcherJob::AsUnoptimizedCompileJob()
const {
- DCHECK_EQ(type(), kUnoptimizedCompile);
+ DCHECK_EQ(type(), Type::kUnoptimizedCompile);
return static_cast<const UnoptimizedCompileJob*>(this);
}
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index d07ff843de..aed4960119 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -17,42 +17,73 @@ class UnoptimizedCompileJob;
class V8_EXPORT_PRIVATE CompilerDispatcherJob {
public:
- enum Type { kUnoptimizedCompile };
+ enum class Type { kUnoptimizedCompile };
+
+ enum class Status {
+ kInitial,
+ kPrepared,
+ kCompiled,
+ kHasErrorsToReport,
+ kDone,
+ kFailed,
+ };
+
+ CompilerDispatcherJob(Type type) : type_(type), status_(Status::kInitial) {}
virtual ~CompilerDispatcherJob() {}
- virtual Type type() const = 0;
+ Type type() const { return type_; }
+
+ // Returns the current status of the compile
+ Status status() const { return status_; }
// Returns true if this CompilerDispatcherJob has finished (either with a
// success or a failure).
- virtual bool IsFinished() = 0;
+ bool IsFinished() const {
+ return status() == Status::kDone || status() == Status::kFailed;
+ }
// Returns true if this CompilerDispatcherJob has failed.
- virtual bool IsFailed() = 0;
+ bool IsFailed() const { return status() == Status::kFailed; }
- // Return true if the next step can be run on any thread, that is when both
- // StepNextOnMainThread and StepNextOnBackgroundThread could be used for the
- // next step.
- virtual bool CanStepNextOnAnyThread() = 0;
+ // Return true if the next step can be run on any thread.
+ bool NextStepCanRunOnAnyThread() const {
+ return status() == Status::kPrepared;
+ }
+
+ // Casts to implementations.
+ const UnoptimizedCompileJob* AsUnoptimizedCompileJob() const;
- // Step the job forward by one state on the main thread.
- virtual void StepNextOnMainThread(Isolate* isolate) = 0;
+ // Transition from kInitial to kPrepared. Must only be invoked on the
+ // main thread.
+ virtual void PrepareOnMainThread(Isolate* isolate) = 0;
- // Step the job forward by one state on a background thread.
- virtual void StepNextOnBackgroundThread() = 0;
+ // Transition from kPrepared to kCompiled (or kReportErrors).
+ virtual void Compile(bool on_background_thread) = 0;
- // Transition from any state to kInitial and free all resources.
+ // Transition from kCompiled to kDone (or kFailed). Must only be invoked on
+ // the main thread.
+ virtual void FinalizeOnMainThread(Isolate* isolate) = 0;
+
+ // Transition from kReportErrors to kFailed. Must only be invoked on the main
+ // thread.
+ virtual void ReportErrorsOnMainThread(Isolate* isolate) = 0;
+
+ // Free all resources. Must only be invoked on the main thread.
virtual void ResetOnMainThread(Isolate* isolate) = 0;
// Estimate how long the next step will take using the tracer.
virtual double EstimateRuntimeOfNextStepInMs() const = 0;
- // Even though the name does not imply this, ShortPrint() must only be invoked
- // on the main thread.
+ // Print short description of job. Must only be invoked on the main thread.
virtual void ShortPrintOnMainThread() = 0;
- // Casts to implementations.
- const UnoptimizedCompileJob* AsUnoptimizedCompileJob() const;
+ protected:
+ void set_status(Status status) { status_ = status; }
+
+ private:
+ Type type_;
+ Status status_;
};
} // namespace internal
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
index 481c81ab74..862efda83e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -30,26 +30,14 @@ CompilerDispatcherTracer::Scope::Scope(CompilerDispatcherTracer* tracer,
CompilerDispatcherTracer::Scope::~Scope() {
double elapsed = MonotonicallyIncreasingTimeInMs() - start_time_;
switch (scope_id_) {
- case ScopeID::kPrepareToParse:
- tracer_->RecordPrepareToParse(elapsed);
- break;
- case ScopeID::kParse:
- tracer_->RecordParse(elapsed, num_);
- break;
- case ScopeID::kFinalizeParsing:
- tracer_->RecordFinalizeParsing(elapsed);
- break;
- case ScopeID::kAnalyze:
- tracer_->RecordAnalyze(elapsed);
- break;
- case ScopeID::kPrepareToCompile:
- tracer_->RecordPrepareToCompile(elapsed);
+ case ScopeID::kPrepare:
+ tracer_->RecordPrepare(elapsed);
break;
case ScopeID::kCompile:
- tracer_->RecordCompile(elapsed);
+ tracer_->RecordCompile(elapsed, num_);
break;
- case ScopeID::kFinalizeCompiling:
- tracer_->RecordFinalizeCompiling(elapsed);
+ case ScopeID::kFinalize:
+ tracer_->RecordFinalize(elapsed);
break;
}
}
@@ -57,20 +45,12 @@ CompilerDispatcherTracer::Scope::~Scope() {
// static
const char* CompilerDispatcherTracer::Scope::Name(ScopeID scope_id) {
switch (scope_id) {
- case ScopeID::kPrepareToParse:
- return "V8.BackgroundCompile_PrepareToParse";
- case ScopeID::kParse:
- return "V8.BackgroundCompile_Parse";
- case ScopeID::kFinalizeParsing:
- return "V8.BackgroundCompile_FinalizeParsing";
- case ScopeID::kAnalyze:
- return "V8.BackgroundCompile_Analyze";
- case ScopeID::kPrepareToCompile:
- return "V8.BackgroundCompile_PrepareToCompile";
+ case ScopeID::kPrepare:
+ return "V8.BackgroundCompile_Prepare";
case ScopeID::kCompile:
return "V8.BackgroundCompile_Compile";
- case ScopeID::kFinalizeCompiling:
- return "V8.BackgroundCompile_FinalizeCompiling";
+ case ScopeID::kFinalize:
+ return "V8.BackgroundCompile_Finalize";
}
UNREACHABLE();
}
@@ -85,87 +65,44 @@ CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate)
CompilerDispatcherTracer::~CompilerDispatcherTracer() {}
-void CompilerDispatcherTracer::RecordPrepareToParse(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- prepare_parse_events_.Push(duration_ms);
-}
-
-void CompilerDispatcherTracer::RecordParse(double duration_ms,
- size_t source_length) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- parse_events_.Push(std::make_pair(source_length, duration_ms));
-}
-
-void CompilerDispatcherTracer::RecordFinalizeParsing(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- finalize_parsing_events_.Push(duration_ms);
-}
-
-void CompilerDispatcherTracer::RecordAnalyze(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- analyze_events_.Push(duration_ms);
-}
-
-void CompilerDispatcherTracer::RecordPrepareToCompile(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- prepare_compile_events_.Push(duration_ms);
-}
-
-void CompilerDispatcherTracer::RecordCompile(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- compile_events_.Push(duration_ms);
-}
-
-void CompilerDispatcherTracer::RecordFinalizeCompiling(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- finalize_compiling_events_.Push(duration_ms);
-}
-
-double CompilerDispatcherTracer::EstimatePrepareToParseInMs() const {
- base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(prepare_parse_events_);
-}
-
-double CompilerDispatcherTracer::EstimateParseInMs(size_t source_length) const {
+void CompilerDispatcherTracer::RecordPrepare(double duration_ms) {
base::LockGuard<base::Mutex> lock(&mutex_);
- return Estimate(parse_events_, source_length);
+ prepare_events_.Push(duration_ms);
}
-double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() const {
+void CompilerDispatcherTracer::RecordCompile(double duration_ms,
+ size_t source_length) {
base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(finalize_parsing_events_);
+ compile_events_.Push(std::make_pair(source_length, duration_ms));
}
-double CompilerDispatcherTracer::EstimateAnalyzeInMs() const {
+void CompilerDispatcherTracer::RecordFinalize(double duration_ms) {
base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(analyze_events_);
+ finalize_events_.Push(duration_ms);
}
-double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() const {
+double CompilerDispatcherTracer::EstimatePrepareInMs() const {
base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(prepare_compile_events_);
+ return Average(prepare_events_);
}
-double CompilerDispatcherTracer::EstimateCompileInMs() const {
+double CompilerDispatcherTracer::EstimateCompileInMs(
+ size_t source_length) const {
base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(compile_events_);
+ return Estimate(compile_events_, source_length);
}
-double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() const {
+double CompilerDispatcherTracer::EstimateFinalizeInMs() const {
base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(finalize_compiling_events_);
+ return Average(finalize_events_);
}
void CompilerDispatcherTracer::DumpStatistics() const {
PrintF(
"CompilerDispatcherTracer: "
- "prepare_parsing=%.2lfms parsing=%.2lfms/kb finalize_parsing=%.2lfms "
- "analyze=%.2lfms prepare_compiling=%.2lfms compiling=%.2lfms/kb "
- "finalize_compiling=%.2lfms\n",
- EstimatePrepareToParseInMs(), EstimateParseInMs(1 * KB),
- EstimateFinalizeParsingInMs(), EstimateAnalyzeInMs(),
- EstimatePrepareToCompileInMs(), EstimateCompileInMs(),
- EstimateFinalizeCompilingInMs());
+ "prepare=%.2lfms compiling=%.2lfms/kb finalize=%.2lfms\n",
+ EstimatePrepareInMs(), EstimateCompileInMs(1 * KB),
+ EstimateFinalizeInMs());
}
double CompilerDispatcherTracer::Average(
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
index 19a6fd9899..3043e07d72 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
@@ -31,15 +31,7 @@ class RuntimeCallStats;
class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
public:
- enum class ScopeID {
- kPrepareToParse,
- kParse,
- kFinalizeParsing,
- kAnalyze,
- kPrepareToCompile,
- kCompile,
- kFinalizeCompiling
- };
+ enum class ScopeID { kPrepare, kCompile, kFinalize };
class Scope {
public:
@@ -60,21 +52,13 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
explicit CompilerDispatcherTracer(Isolate* isolate);
~CompilerDispatcherTracer();
- void RecordPrepareToParse(double duration_ms);
- void RecordParse(double duration_ms, size_t source_length);
- void RecordFinalizeParsing(double duration_ms);
- void RecordAnalyze(double duration_ms);
- void RecordPrepareToCompile(double duration_ms);
- void RecordCompile(double duration_ms);
- void RecordFinalizeCompiling(double duration_ms);
-
- double EstimatePrepareToParseInMs() const;
- double EstimateParseInMs(size_t source_length) const;
- double EstimateFinalizeParsingInMs() const;
- double EstimateAnalyzeInMs() const;
- double EstimatePrepareToCompileInMs() const;
- double EstimateCompileInMs() const;
- double EstimateFinalizeCompilingInMs() const;
+ void RecordPrepare(double duration_ms);
+ void RecordCompile(double duration_ms, size_t source_length);
+ void RecordFinalize(double duration_ms);
+
+ double EstimatePrepareInMs() const;
+ double EstimateCompileInMs(size_t source_length) const;
+ double EstimateFinalizeInMs() const;
void DumpStatistics() const;
@@ -84,13 +68,9 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num);
mutable base::Mutex mutex_;
- base::RingBuffer<double> prepare_parse_events_;
- base::RingBuffer<std::pair<size_t, double>> parse_events_;
- base::RingBuffer<double> finalize_parsing_events_;
- base::RingBuffer<double> analyze_events_;
- base::RingBuffer<double> prepare_compile_events_;
- base::RingBuffer<double> compile_events_;
- base::RingBuffer<double> finalize_compiling_events_;
+ base::RingBuffer<double> prepare_events_;
+ base::RingBuffer<std::pair<size_t, double>> compile_events_;
+ base::RingBuffer<double> finalize_events_;
RuntimeCallStats* runtime_call_stats_;
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index cb1a70b1fa..e365e301d1 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -27,7 +27,23 @@ bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherForgroundStep");
- job->StepNextOnMainThread(isolate);
+ switch (job->status()) {
+ case CompilerDispatcherJob::Status::kInitial:
+ job->PrepareOnMainThread(isolate);
+ break;
+ case CompilerDispatcherJob::Status::kPrepared:
+ job->Compile(false);
+ break;
+ case CompilerDispatcherJob::Status::kCompiled:
+ job->FinalizeOnMainThread(isolate);
+ break;
+ case CompilerDispatcherJob::Status::kHasErrorsToReport:
+ job->ReportErrorsOnMainThread(isolate);
+ break;
+ case CompilerDispatcherJob::Status::kFailed:
+ case CompilerDispatcherJob::Status::kDone:
+ UNREACHABLE();
+ }
DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
@@ -37,10 +53,16 @@ bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
}
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
- DCHECK(job->CanStepNextOnAnyThread());
+ DCHECK(job->NextStepCanRunOnAnyThread());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherBackgroundStep");
- job->StepNextOnBackgroundThread();
+ switch (job->status()) {
+ case CompilerDispatcherJob::Status::kPrepared:
+ job->Compile(true);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
// Theoretically we get 50ms of idle time max, however it's unlikely that
@@ -484,7 +506,7 @@ void CompilerDispatcher::ScheduleAbortTask() {
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
CompilerDispatcherJob* job) {
- if (!job->CanStepNextOnAnyThread()) return;
+ if (!job->NextStepCanRunOnAnyThread()) return;
{
base::LockGuard<base::Mutex> lock(&mutex_);
pending_background_jobs_.insert(job);
@@ -667,7 +689,8 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
CompilerDispatcherJob* inserted_job = it->second.get();
// Maps unoptimized jobs' SFIs to their job id.
- if (inserted_job->type() == CompilerDispatcherJob::kUnoptimizedCompile) {
+ if (inserted_job->type() ==
+ CompilerDispatcherJob::Type::kUnoptimizedCompile) {
Handle<SharedFunctionInfo> shared =
inserted_job->AsUnoptimizedCompileJob()->shared();
if (!shared.is_null()) {
@@ -684,7 +707,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
job->ResetOnMainThread(isolate_);
// Unmaps unoptimized jobs' SFIs to their job id.
- if (job->type() == CompilerDispatcherJob::kUnoptimizedCompile) {
+ if (job->type() == CompilerDispatcherJob::Type::kUnoptimizedCompile) {
Handle<SharedFunctionInfo> shared =
job->AsUnoptimizedCompileJob()->shared();
if (!shared.is_null()) {
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 2de39c0ac4..59872b2535 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -5,6 +5,7 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/base/atomicops.h"
+#include "src/cancelable-task.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/isolate.h"
@@ -34,11 +35,11 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
} // namespace
-class OptimizingCompileDispatcher::CompileTask : public v8::Task {
+class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
public:
explicit CompileTask(Isolate* isolate,
OptimizingCompileDispatcher* dispatcher)
- : isolate_(isolate), dispatcher_(dispatcher) {
+ : CancelableTask(isolate), isolate_(isolate), dispatcher_(dispatcher) {
base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
++dispatcher_->ref_count_;
}
@@ -47,7 +48,7 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
private:
// v8::Task overrides.
- void Run() override {
+ void RunInternal() override {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -92,7 +93,7 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
- if (input_queue_length_ == 0) return NULL;
+ if (input_queue_length_ == 0) return nullptr;
CompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
@@ -101,7 +102,7 @@ CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
AllowHandleDereference allow_handle_dereference;
DisposeCompilationJob(job, true);
- return NULL;
+ return nullptr;
}
}
return job;
@@ -124,7 +125,7 @@ void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
for (;;) {
- CompilationJob* job = NULL;
+ CompilationJob* job = nullptr;
{
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
@@ -189,7 +190,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
for (;;) {
- CompilationJob* job = NULL;
+ CompilationJob* job = nullptr;
{
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
@@ -206,7 +207,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
DisposeCompilationJob(job, false);
} else {
- Compiler::FinalizeCompilationJob(job);
+ Compiler::FinalizeCompilationJob(job, isolate_);
}
}
}
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index ca7e12e402..74b2352bd8 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -11,6 +11,7 @@
#include "src/compiler.h"
#include "src/flags.h"
#include "src/global-handles.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
@@ -66,14 +67,18 @@ UnoptimizedCompileJob::UnoptimizedCompileJob(Isolate* isolate,
CompilerDispatcherTracer* tracer,
Handle<SharedFunctionInfo> shared,
size_t max_stack_size)
- : status_(Status::kInitial),
+ : CompilerDispatcherJob(Type::kUnoptimizedCompile),
main_thread_id_(isolate->thread_id().ToInteger()),
tracer_(tracer),
+ allocator_(isolate->allocator()),
context_(isolate->global_handles()->Create(isolate->context())),
shared_(isolate->global_handles()->Create(*shared)),
max_stack_size_(max_stack_size),
trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
DCHECK(!shared_->is_toplevel());
+ // TODO(rmcilroy): Handle functions with non-empty outer scope info.
+ DCHECK(shared_->outer_scope_info()->IsTheHole(isolate) ||
+ ScopeInfo::cast(shared_->outer_scope_info())->length() == 0);
HandleScope scope(isolate);
Handle<Script> script(Script::cast(shared_->script()), isolate);
Handle<String> source(String::cast(script->source()), isolate);
@@ -85,8 +90,7 @@ UnoptimizedCompileJob::UnoptimizedCompileJob(Isolate* isolate,
}
UnoptimizedCompileJob::~UnoptimizedCompileJob() {
- DCHECK(status_ == Status::kInitial ||
- status_ == Status::kDone);
+ DCHECK(status() == Status::kInitial || status() == Status::kDone);
if (!shared_.is_null()) {
DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
@@ -102,71 +106,17 @@ bool UnoptimizedCompileJob::IsAssociatedWith(
return *shared_ == *shared;
}
-void UnoptimizedCompileJob::StepNextOnMainThread(Isolate* isolate) {
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
-
- // Ensure we are in the correct context for the job.
- SaveContext save(isolate);
- if (has_context()) {
- isolate->set_context(context());
- } else {
- // Phases which can run off the main thread by definition can't execute any
- // JS code, and so we don't need to enter their context.
- DCHECK(CanStepNextOnAnyThread());
- }
-
- switch (status()) {
- case Status::kInitial:
- return PrepareToParseOnMainThread(isolate);
-
- case Status::kReadyToParse:
- return Parse();
-
- case Status::kParsed:
- return FinalizeParsingOnMainThread(isolate);
-
- case Status::kReadyToAnalyze:
- return AnalyzeOnMainThread(isolate);
-
- case Status::kAnalyzed:
- return PrepareToCompileOnMainThread(isolate);
-
- case Status::kReadyToCompile:
- return Compile();
-
- case Status::kCompiled:
- return FinalizeCompilingOnMainThread(isolate);
-
- case Status::kFailed:
- case Status::kDone:
- return;
- }
- UNREACHABLE();
-}
-
-void UnoptimizedCompileJob::StepNextOnBackgroundThread() {
- DCHECK(CanStepNextOnAnyThread());
- switch (status()) {
- case Status::kReadyToParse:
- return Parse();
-
- case Status::kReadyToCompile:
- return Compile();
-
- default:
- UNREACHABLE();
- }
-}
-
-void UnoptimizedCompileJob::PrepareToParseOnMainThread(Isolate* isolate) {
+void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK(status() == Status::kInitial);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToParse);
+ DCHECK_EQ(status(), Status::kInitial);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepare);
+
if (trace_compiler_dispatcher_jobs_) {
PrintF("UnoptimizedCompileJob[%p]: Preparing to parse\n",
static_cast<void*>(this));
}
+
HandleScope scope(isolate);
unicode_cache_.reset(new UnicodeCache());
Handle<Script> script(Script::cast(shared_->script()), isolate);
@@ -266,142 +216,76 @@ void UnoptimizedCompileJob::PrepareToParseOnMainThread(Isolate* isolate) {
Handle<String> name(shared_->name());
parse_info_->set_function_name(
parse_info_->ast_value_factory()->GetString(name));
- status_ = Status::kReadyToParse;
+ set_status(Status::kPrepared);
}
-void UnoptimizedCompileJob::Parse() {
- DCHECK(status() == Status::kReadyToParse);
+void UnoptimizedCompileJob::Compile(bool on_background_thread) {
+ DCHECK_EQ(status(), Status::kPrepared);
COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
- tracer_, kParse,
+ tracer_, kCompile,
parse_info_->end_position() - parse_info_->start_position());
if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Parsing\n", static_cast<void*>(this));
+ PrintF("UnoptimizedCompileJob[%p]: Compiling\n", static_cast<void*>(this));
}
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
+ parse_info_->set_on_background_thread(on_background_thread);
uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
-
parser_->set_stack_limit(stack_limit);
+ parse_info_->set_stack_limit(stack_limit);
parser_->ParseOnBackground(parse_info_.get());
- status_ = Status::kParsed;
-}
-
-void UnoptimizedCompileJob::FinalizeParsingOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK(status() == Status::kParsed);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeParsing);
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Finalizing parsing\n",
- static_cast<void*>(this));
- }
-
- if (!source_.is_null()) {
- i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
- source_ = Handle<String>::null();
- }
- if (!wrapper_.is_null()) {
- i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
- wrapper_ = Handle<String>::null();
- }
-
- Handle<Script> script(Script::cast(shared_->script()), isolate);
- parse_info_->set_script(script);
-
- if (!shared_->outer_scope_info()->IsTheHole(isolate) &&
- ScopeInfo::cast(shared_->outer_scope_info())->length() > 0) {
- Handle<ScopeInfo> outer_scope_info(
- handle(ScopeInfo::cast(shared_->outer_scope_info())));
- parse_info_->set_outer_scope_info(outer_scope_info);
- }
if (parse_info_->literal() == nullptr) {
- parser_->ReportErrors(isolate, script);
- status_ = Status::kFailed;
- } else {
- parse_info_->literal()->scope()->AttachOuterScopeInfo(parse_info_.get(),
- isolate);
- status_ = Status::kReadyToAnalyze;
- }
- parser_->UpdateStatistics(isolate, script);
- parse_info_->UpdateStatisticsAfterBackgroundParse(isolate);
-
- parser_->HandleSourceURLComments(isolate, script);
-
- parse_info_->set_unicode_cache(nullptr);
- parser_.reset();
- unicode_cache_.reset();
-}
-
-void UnoptimizedCompileJob::AnalyzeOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK(status() == Status::kReadyToAnalyze);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kAnalyze);
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Analyzing\n", static_cast<void*>(this));
+ // Parser sets error in pending error handler.
+ set_status(Status::kHasErrorsToReport);
+ return;
}
- if (Compiler::Analyze(parse_info_.get())) {
- status_ = Status::kAnalyzed;
- } else {
- status_ = Status::kFailed;
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ if (!Compiler::Analyze(parse_info_.get())) {
+ parse_info_->pending_error_handler()->set_stack_overflow();
+ set_status(Status::kHasErrorsToReport);
+ return;
}
-}
-void UnoptimizedCompileJob::PrepareToCompileOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK(status() == Status::kAnalyzed);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
+ compilation_job_.reset(interpreter::Interpreter::NewCompilationJob(
+ parse_info_.get(), parse_info_->literal(), allocator_));
- compilation_job_.reset(
- Compiler::PrepareUnoptimizedCompilationJob(parse_info_.get(), isolate));
if (!compilation_job_.get()) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- status_ = Status::kFailed;
+ parse_info_->pending_error_handler()->set_stack_overflow();
+ set_status(Status::kHasErrorsToReport);
return;
}
- CHECK(compilation_job_->can_execute_on_background_thread());
- status_ = Status::kReadyToCompile;
-}
-
-void UnoptimizedCompileJob::Compile() {
- DCHECK(status() == Status::kReadyToCompile);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kCompile);
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Compiling\n", static_cast<void*>(this));
+ if (compilation_job_->ExecuteJob() != CompilationJob::SUCCEEDED) {
+ parse_info_->pending_error_handler()->set_stack_overflow();
+ set_status(Status::kHasErrorsToReport);
+ return;
}
- // Disallowing of handle dereference and heap access dealt with in
- // CompilationJob::ExecuteJob.
-
- uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
- compilation_job_->set_stack_limit(stack_limit);
-
- CompilationJob::Status status = compilation_job_->ExecuteJob();
- USE(status);
-
- // Always transition to kCompiled - errors will be reported by
- // FinalizeCompilingOnMainThread.
- status_ = Status::kCompiled;
+ set_status(Status::kCompiled);
}
-void UnoptimizedCompileJob::FinalizeCompilingOnMainThread(Isolate* isolate) {
+void UnoptimizedCompileJob::FinalizeOnMainThread(Isolate* isolate) {
DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK(status() == Status::kCompiled);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeCompiling);
+ DCHECK_EQ(status(), Status::kCompiled);
+ DCHECK_NOT_NULL(parse_info_->literal());
+ DCHECK_NOT_NULL(compilation_job_.get());
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalize);
if (trace_compiler_dispatcher_jobs_) {
PrintF("UnoptimizedCompileJob[%p]: Finalizing compiling\n",
static_cast<void*>(this));
}
+ Handle<Script> script(Script::cast(shared_->script()), isolate);
+ parse_info_->set_script(script);
+ parser_->UpdateStatistics(isolate, script);
+ parse_info_->UpdateBackgroundParseStatisticsOnMainThread(isolate);
+ parser_->HandleSourceURLComments(isolate, script);
+
{
HandleScope scope(isolate);
// Internalize ast values onto the heap.
@@ -411,24 +295,44 @@ void UnoptimizedCompileJob::FinalizeCompilingOnMainThread(Isolate* isolate) {
AnalyzeMode::kRegular);
compilation_job_->compilation_info()->set_shared_info(shared_);
if (compilation_job_->state() == CompilationJob::State::kFailed ||
- !Compiler::FinalizeCompilationJob(compilation_job_.release())) {
+ !Compiler::FinalizeCompilationJob(compilation_job_.release(),
+ isolate)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
- status_ = Status::kFailed;
+ set_status(Status::kFailed);
return;
}
}
- compilation_job_.reset();
- parse_info_.reset();
-
- status_ = Status::kDone;
+ ResetDataOnMainThread(isolate);
+ set_status(Status::kDone);
}
-void UnoptimizedCompileJob::ResetOnMainThread(Isolate* isolate) {
+void UnoptimizedCompileJob::ReportErrorsOnMainThread(Isolate* isolate) {
+ DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
+ DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
+ DCHECK_EQ(status(), Status::kHasErrorsToReport);
+
if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Resetting\n", static_cast<void*>(this));
+ PrintF("UnoptimizedCompileJob[%p]: Reporting Errors\n",
+ static_cast<void*>(this));
}
+ // Ensure we report errors in the correct context for the job.
+ SaveContext save(isolate);
+ isolate->set_context(context());
+
+ Handle<Script> script(Script::cast(shared_->script()), isolate);
+ parse_info_->pending_error_handler()->ReportErrors(
+ isolate, script, parse_info_->ast_value_factory());
+
+ ResetDataOnMainThread(isolate);
+ set_status(Status::kFailed);
+}
+
+void UnoptimizedCompileJob::ResetDataOnMainThread(Isolate* isolate) {
+ DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
+ DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
+
compilation_job_.reset();
parser_.reset();
unicode_cache_.reset();
@@ -446,34 +350,28 @@ void UnoptimizedCompileJob::ResetOnMainThread(Isolate* isolate) {
i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
wrapper_ = Handle<String>::null();
}
+}
+
+void UnoptimizedCompileJob::ResetOnMainThread(Isolate* isolate) {
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("UnoptimizedCompileJob[%p]: Resetting\n", static_cast<void*>(this));
+ }
- status_ = Status::kInitial;
+ ResetDataOnMainThread(isolate);
+ set_status(Status::kInitial);
}
double UnoptimizedCompileJob::EstimateRuntimeOfNextStepInMs() const {
switch (status()) {
case Status::kInitial:
- return tracer_->EstimatePrepareToParseInMs();
-
- case Status::kReadyToParse:
- return tracer_->EstimateParseInMs(parse_info_->end_position() -
- parse_info_->start_position());
-
- case Status::kParsed:
- return tracer_->EstimateFinalizeParsingInMs();
-
- case Status::kReadyToAnalyze:
- return tracer_->EstimateAnalyzeInMs();
-
- case Status::kAnalyzed:
- return tracer_->EstimatePrepareToCompileInMs();
-
- case Status::kReadyToCompile:
- return tracer_->EstimateCompileInMs();
-
+ return tracer_->EstimatePrepareInMs();
+ case Status::kPrepared:
+ return tracer_->EstimateCompileInMs(parse_info_->end_position() -
+ parse_info_->start_position());
case Status::kCompiled:
- return tracer_->EstimateFinalizeCompilingInMs();
+ return tracer_->EstimateFinalizeInMs();
+ case Status::kHasErrorsToReport:
case Status::kFailed:
case Status::kDone:
return 0.0;
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
index 3ebfa12a0f..153c8ba26d 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
@@ -32,72 +32,38 @@ class Utf16CharacterStream;
class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
public:
- enum class Status {
- kInitial,
- kReadyToParse,
- kParsed,
- kReadyToAnalyze,
- kAnalyzed,
- kReadyToCompile,
- kCompiled,
- kDone,
- kFailed,
- };
-
// Creates a UnoptimizedCompileJob in the initial state.
UnoptimizedCompileJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
Handle<SharedFunctionInfo> shared,
size_t max_stack_size);
~UnoptimizedCompileJob() override;
- Type type() const override { return kUnoptimizedCompile; }
-
Handle<SharedFunctionInfo> shared() const { return shared_; }
// Returns true if this UnoptimizedCompileJob was created for the given
// function.
bool IsAssociatedWith(Handle<SharedFunctionInfo> shared) const;
- bool IsFinished() override {
- return status() == Status::kDone || status() == Status::kFailed;
- }
-
- bool IsFailed() override { return status() == Status::kFailed; }
-
- // Return true if the next step can be run on any thread, that is when both
- // StepNextOnMainThread and StepNextOnBackgroundThread could be used for the
- // next step.
- bool CanStepNextOnAnyThread() override {
- return status() == Status::kReadyToParse ||
- status() == Status::kReadyToCompile;
- }
-
- // Step the job forward by one state on the main thread.
- void StepNextOnMainThread(Isolate* isolate) override;
-
- // Step the job forward by one state on a background thread.
- void StepNextOnBackgroundThread() override;
-
- // Transition from any state to kInitial and free all resources.
+ // CompilerDispatcherJob implementation.
+ void PrepareOnMainThread(Isolate* isolate) override;
+ void Compile(bool on_background_thread) override;
+ void FinalizeOnMainThread(Isolate* isolate) override;
+ void ReportErrorsOnMainThread(Isolate* isolate) override;
void ResetOnMainThread(Isolate* isolate) override;
-
- // Estimate how long the next step will take using the tracer.
double EstimateRuntimeOfNextStepInMs() const override;
-
void ShortPrintOnMainThread() override;
private:
friend class CompilerDispatcherTest;
friend class UnoptimizedCompileJobTest;
- bool has_context() const { return !context_.is_null(); }
- Context* context() { return *context_; }
+ void ResetDataOnMainThread(Isolate* isolate);
- Status status() const { return status_; }
+ Context* context() { return *context_; }
- Status status_;
int main_thread_id_;
CompilerDispatcherTracer* tracer_;
+ AccountingAllocator* allocator_;
Handle<Context> context_; // Global handle.
Handle<SharedFunctionInfo> shared_; // Global handle.
Handle<String> source_; // Global handle.
@@ -115,27 +81,6 @@ class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
bool trace_compiler_dispatcher_jobs_;
- // Transition from kInitial to kReadyToParse.
- void PrepareToParseOnMainThread(Isolate* isolate);
-
- // Transition from kReadyToParse to kParsed.
- void Parse();
-
- // Transition from kParsed to kReadyToAnalyze (or kFailed).
- void FinalizeParsingOnMainThread(Isolate* isolate);
-
- // Transition from kReadyToAnalyze to kAnalyzed (or kFailed).
- void AnalyzeOnMainThread(Isolate* isolate);
-
- // Transition from kAnalyzed to kReadyToCompile (or kFailed).
- void PrepareToCompileOnMainThread(Isolate* isolate);
-
- // Transition from kReadyToCompile to kCompiled.
- void Compile();
-
- // Transition from kCompiled to kDone (or kFailed).
- void FinalizeCompilingOnMainThread(Isolate* isolate);
-
DISALLOW_COPY_AND_ASSIGN(UnoptimizedCompileJob);
};
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index b1eaf448c1..e508f5a5a7 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -15,7 +15,6 @@
#include "src/ast/scopes.h"
#include "src/base/optional.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
@@ -47,8 +46,8 @@ namespace internal {
// destruction.
class CompilationHandleScope final {
public:
- explicit CompilationHandleScope(CompilationInfo* info)
- : deferred_(info->isolate()), info_(info) {}
+ explicit CompilationHandleScope(Isolate* isolate, CompilationInfo* info)
+ : deferred_(isolate), info_(info) {}
~CompilationHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
private:
@@ -59,7 +58,7 @@ class CompilationHandleScope final {
// Helper that times a scoped region and records the elapsed time.
struct ScopedTimer {
explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
- DCHECK(location_ != NULL);
+ DCHECK_NOT_NULL(location_);
timer_.Start();
}
@@ -72,21 +71,18 @@ struct ScopedTimer {
// ----------------------------------------------------------------------------
// Implementation of CompilationJob
-CompilationJob::CompilationJob(Isolate* isolate, ParseInfo* parse_info,
+CompilationJob::CompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
CompilationInfo* compilation_info,
const char* compiler_name, State initial_state)
: parse_info_(parse_info),
compilation_info_(compilation_info),
- isolate_thread_id_(isolate->thread_id()),
compiler_name_(compiler_name),
state_(initial_state),
- stack_limit_(isolate->stack_guard()->real_climit()),
- executed_on_background_thread_(false) {}
+ stack_limit_(stack_limit) {}
-CompilationJob::Status CompilationJob::PrepareJob() {
- DCHECK(
- ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
- DisallowJavascriptExecution no_js(isolate());
+CompilationJob::Status CompilationJob::PrepareJob(Isolate* isolate) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+ DisallowJavascriptExecution no_js(isolate);
if (FLAG_trace_opt && compilation_info()->IsOptimizing()) {
OFStream os(stdout);
@@ -97,44 +93,34 @@ CompilationJob::Status CompilationJob::PrepareJob() {
}
// Delegate to the underlying implementation.
- DCHECK(state() == State::kReadyToPrepare);
+ DCHECK_EQ(state(), State::kReadyToPrepare);
ScopedTimer t(&time_taken_to_prepare_);
- return UpdateState(PrepareJobImpl(), State::kReadyToExecute);
+ return UpdateState(PrepareJobImpl(isolate), State::kReadyToExecute);
}
CompilationJob::Status CompilationJob::ExecuteJob() {
- base::Optional<DisallowHeapAllocation> no_allocation;
- base::Optional<DisallowHandleAllocation> no_handles;
- base::Optional<DisallowHandleDereference> no_deref;
- base::Optional<DisallowCodeDependencyChange> no_dependency_change;
- if (can_execute_on_background_thread()) {
- no_allocation.emplace();
- no_handles.emplace();
- no_deref.emplace();
- no_dependency_change.emplace();
- executed_on_background_thread_ =
- !ThreadId::Current().Equals(isolate_thread_id_);
- } else {
- DCHECK(ThreadId::Current().Equals(isolate_thread_id_));
- }
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
// Delegate to the underlying implementation.
- DCHECK(state() == State::kReadyToExecute);
+ DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
}
-CompilationJob::Status CompilationJob::FinalizeJob() {
- DCHECK(
- ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
+CompilationJob::Status CompilationJob::FinalizeJob(Isolate* isolate) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DisallowCodeDependencyChange no_dependency_change;
- DisallowJavascriptExecution no_js(isolate());
- DCHECK(!compilation_info()->dependencies()->HasAborted());
+ DisallowJavascriptExecution no_js(isolate);
+ DCHECK(!compilation_info()->dependencies() ||
+ !compilation_info()->dependencies()->HasAborted());
// Delegate to the underlying implementation.
- DCHECK(state() == State::kReadyToFinalize);
+ DCHECK_EQ(state(), State::kReadyToFinalize);
ScopedTimer t(&time_taken_to_finalize_);
- return UpdateState(FinalizeJobImpl(), State::kSucceeded);
+ return UpdateState(FinalizeJobImpl(isolate), State::kSucceeded);
}
CompilationJob::Status CompilationJob::RetryOptimization(BailoutReason reason) {
@@ -151,7 +137,7 @@ CompilationJob::Status CompilationJob::AbortOptimization(BailoutReason reason) {
return FAILED;
}
-void CompilationJob::RecordUnoptimizedCompilationStats() const {
+void CompilationJob::RecordUnoptimizedCompilationStats(Isolate* isolate) const {
int code_size;
if (compilation_info()->has_bytecode_array()) {
code_size = compilation_info()->bytecode_array()->SizeIncludingMetadata();
@@ -159,7 +145,7 @@ void CompilationJob::RecordUnoptimizedCompilationStats() const {
code_size = compilation_info()->code()->SizeIncludingMetadata();
}
- Counters* counters = isolate()->counters();
+ Counters* counters = isolate->counters();
// TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
counters->total_baseline_code_size()->Increment(code_size);
counters->total_baseline_compile_count()->Increment(1);
@@ -192,8 +178,65 @@ void CompilationJob::RecordOptimizedCompilationStats() const {
}
}
-Isolate* CompilationJob::isolate() const {
- return compilation_info()->isolate();
+void CompilationJob::RecordFunctionCompilation(
+ CodeEventListener::LogEventsAndTags tag, Isolate* isolate) const {
+ // Log the code generation. If source information is available include
+ // script name and line number. Check explicitly whether logging is
+ // enabled as finding the line number is not free.
+ CompilationInfo* compilation_info = this->compilation_info();
+ if (!isolate->logger()->is_logging_code_events() &&
+ !isolate->is_profiling() && !FLAG_log_function_events) {
+ return;
+ }
+
+ Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
+ Handle<Script> script = parse_info()->script();
+ Handle<AbstractCode> abstract_code =
+ compilation_info->has_bytecode_array()
+ ? Handle<AbstractCode>::cast(compilation_info->bytecode_array())
+ : Handle<AbstractCode>::cast(compilation_info->code());
+
+ if (abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy))) {
+ return;
+ }
+
+ int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ Script::GetColumnNumber(script, shared->start_position()) + 1;
+ String* script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : isolate->heap()->empty_string();
+ CodeEventListener::LogEventsAndTags log_tag =
+ Logger::ToNativeByScript(tag, *script);
+ PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared,
+ script_name, line_num, column_num));
+ if (!FLAG_log_function_events) return;
+
+ DisallowHeapAllocation no_gc;
+
+ double ms = time_taken_to_prepare_.InMillisecondsF();
+ ms += time_taken_to_execute_.InMillisecondsF();
+ ms += time_taken_to_finalize_.InMillisecondsF();
+
+ std::string name = compilation_info->IsOptimizing() ? "optimize" : "compile";
+ switch (tag) {
+ case CodeEventListener::EVAL_TAG:
+ name += "-eval";
+ break;
+ case CodeEventListener::SCRIPT_TAG:
+ break;
+ case CodeEventListener::LAZY_COMPILE_TAG:
+ name += "-lazy";
+ break;
+ case CodeEventListener::FUNCTION_TAG:
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ LOG(isolate, FunctionEvent(name.c_str(), nullptr, script->id(), ms,
+ shared->start_position(), shared->end_position(),
+ shared->DebugName()));
}
// ----------------------------------------------------------------------------
@@ -201,39 +244,8 @@ Isolate* CompilationJob::isolate() const {
namespace {
-void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
- Handle<Script> script,
- CompilationInfo* compilation_info) {
- // Log the code generation. If source information is available include
- // script name and line number. Check explicitly whether logging is
- // enabled as finding the line number is not free.
- if (compilation_info->isolate()->logger()->is_logging_code_events() ||
- compilation_info->isolate()->is_profiling()) {
- Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- Handle<AbstractCode> abstract_code =
- compilation_info->has_bytecode_array()
- ? Handle<AbstractCode>::cast(compilation_info->bytecode_array())
- : Handle<AbstractCode>::cast(compilation_info->code());
- if (abstract_code.is_identical_to(
- BUILTIN_CODE(compilation_info->isolate(), CompileLazy))) {
- return;
- }
- int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
- int column_num =
- Script::GetColumnNumber(script, shared->start_position()) + 1;
- String* script_name =
- script->name()->IsString()
- ? String::cast(script->name())
- : compilation_info->isolate()->heap()->empty_string();
- CodeEventListener::LogEventsAndTags log_tag =
- Logger::ToNativeByScript(tag, *script);
- PROFILE(compilation_info->isolate(),
- CodeCreateEvent(log_tag, *abstract_code, *shared, script_name,
- line_num, column_num));
- }
-}
-
-void EnsureFeedbackMetadata(CompilationInfo* compilation_info) {
+void EnsureFeedbackMetadata(CompilationInfo* compilation_info,
+ Isolate* isolate) {
DCHECK(compilation_info->has_shared_info());
// If no type feedback metadata exists, create it. At this point the
@@ -245,15 +257,14 @@ void EnsureFeedbackMetadata(CompilationInfo* compilation_info) {
if (compilation_info->shared_info()->feedback_metadata()->length() == 0 ||
!compilation_info->shared_info()->is_compiled()) {
Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
- compilation_info->isolate(),
- compilation_info->literal()->feedback_vector_spec());
+ isolate, compilation_info->feedback_vector_spec());
compilation_info->shared_info()->set_feedback_metadata(*feedback_metadata);
}
// It's very important that recompiles do not alter the structure of the type
// feedback vector. Verify that the structure fits the function literal.
CHECK(!compilation_info->shared_info()->feedback_metadata()->SpecDiffersFrom(
- compilation_info->literal()->feedback_vector_spec()));
+ compilation_info->feedback_vector_spec()));
}
bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
@@ -271,13 +282,14 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
return literal->scope()->IsAsmModule();
}
-void InstallUnoptimizedCode(CompilationInfo* compilation_info) {
+void InstallUnoptimizedCode(CompilationInfo* compilation_info,
+ Isolate* isolate) {
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
DCHECK_EQ(compilation_info->shared_info()->language_mode(),
compilation_info->literal()->language_mode());
// Ensure feedback metadata is installed.
- EnsureFeedbackMetadata(compilation_info);
+ EnsureFeedbackMetadata(compilation_info, isolate);
// Update the shared function info with the scope info.
Handle<ScopeInfo> scope_info = compilation_info->scope()->scope_info();
@@ -299,9 +311,9 @@ void InstallUnoptimizedCode(CompilationInfo* compilation_info) {
// Install coverage info on the shared function info.
if (compilation_info->has_coverage_info()) {
- DCHECK(compilation_info->isolate()->is_block_code_coverage());
- compilation_info->isolate()->debug()->InstallCoverageInfo(
- compilation_info->shared_info(), compilation_info->coverage_info());
+ DCHECK(isolate->is_block_code_coverage());
+ isolate->debug()->InstallCoverageInfo(compilation_info->shared_info(),
+ compilation_info->coverage_info());
}
}
@@ -333,16 +345,17 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
}
}
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job,
+ Isolate* isolate) {
CompilationInfo* compilation_info = job->compilation_info();
ParseInfo* parse_info = job->parse_info();
SetSharedFunctionFlagsFromLiteral(compilation_info->literal(),
compilation_info->shared_info());
- CompilationJob::Status status = job->FinalizeJob();
+ CompilationJob::Status status = job->FinalizeJob(isolate);
if (status == CompilationJob::SUCCEEDED) {
- InstallUnoptimizedCode(compilation_info);
+ InstallUnoptimizedCode(compilation_info, isolate);
CodeEventListener::LogEventsAndTags log_tag;
if (parse_info->is_toplevel()) {
log_tag = compilation_info->is_eval() ? CodeEventListener::EVAL_TAG
@@ -351,28 +364,30 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
log_tag = parse_info->lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
: CodeEventListener::FUNCTION_TAG;
}
- RecordFunctionCompilation(log_tag, parse_info->script(), compilation_info);
- job->RecordUnoptimizedCompilationStats();
+ job->RecordFunctionCompilation(log_tag, isolate);
+ job->RecordUnoptimizedCompilationStats(isolate);
}
return status;
}
bool Renumber(ParseInfo* parse_info,
Compiler::EagerInnerFunctionLiterals* eager_literals) {
- RuntimeCallTimerScope runtimeTimer(parse_info->runtime_call_stats(),
- &RuntimeCallStats::CompileRenumber);
+ RuntimeCallTimerScope runtimeTimer(
+ parse_info->runtime_call_stats(),
+ parse_info->on_background_thread()
+ ? &RuntimeCallStats::CompileBackgroundRenumber
+ : &RuntimeCallStats::CompileRenumber);
return AstNumbering::Renumber(parse_info->stack_limit(), parse_info->zone(),
- parse_info->literal(), eager_literals,
- parse_info->collect_type_profile());
+ parse_info->literal(), eager_literals);
}
std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
- ParseInfo* parse_info, FunctionLiteral* literal, Isolate* isolate) {
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator) {
if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
std::unique_ptr<CompilationJob> asm_job(
- AsmJs::NewCompilationJob(parse_info, literal, isolate));
- if (asm_job->PrepareJob() == CompilationJob::SUCCEEDED &&
- asm_job->ExecuteJob() == CompilationJob::SUCCEEDED) {
+ AsmJs::NewCompilationJob(parse_info, literal, allocator));
+ if (asm_job->ExecuteJob() == CompilationJob::SUCCEEDED) {
return asm_job;
}
// asm.js validation failed, fall through to standard unoptimized compile.
@@ -383,26 +398,22 @@ std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
}
std::unique_ptr<CompilationJob> job(
interpreter::Interpreter::NewCompilationJob(parse_info, literal,
- isolate));
- if (job->PrepareJob() == CompilationJob::SUCCEEDED &&
- job->ExecuteJob() == CompilationJob::SUCCEEDED) {
+ allocator));
+
+ if (job->ExecuteJob() == CompilationJob::SUCCEEDED) {
return job;
}
return std::unique_ptr<CompilationJob>(); // Compilation failed, return null.
}
-// TODO(rmcilroy): Remove |isolate| once CompilationJob doesn't need it.
std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
- ParseInfo* parse_info, Isolate* isolate,
- std::forward_list<std::unique_ptr<CompilationJob>>* inner_function_jobs) {
+ ParseInfo* parse_info, AccountingAllocator* allocator,
+ CompilationJobList* inner_function_jobs) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
DCHECK(inner_function_jobs->empty());
- DCHECK_IMPLIES(parse_info->consumed_preparsed_scope_data()->HasData(),
- ThreadId::Current().Equals(isolate->thread_id()));
-
Compiler::EagerInnerFunctionLiterals inner_literals;
if (!Compiler::Analyze(parse_info, &inner_literals)) {
return std::unique_ptr<CompilationJob>();
@@ -411,7 +422,7 @@ std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
// Prepare and execute compilation of the outer-most function.
std::unique_ptr<CompilationJob> outer_function_job(
PrepareAndExecuteUnoptimizedCompileJob(parse_info, parse_info->literal(),
- isolate));
+ allocator));
if (!outer_function_job) return std::unique_ptr<CompilationJob>();
// Prepare and execute compilation jobs for eager inner functions.
@@ -419,7 +430,7 @@ std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
FunctionLiteral* inner_literal = it->value();
std::unique_ptr<CompilationJob> inner_job(
PrepareAndExecuteUnoptimizedCompileJob(parse_info, inner_literal,
- isolate));
+ allocator));
if (!inner_job) return std::unique_ptr<CompilationJob>();
inner_function_jobs->emplace_front(std::move(inner_job));
}
@@ -430,10 +441,10 @@ std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
return outer_function_job;
}
-bool FinalizeUnoptimizedCode(
- ParseInfo* parse_info, Isolate* isolate,
- Handle<SharedFunctionInfo> shared_info, CompilationJob* outer_function_job,
- std::forward_list<std::unique_ptr<CompilationJob>>* inner_function_jobs) {
+bool FinalizeUnoptimizedCode(ParseInfo* parse_info, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info,
+ CompilationJob* outer_function_job,
+ CompilationJobList* inner_function_jobs) {
DCHECK(AllowCompilation::IsAllowed(isolate));
// Allocate scope infos for the literal.
@@ -442,7 +453,7 @@ bool FinalizeUnoptimizedCode(
// Finalize the outer-most function's compilation job.
outer_function_job->compilation_info()->set_shared_info(shared_info);
- if (FinalizeUnoptimizedCompilationJob(outer_function_job) !=
+ if (FinalizeUnoptimizedCompilationJob(outer_function_job, isolate) !=
CompilationJob::SUCCEEDED) {
return false;
}
@@ -457,11 +468,18 @@ bool FinalizeUnoptimizedCode(
// TODO(rmcilroy): Fix this and DCHECK !is_compiled() once Full-Codegen dies
if (inner_shared_info->is_compiled()) continue;
inner_job->compilation_info()->set_shared_info(inner_shared_info);
- if (FinalizeUnoptimizedCompilationJob(inner_job.get()) !=
+ if (FinalizeUnoptimizedCompilationJob(inner_job.get(), isolate) !=
CompilationJob::SUCCEEDED) {
return false;
}
}
+
+ // Report any warnings generated during compilation.
+ if (parse_info->pending_error_handler()->has_pending_warnings()) {
+ parse_info->pending_error_handler()->ReportWarnings(isolate,
+ parse_info->script());
+ }
+
return true;
}
@@ -523,18 +541,17 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* compilation_info) {
}
}
-bool GetOptimizedCodeNow(CompilationJob* job) {
- CompilationInfo* compilation_info = job->compilation_info();
- Isolate* isolate = compilation_info->isolate();
+bool GetOptimizedCodeNow(CompilationJob* job, Isolate* isolate) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::RecompileSynchronous);
+ CompilationInfo* compilation_info = job->compilation_info();
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
- if (job->PrepareJob() != CompilationJob::SUCCEEDED ||
+ if (job->PrepareJob(isolate) != CompilationJob::SUCCEEDED ||
job->ExecuteJob() != CompilationJob::SUCCEEDED ||
- job->FinalizeJob() != CompilationJob::SUCCEEDED) {
+ job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
if (FLAG_trace_opt) {
PrintF("[aborted optimizing ");
compilation_info->closure()->ShortPrint();
@@ -548,15 +565,12 @@ bool GetOptimizedCodeNow(CompilationJob* job) {
job->RecordOptimizedCompilationStats();
DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeCache(compilation_info);
- RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- job->parse_info()->script(), compilation_info);
+ job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, isolate);
return true;
}
-bool GetOptimizedCodeLater(CompilationJob* job) {
+bool GetOptimizedCodeLater(CompilationJob* job, Isolate* isolate) {
CompilationInfo* compilation_info = job->compilation_info();
- Isolate* isolate = compilation_info->isolate();
-
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@@ -581,7 +595,7 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
- if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
+ if (job->PrepareJob(isolate) != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
if (FLAG_trace_concurrent_recompilation) {
@@ -666,7 +680,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// the background thread when we return.
base::Optional<CompilationHandleScope> compilation;
if (mode == ConcurrencyMode::kConcurrent) {
- compilation.emplace(compilation_info);
+ compilation.emplace(isolate, compilation_info);
}
// All handles below will be canonicalized.
@@ -677,7 +691,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
parse_info->ReopenHandlesInNewHandleScope();
if (mode == ConcurrencyMode::kConcurrent) {
- if (GetOptimizedCodeLater(job.get())) {
+ if (GetOptimizedCodeLater(job.get(), isolate)) {
job.release(); // The background recompile job owns this now.
// Set the optimization marker and return a code object which checks it.
@@ -689,16 +703,17 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
}
}
} else {
- if (GetOptimizedCodeNow(job.get())) return compilation_info->code();
+ if (GetOptimizedCodeNow(job.get(), isolate))
+ return compilation_info->code();
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return MaybeHandle<Code>();
}
-CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
+CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
+ Isolate* isolate) {
CompilationInfo* compilation_info = job->compilation_info();
- Isolate* isolate = compilation_info->isolate();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
@@ -723,10 +738,10 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
job->RetryOptimization(kOptimizationDisabled);
} else if (compilation_info->dependencies()->HasAborted()) {
job->RetryOptimization(kBailedOutDueToDependencyChange);
- } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
+ } else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
job->RecordOptimizedCompilationStats();
- RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- job->parse_info()->script(), compilation_info);
+ job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
+ isolate);
InsertCodeIntoOptimizedCodeCache(compilation_info);
if (FLAG_trace_opt) {
PrintF("[completed optimizing ");
@@ -738,7 +753,7 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
}
}
- DCHECK(job->state() == CompilationJob::State::kFailed);
+ DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
if (FLAG_trace_opt) {
PrintF("[aborted optimizing ");
compilation_info->closure()->ShortPrint();
@@ -753,18 +768,49 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
return CompilationJob::FAILED;
}
+MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
+ ParseInfo* parse_info, Isolate* isolate, CompilationJob* outer_function_job,
+ CompilationJobList* inner_function_jobs) {
+ Handle<Script> script = parse_info->script();
+
+ // Internalize ast values onto the heap.
+ parse_info->ast_value_factory()->Internalize(isolate);
+
+ // Create shared function infos for top level and shared function infos array
+ // for inner functions.
+ EnsureSharedFunctionInfosArrayOnScript(parse_info, isolate);
+ DCHECK_EQ(kNoSourcePosition,
+ parse_info->literal()->function_token_position());
+ Handle<SharedFunctionInfo> shared_info =
+ isolate->factory()->NewSharedFunctionInfoForLiteral(parse_info->literal(),
+ parse_info->script());
+ shared_info->set_is_toplevel(true);
+
+ // Finalize compilation of the unoptimized bytecode or asm-js data.
+ if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
+ outer_function_job, inner_function_jobs)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ if (!script.is_null()) {
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ }
+
+ return shared_info;
+}
+
MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
Isolate* isolate) {
TimerEventScope<TimerEventCompileCode> top_level_timer(isolate);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
RuntimeCallTimerScope runtimeTimer(
isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
: &RuntimeCallStats::CompileScript);
-
- Handle<Script> script = parse_info->script();
- Handle<SharedFunctionInfo> result;
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, isolate)) {
@@ -781,40 +827,16 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
// Generate the unoptimized bytecode or asm-js data.
- std::forward_list<std::unique_ptr<CompilationJob>> inner_function_jobs;
- std::unique_ptr<CompilationJob> outer_function_job(
- GenerateUnoptimizedCode(parse_info, isolate, &inner_function_jobs));
+ CompilationJobList inner_function_jobs;
+ std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
+ parse_info, isolate->allocator(), &inner_function_jobs));
if (!outer_function_job) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return MaybeHandle<SharedFunctionInfo>();
}
- // Internalize ast values onto the heap.
- parse_info->ast_value_factory()->Internalize(isolate);
-
- // Create shared function infos for top level and shared function infos array
- // for inner functions.
- EnsureSharedFunctionInfosArrayOnScript(parse_info, isolate);
- DCHECK_EQ(kNoSourcePosition,
- parse_info->literal()->function_token_position());
- Handle<SharedFunctionInfo> shared_info =
- isolate->factory()->NewSharedFunctionInfoForLiteral(parse_info->literal(),
- parse_info->script());
- shared_info->set_is_toplevel(true);
-
- // Finalize compilation of the unoptimized bytecode or asm-js data.
- if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
- outer_function_job.get(),
- &inner_function_jobs)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return MaybeHandle<SharedFunctionInfo>();
- }
-
- if (!script.is_null()) {
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- }
-
- return shared_info;
+ return FinalizeTopLevel(parse_info, isolate, outer_function_job.get(),
+ &inner_function_jobs);
}
bool FailWithPendingException(Isolate* isolate,
@@ -835,8 +857,11 @@ bool FailWithPendingException(Isolate* isolate,
bool Compiler::Analyze(ParseInfo* parse_info,
EagerInnerFunctionLiterals* eager_literals) {
DCHECK_NOT_NULL(parse_info->literal());
- RuntimeCallTimerScope runtimeTimer(parse_info->runtime_call_stats(),
- &RuntimeCallStats::CompileAnalyse);
+ RuntimeCallTimerScope runtimeTimer(
+ parse_info->runtime_call_stats(),
+ parse_info->on_background_thread()
+ ? &RuntimeCallStats::CompileBackgroundAnalyse
+ : &RuntimeCallStats::CompileAnalyse);
if (!Rewriter::Rewrite(parse_info)) return false;
DeclarationScope::Analyze(parse_info);
if (!Renumber(parse_info, eager_literals)) return false;
@@ -858,6 +883,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
DCHECK(!shared_info->is_compiled());
Isolate* isolate = shared_info->GetIsolate();
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DCHECK(!isolate->has_pending_exception());
DCHECK(!shared_info->HasBytecodeArray());
VMState<BYTECODE_COMPILER> state(isolate);
@@ -887,7 +913,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
parse_info.consumed_preparsed_scope_data()->SetData(data);
// After we've compiled the function, we don't need data about its
// skippable functions any more.
- shared_info->set_preparsed_scope_data(isolate->heap()->null_value());
+ shared_info->ClearPreParsedScopeData();
}
}
@@ -897,9 +923,9 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Generate the unoptimized bytecode or asm-js data.
- std::forward_list<std::unique_ptr<CompilationJob>> inner_function_jobs;
- std::unique_ptr<CompilationJob> outer_function_job(
- GenerateUnoptimizedCode(&parse_info, isolate, &inner_function_jobs));
+ CompilationJobList inner_function_jobs;
+ std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
+ &parse_info, isolate->allocator(), &inner_function_jobs));
if (!outer_function_job) {
return FailWithPendingException(isolate, flag);
}
@@ -1105,6 +1131,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!context->IsNativeContext()) {
parse_info.set_outer_scope_info(handle(context->scope_info()));
}
+ DCHECK(!parse_info.is_module());
if (!CompileToplevel(&parse_info, isolate).ToHandle(&shared_info)) {
return MaybeHandle<JSFunction>();
@@ -1172,7 +1199,7 @@ bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
// Check with callback if set.
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
- if (callback == NULL) {
+ if (callback == nullptr) {
// No callback set and code generation disallowed.
return false;
} else {
@@ -1203,31 +1230,226 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
int eval_scope_position = 0;
int eval_position = kNoSourcePosition;
Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared());
- return Compiler::GetFunctionFromEval(source, outer_info, native_context,
- SLOPPY, restriction, parameters_end_pos,
- eval_scope_position, eval_position);
+ return Compiler::GetFunctionFromEval(
+ source, outer_info, native_context, LanguageMode::kSloppy, restriction,
+ parameters_end_pos, eval_scope_position, eval_position);
}
+namespace {
+
+struct ScriptCompileTimerScope {
+ public:
+ // TODO(leszeks): There are too many blink-specific entries in this enum,
+ // figure out a way to push produce/hit-isolate-cache/consume/consume-failed
+ // back up the API and log them in blink instead.
+ enum class CacheBehaviour {
+ kProduceCodeCache,
+ kHitIsolateCacheWhenNoCache,
+ kConsumeCodeCache,
+ kConsumeCodeCacheFailed,
+ kNoCacheBecauseInlineScript,
+ kNoCacheBecauseScriptTooSmall,
+ kNoCacheBecauseCacheTooCold,
+ kNoCacheNoReason,
+ kNoCacheBecauseNoResource,
+ kNoCacheBecauseInspector,
+ kNoCacheBecauseCachingDisabled,
+ kNoCacheBecauseModule,
+ kNoCacheBecauseStreamingSource,
+ kNoCacheBecauseV8Extension,
+ kHitIsolateCacheWhenProduceCodeCache,
+ kHitIsolateCacheWhenConsumeCodeCache,
+ kNoCacheBecauseExtensionModule,
+ kNoCacheBecausePacScript,
+ kNoCacheBecauseInDocumentWrite,
+ kNoCacheBecauseResourceWithNoCacheHandler,
+ kCount
+ };
+
+ explicit ScriptCompileTimerScope(
+ Isolate* isolate, ScriptCompiler::NoCacheReason no_cache_reason)
+ : isolate_(isolate),
+ all_scripts_histogram_scope_(isolate->counters()->compile_script(),
+ true),
+ no_cache_reason_(no_cache_reason),
+ hit_isolate_cache_(false),
+ producing_code_cache_(false),
+ consuming_code_cache_(false),
+ consuming_code_cache_failed_(false) {}
+
+ ~ScriptCompileTimerScope() {
+ CacheBehaviour cache_behaviour = GetCacheBehaviour();
+
+ Histogram* cache_behaviour_histogram =
+ isolate_->counters()->compile_script_cache_behaviour();
+ // Sanity check that the histogram has exactly one bin per enum entry.
+ DCHECK_EQ(0, cache_behaviour_histogram->min());
+ DCHECK_EQ(static_cast<int>(CacheBehaviour::kCount),
+ cache_behaviour_histogram->max() + 1);
+ DCHECK_EQ(static_cast<int>(CacheBehaviour::kCount),
+ cache_behaviour_histogram->num_buckets());
+ cache_behaviour_histogram->AddSample(static_cast<int>(cache_behaviour));
+
+ histogram_scope_.set_histogram(
+ GetCacheBehaviourTimedHistogram(cache_behaviour));
+ }
+
+ void set_hit_isolate_cache() { hit_isolate_cache_ = true; }
+
+ void set_producing_code_cache() { producing_code_cache_ = true; }
+
+ void set_consuming_code_cache() { consuming_code_cache_ = true; }
+
+ void set_consuming_code_cache_failed() {
+ consuming_code_cache_failed_ = true;
+ }
+
+ private:
+ Isolate* isolate_;
+ LazyTimedHistogramScope histogram_scope_;
+ // TODO(leszeks): This timer is the sum of the other times, consider removing
+ // it to save space.
+ HistogramTimerScope all_scripts_histogram_scope_;
+ ScriptCompiler::NoCacheReason no_cache_reason_;
+ bool hit_isolate_cache_;
+ bool producing_code_cache_;
+ bool consuming_code_cache_;
+ bool consuming_code_cache_failed_;
+
+ CacheBehaviour GetCacheBehaviour() {
+ if (producing_code_cache_) {
+ if (hit_isolate_cache_) {
+ return CacheBehaviour::kHitIsolateCacheWhenProduceCodeCache;
+ } else {
+ return CacheBehaviour::kProduceCodeCache;
+ }
+ }
+
+ if (consuming_code_cache_) {
+ if (hit_isolate_cache_) {
+ return CacheBehaviour::kHitIsolateCacheWhenConsumeCodeCache;
+ } else if (consuming_code_cache_failed_) {
+ return CacheBehaviour::kConsumeCodeCacheFailed;
+ }
+ return CacheBehaviour::kConsumeCodeCache;
+ }
+
+ if (hit_isolate_cache_) {
+ // There's probably no need to distinguish the different isolate cache
+ // hits.
+ return CacheBehaviour::kHitIsolateCacheWhenNoCache;
+ }
+
+ switch (no_cache_reason_) {
+ case ScriptCompiler::kNoCacheBecauseInlineScript:
+ return CacheBehaviour::kNoCacheBecauseInlineScript;
+ case ScriptCompiler::kNoCacheBecauseScriptTooSmall:
+ return CacheBehaviour::kNoCacheBecauseScriptTooSmall;
+ case ScriptCompiler::kNoCacheBecauseCacheTooCold:
+ return CacheBehaviour::kNoCacheBecauseCacheTooCold;
+ case ScriptCompiler::kNoCacheNoReason:
+ return CacheBehaviour::kNoCacheNoReason;
+ case ScriptCompiler::kNoCacheBecauseNoResource:
+ return CacheBehaviour::kNoCacheBecauseNoResource;
+ case ScriptCompiler::kNoCacheBecauseInspector:
+ return CacheBehaviour::kNoCacheBecauseInspector;
+ case ScriptCompiler::kNoCacheBecauseCachingDisabled:
+ return CacheBehaviour::kNoCacheBecauseCachingDisabled;
+ case ScriptCompiler::kNoCacheBecauseModule:
+ return CacheBehaviour::kNoCacheBecauseModule;
+ case ScriptCompiler::kNoCacheBecauseStreamingSource:
+ return CacheBehaviour::kNoCacheBecauseStreamingSource;
+ case ScriptCompiler::kNoCacheBecauseV8Extension:
+ return CacheBehaviour::kNoCacheBecauseV8Extension;
+ case ScriptCompiler::kNoCacheBecauseExtensionModule:
+ return CacheBehaviour::kNoCacheBecauseExtensionModule;
+ case ScriptCompiler::kNoCacheBecausePacScript:
+ return CacheBehaviour::kNoCacheBecausePacScript;
+ case ScriptCompiler::kNoCacheBecauseInDocumentWrite:
+ return CacheBehaviour::kNoCacheBecauseInDocumentWrite;
+ case ScriptCompiler::kNoCacheBecauseResourceWithNoCacheHandler:
+ return CacheBehaviour::kNoCacheBecauseResourceWithNoCacheHandler;
+ }
+ UNREACHABLE();
+ }
+
+ TimedHistogram* GetCacheBehaviourTimedHistogram(
+ CacheBehaviour cache_behaviour) {
+ switch (cache_behaviour) {
+ case CacheBehaviour::kProduceCodeCache:
+ // Even if we hit the isolate's compilation cache, we currently recompile
+ // when we want to produce the code cache.
+ case CacheBehaviour::kHitIsolateCacheWhenProduceCodeCache:
+ return isolate_->counters()->compile_script_with_produce_cache();
+ case CacheBehaviour::kHitIsolateCacheWhenNoCache:
+ case CacheBehaviour::kHitIsolateCacheWhenConsumeCodeCache:
+ return isolate_->counters()->compile_script_with_isolate_cache_hit();
+ case CacheBehaviour::kConsumeCodeCacheFailed:
+ return isolate_->counters()->compile_script_consume_failed();
+ case CacheBehaviour::kConsumeCodeCache:
+ return isolate_->counters()->compile_script_with_consume_cache();
+
+ case CacheBehaviour::kNoCacheBecauseInlineScript:
+ return isolate_->counters()
+ ->compile_script_no_cache_because_inline_script();
+ case CacheBehaviour::kNoCacheBecauseScriptTooSmall:
+ return isolate_->counters()
+ ->compile_script_no_cache_because_script_too_small();
+ case CacheBehaviour::kNoCacheBecauseCacheTooCold:
+ return isolate_->counters()
+ ->compile_script_no_cache_because_cache_too_cold();
+
+ // Aggregate all the other "no cache" counters into a single histogram, to
+ // save space.
+ case CacheBehaviour::kNoCacheNoReason:
+ case CacheBehaviour::kNoCacheBecauseNoResource:
+ case CacheBehaviour::kNoCacheBecauseInspector:
+ case CacheBehaviour::kNoCacheBecauseCachingDisabled:
+ // TODO(leszeks): Consider counting separately once modules are more
+ // common.
+ case CacheBehaviour::kNoCacheBecauseModule:
+ // TODO(leszeks): Count separately or remove entirely once we have
+ // background compilation.
+ case CacheBehaviour::kNoCacheBecauseStreamingSource:
+ case CacheBehaviour::kNoCacheBecauseV8Extension:
+ case CacheBehaviour::kNoCacheBecauseExtensionModule:
+ case CacheBehaviour::kNoCacheBecausePacScript:
+ case CacheBehaviour::kNoCacheBecauseInDocumentWrite:
+ case CacheBehaviour::kNoCacheBecauseResourceWithNoCacheHandler:
+ return isolate_->counters()->compile_script_no_cache_other();
+
+ case CacheBehaviour::kCount:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ }
+};
+
+} // namespace
+
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Handle<String> source, MaybeHandle<Object> maybe_script_name,
int line_offset, int column_offset, ScriptOriginOptions resource_options,
MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives,
MaybeHandle<FixedArray> maybe_host_defined_options) {
Isolate* isolate = source->GetIsolate();
+ ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
+
if (compile_options == ScriptCompiler::kNoCompileOptions) {
- cached_data = NULL;
+ cached_data = nullptr;
} else if (compile_options == ScriptCompiler::kProduceParserCache ||
ShouldProduceCodeCache(compile_options)) {
DCHECK(cached_data && !*cached_data);
- DCHECK(extension == NULL);
+ DCHECK_NULL(extension);
DCHECK(!isolate->debug()->is_loaded());
} else {
DCHECK(compile_options == ScriptCompiler::kConsumeParserCache ||
compile_options == ScriptCompiler::kConsumeCodeCache);
DCHECK(cached_data && *cached_data);
- DCHECK(extension == NULL);
+ DCHECK_NULL(extension);
}
int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
@@ -1239,14 +1461,20 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Do a lookup in the compilation cache but not for extensions.
MaybeHandle<SharedFunctionInfo> maybe_result;
Handle<Cell> vector;
- if (extension == NULL) {
+ if (extension == nullptr) {
+ bool can_consume_code_cache =
+ compile_options == ScriptCompiler::kConsumeCodeCache &&
+ !isolate->debug()->is_loaded();
+ if (can_consume_code_cache) {
+ compile_timer.set_consuming_code_cache();
+ }
+
// First check per-isolate compilation cache.
InfoVectorPair pair = compilation_cache->LookupScript(
source, maybe_script_name, line_offset, column_offset, resource_options,
context, language_mode);
- if (!pair.has_shared() &&
- compile_options == ScriptCompiler::kConsumeCodeCache &&
- !isolate->debug()->is_loaded()) {
+ if (can_consume_code_cache && !pair.has_shared()) {
+ compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
RuntimeCallTimerScope runtimeTimer(isolate,
@@ -1268,9 +1496,11 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return inner_result;
}
// Deserializer failed. Fall through to compile.
+ compile_timer.set_consuming_code_cache_failed();
} else {
if (pair.has_shared()) {
maybe_result = MaybeHandle<SharedFunctionInfo>(pair.shared(), isolate);
+ compile_timer.set_hit_isolate_cache();
}
if (pair.has_vector()) {
vector = Handle<Cell>(pair.vector(), isolate);
@@ -1326,17 +1556,14 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (!context->IsNativeContext()) {
parse_info.set_outer_scope_info(handle(context->scope_info()));
}
- if (ShouldProduceCodeCache(compile_options)) {
- parse_info.set_will_serialize();
- parse_info.set_eager(compile_options ==
- ScriptCompiler::kProduceFullCodeCache);
- }
+ parse_info.set_eager(compile_options ==
+ ScriptCompiler::kProduceFullCodeCache);
parse_info.set_language_mode(
- static_cast<LanguageMode>(parse_info.language_mode() | language_mode));
+ stricter_language_mode(parse_info.language_mode(), language_mode));
maybe_result = CompileToplevel(&parse_info, isolate);
Handle<SharedFunctionInfo> result;
- if (extension == NULL && maybe_result.ToHandle(&result)) {
+ if (extension == nullptr && maybe_result.ToHandle(&result)) {
// We need a feedback vector.
DCHECK(result->is_compiled());
Handle<FeedbackVector> feedback_vector =
@@ -1346,6 +1573,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
vector);
if (ShouldProduceCodeCache(compile_options) &&
!ContainsAsmModule(script)) {
+ compile_timer.set_producing_code_cache();
+
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
RuntimeCallTimerScope runtimeTimer(isolate,
@@ -1371,16 +1600,72 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return maybe_result;
}
+std::unique_ptr<CompilationJob> Compiler::CompileTopLevelOnBackgroundThread(
+ ParseInfo* parse_info, AccountingAllocator* allocator,
+ CompilationJobList* inner_function_jobs) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileCodeBackground");
+ RuntimeCallTimerScope runtimeTimer(
+ parse_info->runtime_call_stats(),
+ parse_info->is_eval() ? &RuntimeCallStats::CompileBackgroundEval
+ : &RuntimeCallStats::CompileBackgroundScript);
+
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+ parse_info->set_language_mode(
+ stricter_language_mode(parse_info->language_mode(), language_mode));
+
+ // Can't access scope info data off-main-thread.
+ DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
+
+ // Generate the unoptimized bytecode or asm-js data.
+ std::unique_ptr<CompilationJob> outer_function_job(
+ GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
+ return outer_function_job;
+}
+
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForBackgroundCompile(
+ Handle<Script> script, ParseInfo* parse_info, int source_length,
+ CompilationJob* outer_function_job,
+ CompilationJobList* inner_function_jobs) {
+ Isolate* isolate = script->GetIsolate();
+ ScriptCompileTimerScope compile_timer(
+ isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
+ PostponeInterruptsScope postpone(isolate);
+
+ // TODO(titzer): increment the counters in caller.
+ isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
+
+ if (outer_function_job == nullptr) {
+ // Compilation failed on background thread - throw an exception.
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return Handle<SharedFunctionInfo>();
+ }
+
+ Handle<SharedFunctionInfo> result;
+ if (FinalizeTopLevel(parse_info, isolate, outer_function_job,
+ inner_function_jobs)
+ .ToHandle(&result)) {
+ isolate->debug()->OnAfterCompile(script);
+ }
+ return result;
+}
+
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
Handle<Script> script, ParseInfo* parse_info, int source_length) {
Isolate* isolate = script->GetIsolate();
+ ScriptCompileTimerScope compile_timer(
+ isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
// TODO(titzer): increment the counters in caller.
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
parse_info->set_language_mode(
- static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
+ stricter_language_mode(parse_info->language_mode(), language_mode));
Handle<SharedFunctionInfo> result;
if (CompileToplevel(parse_info, isolate).ToHandle(&result)) {
@@ -1415,38 +1700,6 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
return result;
}
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
- v8::Extension* extension, Handle<String> name) {
- Isolate* isolate = name->GetIsolate();
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-
- // Compute the function template for the native function.
- v8::Local<v8::FunctionTemplate> fun_template =
- extension->GetNativeFunctionTemplate(v8_isolate,
- v8::Utils::ToLocal(name));
- DCHECK(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
- *fun_template->GetFunction(v8_isolate->GetCurrentContext())
- .ToLocalChecked()));
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
- name, FunctionKind::kNormalFunction, code,
- Handle<ScopeInfo>(fun->shared()->scope_info()));
- shared->set_outer_scope_info(fun->shared()->outer_scope_info());
- shared->SetConstructStub(*construct_stub);
- shared->set_feedback_metadata(fun->shared()->feedback_metadata());
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->internal_formal_parameter_count();
- shared->set_internal_formal_parameter_count(parameters);
-
- return shared;
-}
-
MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
BailoutId osr_offset,
JavaScriptFrame* osr_frame) {
@@ -1456,29 +1709,18 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
osr_frame);
}
-CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
- ParseInfo* parse_info, Isolate* isolate) {
- VMState<BYTECODE_COMPILER> state(isolate);
- std::unique_ptr<CompilationJob> job(
- interpreter::Interpreter::NewCompilationJob(
- parse_info, parse_info->literal(), isolate));
- if (job->PrepareJob() != CompilationJob::SUCCEEDED) {
- return nullptr;
- }
- return job.release();
-}
-
-bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
+bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job,
+ Isolate* isolate) {
// Take ownership of compilation job. Deleting job also tears down the zone.
std::unique_ptr<CompilationJob> job(raw_job);
if (job->compilation_info()->IsOptimizing()) {
- VMState<COMPILER> state(job->compilation_info()->isolate());
- return FinalizeOptimizedCompilationJob(job.get()) ==
+ VMState<COMPILER> state(isolate);
+ return FinalizeOptimizedCompilationJob(job.get(), isolate) ==
CompilationJob::SUCCEEDED;
} else {
- VMState<BYTECODE_COMPILER> state(job->compilation_info()->isolate());
- return FinalizeUnoptimizedCompilationJob(job.get()) ==
+ VMState<BYTECODE_COMPILER> state(isolate);
+ return FinalizeUnoptimizedCompilationJob(job.get(), isolate) ==
CompilationJob::SUCCEEDED;
}
}
@@ -1501,7 +1743,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
}
}
- if (shared->is_compiled()) {
+ if (shared->is_compiled() && !shared->HasAsmWasmData()) {
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 5bd7b53f66..cc63697221 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -5,10 +5,12 @@
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
+#include <forward_list>
#include <memory>
#include "src/allocation.h"
#include "src/bailout-reason.h"
+#include "src/code-events.h"
#include "src/contexts.h"
#include "src/isolate.h"
#include "src/zone/zone.h"
@@ -27,6 +29,8 @@ class ThreadedList;
template <typename T>
class ThreadedListZoneEntry;
+typedef std::forward_list<std::unique_ptr<CompilationJob>> CompilationJobList;
+
// The V8 compiler API.
//
// This is the central hub for dispatching to the various compilers within V8.
@@ -53,12 +57,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
- // Prepare a compilation job for unoptimized code. Requires ParseAndAnalyse.
- static CompilationJob* PrepareUnoptimizedCompilationJob(ParseInfo* parse_info,
- Isolate* isolate);
+ // Compile top level code on a background thread. Should be finalized by
+ // GetSharedFunctionInfoForBackgroundCompile.
+ static std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
+ ParseInfo* parse_info, AccountingAllocator* allocator,
+ CompilationJobList* inner_function_jobs);
// Generate and install code from previously queued compilation job.
- static bool FinalizeCompilationJob(CompilationJob* job);
+ static bool FinalizeCompilationJob(CompilationJob* job, Isolate* isolate);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -113,6 +119,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code,
MaybeHandle<FixedArray> maybe_host_defined_options);
@@ -121,15 +128,19 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
Handle<Script> script, ParseInfo* info, int source_length);
- // Create a shared function info object (the code may be lazily compiled).
+ // Create a shared function info object for a Script that has already been
+ // compiled on a background thread.
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfoForBackgroundCompile(
+ Handle<Script> script, ParseInfo* parse_info, int source_length,
+ CompilationJob* outer_function_job,
+ CompilationJobList* inner_function_jobs);
+
+ // Create a shared function info object for the given function literal
+ // node (the code may be lazily compiled).
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(FunctionLiteral* node,
Handle<Script> script,
Isolate* isolate);
- // Create a shared function info object for a native function literal.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
- v8::Extension* extension, Handle<String> name);
-
// ===========================================================================
// The following family of methods provides support for OSR. Code generated
// for entry via OSR might not be suitable for normal entry, hence will be
@@ -164,21 +175,20 @@ class V8_EXPORT_PRIVATE CompilationJob {
kSucceeded,
kFailed,
};
-
- CompilationJob(Isolate* isolate, ParseInfo* parse_info, CompilationInfo* info,
- const char* compiler_name,
+ CompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
+ CompilationInfo* compilation_info, const char* compiler_name,
State initial_state = State::kReadyToPrepare);
virtual ~CompilationJob() {}
// Prepare the compile job. Must be called on the main thread.
- MUST_USE_RESULT Status PrepareJob();
+ MUST_USE_RESULT Status PrepareJob(Isolate* isolate);
// Executes the compile job. Can be called on a background thread if
// can_execute_on_background_thread() returns true.
MUST_USE_RESULT Status ExecuteJob();
// Finalizes the compile job. Must be called on the main thread.
- MUST_USE_RESULT Status FinalizeJob();
+ MUST_USE_RESULT Status FinalizeJob(Isolate* isolate);
// Report a transient failure, try again next time. Should only be called on
// optimization compilation jobs.
@@ -189,42 +199,34 @@ class V8_EXPORT_PRIVATE CompilationJob {
Status AbortOptimization(BailoutReason reason);
void RecordOptimizedCompilationStats() const;
- void RecordUnoptimizedCompilationStats() const;
-
- virtual bool can_execute_on_background_thread() const { return true; }
+ void RecordUnoptimizedCompilationStats(Isolate* isolate) const;
+ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ Isolate* isolate) const;
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
uintptr_t stack_limit() const { return stack_limit_; }
- bool executed_on_background_thread() const {
- DCHECK_IMPLIES(!can_execute_on_background_thread(),
- !executed_on_background_thread_);
- return executed_on_background_thread_;
- }
State state() const { return state_; }
ParseInfo* parse_info() const { return parse_info_; }
CompilationInfo* compilation_info() const { return compilation_info_; }
- Isolate* isolate() const;
virtual size_t AllocatedMemory() const { return 0; }
protected:
// Overridden by the actual implementation.
- virtual Status PrepareJobImpl() = 0;
+ virtual Status PrepareJobImpl(Isolate* isolate) = 0;
virtual Status ExecuteJobImpl() = 0;
- virtual Status FinalizeJobImpl() = 0;
+ virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
private:
// TODO(6409): Remove parse_info once Fullcode and AstGraphBuilder are gone.
ParseInfo* parse_info_;
CompilationInfo* compilation_info_;
- ThreadId isolate_thread_id_;
base::TimeDelta time_taken_to_prepare_;
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
const char* compiler_name_;
State state_;
uintptr_t stack_limit_;
- bool executed_on_background_thread_;
MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
if (status == SUCCEEDED) {
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 02f59f493b..ac4fc4363b 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -550,7 +550,7 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {
kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint16, MachineType::Uint16(),
kNoWriteBarrier};
return access;
}
@@ -711,6 +711,16 @@ FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
}
// static
+FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
+ FieldAccess access = {
+ kTaggedBase, JSGlobalProxy::kNativeContextOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
FieldAccess access = {kTaggedBase,
JSArrayIterator::kIteratedObjectOffset,
@@ -1092,7 +1102,7 @@ ElementAccess AccessBuilder::ForOrderedHashMapEntryValue() {
FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
FieldAccess access = {
kTaggedBase,
- FixedArray::OffsetOfElementAt(SeededNumberDictionary::kMaxNumberKeyIndex),
+ FixedArray::OffsetOfElementAt(NumberDictionary::kMaxNumberKeyIndex),
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::Any(),
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index d1f6acfc56..e348c0f71b 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -235,6 +235,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGlobalObject::native_context() field.
static FieldAccess ForJSGlobalObjectNativeContext();
+ // Provides access to JSGlobalProxy::native_context() field.
+ static FieldAccess ForJSGlobalProxyNativeContext();
+
// Provides access to JSArrayIterator::object() field.
static FieldAccess ForJSArrayIteratorObject();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index b1c680e517..9b0c4b41b1 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -360,8 +360,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (details.kind() == kData) {
int index = descriptors->GetFieldIndex(number);
Representation details_representation = details.representation();
- FieldIndex field_index = FieldIndex::ForPropertyIndex(
- *map, index, details_representation.IsDouble());
+ FieldIndex field_index =
+ FieldIndex::ForPropertyIndex(*map, index, details_representation);
Type* field_type = Type::NonInternal();
MachineRepresentation field_representation =
MachineRepresentation::kTagged;
@@ -446,6 +446,11 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (!accessor->IsJSFunction()) {
CallOptimization optimization(accessor);
if (!optimization.is_simple_api_call()) return false;
+ if (optimization.IsCrossContextLazyAccessorPair(*native_context_,
+ *map)) {
+ return false;
+ }
+
CallOptimization::HolderLookup lookup;
holder =
optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
@@ -595,9 +600,8 @@ bool AccessInfoFactory::ConsolidateElementLoad(MapHandles const& maps,
bool AccessInfoFactory::LookupSpecialFieldAccessor(
Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
// Check for special JSObject field accessors.
- int offset;
- if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
- FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ FieldIndex field_index;
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &field_index)) {
Type* field_type = Type::NonInternal();
MachineRepresentation field_representation = MachineRepresentation::kTagged;
if (map->IsStringMap()) {
@@ -651,8 +655,8 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
if (details.location() != kField) return false;
int const index = details.field_index();
Representation details_representation = details.representation();
- FieldIndex field_index = FieldIndex::ForPropertyIndex(
- *transition_map, index, details_representation.IsDouble());
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(*transition_map, index,
+ details_representation);
Type* field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MachineRepresentation field_representation = MachineRepresentation::kTagged;
diff --git a/deps/v8/src/compiler/all-nodes.h b/deps/v8/src/compiler/all-nodes.h
index 7c70bf75f6..b86c8fa132 100644
--- a/deps/v8/src/compiler/all-nodes.h
+++ b/deps/v8/src/compiler/all-nodes.h
@@ -25,12 +25,12 @@ class AllNodes {
// reachable from the End node.
AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs = true);
- bool IsLive(Node* node) {
+ bool IsLive(const Node* node) const {
CHECK(only_inputs_);
return IsReachable(node);
}
- bool IsReachable(Node* node) {
+ bool IsReachable(const Node* node) const {
if (!node) return false;
size_t id = node->id();
return id < is_reachable_.size() && is_reachable_[id];
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
new file mode 100644
index 0000000000..a0ba02cd33
--- /dev/null
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -0,0 +1,98 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ALLOCATION_BUILDER_H_
+#define V8_COMPILER_ALLOCATION_BUILDER_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper class to construct inline allocations on the simplified operator
+// level. This keeps track of the effect chain for initial stores on a newly
+// allocated object and also provides helpers for commonly allocated objects.
+class AllocationBuilder final {
+ public:
+ AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
+ : jsgraph_(jsgraph),
+ allocation_(nullptr),
+ effect_(effect),
+ control_(control) {}
+
+ // Primitive allocation of static size.
+ void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
+ Type* type = Type::Any()) {
+ DCHECK_LE(size, kMaxRegularHeapObjectSize);
+ effect_ = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect_);
+ allocation_ =
+ graph()->NewNode(simplified()->Allocate(type, pretenure),
+ jsgraph()->Constant(size), effect_, control_);
+ effect_ = allocation_;
+ }
+
+ // Primitive store into a field.
+ void Store(const FieldAccess& access, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
+ value, effect_, control_);
+ }
+
+ // Primitive store into an element.
+ void Store(ElementAccess const& access, Node* index, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+ index, value, effect_, control_);
+ }
+
+ // Compound allocation of a FixedArray.
+ void AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+ map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ Allocate(size, pretenure, Type::OtherInternal());
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+ }
+
+ // Compound store of a constant into a field.
+ void Store(const FieldAccess& access, Handle<Object> value) {
+ Store(access, jsgraph()->Constant(value));
+ }
+
+ void FinishAndChange(Node* node) {
+ NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+ node->ReplaceInput(0, allocation_);
+ node->ReplaceInput(1, effect_);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->FinishRegion());
+ }
+
+ Node* Finish() {
+ return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+ }
+
+ protected:
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ private:
+ JSGraph* const jsgraph_;
+ Node* allocation_;
+ Node* effect_;
+ Node* control_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ALLOCATION_BUILDER_H_
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index fa9f6a027e..1a66e5b7d4 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -134,6 +134,20 @@ class ArmOperandConverter final : public InstructionOperandConverter {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
+
+ NeonMemOperand NeonInputOperand(size_t first_index) {
+ const size_t index = first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_Offset_RR:
+ return NeonMemOperand(InputRegister(index + 0),
+ InputRegister(index + 1));
+ case kMode_Operand2_R:
+ return NeonMemOperand(InputRegister(index + 0));
+ default:
+ break;
+ }
+ UNREACHABLE();
+ }
};
namespace {
@@ -238,14 +252,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -669,15 +677,16 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int pc_offset = __ pc_offset();
int offset =
- Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset + 8);
+ Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset + 8);
// We can use the register pc - 8 for the address of the current instruction.
__ ldr(ip, MemOperand(pc, offset));
+ __ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -711,6 +720,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
+ if (info()->IsWasm()) {
+ scope.Open(tasm());
+ rmode = RelocInfo::WASM_CALL;
+ }
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Call(wasm_code, rmode);
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
// We must not share code targets for calls to builtins for wasm code, as
@@ -736,6 +767,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
+ if (info()->IsWasm()) {
+ scope.Open(tasm());
+ rmode = RelocInfo::WASM_CALL;
+ }
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Jump(wasm_code, rmode);
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
@@ -1536,22 +1590,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmVld1F64: {
__ vld1(Neon8, NeonListOperand(i.OutputDoubleRegister()),
- NeonMemOperand(i.InputRegister(0)));
+ i.NeonInputOperand(0));
break;
}
case kArmVst1F64: {
__ vst1(Neon8, NeonListOperand(i.InputDoubleRegister(0)),
- NeonMemOperand(i.InputRegister(1)));
+ i.NeonInputOperand(1));
break;
}
case kArmVld1S128: {
__ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()),
- NeonMemOperand(i.InputRegister(0)));
+ i.NeonInputOperand(0));
break;
}
case kArmVst1S128: {
__ vst1(Neon8, NeonListOperand(i.InputSimd128Register(0)),
- NeonMemOperand(i.InputRegister(1)));
+ i.NeonInputOperand(1));
break;
}
case kArmVldrF64:
@@ -2411,7 +2465,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Ensure byte indices are in [0, 31] so masks are never NaNs.
four_lanes &= 0x1F1F1F1F;
__ vmov(SwVfpRegister::from_code(scratch_s_base + j),
- Float32(four_lanes));
+ Float32::FromBits(four_lanes));
}
NeonListOperand table(table_base, table_size);
if (dst != src0 && dst != src1) {
@@ -2790,15 +2844,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation(saves_fp));
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
(kDoubleSize / kPointerSize));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
- frame->AllocateSavedCalleeRegisterSlots(
- base::bits::CountPopulation32(saves));
+ frame->AllocateSavedCalleeRegisterSlots(base::bits::CountPopulation(saves));
}
}
@@ -2836,7 +2889,9 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
+ const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+
if (shrink_slots > 0) {
if (info()->IsWasm()) {
if (shrink_slots > 128) {
@@ -2849,14 +2904,15 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
- __ Move(kScratchReg,
+ if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Move(scratch,
Operand(ExternalReference::address_of_real_stack_limit(
__ isolate())));
- __ ldr(kScratchReg, MemOperand(kScratchReg));
- __ add(kScratchReg, kScratchReg,
- Operand(shrink_slots * kPointerSize));
- __ cmp(sp, kScratchReg);
+ __ ldr(scratch, MemOperand(scratch));
+ __ add(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ cmp(sp, scratch);
__ b(cs, &done);
}
@@ -2879,7 +2935,13 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done);
}
}
- __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
+
+ // Skip callee-saved slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= 2 * base::bits::CountPopulation(saves_fp);
+ if (shrink_slots > 0) {
+ __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
+ }
}
if (saves_fp != 0) {
@@ -2887,11 +2949,10 @@ void CodeGenerator::AssembleConstructFrame() {
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation(saves_fp));
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
@@ -3022,7 +3083,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ str(temp, dst);
} else {
SwVfpRegister dst = g.ToFloatRegister(destination);
- __ vmov(dst, Float32(src.ToFloat32AsInt()));
+ __ vmov(dst, Float32::FromBits(src.ToFloat32AsInt()));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 391356e960..4ded82fa5b 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -606,9 +606,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
if (CpuFeatures::IsSupported(NEON)) {
// With NEON we can load directly from the calculated address.
- ArchOpcode op = load_rep == MachineRepresentation::kFloat64
- ? kArmVld1F64
- : kArmVld1S128;
+ InstructionCode op = load_rep == MachineRepresentation::kFloat64
+ ? kArmVld1F64
+ : kArmVld1S128;
+ op |= AddressingModeField::encode(kMode_Operand2_R);
Emit(op, g.DefineAsRegister(node), addr);
} else {
DCHECK_NE(MachineRepresentation::kSimd128, load_rep);
@@ -680,9 +681,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = address;
- ArchOpcode op = store_rep == MachineRepresentation::kFloat64
- ? kArmVst1F64
- : kArmVst1S128;
+ InstructionCode op = store_rep == MachineRepresentation::kFloat64
+ ? kArmVst1F64
+ : kArmVst1S128;
+ op |= AddressingModeField::encode(kMode_Operand2_R);
Emit(op, 0, nullptr, input_count, inputs);
} else {
DCHECK_NE(MachineRepresentation::kSimd128, store_rep);
@@ -856,7 +858,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
if (m.right().HasValue()) {
uint32_t const value = m.right().Value();
- uint32_t width = base::bits::CountPopulation32(value);
+ uint32_t width = base::bits::CountPopulation(value);
uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
// Try to merge SHR operations on the left hand input into this AND.
@@ -1027,7 +1029,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t value = (mleft.right().Value() >> lsb) << lsb;
- uint32_t width = base::bits::CountPopulation32(value);
+ uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if (msb + width + lsb == 32) {
DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 2836e77c51..3673ee2426 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -340,14 +340,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -534,40 +528,40 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
do { \
Label exchange; \
- __ bind(&exchange); \
__ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Bind(&exchange); \
__ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
- __ store_instr(i.TempRegister32(0), i.InputRegister32(2), \
+ __ store_instr(i.TempRegister32(1), i.InputRegister32(2), \
i.TempRegister(0)); \
- __ cbnz(i.TempRegister32(0), &exchange); \
+ __ Cbnz(i.TempRegister32(1), &exchange); \
} while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ bind(&compareExchange); \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
- __ cmp(i.TempRegister32(1), i.OutputRegister32()); \
- __ B(ne, &exit); \
- __ store_instr(i.TempRegister32(0), i.InputRegister32(3), \
- i.TempRegister(0)); \
- __ cbnz(i.TempRegister32(0), &compareExchange); \
- __ bind(&exit); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Bind(&compareExchange); \
+ __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
+ __ Cmp(i.OutputRegister32(), Operand(i.InputRegister32(2), ext)); \
+ __ B(ne, &exit); \
+ __ store_instr(i.TempRegister32(1), i.InputRegister32(3), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(1), &compareExchange); \
+ __ Bind(&exit); \
} while (0)
#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
do { \
Label binop; \
__ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ bind(&binop); \
+ __ Bind(&binop); \
__ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
__ bin_instr(i.TempRegister32(1), i.OutputRegister32(), \
Operand(i.InputRegister32(2))); \
- __ store_instr(i.TempRegister32(1), i.TempRegister32(1), \
+ __ store_instr(i.TempRegister32(2), i.TempRegister32(1), \
i.TempRegister(0)); \
- __ cbnz(i.TempRegister32(1), &binop); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -667,7 +661,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -676,8 +670,9 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Adr(x2, &current);
__ Bind(&current);
int pc = __ pc_offset();
- int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
__ Ldr(x2, MemOperand(x2, offset));
+ __ Ldr(x2, FieldMemOperand(x2, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -720,6 +715,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+ if (info()->IsWasm()) {
+ __ Call(wasm_code, RelocInfo::WASM_CALL);
+ } else {
+ __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
+ } else {
+ Register target = i.InputRegister(0);
+ __ Call(target);
+ }
+ RecordCallPosition(instr);
+ // TODO(titzer): this is ugly. JSSP should be a caller-save register
+ // in this case, but it is not possible to express in the register
+ // allocator.
+ CallDescriptor::Flags flags(MiscField::decode(opcode));
+ if (flags & CallDescriptor::kRestoreJSSP) {
+ __ Ldr(jssp, MemOperand(csp));
+ __ Mov(csp, jssp);
+ }
+ if (flags & CallDescriptor::kRestoreCSP) {
+ __ Mov(csp, jssp);
+ __ AssertCspAligned();
+ }
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
// We must not share code targets for calls to builtins for wasm code, as
@@ -744,6 +773,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+ if (info()->IsWasm()) {
+ __ Jump(wasm_code, RelocInfo::WASM_CALL);
+ } else {
+ __ Jump(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
+
+ } else {
+ Register target = i.InputRegister(0);
+ __ Jump(target);
+ }
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
@@ -1767,26 +1820,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr);
break;
case kAtomicCompareExchangeInt8:
- __ Uxtb(i.TempRegister(1), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- __ Uxtb(i.TempRegister(1), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
break;
case kAtomicCompareExchangeInt16:
- __ Uxth(i.TempRegister(1), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- __ Uxth(i.TempRegister(1), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
break;
case kAtomicCompareExchangeWord32:
- __ mov(i.TempRegister(1), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kAtomic##op##Int8: \
@@ -1812,6 +1860,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
+#undef ASSEMBLE_BOUNDS_CHECK
+#undef ASSEMBLE_CHECKED_LOAD_FLOAT
+#undef ASSEMBLE_CHECKED_LOAD_INTEGER
+#undef ASSEMBLE_CHECKED_LOAD_INTEGER_64
+#undef ASSEMBLE_CHECKED_STORE_FLOAT
+#undef ASSEMBLE_CHECKED_STORE_INTEGER
+#undef ASSEMBLE_CHECKED_STORE_INTEGER_64
+#undef ASSEMBLE_SHIFT
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#define SIMD_UNOP_CASE(Op, Instr, FORMAT) \
case Op: \
@@ -2484,6 +2547,11 @@ void CodeGenerator::AssembleConstructFrame() {
int shrink_slots =
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+
if (frame_access_state()->has_frame()) {
// Link the frame
if (descriptor->IsJSFunctionCall()) {
@@ -2554,6 +2622,10 @@ void CodeGenerator::AssembleConstructFrame() {
__ Bind(&done);
}
+ // Skip callee-saved slots, which are pushed below.
+ shrink_slots -= saves.Count();
+ shrink_slots -= saves_fp.Count();
+
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, i.e. claiming the extra slot that
// other platforms explicitly push for STUB (code object) frames and frames
@@ -2568,7 +2640,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ Claim(shrink_slots);
}
break;
- case CallDescriptor::kCallCodeObject: {
+ case CallDescriptor::kCallCodeObject:
+ case CallDescriptor::kCallWasmFunction: {
UseScratchRegisterScope temps(tasm());
__ Claim(shrink_slots + 1); // Claim extra slot for frame type marker.
Register scratch = temps.AcquireX();
@@ -2585,8 +2658,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
DCHECK_IMPLIES(saves_fp.Count() != 0,
saves_fp.list() == CPURegList::GetCalleeSavedV().list());
__ PushCPURegList(saves_fp);
@@ -2595,8 +2666,6 @@ void CodeGenerator::AssembleConstructFrame() {
// TODO(palfia): TF save list is not in sync with
// CPURegList::GetCalleeSaved(): x30 is missing.
// DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
- CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
__ PushCPURegList(saves);
}
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 47bc685b8b..201c0613c4 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -938,7 +938,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint32_t mask = m.right().Value();
- uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_width != 32) &&
(mask_msb + mask_width == 32)) {
@@ -979,7 +979,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint64_t mask = m.right().Value();
- uint64_t mask_width = base::bits::CountPopulation64(mask);
+ uint64_t mask_width = base::bits::CountPopulation(mask);
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_width != 64) &&
(mask_msb + mask_width == 64)) {
@@ -1054,7 +1054,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t mask = mleft.right().Value();
- uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
uint32_t shift = m.right().Value();
@@ -1138,7 +1138,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
- unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
Arm64OperandGenerator g(this);
@@ -1182,7 +1182,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
- unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
Arm64OperandGenerator g(this);
@@ -2755,12 +2755,12 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
AddressingMode addressing_mode = kMode_MRR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseRegister(base);
+ inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temps[] = {g.TempRegister()};
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
@@ -2791,17 +2791,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
AddressingMode addressing_mode = kMode_MRR;
InstructionOperand inputs[4];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseRegister(base);
+ inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temp[2];
- temp[0] = g.TempRegister();
- temp[1] = g.TempRegister();
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 2, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2831,16 +2829,15 @@ void InstructionSelector::VisitAtomicBinaryOperation(
AddressingMode addressing_mode = kMode_MRR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseRegister(base);
+ inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temps[2];
- temps[0] = g.TempRegister();
- temps[1] = g.TempRegister();
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 2, temps);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
#define VISIT_ATOMIC_BINOP(op) \
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 36ffcf1623..bb2229b2f8 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -47,14 +47,13 @@ static const Operator* PointerConstant(CommonOperatorBuilder* common,
static_cast<int32_t>(reinterpret_cast<intptr_t>(ptr)));
}
-
BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
- CompilationInfo* info, Graph* graph, Schedule* schedule) {
+ CompilationInfo* info, Graph* graph, Schedule* schedule, Isolate* isolate) {
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
BasicBlockProfiler::Data* data =
- info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
+ isolate->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
// Set the function name.
if (info->has_shared_info()) {
std::ostringstream os;
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 32dd82ade1..074f19b308 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -21,7 +21,8 @@ class Schedule;
class BasicBlockInstrumentor : public AllStatic {
public:
static BasicBlockProfiler::Data* Instrument(CompilationInfo* info,
- Graph* graph, Schedule* schedule);
+ Graph* graph, Schedule* schedule,
+ Isolate* isolate);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index e11847e502..5406ec5766 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -18,9 +18,7 @@ BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
jsgraph_(js_graph),
node_conditions_(zone, js_graph->graph()->NodeCount()),
zone_(zone),
- dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {
- NodeProperties::SetType(dead_, Type::None());
-}
+ dead_(js_graph->Dead()) {}
BranchElimination::~BranchElimination() {}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index ca3a70ab1f..7e1fbfddb3 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -963,11 +963,11 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
}
void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
- BuildStoreGlobal(LanguageMode::SLOPPY);
+ BuildStoreGlobal(LanguageMode::kSloppy);
}
void BytecodeGraphBuilder::VisitStaGlobalStrict() {
- BuildStoreGlobal(LanguageMode::STRICT);
+ BuildStoreGlobal(LanguageMode::kStrict);
}
void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
@@ -1935,7 +1935,7 @@ void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
}
void BytecodeGraphBuilder::VisitThrow() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetOutLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
bytecode_iterator().current_offset()));
Node* value = environment()->LookupAccumulator();
Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
@@ -1945,7 +1945,7 @@ void BytecodeGraphBuilder::VisitThrow() {
}
void BytecodeGraphBuilder::VisitAbort() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetOutLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
bytecode_iterator().current_offset()));
BailoutReason reason =
static_cast<BailoutReason>(bytecode_iterator().GetIndexOperand(0));
@@ -1955,7 +1955,7 @@ void BytecodeGraphBuilder::VisitAbort() {
}
void BytecodeGraphBuilder::VisitReThrow() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetOutLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
bytecode_iterator().current_offset()));
Node* value = environment()->LookupAccumulator();
NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
@@ -2015,6 +2015,27 @@ void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
Runtime::kThrowSuperAlreadyCalledError);
}
+void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
+ PrepareEagerCheckpoint();
+ Node* operand = environment()->LookupAccumulator();
+
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedUnaryOp(op, operand, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(op, operand);
+ }
+
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
@@ -2083,56 +2104,20 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
invocation_frequency_.value());
}
-void BytecodeGraphBuilder::VisitNegate() {
- PrepareEagerCheckpoint();
-
- // TODO(adamk): Create a JSNegate operator, as this desugaring is
- // invalid for BigInts.
- const Operator* op = javascript()->Multiply();
- Node* operand = environment()->LookupAccumulator();
- Node* multiplier = jsgraph()->SmiConstant(-1);
-
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
- JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedBinaryOp(op, operand, multiplier, slot);
- if (lowering.IsExit()) return;
-
- Node* node = nullptr;
- if (lowering.IsSideEffectFree()) {
- node = lowering.value();
- } else {
- DCHECK(!lowering.Changed());
- node = NewNode(op, operand, multiplier);
- }
-
- environment()->BindAccumulator(node, Environment::kAttachFrameState);
-}
-
void BytecodeGraphBuilder::VisitBitwiseNot() {
- PrepareEagerCheckpoint();
-
- // TODO(adamk): Create a JSBitwiseNot operator, as this desugaring is
- // invalid for BigInts.
- const Operator* op = javascript()->BitwiseXor();
- Node* operand = environment()->LookupAccumulator();
- Node* xor_value = jsgraph()->SmiConstant(-1);
+ BuildUnaryOp(javascript()->BitwiseNot());
+}
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
- JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedBinaryOp(op, operand, xor_value, slot);
- if (lowering.IsExit()) return;
+void BytecodeGraphBuilder::VisitDec() {
+ BuildUnaryOp(javascript()->Decrement());
+}
- Node* node = nullptr;
- if (lowering.IsSideEffectFree()) {
- node = lowering.value();
- } else {
- DCHECK(!lowering.Changed());
- node = NewNode(op, operand, xor_value);
- }
+void BytecodeGraphBuilder::VisitInc() {
+ BuildUnaryOp(javascript()->Increment());
+}
- environment()->BindAccumulator(node, Environment::kAttachFrameState);
+void BytecodeGraphBuilder::VisitNegate() {
+ BuildUnaryOp(javascript()->Negate());
}
void BytecodeGraphBuilder::VisitAdd() {
@@ -2154,6 +2139,10 @@ void BytecodeGraphBuilder::VisitMod() {
BuildBinaryOp(javascript()->Modulus());
}
+void BytecodeGraphBuilder::VisitExp() {
+ BuildBinaryOp(javascript()->Exponentiate());
+}
+
void BytecodeGraphBuilder::VisitBitwiseOr() {
BuildBinaryOp(javascript()->BitwiseOr());
}
@@ -2220,6 +2209,10 @@ void BytecodeGraphBuilder::VisitModSmi() {
BuildBinaryOpWithImmediate(javascript()->Modulus());
}
+void BytecodeGraphBuilder::VisitExpSmi() {
+ BuildBinaryOpWithImmediate(javascript()->Exponentiate());
+}
+
void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
}
@@ -2244,52 +2237,6 @@ void BytecodeGraphBuilder::VisitShiftRightLogicalSmi() {
BuildBinaryOpWithImmediate(javascript()->ShiftRightLogical());
}
-void BytecodeGraphBuilder::VisitInc() {
- PrepareEagerCheckpoint();
- // Note: Use subtract -1 here instead of add 1 to ensure we always convert to
- // a number, not a string.
- Node* left = environment()->LookupAccumulator();
- Node* right = jsgraph()->Constant(-1);
- const Operator* op = javascript()->Subtract();
-
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
- JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedBinaryOp(op, left, right, slot);
- if (lowering.IsExit()) return;
-
- Node* node = nullptr;
- if (lowering.IsSideEffectFree()) {
- node = lowering.value();
- } else {
- DCHECK(!lowering.Changed());
- node = NewNode(op, left, right);
- }
- environment()->BindAccumulator(node, Environment::kAttachFrameState);
-}
-
-void BytecodeGraphBuilder::VisitDec() {
- PrepareEagerCheckpoint();
- Node* left = environment()->LookupAccumulator();
- Node* right = jsgraph()->OneConstant();
- const Operator* op = javascript()->Subtract();
-
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
- JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedBinaryOp(op, left, right, slot);
- if (lowering.IsExit()) return;
-
- Node* node = nullptr;
- if (lowering.IsSideEffectFree()) {
- node = lowering.value();
- } else {
- DCHECK(!lowering.Changed());
- node = NewNode(op, left, right);
- }
- environment()->BindAccumulator(node, Environment::kAttachFrameState);
-}
-
void BytecodeGraphBuilder::VisitLogicalNot() {
Node* value = environment()->LookupAccumulator();
Node* node = NewNode(simplified()->BooleanNot(), value);
@@ -2297,15 +2244,15 @@ void BytecodeGraphBuilder::VisitLogicalNot() {
}
void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
- Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- environment()->LookupAccumulator());
+ Node* value =
+ NewNode(simplified()->ToBoolean(), environment()->LookupAccumulator());
Node* node = NewNode(simplified()->BooleanNot(), value);
environment()->BindAccumulator(node);
}
void BytecodeGraphBuilder::VisitTypeOf() {
Node* node =
- NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
+ NewNode(simplified()->TypeOf(), environment()->LookupAccumulator());
environment()->BindAccumulator(node);
}
@@ -2320,11 +2267,11 @@ void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
}
void BytecodeGraphBuilder::VisitDeletePropertyStrict() {
- BuildDelete(LanguageMode::STRICT);
+ BuildDelete(LanguageMode::kStrict);
}
void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
- BuildDelete(LanguageMode::SLOPPY);
+ BuildDelete(LanguageMode::kSloppy);
}
void BytecodeGraphBuilder::VisitGetSuperConstructor() {
@@ -2409,7 +2356,8 @@ void BytecodeGraphBuilder::VisitTestIn() {
}
void BytecodeGraphBuilder::VisitTestInstanceOf() {
- BuildTestingOp(javascript()->InstanceOf());
+ int const slot_index = bytecode_iterator().GetIndexOperand(1);
+ BuildCompareOp(javascript()->InstanceOf(CreateVectorSlotPair(slot_index)));
}
void BytecodeGraphBuilder::VisitTestUndetectable() {
@@ -2447,6 +2395,9 @@ void BytecodeGraphBuilder::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol:
result = NewNode(simplified()->ObjectIsSymbol(), object);
break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt:
+ result = NewNode(simplified()->ObjectIsBigInt(), object);
+ break;
case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean:
result = NewNode(common()->Select(MachineRepresentation::kTagged),
NewNode(simplified()->ReferenceEqual(), object,
@@ -2516,6 +2467,28 @@ void BytecodeGraphBuilder::VisitToNumber() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitToNumeric() {
+ PrepareEagerCheckpoint();
+ Node* object = environment()->LookupAccumulator();
+
+ // If we have some kind of Number feedback, we do the same lowering as for
+ // ToNumber.
+ FeedbackSlot slot =
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(0));
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedToNumber(object, slot);
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(javascript()->ToNumeric(), object);
+ }
+
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
@@ -2945,15 +2918,13 @@ void BytecodeGraphBuilder::BuildJumpIfTrue() {
void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
Node* accumulator = environment()->LookupAccumulator();
- Node* condition =
- NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+ Node* condition = NewNode(simplified()->ToBoolean(), accumulator);
BuildJumpIf(condition);
}
void BytecodeGraphBuilder::BuildJumpIfToBooleanFalse() {
Node* accumulator = environment()->LookupAccumulator();
- Node* condition =
- NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+ Node* condition = NewNode(simplified()->ToBoolean(), accumulator);
BuildJumpIfNot(condition);
}
@@ -2971,6 +2942,19 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
}
JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedUnaryOp(const Operator* op,
+ Node* operand,
+ FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceUnaryOperation(op, operand, effect, control,
+ slot);
+ ApplyEarlyReduction(result);
+ return result;
+}
+
+JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op, Node* left,
Node* right,
FeedbackSlot slot) {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 94fbd5099f..562c3ddaea 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -171,6 +171,7 @@ class BytecodeGraphBuilder {
std::initializer_list<Node*> args, int slot_id) {
BuildCall(receiver_mode, args.begin(), args.size(), slot_id);
}
+ void BuildUnaryOp(const Operator* op);
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
@@ -183,6 +184,8 @@ class BytecodeGraphBuilder {
// Optional early lowering to the simplified operator level. Note that
// the result has already been wired into the environment just like
// any other invocation of {NewNode} would do.
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedUnaryOp(
+ const Operator* op, Node* operand, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedBinaryOp(
const Operator* op, Node* left, Node* right, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInNext(
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 9523ef4e08..dd4197d466 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -235,7 +235,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
target_loc, // target location
locations.Build(), // location_sig
0, // stack_parameter_count
- Operator::kNoProperties, // properties
+ Operator::kNoThrow, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
flags, "c-call");
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index a0ed0af93d..f24cec64a7 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -39,33 +39,48 @@
namespace v8 {
namespace internal {
+
+constexpr MachineType MachineTypeOf<Smi>::value;
+constexpr MachineType MachineTypeOf<Object>::value;
+
namespace compiler {
+static_assert(std::is_convertible<TNode<Number>, TNode<Object>>::value,
+ "test subtyping");
+static_assert(std::is_convertible<TNode<UnionT<Smi, HeapNumber>>,
+ TNode<UnionT<Smi, HeapObject>>>::value,
+ "test subtyping");
+static_assert(
+ !std::is_convertible<TNode<UnionT<Smi, HeapObject>>, TNode<Number>>::value,
+ "test subtyping");
+
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- Code::Kind kind, const char* name, size_t result_size)
+ Code::Kind kind, const char* name, size_t result_size, uint32_t stub_key,
+ int32_t builtin_index)
: CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size),
- kind, name) {}
+ kind, name, stub_key, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, Code::Kind kind,
- const char* name)
+ const char* name, int32_t builtin_index)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
kind == Code::BUILTIN
? CallDescriptor::kPushArgumentCount
: CallDescriptor::kNoFlags),
- kind, name) {}
+ kind, name, 0, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
- Code::Kind kind, const char* name)
+ Code::Kind kind, const char* name,
+ uint32_t stub_key, int32_t builtin_index)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
@@ -73,6 +88,8 @@ CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
InstructionSelector::AlignmentRequirements())),
kind_(kind),
name_(name),
+ stub_key_(stub_key),
+ builtin_index_(builtin_index),
code_generated_(false),
variables_(zone) {}
@@ -88,6 +105,8 @@ CodeAssembler::~CodeAssembler() {}
void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
raw_assembler_->PrintCurrentBlock(os);
}
+
+bool CodeAssemblerState::InsideBlock() { return raw_assembler_->InsideBlock(); }
#endif
void CodeAssemblerState::SetInitialDebugInformation(const char* msg,
@@ -148,6 +167,10 @@ void CodeAssembler::CallEpilogue() {
}
}
+bool CodeAssembler::Word32ShiftIsSafe() const {
+ return raw_assembler()->machine()->Word32ShiftIsSafe();
+}
+
// static
Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
DCHECK(!state->code_generated_);
@@ -161,7 +184,8 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->kind_, state->name_, should_optimize_jumps ? &jump_opt : nullptr);
+ state->kind_, state->name_, state->stub_key_, state->builtin_index_,
+ should_optimize_jumps ? &jump_opt : nullptr);
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
@@ -169,7 +193,8 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
// Regenerate machine code
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->kind_, state->name_, &jump_opt);
+ state->kind_, state->name_, state->stub_key_, state->builtin_index_,
+ &jump_opt);
}
state->code_generated_ = true;
@@ -219,8 +244,8 @@ TNode<IntPtrT> CodeAssembler::IntPtrConstant(intptr_t value) {
return UncheckedCast<IntPtrT>(raw_assembler()->IntPtrConstant(value));
}
-TNode<Object> CodeAssembler::NumberConstant(double value) {
- return UncheckedCast<Object>(raw_assembler()->NumberConstant(value));
+TNode<Number> CodeAssembler::NumberConstant(double value) {
+ return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
}
TNode<Smi> CodeAssembler::SmiConstant(Smi* value) {
@@ -281,12 +306,13 @@ bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
node = node->InputAt(0);
- } else {
- return false;
}
IntPtrMatcher m(node);
if (m.HasValue()) {
- out_value = Smi::cast(bit_cast<Object*>(m.Value()));
+ intptr_t value = m.Value();
+ // Make sure that the value is actually a smi
+ CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
+ out_value = Smi::cast(bit_cast<Object*>(value));
return true;
}
return false;
@@ -448,12 +474,12 @@ TNode<WordT> CodeAssembler::IntPtrMul(SloppyTNode<WordT> left,
if (is_right_constant) {
return IntPtrConstant(left_constant * right_constant);
}
- if (left_constant == 1) {
- return right;
+ if (base::bits::IsPowerOfTwo(left_constant)) {
+ return WordShl(right, WhichPowerOf2(left_constant));
}
} else if (is_right_constant) {
- if (right_constant == 1) {
- return left;
+ if (base::bits::IsPowerOfTwo(right_constant)) {
+ return WordShl(left, WhichPowerOf2(right_constant));
}
}
return UncheckedCast<IntPtrT>(raw_assembler()->IntPtrMul(left, right));
@@ -788,21 +814,24 @@ TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(SloppyTNode<Word32T> value) {
return UncheckedCast<UintPtrT>(
raw_assembler()->ChangeUint32ToUint64(value));
}
- return UncheckedCast<UintPtrT>(value);
+ return ReinterpretCast<UintPtrT>(value);
}
TNode<IntPtrT> CodeAssembler::ChangeInt32ToIntPtr(SloppyTNode<Word32T> value) {
if (raw_assembler()->machine()->Is64()) {
- return UncheckedCast<IntPtrT>(raw_assembler()->ChangeInt32ToInt64(value));
+ return ReinterpretCast<IntPtrT>(raw_assembler()->ChangeInt32ToInt64(value));
}
- return UncheckedCast<IntPtrT>(value);
+ return ReinterpretCast<IntPtrT>(value);
}
-Node* CodeAssembler::ChangeFloat64ToUintPtr(Node* value) {
+TNode<UintPtrT> CodeAssembler::ChangeFloat64ToUintPtr(
+ SloppyTNode<Float64T> value) {
if (raw_assembler()->machine()->Is64()) {
- return raw_assembler()->ChangeFloat64ToUint64(value);
+ return ReinterpretCast<UintPtrT>(
+ raw_assembler()->ChangeFloat64ToUint64(value));
}
- return raw_assembler()->ChangeFloat64ToUint32(value);
+ return ReinterpretCast<UintPtrT>(
+ raw_assembler()->ChangeFloat64ToUint32(value));
}
Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
@@ -977,7 +1006,7 @@ TNode<Object> CodeAssembler::TailCallRuntimeImpl(Runtime::FunctionId function,
int argc = static_cast<int>(sizeof...(args));
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
+ CallDescriptor::kNoFlags);
int return_count = static_cast<int>(desc->ReturnCount());
Node* centry =
@@ -1003,7 +1032,10 @@ Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
size_t result_size, Node* target, Node* context,
TArgs... args) {
Node* nodes[] = {target, args..., context};
- return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
+ int input_count = arraysize(nodes);
+ if (context == nullptr) --input_count;
+ return CallStubN(descriptor, result_size, input_count, nodes,
+ context != nullptr);
}
// Instantiate CallStubR() for argument counts used by CSA-generated code.
@@ -1015,10 +1047,11 @@ REPEAT_1_TO_11(INSTANTIATE, Node*)
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
size_t result_size, int input_count,
- Node* const* inputs) {
- // 2 is for target and context.
- DCHECK_LE(2, input_count);
- int argc = input_count - 2;
+ Node* const* inputs, bool pass_context) {
+ // implicit nodes are target and optionally context.
+ int implicit_nodes = pass_context ? 2 : 1;
+ DCHECK_LE(implicit_nodes, input_count);
+ int argc = input_count - implicit_nodes;
DCHECK_LE(descriptor.GetParameterCount(), argc);
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
@@ -1026,7 +1059,8 @@ Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
+ MachineType::AnyTagged(), result_size,
+ pass_context ? Linkage::kPassContext : Linkage::kNoContext);
CallPrologue();
Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
@@ -1042,7 +1076,7 @@ Node* CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
size_t result_size = 1;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node* nodes[] = {target, args..., context};
@@ -1068,7 +1102,7 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatch(
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), 0);
Node* nodes[] = {target, args..., context};
@@ -1321,11 +1355,14 @@ Node* CodeAssemblerVariable::value() const {
if (!IsBound()) {
std::stringstream str;
str << "#Use of unbound variable:"
+ << "#\n Variable: " << *this << "#\n Current Block: ";
+ state_->PrintCurrentBlock(str);
+ FATAL(str.str().c_str());
+ }
+ if (!state_->InsideBlock()) {
+ std::stringstream str;
+ str << "#Accessing variable value outside a block:"
<< "#\n Variable: " << *this;
- if (state_) {
- str << "#\n Current Block: ";
- state_->PrintCurrentBlock(str);
- }
FATAL(str.str().c_str());
}
#endif // DEBUG
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 64e959a1c0..90a9d02fce 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -39,64 +39,147 @@ struct WordT : IntegralT {
: MachineRepresentation::kWord64;
};
-struct IntPtrT : WordT {};
-struct UintPtrT : WordT {};
-
struct RawPtrT : WordT {};
template <class To>
struct RawPtr : RawPtrT {};
-struct Word64T : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kWord64;
-};
-
-struct Int64T : Word64T {};
-
struct Word32T : IntegralT {
static const MachineRepresentation kMachineRepresentation =
MachineRepresentation::kWord32;
};
+struct Int32T : Word32T {
+ static constexpr MachineType kMachineType = MachineType::Int32();
+};
+struct Uint32T : Word32T {
+ static constexpr MachineType kMachineType = MachineType::Uint32();
+};
-struct Int32T : Word32T {};
+struct Word64T : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kWord64;
+};
+struct Int64T : Word64T {
+ static constexpr MachineType kMachineType = MachineType::Int64();
+};
+struct Uint64T : Word64T {
+ static constexpr MachineType kMachineType = MachineType::Uint64();
+};
-struct Uint32T : Word32T {};
+struct IntPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::IntPtr();
+};
+struct UintPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::UintPtr();
+};
struct Float32T : UntaggedT {
static const MachineRepresentation kMachineRepresentation =
MachineRepresentation::kFloat32;
+ static constexpr MachineType kMachineType = MachineType::Float32();
};
struct Float64T : UntaggedT {
static const MachineRepresentation kMachineRepresentation =
MachineRepresentation::kFloat64;
+ static constexpr MachineType kMachineType = MachineType::Float64();
};
// Result of a comparison operation.
struct BoolT : Word32T {};
+// Value type of a Turbofan node with two results.
+template <class T1, class T2>
+struct PairT {};
+
+inline constexpr MachineType CommonMachineType(MachineType type1,
+ MachineType type2) {
+ return (type1 == type2) ? type1
+ : ((type1.IsTagged() && type2.IsTagged())
+ ? MachineType::AnyTagged()
+ : MachineType::None());
+}
+
template <class Type, class Enable = void>
-struct MachineRepresentationOf {
- static const MachineRepresentation value = Type::kMachineRepresentation;
+struct MachineTypeOf {
+ static constexpr MachineType value = Type::kMachineType;
};
+
+template <class Type, class Enable>
+constexpr MachineType MachineTypeOf<Type, Enable>::value;
+
template <>
-struct MachineRepresentationOf<Object> {
- static const MachineRepresentation value = MachineRepresentation::kTagged;
+struct MachineTypeOf<Object> {
+ static constexpr MachineType value = MachineType::AnyTagged();
};
template <>
-struct MachineRepresentationOf<Smi> {
- static const MachineRepresentation value =
- MachineRepresentation::kTaggedSigned;
+struct MachineTypeOf<Smi> {
+ static constexpr MachineType value = MachineType::TaggedSigned();
};
template <class HeapObjectSubtype>
-struct MachineRepresentationOf<
+struct MachineTypeOf<HeapObjectSubtype,
+ typename std::enable_if<std::is_base_of<
+ HeapObject, HeapObjectSubtype>::value>::type> {
+ static constexpr MachineType value = MachineType::TaggedPointer();
+};
+
+template <class HeapObjectSubtype>
+constexpr MachineType MachineTypeOf<
HeapObjectSubtype, typename std::enable_if<std::is_base_of<
- HeapObject, HeapObjectSubtype>::value>::type> {
+ HeapObject, HeapObjectSubtype>::value>::type>::value;
+
+template <class Type, class Enable = void>
+struct MachineRepresentationOf {
+ static const MachineRepresentation value = Type::kMachineRepresentation;
+};
+template <class T>
+struct MachineRepresentationOf<
+ T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
static const MachineRepresentation value =
- MachineRepresentation::kTaggedPointer;
+ MachineTypeOf<T>::value.representation();
+};
+
+template <class T>
+struct is_valid_type_tag {
+ static const bool value = std::is_base_of<Object, T>::value ||
+ std::is_base_of<UntaggedT, T>::value ||
+ std::is_same<ExternalReference, T>::value;
+ static const bool is_tagged = std::is_base_of<Object, T>::value;
+};
+
+template <class T1, class T2>
+struct is_valid_type_tag<PairT<T1, T2>> {
+ static const bool value =
+ is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value;
+ static const bool is_tagged = false;
+};
+
+template <class T1, class T2>
+struct UnionT;
+
+template <class T1, class T2>
+struct is_valid_type_tag<UnionT<T1, T2>> {
+ static const bool is_tagged =
+ is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged;
+ static const bool value = is_tagged;
+};
+
+template <class T1, class T2>
+struct UnionT {
+ static constexpr MachineType kMachineType =
+ CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value);
+ static const MachineRepresentation kMachineRepresentation =
+ kMachineType.representation();
+ static_assert(kMachineRepresentation != MachineRepresentation::kNone,
+ "no common representation");
+ static_assert(is_valid_type_tag<T1>::is_tagged &&
+ is_valid_type_tag<T2>::is_tagged,
+ "union types are only possible for tagged values");
};
+using Number = UnionT<Smi, HeapNumber>;
+using Numeric = UnionT<Number, BigInt>;
+
#define ENUM_ELEMENT(Name) k##Name,
#define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name,
enum class ObjectType {
@@ -108,6 +191,7 @@ enum class ObjectType {
#undef ENUM_STRUCT_ELEMENT
class AccessCheckNeeded;
+class ClassBoilerplate;
class CompilationCacheTable;
class Constructor;
class Filler;
@@ -172,21 +256,90 @@ typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
typedef std::function<void()> CodeAssemblerCallback;
-// TNode<A> is an SSA value with the static type tag A, which is either a
-// subclass of internal::Object or of internal::UntaggedT or it is
-// ExternalReference.
-template <class A>
+template <class T, class U>
+struct is_subtype {
+ static const bool value = std::is_base_of<U, T>::value;
+};
+template <class T1, class T2, class U>
+struct is_subtype<UnionT<T1, T2>, U> {
+ static const bool value =
+ is_subtype<T1, U>::value && is_subtype<T2, U>::value;
+};
+template <class T, class U1, class U2>
+struct is_subtype<T, UnionT<U1, U2>> {
+ static const bool value =
+ is_subtype<T, U1>::value || is_subtype<T, U2>::value;
+};
+template <class T1, class T2, class U1, class U2>
+struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> {
+ static const bool value =
+ (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) &&
+ (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value);
+};
+
+template <class T, class U>
+struct types_have_common_values {
+ static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
+};
+template <class U>
+struct types_have_common_values<Uint32T, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Int32T, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Uint64T, U> {
+ static const bool value = types_have_common_values<Word64T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Int64T, U> {
+ static const bool value = types_have_common_values<Word64T, U>::value;
+};
+template <class U>
+struct types_have_common_values<IntPtrT, U> {
+ static const bool value = types_have_common_values<WordT, U>::value;
+};
+template <class U>
+struct types_have_common_values<UintPtrT, U> {
+ static const bool value = types_have_common_values<WordT, U>::value;
+};
+template <class T1, class T2, class U>
+struct types_have_common_values<UnionT<T1, T2>, U> {
+ static const bool value = types_have_common_values<T1, U>::value ||
+ types_have_common_values<T2, U>::value;
+};
+
+template <class T, class U1, class U2>
+struct types_have_common_values<T, UnionT<U1, U2>> {
+ static const bool value = types_have_common_values<T, U1>::value ||
+ types_have_common_values<T, U2>::value;
+};
+template <class T1, class T2, class U1, class U2>
+struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> {
+ static const bool value = types_have_common_values<T1, U1>::value ||
+ types_have_common_values<T1, U2>::value ||
+ types_have_common_values<T2, U1>::value ||
+ types_have_common_values<T2, U2>::value;
+};
+
+// TNode<T> is an SSA value with the static type tag T, which is one of the
+// following:
+// - a subclass of internal::Object represents a tagged type
+// - a subclass of internal::UntaggedT represents an untagged type
+// - ExternalReference
+// - PairT<T1, T2> for an operation returning two values, with types T1
+// and T2
+// - UnionT<T1, T2> represents either a value of type T1 or of type T2.
+template <class T>
class TNode {
public:
- static_assert(std::is_base_of<Object, A>::value ||
- std::is_base_of<UntaggedT, A>::value ||
- std::is_same<ExternalReference, A>::value,
- "type tag must be ExternalReference or a subclass of Object or "
- "UntaggedT");
+ static_assert(is_valid_type_tag<T>::value, "invalid type tag");
- template <class B, typename std::enable_if<std::is_base_of<A, B>::value,
- int>::type = 0>
- TNode(const TNode<B>& other) : node_(other) {}
+ template <class U,
+ typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
+ TNode(const TNode<U>& other) : node_(other) {}
TNode() : node_(nullptr) {}
TNode operator=(TNode other) {
@@ -206,21 +359,24 @@ class TNode {
compiler::Node* node_;
};
-// SloppyTNode<A> is a variant of TNode<A> and allows implicit casts from
+// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
// Node*. It is intended for function arguments as long as some call sites
// still use untyped Node* arguments.
// TODO(tebbi): Delete this class once transition is finished.
-template <class A>
-class SloppyTNode : public TNode<A> {
+template <class T>
+class SloppyTNode : public TNode<T> {
public:
SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
- : TNode<A>(node) {}
- template <class B, typename std::enable_if<std::is_base_of<A, B>::value,
+ : TNode<T>(node) {}
+ template <class U, typename std::enable_if<is_subtype<U, T>::value,
int>::type = 0>
- SloppyTNode(const TNode<B>& other) // NOLINT(runtime/explicit)
- : TNode<A>(other) {}
+ SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit)
+ : TNode<T>(other) {}
};
+// This macro alias allows to use PairT<T1, T2> as a macro argument.
+#define PAIR_TYPE(T1, T2) PairT<T1, T2>
+
#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float32Equal, BoolT, Float32T, Float32T) \
V(Float32LessThan, BoolT, Float32T, Float32T) \
@@ -255,84 +411,84 @@ class SloppyTNode : public TNode<A> {
V(Word64Equal, BoolT, Word64T, Word64T) \
V(Word64NotEqual, BoolT, Word64T, Word64T)
-#define CODE_ASSEMBLER_BINARY_OP_LIST(V) \
- CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
- V(Float64Add, Float64T, Float64T, Float64T) \
- V(Float64Sub, Float64T, Float64T, Float64T) \
- V(Float64Mul, Float64T, Float64T, Float64T) \
- V(Float64Div, Float64T, Float64T, Float64T) \
- V(Float64Mod, Float64T, Float64T, Float64T) \
- V(Float64Atan2, Float64T, Float64T, Float64T) \
- V(Float64Pow, Float64T, Float64T, Float64T) \
- V(Float64Max, Float64T, Float64T, Float64T) \
- V(Float64Min, Float64T, Float64T, Float64T) \
- V(Float64InsertLowWord32, Float64T, Float64T, Word32T) \
- V(Float64InsertHighWord32, Float64T, Float64T, Word32T) \
- V(IntPtrAddWithOverflow, IntPtrT, IntPtrT, IntPtrT) \
- V(IntPtrSubWithOverflow, IntPtrT, IntPtrT, IntPtrT) \
- V(Int32Add, Word32T, Word32T, Word32T) \
- V(Int32AddWithOverflow, Int32T, Int32T, Int32T) \
- V(Int32Sub, Word32T, Word32T, Word32T) \
- V(Int32Mul, Word32T, Word32T, Word32T) \
- V(Int32MulWithOverflow, Int32T, Int32T, Int32T) \
- V(Int32Div, Int32T, Int32T, Int32T) \
- V(Int32Mod, Int32T, Int32T, Int32T) \
- V(WordRor, WordT, WordT, IntegralT) \
- V(Word32Ror, Word32T, Word32T, Word32T) \
+#define CODE_ASSEMBLER_BINARY_OP_LIST(V) \
+ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float64Add, Float64T, Float64T, Float64T) \
+ V(Float64Sub, Float64T, Float64T, Float64T) \
+ V(Float64Mul, Float64T, Float64T, Float64T) \
+ V(Float64Div, Float64T, Float64T, Float64T) \
+ V(Float64Mod, Float64T, Float64T, Float64T) \
+ V(Float64Atan2, Float64T, Float64T, Float64T) \
+ V(Float64Pow, Float64T, Float64T, Float64T) \
+ V(Float64Max, Float64T, Float64T, Float64T) \
+ V(Float64Min, Float64T, Float64T, Float64T) \
+ V(Float64InsertLowWord32, Float64T, Float64T, Word32T) \
+ V(Float64InsertHighWord32, Float64T, Float64T, Word32T) \
+ V(IntPtrAddWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT, IntPtrT) \
+ V(IntPtrSubWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT, IntPtrT) \
+ V(Int32Add, Word32T, Word32T, Word32T) \
+ V(Int32AddWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
+ V(Int32Sub, Word32T, Word32T, Word32T) \
+ V(Int32Mul, Word32T, Word32T, Word32T) \
+ V(Int32MulWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
+ V(Int32Div, Int32T, Int32T, Int32T) \
+ V(Int32Mod, Int32T, Int32T, Int32T) \
+ V(WordRor, WordT, WordT, IntegralT) \
+ V(Word32Ror, Word32T, Word32T, Word32T) \
V(Word64Ror, Word64T, Word64T, Word64T)
TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
-#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
- V(Float64Abs, Float64T, Float64T) \
- V(Float64Acos, Float64T, Float64T) \
- V(Float64Acosh, Float64T, Float64T) \
- V(Float64Asin, Float64T, Float64T) \
- V(Float64Asinh, Float64T, Float64T) \
- V(Float64Atan, Float64T, Float64T) \
- V(Float64Atanh, Float64T, Float64T) \
- V(Float64Cos, Float64T, Float64T) \
- V(Float64Cosh, Float64T, Float64T) \
- V(Float64Exp, Float64T, Float64T) \
- V(Float64Expm1, Float64T, Float64T) \
- V(Float64Log, Float64T, Float64T) \
- V(Float64Log1p, Float64T, Float64T) \
- V(Float64Log2, Float64T, Float64T) \
- V(Float64Log10, Float64T, Float64T) \
- V(Float64Cbrt, Float64T, Float64T) \
- V(Float64Neg, Float64T, Float64T) \
- V(Float64Sin, Float64T, Float64T) \
- V(Float64Sinh, Float64T, Float64T) \
- V(Float64Sqrt, Float64T, Float64T) \
- V(Float64Tan, Float64T, Float64T) \
- V(Float64Tanh, Float64T, Float64T) \
- V(Float64ExtractLowWord32, Word32T, Float64T) \
- V(Float64ExtractHighWord32, Word32T, Float64T) \
- V(BitcastTaggedToWord, IntPtrT, Object) \
- V(BitcastWordToTagged, Object, WordT) \
- V(BitcastWordToTaggedSigned, Smi, WordT) \
- V(TruncateFloat64ToFloat32, Float32T, Float64T) \
- V(TruncateFloat64ToWord32, Word32T, Float64T) \
- V(TruncateInt64ToInt32, Int32T, Int64T) \
- V(ChangeFloat32ToFloat64, Float64T, Float32T) \
- V(ChangeFloat64ToUint32, Int32T, Float64T) \
- V(ChangeFloat64ToUint64, Word64T, Float64T) \
- V(ChangeInt32ToFloat64, Float64T, Int32T) \
- V(ChangeInt32ToInt64, Int64T, Int32T) \
- V(ChangeUint32ToFloat64, Float64T, Word32T) \
- V(ChangeUint32ToUint64, Word64T, Word32T) \
- V(RoundFloat64ToInt32, Int32T, Float64T) \
- V(RoundInt32ToFloat32, Int32T, Float32T) \
- V(Float64SilenceNaN, Float64T, Float64T) \
- V(Float64RoundDown, Float64T, Float64T) \
- V(Float64RoundUp, Float64T, Float64T) \
- V(Float64RoundTiesEven, Float64T, Float64T) \
- V(Float64RoundTruncate, Float64T, Float64T) \
- V(Word32Clz, Int32T, Word32T) \
- V(Word32Not, Word32T, Word32T) \
- V(Int32AbsWithOverflow, Int32T, Int32T) \
- V(Int64AbsWithOverflow, Int64T, Int64T) \
- V(IntPtrAbsWithOverflow, IntPtrT, IntPtrT) \
+#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+ V(Float64Abs, Float64T, Float64T) \
+ V(Float64Acos, Float64T, Float64T) \
+ V(Float64Acosh, Float64T, Float64T) \
+ V(Float64Asin, Float64T, Float64T) \
+ V(Float64Asinh, Float64T, Float64T) \
+ V(Float64Atan, Float64T, Float64T) \
+ V(Float64Atanh, Float64T, Float64T) \
+ V(Float64Cos, Float64T, Float64T) \
+ V(Float64Cosh, Float64T, Float64T) \
+ V(Float64Exp, Float64T, Float64T) \
+ V(Float64Expm1, Float64T, Float64T) \
+ V(Float64Log, Float64T, Float64T) \
+ V(Float64Log1p, Float64T, Float64T) \
+ V(Float64Log2, Float64T, Float64T) \
+ V(Float64Log10, Float64T, Float64T) \
+ V(Float64Cbrt, Float64T, Float64T) \
+ V(Float64Neg, Float64T, Float64T) \
+ V(Float64Sin, Float64T, Float64T) \
+ V(Float64Sinh, Float64T, Float64T) \
+ V(Float64Sqrt, Float64T, Float64T) \
+ V(Float64Tan, Float64T, Float64T) \
+ V(Float64Tanh, Float64T, Float64T) \
+ V(Float64ExtractLowWord32, Word32T, Float64T) \
+ V(Float64ExtractHighWord32, Word32T, Float64T) \
+ V(BitcastTaggedToWord, IntPtrT, Object) \
+ V(BitcastWordToTagged, Object, WordT) \
+ V(BitcastWordToTaggedSigned, Smi, WordT) \
+ V(TruncateFloat64ToFloat32, Float32T, Float64T) \
+ V(TruncateFloat64ToWord32, Word32T, Float64T) \
+ V(TruncateInt64ToInt32, Int32T, Int64T) \
+ V(ChangeFloat32ToFloat64, Float64T, Float32T) \
+ V(ChangeFloat64ToUint32, Uint32T, Float64T) \
+ V(ChangeFloat64ToUint64, Uint64T, Float64T) \
+ V(ChangeInt32ToFloat64, Float64T, Int32T) \
+ V(ChangeInt32ToInt64, Int64T, Int32T) \
+ V(ChangeUint32ToFloat64, Float64T, Word32T) \
+ V(ChangeUint32ToUint64, Uint64T, Word32T) \
+ V(RoundFloat64ToInt32, Int32T, Float64T) \
+ V(RoundInt32ToFloat32, Int32T, Float32T) \
+ V(Float64SilenceNaN, Float64T, Float64T) \
+ V(Float64RoundDown, Float64T, Float64T) \
+ V(Float64RoundUp, Float64T, Float64T) \
+ V(Float64RoundTiesEven, Float64T, Float64T) \
+ V(Float64RoundTruncate, Float64T, Float64T) \
+ V(Word32Clz, Int32T, Word32T) \
+ V(Word32Not, Word32T, Word32T) \
+ V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
+ V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
+ V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
V(Word32BinaryNot, Word32T, Word32T)
// A "public" interface used by components outside of compiler directory to
@@ -382,6 +538,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Base Assembler
// ===========================================================================
+ template <class PreviousType>
class CheckedNode {
public:
#ifdef DEBUG
@@ -394,8 +551,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class A>
operator TNode<A>() {
- static_assert(std::is_base_of<Object, A>::value,
- "Coercion to untagged values cannot be checked.");
+ static_assert(types_have_common_values<A, PreviousType>::value,
+ "Incompatible types: this cast can never succeed.");
+ static_assert(std::is_convertible<TNode<A>, TNode<Object>>::value,
+ "Coercion to untagged values cannot be "
+ "checked.");
#ifdef DEBUG
if (FLAG_debug_code) {
Node* function = code_assembler_->ExternalConstant(
@@ -427,12 +587,31 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#endif
};
- CheckedNode Cast(Node* value, const char* location) {
- return CheckedNode(value, this, location);
+ template <class T>
+ TNode<T> UncheckedCast(Node* value) {
+ return TNode<T>::UncheckedCast(value);
+ }
+ template <class T, class U>
+ TNode<T> UncheckedCast(TNode<U> value) {
+ static_assert(types_have_common_values<T, U>::value,
+ "Incompatible types: this cast can never succeed.");
+ return TNode<T>::UncheckedCast(value);
+ }
+
+ // ReinterpretCast<T>(v) has the power to cast even when the type of v is
+ // unrelated to T. Use with care.
+ template <class T>
+ TNode<T> ReinterpretCast(Node* value) {
+ return TNode<T>::UncheckedCast(value);
+ }
+
+ CheckedNode<Object> Cast(Node* value, const char* location) {
+ return CheckedNode<Object>(value, this, location);
}
- template <class A>
- TNode<A> UncheckedCast(Node* value) {
- return TNode<A>::UncheckedCast(value);
+
+ template <class T>
+ CheckedNode<T> Cast(TNode<T> value, const char* location) {
+ return CheckedNode<T>(value, this, location);
}
#ifdef DEBUG
@@ -448,9 +627,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Int32T> Int32Constant(int32_t value);
TNode<Int64T> Int64Constant(int64_t value);
TNode<IntPtrT> IntPtrConstant(intptr_t value);
- TNode<Object> NumberConstant(double value);
+ TNode<Number> NumberConstant(double value);
TNode<Smi> SmiConstant(Smi* value);
TNode<Smi> SmiConstant(int value);
+ template <typename E,
+ typename = typename std::enable_if<std::is_enum<E>::value>::type>
+ TNode<Smi> SmiConstant(E value) {
+ STATIC_ASSERT(sizeof(E) <= sizeof(int));
+ return SmiConstant(static_cast<int>(value));
+ }
TNode<HeapObject> UntypedHeapConstant(Handle<HeapObject> object);
template <class Type>
TNode<Type> HeapConstant(Handle<Type> object) {
@@ -577,29 +762,32 @@ class V8_EXPORT_PRIVATE CodeAssembler {
std::is_base_of<Object, Left>::value &&
std::is_base_of<Object, Right>::value>::type>
TNode<BoolT> WordEqual(TNode<Left> left, TNode<Right> right) {
- return WordEqual(UncheckedCast<WordT>(left), UncheckedCast<WordT>(right));
+ return WordEqual(ReinterpretCast<WordT>(left),
+ ReinterpretCast<WordT>(right));
}
TNode<BoolT> WordEqual(TNode<Object> left, Node* right) {
- return WordEqual(UncheckedCast<WordT>(left), UncheckedCast<WordT>(right));
+ return WordEqual(ReinterpretCast<WordT>(left),
+ ReinterpretCast<WordT>(right));
}
TNode<BoolT> WordEqual(Node* left, TNode<Object> right) {
- return WordEqual(UncheckedCast<WordT>(left), UncheckedCast<WordT>(right));
+ return WordEqual(ReinterpretCast<WordT>(left),
+ ReinterpretCast<WordT>(right));
}
template <class Left, class Right,
class = typename std::enable_if<
std::is_base_of<Object, Left>::value &&
std::is_base_of<Object, Right>::value>::type>
TNode<BoolT> WordNotEqual(TNode<Left> left, TNode<Right> right) {
- return WordNotEqual(UncheckedCast<WordT>(left),
- UncheckedCast<WordT>(right));
+ return WordNotEqual(ReinterpretCast<WordT>(left),
+ ReinterpretCast<WordT>(right));
}
TNode<BoolT> WordNotEqual(TNode<Object> left, Node* right) {
- return WordNotEqual(UncheckedCast<WordT>(left),
- UncheckedCast<WordT>(right));
+ return WordNotEqual(ReinterpretCast<WordT>(left),
+ ReinterpretCast<WordT>(right));
}
TNode<BoolT> WordNotEqual(Node* left, TNode<Object> right) {
- return WordNotEqual(UncheckedCast<WordT>(left),
- UncheckedCast<WordT>(right));
+ return WordNotEqual(ReinterpretCast<WordT>(left),
+ ReinterpretCast<WordT>(right));
}
TNode<Int32T> Int32Add(TNode<Int32T> left, TNode<Int32T> right) {
@@ -669,7 +857,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Changes a double to an inptr_t for pointer arithmetic outside of Smi range.
// Assumes that the double can be exactly represented as an int.
- Node* ChangeFloat64ToUintPtr(Node* value);
+ TNode<UintPtrT> ChangeFloat64ToUintPtr(SloppyTNode<Float64T> value);
// Changes an intptr_t to a double, e.g. for storing an element index
// outside Smi range in a HeapNumber. Lossless on 32-bit,
@@ -687,6 +875,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Projections
Node* Projection(int index, Node* value);
+ template <int index, class T1, class T2>
+ TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
+ Projection(TNode<PairT<T1, T2>> value) {
+ return UncheckedCast<
+ typename std::tuple_element<index, std::tuple<T1, T2>>::type>(
+ Projection(index, value));
+ }
+
// Calls
template <class... TArgs>
TNode<Object> CallRuntimeImpl(Runtime::FunctionId function,
@@ -708,6 +904,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
function, context, base::implicit_cast<SloppyTNode<Object>>(args)...);
}
+ //
+ // If context passed to CallStub is nullptr, it won't be passed to the stub.
+ //
+
template <class... TArgs>
Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
Node* target = HeapConstant(callable.code());
@@ -727,7 +927,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* target, Node* context, TArgs... args);
Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
- int input_count, Node* const* inputs);
+ int input_count, Node* const* inputs,
+ bool pass_context = true);
template <class... TArgs>
Node* TailCallStub(Callable const& callable, Node* context, TArgs... args) {
@@ -858,6 +1059,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CodeAssemblerCallback& call_epilogue);
void UnregisterCallGenerationCallbacks();
+ bool Word32ShiftIsSafe() const;
+
private:
RawMachineAssembler* raw_assembler() const;
@@ -923,14 +1126,14 @@ class TypedCodeAssemblerVariable : public CodeAssemblerVariable {
initial_value) {}
#endif // DEBUG
- template <class A,
- class = typename std::enable_if<std::is_base_of<A, T>::value>::type>
- operator TNode<A>() const {
+ template <class U, class = typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<U>>::value>::type>
+ operator TNode<U>() const {
return TNode<T>::UncheckedCast(value());
}
- template <class A,
- class = typename std::enable_if<std::is_base_of<A, T>::value>::type>
- operator SloppyTNode<A>() const {
+ template <class U, class = typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<U>>::value>::type>
+ operator SloppyTNode<U>() const {
return value();
}
operator Node*() const { return value(); }
@@ -1002,11 +1205,14 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, Code::Kind kind,
- const char* name, size_t result_size = 1);
+ const char* name, size_t result_size = 1,
+ uint32_t stub_key = 0,
+ int32_t builtin_index = Builtins::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Kind kind, const char* name);
+ Code::Kind kind, const char* name,
+ int32_t builtin_index = Builtins::kNoBuiltinId);
~CodeAssemblerState();
@@ -1015,6 +1221,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
#if DEBUG
void PrintCurrentBlock(std::ostream& os);
+ bool InsideBlock();
#endif // DEBUG
void SetInitialDebugInformation(const char* msg, const char* file, int line);
@@ -1022,14 +1229,18 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
friend class CodeAssembler;
friend class CodeAssemblerLabel;
friend class CodeAssemblerVariable;
+ friend class CodeAssemblerTester;
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, Code::Kind kind,
- const char* name);
+ const char* name, uint32_t stub_key,
+ int32_t builtin_index);
std::unique_ptr<RawMachineAssembler> raw_assembler_;
Code::Kind kind_;
const char* name_;
+ uint32_t stub_key_;
+ int32_t builtin_index_;
bool code_generated_;
ZoneSet<CodeAssemblerVariable::Impl*> variables_;
CodeAssemblerCallback call_prologue_;
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index e8aa1a4796..3d43ab4765 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -11,8 +11,10 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
+#include "src/eh-frame.h"
#include "src/frames.h"
#include "src/macro-assembler-inl.h"
+#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
@@ -35,12 +37,14 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info,
- base::Optional<OsrHelper> osr_helper,
- int start_source_position,
- JumpOptimizationInfo* jump_opt)
+CodeGenerator::CodeGenerator(
+ Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code, CompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper, int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions)
: zone_(codegen_zone),
+ isolate_(isolate),
frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
@@ -50,7 +54,7 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
- tasm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
+ tasm_(isolate, nullptr, 0, CodeObjectRequired::kNo),
resolver_(this),
safepoints_(zone()),
handlers_(zone()),
@@ -66,8 +70,8 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
osr_helper_(osr_helper),
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
- source_position_table_builder_(zone(),
- info->SourcePositionRecordingMode()),
+ source_position_table_builder_(info->SourcePositionRecordingMode()),
+ protected_instructions_(protected_instructions),
result_(kSuccess) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
@@ -75,9 +79,21 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
CreateFrameAccessState(frame);
CHECK_EQ(info->is_osr(), osr_helper_.has_value());
tasm_.set_jump_optimization_info(jump_opt);
+ Code::Kind code_kind = info_->code_kind();
+ if (code_kind == Code::JS_TO_WASM_FUNCTION ||
+ code_kind == Code::WASM_FUNCTION) {
+ tasm_.enable_serializer();
+ }
}
-Isolate* CodeGenerator::isolate() const { return info_->isolate(); }
+void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
+ uint32_t landing_offset) {
+ if (protected_instructions_ != nullptr) {
+ trap_handler::ProtectedInstructionData data = {instr_offset,
+ landing_offset};
+ protected_instructions_->emplace_back(data);
+ }
+}
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
FinishFrame(frame);
@@ -274,21 +290,11 @@ void CodeGenerator::AssembleCode() {
result_ = kSuccess;
}
-Handle<Code> CodeGenerator::FinalizeCode() {
- if (result_ != kSuccess) return Handle<Code>();
-
- Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- tasm(), unwinding_info_writer_.eh_frame_writer(), info(),
- Handle<Object>());
- result->set_is_turbofanned(true);
- result->set_stack_slots(frame()->GetTotalFrameSlotCount());
- result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
- Handle<ByteArray> source_positions =
- source_position_table_builder_.ToSourcePositionTable(
- isolate(), Handle<AbstractCode>::cast(result));
- result->set_source_position_table(*source_positions);
+Handle<ByteArray> CodeGenerator::GetSourcePositionTable() {
+ return source_position_table_builder_.ToSourcePositionTable(isolate());
+}
- // Emit exception handler table.
+MaybeHandle<HandlerTable> CodeGenerator::GetHandlerTable() const {
if (!handlers_.empty()) {
Handle<HandlerTable> table =
Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
@@ -298,10 +304,50 @@ Handle<Code> CodeGenerator::FinalizeCode() {
table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
}
- result->set_handler_table(*table);
+ return table;
+ }
+ return {};
+}
+
+Handle<Code> CodeGenerator::FinalizeCode() {
+ if (result_ != kSuccess) return Handle<Code>();
+
+ // Allocate exception handler table.
+ Handle<HandlerTable> table = HandlerTable::Empty(isolate());
+ if (!handlers_.empty()) {
+ table = Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
+ TENURED));
+ for (size_t i = 0; i < handlers_.size(); ++i) {
+ table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
+ table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
+ }
+ }
+
+ // Allocate the source position table.
+ Handle<ByteArray> source_positions =
+ source_position_table_builder_.ToSourcePositionTable(isolate());
+
+ // Allocate deoptimization data.
+ Handle<DeoptimizationData> deopt_data = GenerateDeoptimizationData();
+
+ // Allocate and install the code.
+ CodeDesc desc;
+ tasm()->GetCode(isolate(), &desc);
+ if (unwinding_info_writer_.eh_frame_writer()) {
+ unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
}
- PopulateDeoptimizationData(result);
+ Handle<Code> result = isolate()->factory()->NewCode(
+ desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
+ table, source_positions, deopt_data, kMovable, info()->stub_key(), true,
+ frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset());
+ isolate()->counters()->total_compiled_code_size()->Increment(
+ result->instruction_size());
+
+ LOG_CODE_EVENT(isolate(),
+ CodeLinePosInfoRecordEvent(result->instruction_start(),
+ *source_positions));
return result;
}
@@ -580,16 +626,16 @@ void CodeGenerator::AssembleGaps(Instruction* instr) {
namespace {
Handle<PodArray<InliningPosition>> CreateInliningPositions(
- CompilationInfo* info) {
+ CompilationInfo* info, Isolate* isolate) {
const CompilationInfo::InlinedFunctionList& inlined_functions =
info->inlined_functions();
if (inlined_functions.size() == 0) {
return Handle<PodArray<InliningPosition>>::cast(
- info->isolate()->factory()->empty_byte_array());
+ isolate->factory()->empty_byte_array());
}
Handle<PodArray<InliningPosition>> inl_positions =
PodArray<InliningPosition>::New(
- info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
+ isolate, static_cast<int>(inlined_functions.size()), TENURED);
for (size_t i = 0; i < inlined_functions.size(); ++i) {
inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
}
@@ -598,12 +644,14 @@ Handle<PodArray<InliningPosition>> CreateInliningPositions(
} // namespace
-void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
+Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
CompilationInfo* info = this->info();
int deopt_count = static_cast<int>(deoptimization_states_.size());
- if (deopt_count == 0 && !info->is_osr()) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), deopt_count, TENURED);
+ if (deopt_count == 0 && !info->is_osr()) {
+ return DeoptimizationData::Empty(isolate());
+ }
+ Handle<DeoptimizationData> data =
+ DeoptimizationData::New(isolate(), deopt_count, TENURED);
Handle<ByteArray> translation_array =
translations_.CreateByteArray(isolate()->factory());
@@ -627,7 +675,8 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
}
data->SetLiteralArray(*literals);
- Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info);
+ Handle<PodArray<InliningPosition>> inl_pos =
+ CreateInliningPositions(info, isolate());
data->SetInliningPositions(*inl_pos);
if (info->is_osr()) {
@@ -650,7 +699,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
}
- code_object->set_deoptimization_data(*data);
+ return data;
}
@@ -851,12 +900,6 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
bailout_id, shared_info_id, parameter_count);
break;
}
- case FrameStateType::kGetterStub:
- translation->BeginGetterStubFrame(shared_info_id);
- break;
- case FrameStateType::kSetterStub:
- translation->BeginSetterStubFrame(shared_info_id);
- break;
}
TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 94bcd5ef31..425ea2ebf2 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -14,13 +14,16 @@
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
#include "src/source-position-table.h"
-#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
class CompilationInfo;
+namespace trap_handler {
+struct ProtectedInstructionData;
+} // namespace trap_handler
+
namespace compiler {
// Forward declarations.
@@ -79,9 +82,11 @@ class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info,
- base::Optional<OsrHelper> osr_helper,
+ Isolate* isolate, base::Optional<OsrHelper> osr_helper,
int start_source_position,
- JumpOptimizationInfo* jump_opt);
+ JumpOptimizationInfo* jump_opt,
+ std::vector<trap_handler::ProtectedInstructionData>*
+ protected_instructions);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -89,14 +94,20 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleCode(); // Does not need to run on main thread.
Handle<Code> FinalizeCode();
+ Handle<ByteArray> GetSourcePositionTable();
+ MaybeHandle<HandlerTable> GetHandlerTable() const;
+
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
const Frame* frame() const { return frame_access_state_->frame(); }
- Isolate* isolate() const;
+ Isolate* isolate() const { return isolate_; }
Linkage* linkage() const { return linkage_; }
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
+ void AddProtectedInstructionLanding(uint32_t instr_offset,
+ uint32_t landing_offset);
+
SourcePosition start_source_position() const {
return start_source_position_;
}
@@ -109,9 +120,10 @@ class CodeGenerator final : public GapResolver::Assembler {
int arguments, Safepoint::DeoptMode deopt_mode);
Zone* zone() const { return zone_; }
+ TurboAssembler* tasm() { return &tasm_; }
+ size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
private:
- TurboAssembler* tasm() { return &tasm_; }
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
CompilationInfo* info() const { return info_; }
@@ -252,7 +264,7 @@ class CodeGenerator final : public GapResolver::Assembler {
// ===========================================================================
void RecordCallPosition(Instruction* instr);
- void PopulateDeoptimizationData(Handle<Code> code);
+ Handle<DeoptimizationData> GenerateDeoptimizationData();
int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
size_t frame_state_offset);
@@ -314,6 +326,7 @@ class CodeGenerator final : public GapResolver::Assembler {
friend class CodeGeneratorTester;
Zone* zone_;
+ Isolate* isolate_;
FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
@@ -353,6 +366,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
CodeGenResult result_;
};
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 1693e90ec2..f43ff7e515 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -297,11 +297,6 @@ RegionObservability RegionObservabilityOf(Operator const* op) {
return OpParameter<RegionObservability>(op);
}
-ZoneHandleSet<Map> MapGuardMapsOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kMapGuard, op->opcode());
- return OpParameter<ZoneHandleSet<Map>>(op);
-}
-
Type* TypeGuardTypeOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
return OpParameter<Type*>(op);
@@ -348,6 +343,8 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define COMMON_CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
+ V(DeadValue, Operator::kFoldable, 0, 0, 0, 1, 0, 0) \
+ V(Unreachable, Operator::kFoldable, 0, 1, 1, 0, 1, 0) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@@ -1130,14 +1127,6 @@ const Operator* CommonOperatorBuilder::Phi(MachineRepresentation rep,
rep); // parameter
}
-const Operator* CommonOperatorBuilder::MapGuard(ZoneHandleSet<Map> maps) {
- return new (zone()) Operator1<ZoneHandleSet<Map>>( // --
- IrOpcode::kMapGuard, Operator::kEliminatable, // opcode
- "MapGuard", // name
- 1, 1, 1, 0, 1, 0, // counts
- maps); // parameter
-}
-
const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
return new (zone()) Operator1<Type*>( // --
IrOpcode::kTypeGuard, Operator::kPure, // opcode
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 4f72267617..06541d9a38 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -304,8 +304,6 @@ RegionObservability RegionObservabilityOf(Operator const*) WARN_UNUSED_RESULT;
std::ostream& operator<<(std::ostream& os,
const ZoneVector<MachineType>* types);
-ZoneHandleSet<Map> MapGuardMapsOf(Operator const*) WARN_UNUSED_RESULT;
-
Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
int OsrValueIndexOf(Operator const*);
@@ -348,6 +346,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
explicit CommonOperatorBuilder(Zone* zone);
const Operator* Dead();
+ const Operator* DeadValue();
+ const Operator* Unreachable();
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
@@ -419,7 +419,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
const Operator* Retain();
- const Operator* MapGuard(ZoneHandleSet<Map> maps);
const Operator* TypeGuard(Type* type);
// Constructs a new merge or phi operator with the same opcode as {op}, but
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 10ec4eb042..d40bc37b6d 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -6,6 +6,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
@@ -14,14 +15,38 @@ namespace internal {
namespace compiler {
DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common)
+ CommonOperatorBuilder* common,
+ Zone* temp_zone)
: AdvancedReducer(editor),
graph_(graph),
common_(common),
- dead_(graph->NewNode(common->Dead())) {
+ dead_(graph->NewNode(common->Dead())),
+ dead_value_(graph->NewNode(common->DeadValue())),
+ zone_(temp_zone) {
NodeProperties::SetType(dead_, Type::None());
+ NodeProperties::SetType(dead_value_, Type::None());
}
+namespace {
+
+// True if we can guarantee that {node} will never actually produce a value or
+// effect.
+bool NoReturn(Node* node) {
+ return node->opcode() == IrOpcode::kDead ||
+ node->opcode() == IrOpcode::kUnreachable ||
+ node->opcode() == IrOpcode::kDeadValue ||
+ NodeProperties::GetTypeOrAny(node)->IsNone();
+}
+
+bool HasDeadInput(Node* node) {
+ for (Node* input : node->inputs()) {
+ if (NoReturn(input)) return true;
+ }
+ return false;
+}
+
+} // namespace
+
Reduction DeadCodeElimination::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kEnd:
@@ -31,12 +56,34 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
return ReduceLoopOrMerge(node);
case IrOpcode::kLoopExit:
return ReduceLoopExit(node);
+ case IrOpcode::kUnreachable:
+ case IrOpcode::kIfException:
+ return ReduceUnreachableOrIfException(node);
+ case IrOpcode::kPhi:
+ return ReducePhi(node);
+ case IrOpcode::kEffectPhi:
+ return PropagateDeadControl(node);
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kReturn:
+ case IrOpcode::kTerminate:
+ return ReduceDeoptimizeOrReturnOrTerminate(node);
+ case IrOpcode::kThrow:
+ return PropagateDeadControl(node);
+ case IrOpcode::kBranch:
+ case IrOpcode::kSwitch:
+ return ReduceBranchOrSwitch(node);
default:
return ReduceNode(node);
}
UNREACHABLE();
}
+Reduction DeadCodeElimination::PropagateDeadControl(Node* node) {
+ DCHECK_EQ(1, node->op()->ControlInputCount());
+ Node* control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kDead) return Replace(control);
+ return NoChange();
+}
Reduction DeadCodeElimination::ReduceEnd(Node* node) {
DCHECK_EQ(IrOpcode::kEnd, node->opcode());
@@ -140,13 +187,101 @@ Reduction DeadCodeElimination::RemoveLoopExit(Node* node) {
}
Reduction DeadCodeElimination::ReduceNode(Node* node) {
- // If {node} has exactly one control input and this is {Dead},
- // replace {node} with {Dead}.
+ DCHECK(!IrOpcode::IsGraphTerminator(node->opcode()));
+ int const effect_input_count = node->op()->EffectInputCount();
int const control_input_count = node->op()->ControlInputCount();
- if (control_input_count == 0) return NoChange();
- DCHECK_EQ(1, control_input_count);
- Node* control = NodeProperties::GetControlInput(node);
- if (control->opcode() == IrOpcode::kDead) return Replace(control);
+ DCHECK_LE(control_input_count, 1);
+ if (control_input_count == 1) {
+ Reduction reduction = PropagateDeadControl(node);
+ if (reduction.Changed()) return reduction;
+ }
+ if (effect_input_count == 0 &&
+ (control_input_count == 0 || node->op()->ControlOutputCount() == 0)) {
+ return ReducePureNode(node);
+ }
+ if (effect_input_count > 0) {
+ return ReduceEffectNode(node);
+ }
+ return NoChange();
+}
+
+Reduction DeadCodeElimination::ReducePhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+ Reduction reduction = PropagateDeadControl(node);
+ if (reduction.Changed()) return reduction;
+ if (PhiRepresentationOf(node->op()) == MachineRepresentation::kNone ||
+ NodeProperties::GetTypeOrAny(node)->IsNone()) {
+ return Replace(dead_value());
+ }
+ return NoChange();
+}
+
+Reduction DeadCodeElimination::ReducePureNode(Node* node) {
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ if (HasDeadInput(node)) {
+ return Replace(dead_value());
+ }
+ return NoChange();
+}
+
+Reduction DeadCodeElimination::ReduceUnreachableOrIfException(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kUnreachable ||
+ node->opcode() == IrOpcode::kIfException);
+ Reduction reduction = PropagateDeadControl(node);
+ if (reduction.Changed()) return reduction;
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ if (effect->opcode() == IrOpcode::kDead) {
+ return Replace(effect);
+ }
+ if (effect->opcode() == IrOpcode::kUnreachable) {
+ RelaxEffectsAndControls(node);
+ return Replace(dead_value());
+ }
+ return NoChange();
+}
+
+Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ if (effect->opcode() == IrOpcode::kDead) {
+ return Replace(effect);
+ }
+ if (HasDeadInput(node)) {
+ if (effect->opcode() == IrOpcode::kUnreachable) {
+ RelaxEffectsAndControls(node);
+ return Replace(dead_value());
+ }
+
+ Node* control = node->op()->ControlInputCount() == 1
+ ? NodeProperties::GetControlInput(node, 0)
+ : graph()->start();
+ Node* unreachable =
+ graph()->NewNode(common()->Unreachable(), effect, control);
+ ReplaceWithValue(node, dead_value(), node, control);
+ return Replace(unreachable);
+ }
+
+ return NoChange();
+}
+
+Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminate(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kDeoptimize ||
+ node->opcode() == IrOpcode::kReturn ||
+ node->opcode() == IrOpcode::kTerminate);
+ Reduction reduction = PropagateDeadControl(node);
+ if (reduction.Changed()) return reduction;
+ if (HasDeadInput(node)) {
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ if (effect->opcode() != IrOpcode::kUnreachable) {
+ effect = graph()->NewNode(common()->Unreachable(), effect, control);
+ }
+ node->TrimInputCount(2);
+ node->ReplaceInput(0, effect);
+ node->ReplaceInput(1, control);
+ NodeProperties::ChangeOp(node, common()->Throw());
+ return Changed(node);
+ }
return NoChange();
}
@@ -160,6 +295,27 @@ Reduction DeadCodeElimination::ReduceLoopExit(Node* node) {
return NoChange();
}
+Reduction DeadCodeElimination::ReduceBranchOrSwitch(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kBranch ||
+ node->opcode() == IrOpcode::kSwitch);
+ Reduction reduction = PropagateDeadControl(node);
+ if (reduction.Changed()) return reduction;
+ Node* condition = NodeProperties::GetValueInput(node, 0);
+ if (condition->opcode() == IrOpcode::kDeadValue) {
+ // Branches or switches on {DeadValue} must originate from unreachable code
+ // and cannot matter. Due to schedule freedom between the effect and the
+ // control chain, they might still appear in reachable code. Remove them by
+ // always choosing the first projection.
+ size_t const projection_cnt = node->op()->ControlOutputCount();
+ Node** projections = zone_->NewArray<Node*>(projection_cnt);
+ NodeProperties::CollectControlProjections(node, projections,
+ projection_cnt);
+ Replace(projections[0], NodeProperties::GetControlInput(node));
+ return Replace(dead());
+ }
+ return NoChange();
+}
+
void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
const Operator* const op = common()->ResizeMergeOrPhi(node->op(), size);
node->TrimInputCount(OperatorProperties::GetTotalInputCount(op));
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index ede2daac25..b1e403ca86 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -16,16 +16,20 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
-
-// Propagates {Dead} control through the graph and thereby removes dead code.
-// Note that this does not include trimming dead uses from the graph, and it
-// also does not include detecting dead code by any other means than seeing a
-// {Dead} control input; that is left to other reducers.
+// Propagates {Dead} control and {DeadValue} values through the graph and
+// thereby removes dead code. When {DeadValue} hits the effect chain, a crashing
+// {Unreachable} node is inserted and the rest of the effect chain is collapsed.
+// We wait for the {EffectControlLinearizer} to connect {Unreachable} nodes to
+// the graph end, since this is much easier if there is no floating control.
+// We detect dead values based on types, pruning uses of DeadValue except for
+// uses by phi. These remaining uses are eliminated in the
+// {EffectControlLinearizer}, where they are replaced with dummy values.
+// In contrast to {DeadValue}, {Dead} can never remain in the graph.
class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
DeadCodeElimination(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common);
+ CommonOperatorBuilder* common, Zone* temp_zone);
~DeadCodeElimination() final {}
const char* reducer_name() const override { return "DeadCodeElimination"; }
@@ -37,18 +41,28 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
Reduction ReduceLoopOrMerge(Node* node);
Reduction ReduceLoopExit(Node* node);
Reduction ReduceNode(Node* node);
+ Reduction ReducePhi(Node* node);
+ Reduction ReducePureNode(Node* node);
+ Reduction ReduceUnreachableOrIfException(Node* node);
+ Reduction ReduceEffectNode(Node* node);
+ Reduction ReduceDeoptimizeOrReturnOrTerminate(Node* node);
+ Reduction ReduceBranchOrSwitch(Node* node);
Reduction RemoveLoopExit(Node* node);
+ Reduction PropagateDeadControl(Node* node);
void TrimMergeOrPhi(Node* node, int size);
Graph* graph() const { return graph_; }
CommonOperatorBuilder* common() const { return common_; }
Node* dead() const { return dead_; }
+ Node* dead_value() const { return dead_value_; }
Graph* const graph_;
CommonOperatorBuilder* const common_;
Node* const dead_;
+ Node* const dead_value_;
+ Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
};
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d886fda97a..2372a0fe40 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -21,10 +21,12 @@ namespace compiler {
EffectControlLinearizer::EffectControlLinearizer(
JSGraph* js_graph, Schedule* schedule, Zone* temp_zone,
- SourcePositionTable* source_positions)
+ SourcePositionTable* source_positions,
+ MaskArrayIndexEnable mask_array_index)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
+ mask_array_index_(mask_array_index),
source_positions_(source_positions),
graph_assembler_(js_graph, nullptr, nullptr, temp_zone),
frame_state_zapper_(nullptr) {}
@@ -76,8 +78,19 @@ struct PendingEffectPhi {
: effect_phi(effect_phi), block(block) {}
};
+void ConnectUnreachableToEnd(Node* effect, Node* control, JSGraph* jsgraph) {
+ Graph* graph = jsgraph->graph();
+ CommonOperatorBuilder* common = jsgraph->common();
+ if (effect->opcode() == IrOpcode::kDead) return;
+ if (effect->opcode() != IrOpcode::kUnreachable) {
+ effect = graph->NewNode(common->Unreachable(), effect, control);
+ }
+ Node* throw_node = graph->NewNode(common->Throw(), effect, control);
+ NodeProperties::MergeControlToEnd(graph, common, throw_node);
+}
+
void UpdateEffectPhi(Node* node, BasicBlock* block,
- BlockEffectControlMap* block_effects) {
+ BlockEffectControlMap* block_effects, JSGraph* jsgraph) {
// Update all inputs to an effect phi with the effects from the given
// block->effect map.
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
@@ -88,8 +101,9 @@ void UpdateEffectPhi(Node* node, BasicBlock* block,
BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
const BlockEffectControlData& block_effect =
block_effects->For(predecessor, block);
- if (input != block_effect.current_effect) {
- node->ReplaceInput(i, block_effect.current_effect);
+ Node* effect = block_effect.current_effect;
+ if (input != effect) {
+ node->ReplaceInput(i, effect);
}
}
}
@@ -303,6 +317,29 @@ void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
cond->Kill();
merge->Kill();
}
+
+Node* DummyValue(JSGraph* jsgraph, MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedSigned:
+ return jsgraph->SmiConstant(0xdead);
+ case MachineRepresentation::kTaggedPointer:
+ return jsgraph->TheHoleConstant();
+ case MachineRepresentation::kWord64:
+ return jsgraph->Int64Constant(0xdead);
+ case MachineRepresentation::kWord32:
+ return jsgraph->Int32Constant(0xdead);
+ case MachineRepresentation::kFloat64:
+ return jsgraph->Float64Constant(0xdead);
+ case MachineRepresentation::kFloat32:
+ return jsgraph->Float32Constant(0xdead);
+ case MachineRepresentation::kBit:
+ return jsgraph->Int32Constant(0);
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace
void EffectControlLinearizer::Run() {
@@ -330,29 +367,32 @@ void EffectControlLinearizer::Run() {
instr++;
// Iterate over the phis and update the effect phis.
- Node* effect = nullptr;
+ Node* effect_phi = nullptr;
Node* terminate = nullptr;
+ int predecessor_count = static_cast<int>(block->PredecessorCount());
for (; instr < block->NodeCount(); instr++) {
Node* node = block->NodeAt(instr);
// Only go through the phis and effect phis.
if (node->opcode() == IrOpcode::kEffectPhi) {
// There should be at most one effect phi in a block.
- DCHECK_NULL(effect);
+ DCHECK_NULL(effect_phi);
// IfException blocks should not have effect phis.
DCHECK_NE(IrOpcode::kIfException, control->opcode());
- effect = node;
-
- // Make sure we update the inputs to the incoming blocks' effects.
- if (HasIncomingBackEdges(block)) {
- // In case of loops, we do not update the effect phi immediately
- // because the back predecessor has not been handled yet. We just
- // record the effect phi for later processing.
- pending_effect_phis.push_back(PendingEffectPhi(node, block));
- } else {
- UpdateEffectPhi(node, block, &block_effects);
- }
+ effect_phi = node;
} else if (node->opcode() == IrOpcode::kPhi) {
- // Just skip phis.
+ DCHECK_EQ(predecessor_count, node->op()->ValueInputCount());
+ for (int i = 0; i < predecessor_count; ++i) {
+ if (NodeProperties::GetValueInput(node, i)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // Phi uses of {DeadValue} must originate from unreachable code. Due
+ // to schedule freedom between the effect and the control chain,
+ // they might still appear in reachable code. So we replace them
+ // with a dummy value.
+ NodeProperties::ReplaceValueInput(
+ node, DummyValue(jsgraph(), PhiRepresentationOf(node->op())),
+ i);
+ }
+ }
} else if (node->opcode() == IrOpcode::kTerminate) {
DCHECK_NULL(terminate);
terminate = node;
@@ -361,9 +401,28 @@ void EffectControlLinearizer::Run() {
}
}
+ if (effect_phi) {
+ // Make sure we update the inputs to the incoming blocks' effects.
+ if (HasIncomingBackEdges(block)) {
+ // In case of loops, we do not update the effect phi immediately
+ // because the back predecessor has not been handled yet. We just
+ // record the effect phi for later processing.
+ pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block));
+ } else {
+ UpdateEffectPhi(effect_phi, block, &block_effects, jsgraph());
+ }
+ }
+
+ Node* effect = effect_phi;
if (effect == nullptr) {
// There was no effect phi.
- DCHECK(!HasIncomingBackEdges(block));
+
+ // Since a loop should have at least a StackCheck, only loops in
+ // unreachable code can have no effect phi.
+ DCHECK_IMPLIES(
+ HasIncomingBackEdges(block),
+ block_effects.For(block->PredecessorAt(0), block)
+ .current_effect->opcode() == IrOpcode::kUnreachable);
if (block == schedule()->start()) {
// Start block => effect is start.
DCHECK_EQ(graph()->start(), control);
@@ -376,11 +435,11 @@ void EffectControlLinearizer::Run() {
} else {
// If all the predecessors have the same effect, we can use it as our
// current effect.
- effect =
- block_effects.For(block->PredecessorAt(0), block).current_effect;
- for (size_t i = 1; i < block->PredecessorCount(); ++i) {
- if (block_effects.For(block->PredecessorAt(i), block)
- .current_effect != effect) {
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ const BlockEffectControlData& data =
+ block_effects.For(block->PredecessorAt(i), block);
+ if (!effect) effect = data.current_effect;
+ if (data.current_effect != effect) {
effect = nullptr;
break;
}
@@ -399,7 +458,7 @@ void EffectControlLinearizer::Run() {
if (control->opcode() == IrOpcode::kLoop) {
pending_effect_phis.push_back(PendingEffectPhi(effect, block));
} else {
- UpdateEffectPhi(effect, block, &block_effects);
+ UpdateEffectPhi(effect, block, &block_effects, jsgraph());
}
} else if (control->opcode() == IrOpcode::kIfException) {
// The IfException is connected into the effect chain, so we need
@@ -476,14 +535,14 @@ void EffectControlLinearizer::Run() {
}
}
+ for (BasicBlock* pending_block_control : pending_block_controls) {
+ UpdateBlockControl(pending_block_control, &block_effects);
+ }
// Update the incoming edges of the effect phis that could not be processed
// during the first pass (because they could have incoming back edges).
for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
- &block_effects);
- }
- for (BasicBlock* pending_block_control : pending_block_controls) {
- UpdateBlockControl(pending_block_control, &block_effects);
+ &block_effects, jsgraph());
}
}
@@ -569,6 +628,13 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
if (node->op()->ControlOutputCount() > 0) {
*control = node;
}
+
+ // Break the effect chain on {Unreachable} and reconnect to the graph end.
+ // Mark the following code for deletion by connecting to the {Dead} node.
+ if (node->opcode() == IrOpcode::kUnreachable) {
+ ConnectUnreachableToEnd(*effect, *control, jsgraph());
+ *effect = *control = jsgraph()->Dead();
+ }
}
bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
@@ -626,8 +692,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckBounds:
result = LowerCheckBounds(node, frame_state);
break;
+ case IrOpcode::kMaskIndexWithBound:
+ result = LowerMaskIndexWithBound(node);
+ break;
case IrOpcode::kCheckMaps:
- result = LowerCheckMaps(node, frame_state);
+ LowerCheckMaps(node, frame_state);
break;
case IrOpcode::kCompareMaps:
result = LowerCompareMaps(node);
@@ -651,7 +720,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerCheckInternalizedString(node, frame_state);
break;
case IrOpcode::kCheckIf:
- result = LowerCheckIf(node, frame_state);
+ LowerCheckIf(node, frame_state);
break;
case IrOpcode::kCheckedInt32Add:
result = LowerCheckedInt32Add(node, frame_state);
@@ -715,6 +784,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kObjectIsArrayBufferView:
result = LowerObjectIsArrayBufferView(node);
break;
+ case IrOpcode::kObjectIsBigInt:
+ result = LowerObjectIsBigInt(node);
+ break;
case IrOpcode::kObjectIsCallable:
result = LowerObjectIsCallable(node);
break;
@@ -757,6 +829,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kArgumentsLength:
result = LowerArgumentsLength(node);
break;
+ case IrOpcode::kToBoolean:
+ result = LowerToBoolean(node);
+ break;
+ case IrOpcode::kTypeOf:
+ result = LowerTypeOf(node);
+ break;
+ case IrOpcode::kClassOf:
+ result = LowerClassOf(node);
+ break;
case IrOpcode::kNewDoubleElements:
result = LowerNewDoubleElements(node);
break;
@@ -769,6 +850,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kArrayBufferWasNeutered:
result = LowerArrayBufferWasNeutered(node);
break;
+ case IrOpcode::kSameValue:
+ result = LowerSameValue(node);
+ break;
case IrOpcode::kStringFromCharCode:
result = LowerStringFromCharCode(node);
break;
@@ -778,6 +862,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringToNumber:
+ result = LowerStringToNumber(node);
+ break;
case IrOpcode::kStringCharAt:
result = LowerStringCharAt(node);
break;
@@ -811,6 +898,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kConvertTaggedHoleToUndefined:
result = LowerConvertTaggedHoleToUndefined(node);
break;
+ case IrOpcode::kCheckEqualsInternalizedString:
+ LowerCheckEqualsInternalizedString(node, frame_state);
+ break;
+ case IrOpcode::kAllocate:
+ result = LowerAllocate(node);
+ break;
+ case IrOpcode::kCheckEqualsSymbol:
+ LowerCheckEqualsSymbol(node, frame_state);
+ break;
case IrOpcode::kPlainPrimitiveToNumber:
result = LowerPlainPrimitiveToNumber(node);
break;
@@ -847,12 +943,21 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kFindOrderedHashMapEntryForInt32Key:
result = LowerFindOrderedHashMapEntryForInt32Key(node);
break;
+ case IrOpcode::kTransitionAndStoreNumberElement:
+ LowerTransitionAndStoreNumberElement(node);
+ break;
+ case IrOpcode::kTransitionAndStoreNonNumberElement:
+ LowerTransitionAndStoreNonNumberElement(node);
+ break;
case IrOpcode::kTransitionAndStoreElement:
LowerTransitionAndStoreElement(node);
break;
case IrOpcode::kRuntimeAbort:
LowerRuntimeAbort(node);
break;
+ case IrOpcode::kConvertReceiver:
+ result = LowerConvertReceiver(node);
+ break;
case IrOpcode::kFloat64RoundUp:
if (!LowerFloat64RoundUp(node).To(&result)) {
return false;
@@ -876,6 +981,14 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
default:
return false;
}
+
+ if ((result ? 1 : 0) != node->op()->ValueOutputCount()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Effect control linearizer lowering of '%s':"
+ " value output count does not agree.",
+ node->op()->mnemonic());
+ }
+
*effect = gasm()->ExtractCurrentEffect();
*control = gasm()->ExtractCurrentControl();
NodeProperties::ReplaceUses(node, result, *effect, *control);
@@ -1018,44 +1131,40 @@ Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
return __ WordEqual(value, __ TrueConstant());
}
-Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
+void EffectControlLinearizer::TruncateTaggedPointerToBit(
+ Node* node, GraphAssemblerLabel<1>* done) {
Node* value = node->InputAt(0);
- auto if_smi = __ MakeDeferredLabel();
auto if_heapnumber = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineRepresentation::kBit);
Node* zero = __ Int32Constant(0);
Node* fzero = __ Float64Constant(0.0);
// Check if {value} is false.
- __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
-
- // Check if {value} is a Smi.
- Node* check_smi = ObjectIsSmi(value);
- __ GotoIf(check_smi, &if_smi);
+ __ GotoIf(__ WordEqual(value, __ FalseConstant()), done, zero);
// Check if {value} is the empty string.
- __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
+ __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), done, zero);
// Load the map of {value}.
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
// Check if the {value} is undetectable and immediately return false.
+ // This includes undefined and null.
Node* value_map_bitfield =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
__ GotoIfNot(
__ Word32Equal(__ Word32And(value_map_bitfield,
__ Int32Constant(1 << Map::kIsUndetectable)),
zero),
- &done, zero);
+ done, zero);
// Check if {value} is a HeapNumber.
__ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
&if_heapnumber);
// All other values that reach here are true.
- __ Goto(&done, __ Int32Constant(1));
+ __ Goto(done, __ Int32Constant(1));
__ Bind(&if_heapnumber);
{
@@ -1063,14 +1172,24 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
// NaN.
Node* value_value =
__ LoadField(AccessBuilder::ForHeapNumberValue(), value);
- __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
+ __ Goto(done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
}
+}
+
+Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+ auto if_smi = __ MakeDeferredLabel();
+
+ Node* value = node->InputAt(0);
+ __ GotoIf(ObjectIsSmi(value), &if_smi);
+
+ TruncateTaggedPointerToBit(node, &done);
__ Bind(&if_smi);
{
// If {value} is a Smi, then we only need to check that it's not zero.
- __ Goto(&done,
- __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
+ __ Goto(&done, __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)),
+ __ Int32Constant(0)));
}
__ Bind(&done);
@@ -1078,47 +1197,9 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
}
Node* EffectControlLinearizer::LowerTruncateTaggedPointerToBit(Node* node) {
- Node* value = node->InputAt(0);
-
- auto if_heapnumber = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kBit);
- Node* zero = __ Int32Constant(0);
- Node* fzero = __ Float64Constant(0.0);
-
- // Check if {value} is false.
- __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
-
- // Check if {value} is the empty string.
- __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
-
- // Load the map of {value}.
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-
- // Check if the {value} is undetectable and immediately return false.
- Node* value_map_bitfield =
- __ LoadField(AccessBuilder::ForMapBitField(), value_map);
- __ GotoIfNot(
- __ Word32Equal(__ Word32And(value_map_bitfield,
- __ Int32Constant(1 << Map::kIsUndetectable)),
- zero),
- &done, zero);
-
- // Check if {value} is a HeapNumber.
- __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
- &if_heapnumber);
-
- // All other values that reach here are true.
- __ Goto(&done, __ Int32Constant(1));
-
- __ Bind(&if_heapnumber);
- {
- // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
- // NaN.
- Node* value_value =
- __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
- __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
- }
+ TruncateTaggedPointerToBit(node, &done);
__ Bind(&done);
return done.PhiAt(0);
@@ -1219,7 +1300,20 @@ Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
return index;
}
-Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
+Node* EffectControlLinearizer::LowerMaskIndexWithBound(Node* node) {
+ Node* index = node->InputAt(0);
+ if (mask_array_index_ == kMaskArrayIndex) {
+ Node* limit = node->InputAt(1);
+
+ Node* mask = __ Word32Sar(__ Word32Or(__ Int32Sub(limit, index), index),
+ __ Int32Constant(31));
+ mask = __ Word32Xor(mask, __ Int32Constant(-1));
+ index = __ Word32And(index, mask);
+ }
+ return index;
+}
+
+void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -1304,11 +1398,10 @@ Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
__ Goto(&done);
__ Bind(&done);
}
- return value;
}
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
- ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
+ ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op()).maps();
size_t const map_count = maps.size();
Node* value = node->InputAt(0);
@@ -1396,16 +1489,12 @@ Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* is_string = __ Uint32LessThan(value_instance_type,
- __ Uint32Constant(FIRST_NONSTRING_TYPE));
- Node* is_sequential =
- __ Word32Equal(__ Word32And(value_instance_type,
- __ Int32Constant(kStringRepresentationMask)),
- __ Int32Constant(kSeqStringTag));
- Node* is_sequential_string = __ Word32And(is_string, is_sequential);
-
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, is_sequential_string,
- frame_state);
+ Node* check = __ Word32Equal(
+ __ Word32And(
+ value_instance_type,
+ __ Int32Constant(kStringRepresentationMask | kIsNotStringMask)),
+ __ Int32Constant(kSeqStringTag | kStringTag));
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
return value;
}
@@ -1426,11 +1515,10 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
return value;
}
-Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
+void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- __ DeoptimizeIfNot(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason, value,
- frame_state);
- return value;
+ __ DeoptimizeIfNot(DeoptimizeKind::kEager, DeoptimizeReasonOf(node->op()),
+ value, frame_state);
}
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
@@ -1885,6 +1973,13 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerAllocate(Node* node) {
+ Node* size = node->InputAt(0);
+ PretenureFlag pretenure = PretenureFlagOf(node->op());
+ Node* new_node = __ Allocate(pretenure, size);
+ return new_node;
+}
+
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
Node* value = node->InputAt(0);
@@ -1910,6 +2005,28 @@ Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_smi = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* vfalse =
+ __ Word32Equal(value_instance_type, __ Uint32Constant(BIGINT_TYPE));
+ __ Goto(&done, vfalse);
+
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value = node->InputAt(0);
@@ -2179,6 +2296,41 @@ Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
+ Node* obj = node->InputAt(0);
+ Callable const callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
+ Operator::Properties const properties = Operator::kEliminatable;
+ CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerClassOf(Node* node) {
+ Node* obj = node->InputAt(0);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kClassOf);
+ Operator::Properties const properties = Operator::kEliminatable;
+ CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
+ Node* obj = node->InputAt(0);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ Operator::Properties const properties = Operator::kEliminatable;
+ CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
int formal_parameter_count = FormalParameterCountOf(node->op());
@@ -2372,6 +2524,33 @@ Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
__ Int32Constant(0));
}
+Node* EffectControlLinearizer::LowerSameValue(Node* node) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kSameValue);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
+ Node* string = node->InputAt(0);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringToNumber);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), string,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
@@ -2723,6 +2902,87 @@ Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
return done.PhiAt(0);
}
+void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
+ Node* node, Node* frame_state) {
+ Node* exp = node->InputAt(0);
+ Node* val = node->InputAt(1);
+
+ auto if_same = __ MakeLabel();
+ auto if_notsame = __ MakeDeferredLabel();
+ auto if_thinstring = __ MakeLabel();
+ auto if_notthinstring = __ MakeLabel();
+
+ // Check if {exp} and {val} are the same, which is the likely case.
+ __ Branch(__ WordEqual(exp, val), &if_same, &if_notsame);
+
+ __ Bind(&if_notsame);
+ {
+ // Now {val} could still be a non-internalized String that matches {exp}.
+ __ DeoptimizeIf(DeoptimizeReason::kWrongName, ObjectIsSmi(val),
+ frame_state);
+ Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
+ Node* val_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), val_map);
+
+ // Check for the common case of ThinString first.
+ __ GotoIf(__ Word32Equal(val_instance_type,
+ __ Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
+ &if_thinstring);
+ __ Branch(
+ __ Word32Equal(val_instance_type, __ Int32Constant(THIN_STRING_TYPE)),
+ &if_thinstring, &if_notthinstring);
+
+ __ Bind(&if_notthinstring);
+ {
+ // Check that the {val} is a non-internalized String, if it's anything
+ // else it cannot match the recorded feedback {exp} anyways.
+ __ DeoptimizeIfNot(
+ DeoptimizeReason::kWrongName,
+ __ Word32Equal(__ Word32And(val_instance_type,
+ __ Int32Constant(kIsNotStringMask |
+ kIsNotInternalizedMask)),
+ __ Int32Constant(kStringTag | kNotInternalizedTag)),
+ frame_state);
+
+ // Try to find the {val} in the string table.
+ MachineSignature::Builder builder(graph()->zone(), 1, 1);
+ builder.AddReturn(MachineType::AnyTagged());
+ builder.AddParam(MachineType::AnyTagged());
+ Node* try_internalize_string_function = __ ExternalConstant(
+ ExternalReference::try_internalize_string_function(isolate()));
+ CallDescriptor const* const desc =
+ Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
+ Node* val_internalized =
+ __ Call(common()->Call(desc), try_internalize_string_function, val);
+
+ // Now see if the results match.
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ WordEqual(exp, val_internalized), frame_state);
+ __ Goto(&if_same);
+ }
+
+ __ Bind(&if_thinstring);
+ {
+ // The {val} is a ThinString, let's check the actual value.
+ Node* val_actual =
+ __ LoadField(AccessBuilder::ForThinStringActual(), val);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ WordEqual(exp, val_actual), frame_state);
+ __ Goto(&if_same);
+ }
+ }
+
+ __ Bind(&if_same);
+}
+
+void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
+ Node* frame_state) {
+ Node* exp = node->InputAt(0);
+ Node* val = node->InputAt(1);
+ Node* check = __ WordEqual(exp, val);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, check, frame_state);
+}
+
Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(HeapNumber::kSize));
__ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
@@ -3275,22 +3535,152 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
__ Bind(&done);
}
-void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
+void EffectControlLinearizer::LowerTransitionAndStoreNumberElement(Node* node) {
+ Node* array = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2); // This is a Float64, not tagged.
+
+ // Possibly transition array based on input and store.
+ //
+ // -- TRANSITION PHASE -----------------
+ // kind = ElementsKind(array)
+ // if kind == HOLEY_SMI_ELEMENTS {
+ // Transition array to HOLEY_DOUBLE_ELEMENTS
+ // } else if kind != HOLEY_DOUBLE_ELEMENTS {
+ // This is UNREACHABLE, execute a debug break.
+ // }
+ //
+ // -- STORE PHASE ----------------------
+ // Store array[index] = value (it's a float)
+ //
+ Node* map = __ LoadField(AccessBuilder::ForMap(), array);
+ Node* kind;
+ {
+ Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
+ Node* mask = __ Int32Constant(Map::ElementsKindBits::kMask);
+ Node* andit = __ Word32And(bit_field2, mask);
+ Node* shift = __ Int32Constant(Map::ElementsKindBits::kShift);
+ kind = __ Word32Shr(andit, shift);
+ }
+
+ auto do_store = __ MakeLabel();
+
+ // {value} is a float64.
+ auto transition_smi_array = __ MakeDeferredLabel();
+ {
+ __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
+ &transition_smi_array);
+ // We expect that our input array started at HOLEY_SMI_ELEMENTS, and
+ // climbs the lattice up to HOLEY_DOUBLE_ELEMENTS. Force a debug break
+ // if this assumption is broken. It also would be the case that
+ // loop peeling can break this assumption.
+ __ GotoIf(__ Word32Equal(kind, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
+ &do_store);
+ // TODO(turbofan): It would be good to have an "Unreachable()" node type.
+ __ DebugBreak();
+ __ Goto(&do_store);
+ }
+
+ __ Bind(&transition_smi_array); // deferred code.
+ {
+ // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS.
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS);
+ __ Goto(&do_store);
+ }
+
+ __ Bind(&do_store);
+
+ Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
+ __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements, index,
+ value);
+}
+
+void EffectControlLinearizer::LowerTransitionAndStoreNonNumberElement(
+ Node* node) {
Node* array = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+ // Possibly transition array based on input and store.
+ //
+ // -- TRANSITION PHASE -----------------
+ // kind = ElementsKind(array)
+ // if kind == HOLEY_SMI_ELEMENTS {
+ // Transition array to HOLEY_ELEMENTS
+ // } else if kind == HOLEY_DOUBLE_ELEMENTS {
+ // Transition array to HOLEY_ELEMENTS
+ // }
+ //
+ // -- STORE PHASE ----------------------
+ // // kind is HOLEY_ELEMENTS
+ // Store array[index] = value
+ //
+ Node* map = __ LoadField(AccessBuilder::ForMap(), array);
+ Node* kind;
+ {
+ Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
+ Node* mask = __ Int32Constant(Map::ElementsKindBits::kMask);
+ Node* andit = __ Word32And(bit_field2, mask);
+ Node* shift = __ Int32Constant(Map::ElementsKindBits::kShift);
+ kind = __ Word32Shr(andit, shift);
+ }
+
+ auto do_store = __ MakeLabel();
+
+ auto transition_smi_array = __ MakeDeferredLabel();
+ auto transition_double_to_fast = __ MakeDeferredLabel();
+ {
+ __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
+ &transition_smi_array);
+ __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
+ &transition_double_to_fast);
+ __ Goto(&do_store);
+ }
+
+ __ Bind(&transition_smi_array); // deferred code.
+ {
+ // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_ELEMENTS.
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store);
+ }
+
+ __ Bind(&transition_double_to_fast); // deferred code.
+ {
+ TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store);
+ }
+
+ __ Bind(&do_store);
+
+ Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
+ // Our ElementsKind is HOLEY_ELEMENTS.
+ ElementAccess access = AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS);
+ Type* value_type = ValueTypeParameterOf(node->op());
+ if (value_type->Is(Type::BooleanOrNullOrUndefined())) {
+ access.type = value_type;
+ access.write_barrier_kind = kNoWriteBarrier;
+ }
+ __ StoreElement(access, elements, index, value);
+}
+
+void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
+ Node* array = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2); // int32
+
// Store a signed small in an output array.
//
// kind = ElementsKind(array)
//
// -- STORE PHASE ----------------------
// if kind == HOLEY_DOUBLE_ELEMENTS {
- // float_value = convert smi to float
+ // float_value = convert int32 to float
// Store array[index] = float_value
// } else {
// // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
- // Store array[index] = value
+ // smi_value = convert int32 to smi
+ // Store array[index] = smi_value
// }
//
Node* map = __ LoadField(AccessBuilder::ForMap(), array);
@@ -3316,14 +3706,14 @@ void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
access.type = Type::SignedSmall();
access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
- __ StoreElement(access, elements, index, value);
+ Node* smi_value = ChangeInt32ToSmi(value);
+ __ StoreElement(access, elements, index, smi_value);
__ Goto(&done);
}
__ Bind(&if_kind_is_double);
{
// Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
- Node* int_value = ChangeSmiToInt32(value);
- Node* float_value = __ ChangeInt32ToFloat64(int_value);
+ Node* float_value = __ ChangeInt32ToFloat64(value);
__ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
index, float_value);
__ Goto(&done);
@@ -3343,6 +3733,93 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
__ Int32Constant(1), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
+ ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
+ Node* value = node->InputAt(0);
+ Node* global_proxy = node->InputAt(1);
+
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined: {
+ return global_proxy;
+ }
+ case ConvertReceiverMode::kNotNullOrUndefined: {
+ auto convert_to_object = __ MakeDeferredLabel();
+ auto done_convert = __ MakeLabel(MachineRepresentation::kTagged);
+
+ // Check if {value} is already a JSReceiver.
+ __ GotoIf(ObjectIsSmi(value), &convert_to_object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* check = __ Uint32LessThan(
+ value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
+ __ GotoIf(check, &convert_to_object);
+ __ Goto(&done_convert, value);
+
+ // Wrap the primitive {value} into a JSValue.
+ __ Bind(&convert_to_object);
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ properties);
+ Node* native_context = __ LoadField(
+ AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
+ Node* result = __ Call(desc, __ HeapConstant(callable.code()), value,
+ native_context);
+ __ Goto(&done_convert, result);
+
+ __ Bind(&done_convert);
+ return done_convert.PhiAt(0);
+ }
+ case ConvertReceiverMode::kAny: {
+ auto convert_to_object = __ MakeDeferredLabel();
+ auto convert_global_proxy = __ MakeDeferredLabel();
+ auto done_convert = __ MakeLabel(MachineRepresentation::kTagged);
+
+ // Check if {value} is already a JSReceiver, or null/undefined.
+ __ GotoIf(ObjectIsSmi(value), &convert_to_object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* check = __ Uint32LessThan(
+ value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
+ __ GotoIf(check, &convert_to_object);
+ __ Goto(&done_convert, value);
+
+ // Wrap the primitive {value} into a JSValue.
+ __ Bind(&convert_to_object);
+ __ GotoIf(__ WordEqual(value, __ UndefinedConstant()),
+ &convert_global_proxy);
+ __ GotoIf(__ WordEqual(value, __ NullConstant()), &convert_global_proxy);
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ properties);
+ Node* native_context = __ LoadField(
+ AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
+ Node* result = __ Call(desc, __ HeapConstant(callable.code()), value,
+ native_context);
+ __ Goto(&done_convert, result);
+
+ // Replace the {value} with the {global_proxy}.
+ __ Bind(&convert_global_proxy);
+ __ Goto(&done_convert, global_proxy);
+
+ __ Bind(&done_convert);
+ return done_convert.PhiAt(0);
+ }
+ }
+
+ UNREACHABLE();
+ return nullptr;
+}
+
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundUp().IsSupported()) {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index e17f097e9e..7cf6910386 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -30,8 +30,11 @@ class SourcePositionTable;
class V8_EXPORT_PRIVATE EffectControlLinearizer {
public:
+ enum MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
+
EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
- SourcePositionTable* source_positions);
+ SourcePositionTable* source_positions,
+ MaskArrayIndexEnable mask_array_index);
void Run();
@@ -53,15 +56,16 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerCheckBounds(Node* node, Node* frame_state);
+ Node* LowerMaskIndexWithBound(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
- Node* LowerCheckMaps(Node* node, Node* frame_state);
+ void LowerCheckMaps(Node* node, Node* frame_state);
Node* LowerCompareMaps(Node* node);
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
Node* LowerCheckSeqString(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
- Node* LowerCheckIf(Node* node, Node* frame_state);
+ void LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
@@ -79,12 +83,15 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
Node* LowerChangeTaggedToFloat64(Node* node);
+ void TruncateTaggedPointerToBit(Node* node, GraphAssemblerLabel<1>* done);
Node* LowerTruncateTaggedToBit(Node* node);
Node* LowerTruncateTaggedPointerToBit(Node* node);
Node* LowerTruncateTaggedToFloat64(Node* node);
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+ Node* LowerAllocate(Node* node);
Node* LowerObjectIsArrayBufferView(Node* node);
+ Node* LowerObjectIsBigInt(Node* node);
Node* LowerObjectIsCallable(Node* node);
Node* LowerObjectIsConstructor(Node* node);
Node* LowerObjectIsDetectableCallable(Node* node);
@@ -103,6 +110,8 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerNewSmiOrObjectElements(Node* node);
Node* LowerNewArgumentsElements(Node* node);
Node* LowerArrayBufferWasNeutered(Node* node);
+ Node* LowerSameValue(Node* node);
+ Node* LowerStringToNumber(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerSeqStringCharCodeAt(Node* node);
@@ -117,6 +126,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
Node* LowerConvertTaggedHoleToUndefined(Node* node);
+ void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
+ void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
+ Node* LowerTypeOf(Node* node);
+ Node* LowerClassOf(Node* node);
+ Node* LowerToBoolean(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
Node* LowerPlainPrimitiveToFloat64(Node* node);
@@ -130,7 +144,10 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerFindOrderedHashMapEntry(Node* node);
Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
void LowerTransitionAndStoreElement(Node* node);
+ void LowerTransitionAndStoreNumberElement(Node* node);
+ void LowerTransitionAndStoreNonNumberElement(Node* node);
void LowerRuntimeAbort(Node* node);
+ Node* LowerConvertReceiver(Node* node);
// Lowering of optional operators.
Maybe<Node*> LowerFloat64RoundUp(Node* node);
@@ -177,6 +194,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
+ MaskArrayIndexEnable mask_array_index_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
GraphAssembler graph_assembler_;
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index ab2b06a952..b3b1abb6df 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -503,7 +503,7 @@ Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
DCHECK(op->opcode() == IrOpcode::kLoadElement ||
op->opcode() == IrOpcode::kStoreElement);
Type* index_type = NodeProperties::GetType(index_node);
- if (!index_type->Is(Type::Number())) return Nothing<int>();
+ if (!index_type->Is(Type::OrderedNumber())) return Nothing<int>();
double max = index_type->Max();
double min = index_type->Min();
int index = static_cast<int>(min);
@@ -651,8 +651,8 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
// types (which might confuse representation selection). We get
// around this by refusing to constant-fold and escape-analyze
// if the type is not inhabited.
- if (NodeProperties::GetType(left)->IsInhabited() &&
- NodeProperties::GetType(right)->IsInhabited()) {
+ if (!NodeProperties::GetType(left)->IsNone() &&
+ !NodeProperties::GetType(right)->IsNone()) {
current->SetReplacement(replacement);
} else {
current->SetEscaped(left);
@@ -673,8 +673,8 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
if (map) {
Type* const map_type = NodeProperties::GetType(map);
if (map_type->IsHeapConstant() &&
- params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
- map_type->AsHeapConstant()->Value())))) {
+ params.maps().contains(
+ bit_cast<Handle<Map>>(map_type->AsHeapConstant()->Value()))) {
current->MarkForDeletion();
break;
}
@@ -697,7 +697,7 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
current->Get(map_field).To(&object_map)) {
if (object_map) {
current->SetReplacement(LowerCompareMapsWithoutLoad(
- object_map, CompareMapsParametersOf(op), jsgraph));
+ object_map, CompareMapsParametersOf(op).maps(), jsgraph));
break;
} else {
// If the variable has no value, we have not reached the fixed-point
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 5629acc9ca..0a0e3ec868 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -63,12 +63,6 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kJavaScriptBuiltinContinuation:
os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
break;
- case FrameStateType::kGetterStub:
- os << "GETTER_STUB";
- break;
- case FrameStateType::kSetterStub:
- os << "SETTER_STUB";
- break;
}
return os;
}
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 4e25fa026b..ac00f8c129 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -63,8 +63,6 @@ enum class FrameStateType {
kInterpretedFunction, // Represents an InterpretedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
kConstructStub, // Represents a ConstructStubFrame.
- kGetterStub, // Represents a GetterStubFrame.
- kSetterStub, // Represents a SetterStubFrame.
kBuiltinContinuation, // Represents a continuation to a stub.
kJavaScriptBuiltinContinuation // Represents a continuation to a JavaScipt
// builtin.
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index b99f4da060..50001976a9 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -97,8 +97,8 @@ Node* GraphAssembler::Projection(int index, Node* value) {
}
Node* GraphAssembler::Allocate(PretenureFlag pretenure, Node* size) {
- return current_effect_ =
- graph()->NewNode(simplified()->Allocate(Type::Any(), NOT_TENURED),
+ return current_control_ = current_effect_ =
+ graph()->NewNode(simplified()->AllocateRaw(Type::Any(), pretenure),
size, current_effect_, current_control_);
}
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index dac2bc52bd..3d3c2ed103 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -40,6 +40,7 @@ namespace compiler {
V(Word32Xor) \
V(Word32Shr) \
V(Word32Shl) \
+ V(Word32Sar) \
V(IntAdd) \
V(IntSub) \
V(IntMul) \
@@ -74,6 +75,7 @@ namespace compiler {
#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
V(TrueConstant) \
V(FalseConstant) \
+ V(NullConstant) \
V(HeapNumberMapConstant) \
V(NoContextConstant) \
V(EmptyStringConstant) \
@@ -284,6 +286,9 @@ void GraphAssembler::MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label,
current_control_);
label->effect_ = graph()->NewNode(common()->EffectPhi(2), current_effect_,
current_effect_, label->control_);
+ Node* terminate = graph()->NewNode(common()->Terminate(), label->effect_,
+ label->control_);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
for (size_t i = 0; i < sizeof...(vars); i++) {
label->bindings_[i] = graph()->NewNode(
common()->Phi(label->representations_[i], 2), var_array[i + 1],
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index dcb8184e55..8e9505bae1 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -226,7 +226,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
- __ SlowTruncateToIDelayed(zone_, result_, esp, 0);
+ __ SlowTruncateToIDelayed(zone_, result_);
__ add(esp, Immediate(kDoubleSize));
}
@@ -282,14 +282,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
}
private:
@@ -850,7 +844,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, scratch_count);
+ scratch3, scratch_count);
__ pop(scratch3);
__ pop(scratch2);
__ pop(scratch1);
@@ -925,7 +919,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -937,8 +931,9 @@ void CodeGenerator::BailoutIfDeoptimized() {
// to use a call and then use a pop, thus pushing the return address to
// the stack and then popping it into the register.
__ pop(ecx);
- int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
- __ test(Operand(ecx, offset),
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
+ __ mov(ecx, Operand(ecx, offset));
+ __ test(FieldOperand(ecx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -965,6 +960,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ if (HasImmediateInput(instr, 0)) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ if (info()->IsWasm()) {
+ __ wasm_call(wasm_code, RelocInfo::WASM_CALL);
+ } else {
+ __ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
+ } else {
+ Register reg = i.InputRegister(0);
+ __ call(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
@@ -983,6 +995,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ if (HasImmediateInput(instr, 0)) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ if (info()->IsWasm()) {
+ __ jmp(wasm_code, RelocInfo::WASM_CALL);
+ } else {
+ __ jmp(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
+ } else {
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
@@ -1987,6 +2016,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
@@ -2000,8 +2030,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
- Register ireg = Register::from_code(dst.code());
- if (src.is_reg(ireg)) {
+ if (src.is_reg(dst)) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psignd(dst, kScratchDoubleReg);
} else {
@@ -2011,6 +2040,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4Shl: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pslld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
@@ -2021,6 +2051,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4ShrS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrad(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
@@ -2031,6 +2062,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
@@ -2041,6 +2073,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
@@ -2051,6 +2084,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmulld(i.OutputSimd128Register(), i.InputOperand(1));
break;
@@ -2062,6 +2096,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4MinS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsd(i.OutputSimd128Register(), i.InputOperand(1));
break;
@@ -2073,6 +2108,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4MaxS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsd(i.OutputSimd128Register(), i.InputOperand(1));
break;
@@ -2084,6 +2120,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
@@ -2094,6 +2131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
@@ -2109,6 +2147,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4GtS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpgtd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
@@ -2119,6 +2158,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4GeS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
@@ -2135,6 +2175,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4ShrU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
@@ -2145,6 +2186,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4MinU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminud(i.OutputSimd128Register(), i.InputOperand(1));
break;
@@ -2156,6 +2198,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4MaxU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxud(i.OutputSimd128Register(), i.InputOperand(1));
break;
@@ -2167,6 +2210,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4GtU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
@@ -2188,6 +2232,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI32x4GeU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
@@ -2217,6 +2262,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI16x8ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
@@ -2226,6 +2272,267 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(2), i.InputInt8(1));
break;
}
+ case kIA32I16x8Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignw(dst, kScratchDoubleReg);
+ } else {
+ __ Pxor(dst, dst);
+ __ Psubw(dst, src);
+ }
+ break;
+ }
+ case kSSEI16x8Shl: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psllw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kAVXI16x8Shl: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kSSEI16x8ShrS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psraw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kAVXI16x8ShrS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kSSEI16x8Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8Add: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8AddSaturateS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8AddSaturateS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8Sub: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8SubSaturateS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8SubSaturateS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pmullw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8Mul: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8MinS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pminsw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8MinS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8MaxS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pmaxsw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8MaxS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8Eq: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kAVXI16x8Ne: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kSSEI16x8GtS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pcmpgtw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8GtS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8GeS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(1);
+ __ pminsw(dst, src);
+ __ pcmpeqw(dst, src);
+ break;
+ }
+ case kAVXI16x8GeS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = i.InputOperand(1);
+ __ vpminsw(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqw(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ break;
+ }
+ case kSSEI16x8ShrU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kAVXI16x8ShrU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kSSEI16x8AddSaturateU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8AddSaturateU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8SubSaturateU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8SubSaturateU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8MinU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pminuw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8MinU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8MaxU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pmaxuw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8MaxU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI16x8GtU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(1);
+ __ pmaxuw(dst, src);
+ __ pcmpeqw(dst, src);
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kAVXI16x8GtU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = i.InputOperand(1);
+ __ vpmaxuw(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqw(dst, kScratchDoubleReg, src2);
+ __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kSSEI16x8GeU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(1);
+ __ pminuw(dst, src);
+ __ pcmpeqw(dst, src);
+ break;
+ }
+ case kAVXI16x8GeU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = i.InputOperand(1);
+ __ vpminuw(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqw(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ break;
+ }
case kIA32I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -2240,6 +2547,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI8x16ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
@@ -2250,6 +2558,222 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(2), i.InputInt8(1));
break;
}
+ case kIA32I8x16Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignb(dst, kScratchDoubleReg);
+ } else {
+ __ Pxor(dst, dst);
+ __ Psubb(dst, src);
+ }
+ break;
+ }
+ case kSSEI8x16Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16Add: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpaddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16AddSaturateS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddsb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16AddSaturateS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpaddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16Sub: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16SubSaturateS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubsb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16SubSaturateS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16MinS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pminsb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16MinS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16MaxS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pmaxsb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16MaxS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16Eq: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
+ __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kAVXI8x16Ne: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ __ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kSSEI8x16GtS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pcmpgtb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16GtS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16GeS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(1);
+ __ pminsb(dst, src);
+ __ pcmpeqb(dst, src);
+ break;
+ }
+ case kAVXI8x16GeS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = i.InputOperand(1);
+ __ vpminsb(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ break;
+ }
+ case kSSEI8x16AddSaturateU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddusb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16AddSaturateU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpaddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16SubSaturateU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubusb(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16SubSaturateU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16MinU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pminub(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16MinU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16MaxU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pmaxub(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI8x16MaxU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEI8x16GtU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(1);
+ __ pmaxub(dst, src);
+ __ pcmpeqb(dst, src);
+ __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kAVXI8x16GtU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = i.InputOperand(1);
+ __ vpmaxub(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqb(dst, kScratchDoubleReg, src2);
+ __ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kSSEI8x16GeU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(1);
+ __ pminub(dst, src);
+ __ pcmpeqb(dst, src);
+ break;
+ }
+ case kAVXI8x16GeU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = i.InputOperand(1);
+ __ vpminub(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -2810,16 +3334,18 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- __ sub(esp, Immediate(shrink_slots * kPointerSize));
+
+ // Skip callee-saved slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ if (shrink_slots > 0) {
+ __ sub(esp, Immediate(shrink_slots * kPointerSize));
+ }
}
if (saves != 0) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
- int pushed = 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ push(Register::from_code(i));
- ++pushed;
+ if (((1 << i) & saves)) __ push(Register::from_code(i));
}
}
}
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index d23161f414..b9bf261022 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -152,10 +152,84 @@ namespace compiler {
V(IA32I16x8ExtractLane) \
V(SSEI16x8ReplaceLane) \
V(AVXI16x8ReplaceLane) \
+ V(IA32I16x8Neg) \
+ V(SSEI16x8Shl) \
+ V(AVXI16x8Shl) \
+ V(SSEI16x8ShrS) \
+ V(AVXI16x8ShrS) \
+ V(SSEI16x8Add) \
+ V(AVXI16x8Add) \
+ V(SSEI16x8AddSaturateS) \
+ V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8Sub) \
+ V(AVXI16x8Sub) \
+ V(SSEI16x8SubSaturateS) \
+ V(AVXI16x8SubSaturateS) \
+ V(SSEI16x8Mul) \
+ V(AVXI16x8Mul) \
+ V(SSEI16x8MinS) \
+ V(AVXI16x8MinS) \
+ V(SSEI16x8MaxS) \
+ V(AVXI16x8MaxS) \
+ V(SSEI16x8Eq) \
+ V(AVXI16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(SSEI16x8GtS) \
+ V(AVXI16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(SSEI16x8ShrU) \
+ V(AVXI16x8ShrU) \
+ V(SSEI16x8AddSaturateU) \
+ V(AVXI16x8AddSaturateU) \
+ V(SSEI16x8SubSaturateU) \
+ V(AVXI16x8SubSaturateU) \
+ V(SSEI16x8MinU) \
+ V(AVXI16x8MinU) \
+ V(SSEI16x8MaxU) \
+ V(AVXI16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
V(IA32I8x16Splat) \
V(IA32I8x16ExtractLane) \
V(SSEI8x16ReplaceLane) \
- V(AVXI8x16ReplaceLane)
+ V(AVXI8x16ReplaceLane) \
+ V(IA32I8x16Neg) \
+ V(SSEI8x16Add) \
+ V(AVXI8x16Add) \
+ V(SSEI8x16AddSaturateS) \
+ V(AVXI8x16AddSaturateS) \
+ V(SSEI8x16Sub) \
+ V(AVXI8x16Sub) \
+ V(SSEI8x16SubSaturateS) \
+ V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16MinS) \
+ V(AVXI8x16MinS) \
+ V(SSEI8x16MaxS) \
+ V(AVXI8x16MaxS) \
+ V(SSEI8x16Eq) \
+ V(AVXI8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(SSEI8x16GtS) \
+ V(AVXI8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(SSEI8x16AddSaturateU) \
+ V(AVXI8x16AddSaturateU) \
+ V(SSEI8x16SubSaturateU) \
+ V(AVXI8x16SubSaturateU) \
+ V(SSEI8x16MinU) \
+ V(AVXI8x16MinU) \
+ V(SSEI8x16MaxU) \
+ V(AVXI8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 538e4c220f..83c60e4455 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -138,10 +138,84 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8ExtractLane:
case kSSEI16x8ReplaceLane:
case kAVXI16x8ReplaceLane:
+ case kIA32I16x8Neg:
+ case kSSEI16x8Shl:
+ case kAVXI16x8Shl:
+ case kSSEI16x8ShrS:
+ case kAVXI16x8ShrS:
+ case kSSEI16x8Add:
+ case kAVXI16x8Add:
+ case kSSEI16x8AddSaturateS:
+ case kAVXI16x8AddSaturateS:
+ case kSSEI16x8Sub:
+ case kAVXI16x8Sub:
+ case kSSEI16x8SubSaturateS:
+ case kAVXI16x8SubSaturateS:
+ case kSSEI16x8Mul:
+ case kAVXI16x8Mul:
+ case kSSEI16x8MinS:
+ case kAVXI16x8MinS:
+ case kSSEI16x8MaxS:
+ case kAVXI16x8MaxS:
+ case kSSEI16x8Eq:
+ case kAVXI16x8Eq:
+ case kSSEI16x8Ne:
+ case kAVXI16x8Ne:
+ case kSSEI16x8GtS:
+ case kAVXI16x8GtS:
+ case kSSEI16x8GeS:
+ case kAVXI16x8GeS:
+ case kSSEI16x8ShrU:
+ case kAVXI16x8ShrU:
+ case kSSEI16x8AddSaturateU:
+ case kAVXI16x8AddSaturateU:
+ case kSSEI16x8SubSaturateU:
+ case kAVXI16x8SubSaturateU:
+ case kSSEI16x8MinU:
+ case kAVXI16x8MinU:
+ case kSSEI16x8MaxU:
+ case kAVXI16x8MaxU:
+ case kSSEI16x8GtU:
+ case kAVXI16x8GtU:
+ case kSSEI16x8GeU:
+ case kAVXI16x8GeU:
case kIA32I8x16Splat:
case kIA32I8x16ExtractLane:
case kSSEI8x16ReplaceLane:
case kAVXI8x16ReplaceLane:
+ case kIA32I8x16Neg:
+ case kSSEI8x16Add:
+ case kAVXI8x16Add:
+ case kSSEI8x16AddSaturateS:
+ case kAVXI8x16AddSaturateS:
+ case kSSEI8x16Sub:
+ case kAVXI8x16Sub:
+ case kSSEI8x16SubSaturateS:
+ case kAVXI8x16SubSaturateS:
+ case kSSEI8x16MinS:
+ case kAVXI8x16MinS:
+ case kSSEI8x16MaxS:
+ case kAVXI8x16MaxS:
+ case kSSEI8x16Eq:
+ case kAVXI8x16Eq:
+ case kSSEI8x16Ne:
+ case kAVXI8x16Ne:
+ case kSSEI8x16GtS:
+ case kAVXI8x16GtS:
+ case kSSEI8x16GeS:
+ case kAVXI8x16GeS:
+ case kSSEI8x16AddSaturateU:
+ case kAVXI8x16AddSaturateU:
+ case kSSEI8x16SubSaturateU:
+ case kAVXI8x16SubSaturateU:
+ case kSSEI8x16MinU:
+ case kAVXI8x16MinU:
+ case kSSEI8x16MaxU:
+ case kAVXI8x16MaxU:
+ case kSSEI8x16GtU:
+ case kAVXI8x16GtU:
+ case kSSEI8x16GeU:
+ case kAVXI8x16GeU:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index eb7a7d7cd5..bae563d7b6 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -46,8 +46,7 @@ class IA32OperandGenerator final : public OperandGenerator {
case kIA32Sub:
case kIA32Cmp:
case kIA32Test:
- return rep == MachineRepresentation::kWord32 ||
- rep == MachineRepresentation::kTagged;
+ return rep == MachineRepresentation::kWord32 || IsAnyTagged(rep);
case kIA32Cmp16:
case kIA32Test16:
return rep == MachineRepresentation::kWord16;
@@ -1916,14 +1915,53 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MinU) \
V(I32x4MaxU) \
V(I32x4GtU) \
- V(I32x4GeU)
-
-#define SIMD_UNOP_LIST(V) V(I32x4Neg)
+ V(I32x4GeU) \
+ V(I16x8Add) \
+ V(I16x8AddSaturateS) \
+ V(I16x8Sub) \
+ V(I16x8SubSaturateS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I8x16Add) \
+ V(I8x16AddSaturateS) \
+ V(I8x16Sub) \
+ V(I8x16SubSaturateS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(I8x16GtU) \
+ V(I8x16GeU)
+
+#define SIMD_UNOP_LIST(V) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
+ V(I8x16Neg)
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
- V(I32x4ShrU)
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU)
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index a02ad5d99a..f5457ee562 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -51,6 +51,8 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchRestoreCallerRegisters) \
V(ArchCallCFunction) \
V(ArchPrepareTailCall) \
+ V(ArchCallWasmFunction) \
+ V(ArchTailCallWasm) \
V(ArchJmp) \
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index 666fa60d95..b1164767f2 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -296,11 +296,13 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallCFunction:
case kArchCallCodeObject:
case kArchCallJSFunction:
+ case kArchCallWasmFunction:
return kHasSideEffect;
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
+ case kArchTailCallWasm:
return kHasSideEffect | kIsBlockTerminator;
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 4e07049de7..d19692e3dd 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -690,6 +690,7 @@ struct CallBuffer {
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
CallBufferFlags flags,
+ bool is_tail_call,
int stack_param_delta) {
OperandGenerator g(this);
DCHECK_LE(call->op()->ValueOutputCount(),
@@ -759,6 +760,14 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseImmediate(callee)
: g.UseRegister(callee));
break;
+ case CallDescriptor::kCallWasmFunction:
+ buffer->instruction_args.push_back(
+ (call_address_immediate &&
+ (callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
+ callee->opcode() == IrOpcode::kRelocatableInt32Constant))
+ ? g.UseImmediate(callee)
+ : g.UseRegister(callee));
+ break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
@@ -779,7 +788,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// If it was a syntactic tail call we need to drop the current frame and
// all the frames on top of it that are either an arguments adaptor frame
// or a tail caller frame.
- if (buffer->descriptor->SupportsTailCalls()) {
+ if (is_tail_call) {
frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
@@ -999,11 +1008,15 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
if (sw.min_value > value) sw.min_value = value;
if (sw.max_value < value) sw.max_value = value;
}
- DCHECK_LE(sw.min_value, sw.max_value);
- // Note that {value_range} can be 0 if {min_value} is -2^31 and
- // {max_value} is 2^31-1, so don't assume that it's non-zero below.
- sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
- bit_cast<uint32_t>(sw.min_value);
+ if (sw.case_count != 0) {
+ DCHECK_LE(sw.min_value, sw.max_value);
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and
+ // {max_value} is 2^31-1, so don't assume that it's non-zero below.
+ sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
+ bit_cast<uint32_t>(sw.min_value);
+ } else {
+ sw.value_range = 0;
+ }
return VisitSwitch(input, sw);
}
case BasicBlock::kReturn: {
@@ -1120,6 +1133,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
return;
+ case IrOpcode::kUnreachable:
+ VisitUnreachable(node);
+ return;
case IrOpcode::kComment:
VisitComment(node);
return;
@@ -2221,12 +2237,7 @@ void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2245,7 +2256,8 @@ void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2254,7 +2266,7 @@ void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
@@ -2279,7 +2291,8 @@ void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
@@ -2298,7 +2311,7 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
@@ -2309,7 +2322,8 @@ void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
@@ -2339,7 +2353,7 @@ void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
@@ -2364,7 +2378,8 @@ void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
@@ -2383,7 +2398,7 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2399,7 +2414,12 @@ void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
@@ -2550,7 +2570,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Improve constant pool and the heuristics in the register allocator
// for where to emit constants.
CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
- InitializeCallBuffer(node, &buffer, call_buffer_flags);
+ InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
@@ -2585,6 +2605,9 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
+ case CallDescriptor::kCallWasmFunction:
+ opcode = kArchCallWasmFunction | MiscField::encode(flags);
+ break;
}
// Emit the call instruction.
@@ -2612,7 +2635,6 @@ void InstructionSelector::VisitCallWithCallerSavedRegisters(
void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
CallDescriptor const* descriptor = CallDescriptorOf(node->op());
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
CallDescriptor* caller = linkage()->GetIncomingDescriptor();
DCHECK(caller->CanTailCall(node));
@@ -2625,7 +2647,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
if (IsTailCallAddressImmediate()) {
flags |= kCallAddressImmediate;
}
- InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
+ InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
@@ -2651,6 +2673,9 @@ void InstructionSelector::VisitTailCall(Node* node) {
case CallDescriptor::kCallAddress:
opcode = kArchTailCallAddress;
break;
+ case CallDescriptor::kCallWasmFunction:
+ opcode = kArchTailCallWasm;
+ break;
default:
UNREACHABLE();
return;
@@ -2764,6 +2789,11 @@ void InstructionSelector::VisitDebugBreak(Node* node) {
Emit(kArchDebugBreak, g.NoOutput());
}
+void InstructionSelector::VisitUnreachable(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchDebugBreak, g.NoOutput());
+}
+
void InstructionSelector::VisitComment(Node* node) {
OperandGenerator g(this);
InstructionOperand operand(g.UseImmediate(node));
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index ce99e480fc..2bd85d7dab 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -285,7 +285,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// {call_code_immediate} to generate immediate operands to calls of code.
// {call_address_immediate} to generate immediate operands to address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
- CallBufferFlags flags, int stack_slot_delta = 0);
+ CallBufferFlags flags, bool is_tail_call,
+ int stack_slot_delta = 0);
bool IsTailCallAddressImmediate();
int GetTempsCountForTailCallFromJSFunction();
@@ -348,6 +349,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
+ void VisitUnreachable(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 72a8b0b06f..b0f6661274 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -901,7 +901,8 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsTailCall() const {
return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
- arch_opcode() == ArchOpcode::kArchTailCallAddress;
+ arch_opcode() == ArchOpcode::kArchTailCallAddress ||
+ arch_opcode() == ArchOpcode::kArchTailCallWasm;
}
bool IsThrow() const {
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 4710f35dcc..042d9e0ef7 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -75,7 +75,24 @@ void Int64Lowering::LowerGraph() {
namespace {
-static int GetParameterIndexAfterLowering(
+int GetReturnIndexAfterLowering(
+ CallDescriptor* descriptor, int old_index) {
+ int result = old_index;
+ for (int i = 0; i < old_index; i++) {
+ if (descriptor->GetReturnType(i).representation() ==
+ MachineRepresentation::kWord64) {
+ result++;
+ }
+ }
+ return result;
+}
+
+int GetReturnCountAfterLowering(CallDescriptor* descriptor) {
+ return GetReturnIndexAfterLowering(
+ descriptor, static_cast<int>(descriptor->ReturnCount()));
+}
+
+int GetParameterIndexAfterLowering(
Signature<MachineRepresentation>* signature, int old_index) {
int result = old_index;
for (int i = 0; i < old_index; i++) {
@@ -276,13 +293,12 @@ void Int64Lowering::LowerNode(Node* node) {
++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
- Node* high_node = nullptr;
if (signature()->GetParam(old_index) ==
MachineRepresentation::kWord64) {
- high_node = graph()->NewNode(common()->Parameter(new_index + 1),
- graph()->start());
+ Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1),
+ graph()->start());
+ ReplaceNode(node, node, high_node);
}
- ReplaceNode(node, node, high_node);
}
break;
}
@@ -297,26 +313,64 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
- case IrOpcode::kCall: {
- // TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
+ case IrOpcode::kTailCall: {
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
(descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64())) {
+ // Tail calls do not have return values, so adjusting the call
+ // descriptor is enough.
+ auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
+ NodeProperties::ChangeOp(node, common()->TailCall(new_descriptor));
+ }
+ break;
+ }
+ case IrOpcode::kCall: {
+ CallDescriptor* descriptor =
+ const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
+ bool returns_require_lowering =
+ GetReturnCountAfterLowering(descriptor) !=
+ static_cast<int>(descriptor->ReturnCount());
+ if (DefaultLowering(node) || returns_require_lowering) {
// We have to adjust the call descriptor.
- const Operator* op =
- common()->Call(GetI32WasmCallDescriptor(zone(), descriptor));
- NodeProperties::ChangeOp(node, op);
+ NodeProperties::ChangeOp(
+ node, common()->Call(GetI32WasmCallDescriptor(zone(), descriptor)));
}
- if (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Int64()) {
- // We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ if (returns_require_lowering) {
+ size_t return_arity = descriptor->ReturnCount();
+ if (return_arity == 1) {
+ // We access the additional return values through projections.
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
+ ReplaceNode(node, low_node, high_node);
+ } else {
+ ZoneVector<Node*> projections(return_arity, zone());
+ NodeProperties::CollectValueProjections(node, projections.data(),
+ return_arity);
+ for (size_t old_index = 0, new_index = 0; old_index < return_arity;
+ ++old_index, ++new_index) {
+ Node* use_node = projections[old_index];
+ DCHECK_EQ(ProjectionIndexOf(use_node->op()), old_index);
+ DCHECK_EQ(GetReturnIndexAfterLowering(descriptor,
+ static_cast<int>(old_index)),
+ static_cast<int>(new_index));
+ if (new_index != old_index) {
+ NodeProperties::ChangeOp(
+ use_node, common()->Projection(new_index));
+ }
+ if (descriptor->GetReturnType(old_index).representation() ==
+ MachineRepresentation::kWord64) {
+ Node* high_node = graph()->NewNode(
+ common()->Projection(new_index + 1), node,
+ graph()->start());
+ ReplaceNode(use_node, use_node, high_node);
+ ++new_index;
+ }
+ }
+ }
}
break;
}
@@ -801,18 +855,6 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
- case IrOpcode::kProjection: {
- Node* call = node->InputAt(0);
- DCHECK_EQ(IrOpcode::kCall, call->opcode());
- CallDescriptor* descriptor =
- const_cast<CallDescriptor*>(CallDescriptorOf(call->op()));
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == MachineType::Int64()) {
- UNREACHABLE(); // TODO(titzer): implement multiple i64 returns.
- }
- }
- break;
- }
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 597587f80b..df6fdba3f0 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -9,6 +9,7 @@
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -148,7 +149,7 @@ bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
IsFastElementsKind(receiver_map->elements_kind()) &&
!receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
(!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
- isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsNoElementsProtectorIntact() &&
isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
!IsReadOnlyLengthDescriptor(receiver_map);
}
@@ -174,11 +175,12 @@ bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
if (!current->map()->is_stable()) return false;
}
- // For holey Arrays, ensure that the array_protector cell is valid (must be
- // a CompilationDependency), and the JSArray prototype has not been altered.
+ // For holey Arrays, ensure that the no_elements_protector cell is valid (must
+ // be a CompilationDependency), and the JSArray prototype has not been
+ // altered.
return receiver_map->instance_type() == JS_ARRAY_TYPE &&
(!receiver_map->is_dictionary_map() || receiver_map->is_stable()) &&
- isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsNoElementsProtectorIntact() &&
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
@@ -231,8 +233,9 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
Node* check = effect = graph()->NewNode(
simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
check = graph()->NewNode(simplified()->BooleanNot(), check);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
+ check, effect, control);
}
}
@@ -292,35 +295,20 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
Handle<Map> map(Map::cast(native_context()->get(map_index)), isolate());
- // allocate new iterator
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- Node* value = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
- jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- value, jsgraph()->Constant(map), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectPropertiesOrHash()),
- value, jsgraph()->EmptyFixedArrayConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
- jsgraph()->EmptyFixedArrayConstant(), effect, control);
-
- // attach the iterator to this object
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
- value, receiver, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex()), value,
- jsgraph()->ZeroConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObjectMap()),
- value, object_map, effect, control);
-
- value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
-
- // replace it
+ // Allocate new iterator and attach the iterator to this object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArrayIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(), map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSArrayIteratorObject(), receiver);
+ a.Store(AccessBuilder::ForJSArrayIteratorIndex(), jsgraph()->ZeroConstant());
+ a.Store(AccessBuilder::ForJSArrayIteratorObjectMap(), object_map);
+ Node* value = effect = a.Finish();
+
+ // Replace it.
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -343,12 +331,12 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
iterator_map->instance_type());
if (IsHoleyElementsKind(elements_kind)) {
- if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ if (!isolate()->IsNoElementsProtectorIntact()) {
return NoChange();
} else {
Handle<JSObject> initial_array_prototype(
native_context()->initial_array_prototype(), isolate());
- dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
}
}
@@ -400,8 +388,9 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
iterator, etrue1, if_true1);
Node* check_map = graph()->NewNode(simplified()->ReferenceEqual(),
array_map, orig_map);
- etrue1 = graph()->NewNode(simplified()->CheckIf(), check_map, etrue1,
- if_true1);
+ etrue1 =
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
+ check_map, etrue1, if_true1);
}
if (kind != IterationKind::kKeys) {
@@ -540,8 +529,9 @@ Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
Node* check1 = efalse0 = graph()->NewNode(
simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
- efalse0 =
- graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
+ check1, efalse0, if_false0);
}
Node* length = efalse0 = graph()->NewNode(
@@ -919,7 +909,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
dependencies()->AssumePrototypeMapsStable(receiver_map);
// Load the "length" property of the {receiver}.
@@ -1023,7 +1013,7 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
dependencies()->AssumePrototypeMapsStable(receiver_map);
// If the {receiver_maps} information is not reliable, we need
@@ -1139,7 +1129,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
dependencies()->AssumePrototypeMapsStable(receiver_map);
// Load length of the {receiver}.
@@ -1194,6 +1184,8 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
Node* eloop =
graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
Node* index = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2),
jsgraph()->OneConstant(),
@@ -1263,9 +1255,10 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
Node* argc =
jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver, argc,
- target, jsgraph()->UndefinedConstant(), entry,
- argc, context, frame_state, efalse1, if_false1);
+ graph()->NewNode(common()->Call(desc), stub_code, receiver,
+ jsgraph()->PaddingConstant(), argc, target,
+ jsgraph()->UndefinedConstant(), entry, argc,
+ context, frame_state, efalse1, if_false1);
}
if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
@@ -1338,27 +1331,17 @@ Reduction JSBuiltinReducer::ReduceCollectionIterator(
receiver, effect, control);
// Create the JSCollectionIterator result.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- Node* value = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
- jsgraph()->Constant(JSCollectionIterator::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), value,
- jsgraph()->Constant(collection_iterator_map), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectPropertiesOrHash()),
- value, jsgraph()->EmptyFixedArrayConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
- jsgraph()->EmptyFixedArrayConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
- value, table, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
- value, jsgraph()->ZeroConstant(), effect, control);
- value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSCollectionIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(), collection_iterator_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSCollectionIteratorTable(), table);
+ a.Store(AccessBuilder::ForJSCollectionIteratorIndex(),
+ jsgraph()->ZeroConstant());
+ Node* value = effect = a.Finish();
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1386,7 +1369,7 @@ Reduction JSBuiltinReducer::ReduceCollectionSize(
}
Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
- Node* node, int entry_size,
+ Node* node, int entry_size, Handle<HeapObject> empty_collection,
InstanceType collection_iterator_instance_type_first,
InstanceType collection_iterator_instance_type_last) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -1434,6 +1417,8 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
// Check if reached the final table of the {receiver}.
Node* table = effect = graph()->NewNode(
@@ -1522,6 +1507,8 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
Node* loop = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
Node* iloop = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
NodeProperties::SetType(iloop, type_cache_.kFixedArrayLengthType);
@@ -1538,9 +1525,8 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
efalse0 = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForJSCollectionIteratorTable()),
- receiver,
- jsgraph()->HeapConstant(factory()->empty_ordered_hash_table()),
- efalse0, if_false0);
+ receiver, jsgraph()->HeapConstant(empty_collection), efalse0,
+ if_false0);
controls[0] = if_false0;
effects[0] = efalse0;
@@ -1681,114 +1667,6 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
return NoChange();
}
-// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
-Reduction JSBuiltinReducer::ReduceFunctionBind(Node* node) {
- // Value inputs to the {node} are as follows:
- //
- // - target, which is Function.prototype.bind JSFunction
- // - receiver, which is the [[BoundTargetFunction]]
- // - bound_this (optional), which is the [[BoundThis]]
- // - and all the remaining value inouts are [[BoundArguments]]
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Type* receiver_type = NodeProperties::GetType(receiver);
- Node* bound_this = (node->op()->ValueInputCount() < 3)
- ? jsgraph()->UndefinedConstant()
- : NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (receiver_type->IsHeapConstant() &&
- receiver_type->AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> target_function =
- Handle<JSFunction>::cast(receiver_type->AsHeapConstant()->Value());
-
- // Check that the "length" property on the {target_function} is the
- // default JSFunction accessor.
- LookupIterator length_lookup(target_function, factory()->length_string(),
- target_function, LookupIterator::OWN);
- if (length_lookup.state() != LookupIterator::ACCESSOR ||
- !length_lookup.GetAccessors()->IsAccessorInfo()) {
- return NoChange();
- }
-
- // Check that the "name" property on the {target_function} is the
- // default JSFunction accessor.
- LookupIterator name_lookup(target_function, factory()->name_string(),
- target_function, LookupIterator::OWN);
- if (name_lookup.state() != LookupIterator::ACCESSOR ||
- !name_lookup.GetAccessors()->IsAccessorInfo()) {
- return NoChange();
- }
-
- // Determine the prototype of the {target_function}.
- Handle<Object> prototype(target_function->map()->prototype(), isolate());
-
- // Setup the map for the JSBoundFunction instance.
- Handle<Map> map = target_function->IsConstructor()
- ? isolate()->bound_function_with_constructor_map()
- : isolate()->bound_function_without_constructor_map();
- if (map->prototype() != *prototype) {
- map = Map::TransitionToPrototype(map, prototype);
- }
- DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
-
- // Create the [[BoundArguments]] for the result.
- Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
- if (node->op()->ValueInputCount() > 3) {
- int const length = node->op()->ValueInputCount() - 3;
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- bound_arguments = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(FixedArray::SizeFor(length)), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), bound_arguments,
- jsgraph()->FixedArrayMapConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForFixedArrayLength()),
- bound_arguments, jsgraph()->Constant(length), effect, control);
- for (int i = 0; i < length; ++i) {
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)),
- bound_arguments, NodeProperties::GetValueInput(node, 3 + i), effect,
- control);
- }
- bound_arguments = effect =
- graph()->NewNode(common()->FinishRegion(), bound_arguments, effect);
- }
-
- // Create the JSBoundFunction result.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- Node* value = effect = graph()->NewNode(
- simplified()->Allocate(Type::BoundFunction(), NOT_TENURED),
- jsgraph()->Constant(JSBoundFunction::kSize), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- value, jsgraph()->Constant(map), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectPropertiesOrHash()),
- value, jsgraph()->EmptyFixedArrayConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
- jsgraph()->EmptyFixedArrayConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSBoundFunctionBoundTargetFunction()),
- value, receiver, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSBoundFunctionBoundThis()),
- value, bound_this, effect, control);
- effect =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSBoundFunctionBoundArguments()),
- value, bound_arguments, effect, control);
- value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
// ES6 section 18.2.2 isFinite ( number )
Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
JSCallReduction r(node);
@@ -2379,73 +2257,51 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* prototype = NodeProperties::GetValueInput(node, 2);
Type* prototype_type = NodeProperties::GetType(prototype);
- Handle<Map> instance_map;
if (!prototype_type->IsHeapConstant()) return NoChange();
Handle<HeapObject> prototype_const =
prototype_type->AsHeapConstant()->Value();
- if (!prototype_const->IsNull(isolate()) && !prototype_const->IsJSReceiver()) {
- return NoChange();
- }
- instance_map = Map::GetObjectCreateMap(prototype_const);
+ Handle<Map> instance_map;
+ MaybeHandle<Map> maybe_instance_map =
+ Map::TryGetObjectCreateMap(prototype_const);
+ if (!maybe_instance_map.ToHandle(&instance_map)) return NoChange();
Node* properties = jsgraph()->EmptyFixedArrayConstant();
if (instance_map->is_dictionary_map()) {
// Allocated an empty NameDictionary as backing store for the properties.
- Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
+ Handle<Map> map(isolate()->heap()->name_dictionary_map(), isolate());
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
DCHECK(base::bits::IsPowerOfTwo(capacity));
int length = NameDictionary::EntryToIndex(capacity);
int size = NameDictionary::SizeFor(length);
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
-
- Node* value = effect =
- graph()->NewNode(simplified()->Allocate(Type::Any(), NOT_TENURED),
- jsgraph()->Constant(size), effect, control);
- effect =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- value, jsgraph()->HeapConstant(map), effect, control);
-
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(size, NOT_TENURED, Type::Any());
+ a.Store(AccessBuilder::ForMap(), map);
// Initialize FixedArray fields.
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForFixedArrayLength()), value,
- jsgraph()->SmiConstant(length), effect, control);
+ a.Store(AccessBuilder::ForFixedArrayLength(),
+ jsgraph()->SmiConstant(length));
// Initialize HashTable fields.
- effect =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForHashTableBaseNumberOfElements()),
- value, jsgraph()->SmiConstant(0), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForHashTableBaseNumberOfDeletedElement()),
- value, jsgraph()->SmiConstant(0), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHashTableBaseCapacity()),
- value, jsgraph()->SmiConstant(capacity), effect, control);
+ a.Store(AccessBuilder::ForHashTableBaseNumberOfElements(),
+ jsgraph()->SmiConstant(0));
+ a.Store(AccessBuilder::ForHashTableBaseNumberOfDeletedElement(),
+ jsgraph()->SmiConstant(0));
+ a.Store(AccessBuilder::ForHashTableBaseCapacity(),
+ jsgraph()->SmiConstant(capacity));
// Initialize Dictionary fields.
- Node* undefined = jsgraph()->UndefinedConstant();
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForDictionaryNextEnumerationIndex()),
- value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
- control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForDictionaryObjectHashIndex()),
- value, jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel), effect,
- control);
+ a.Store(AccessBuilder::ForDictionaryNextEnumerationIndex(),
+ jsgraph()->SmiConstant(PropertyDetails::kInitialIndex));
+ a.Store(AccessBuilder::ForDictionaryObjectHashIndex(),
+ jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel));
// Initialize the Properties fields.
+ Node* undefined = jsgraph()->UndefinedConstant();
STATIC_ASSERT(NameDictionary::kElementsStartIndex ==
NameDictionary::kObjectHashIndex + 1);
for (int index = NameDictionary::kElementsStartIndex; index < length;
index++) {
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier)),
- value, undefined, effect, control);
+ a.Store(AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier),
+ undefined);
}
- properties = effect =
- graph()->NewNode(common()->FinishRegion(), value, effect);
+ properties = effect = a.Finish();
}
int const instance_size = instance_map->instance_size();
@@ -2454,82 +2310,26 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
// Emit code to allocate the JSObject instance for the given
// {instance_map}.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- Node* value = effect =
- graph()->NewNode(simplified()->Allocate(Type::Any(), NOT_TENURED),
- jsgraph()->Constant(instance_size), effect, control);
- effect =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()), value,
- jsgraph()->HeapConstant(instance_map), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectPropertiesOrHash()),
- value, properties, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
- jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(instance_size, NOT_TENURED, Type::Any());
+ a.Store(AccessBuilder::ForMap(), instance_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
// Initialize Object fields.
Node* undefined = jsgraph()->UndefinedConstant();
for (int offset = JSObject::kHeaderSize; offset < instance_size;
offset += kPointerSize) {
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier)),
- value, undefined, effect, control);
+ a.Store(AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier),
+ undefined);
}
- value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+ Node* value = effect = a.Finish();
// replace it
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
-// ES #sec-object.is
-Reduction JSBuiltinReducer::ReduceObjectIs(Node* node) {
- // TODO(turbofan): At some point we should probably introduce a new
- // SameValue simplified operator (and also a StrictEqual simplified
- // operator) and create unified handling in SimplifiedLowering.
- JSCallReduction r(node);
- if (r.GetJSCallArity() == 2 && r.left() == r.right()) {
- // Object.is(x,x) => #true
- Node* value = jsgraph()->TrueConstant();
- return Replace(value);
- } else if (r.InputsMatchTwo(Type::Unique(), Type::Unique())) {
- // Object.is(x:Unique,y:Unique) => ReferenceEqual(x,y)
- Node* left = r.GetJSCallInput(0);
- Node* right = r.GetJSCallInput(1);
- Node* value = graph()->NewNode(simplified()->ReferenceEqual(), left, right);
- return Replace(value);
- } else if (r.InputsMatchTwo(Type::MinusZero(), Type::Any())) {
- // Object.is(x:MinusZero,y) => ObjectIsMinusZero(y)
- Node* input = r.GetJSCallInput(1);
- Node* value = graph()->NewNode(simplified()->ObjectIsMinusZero(), input);
- return Replace(value);
- } else if (r.InputsMatchTwo(Type::Any(), Type::MinusZero())) {
- // Object.is(x,y:MinusZero) => ObjectIsMinusZero(x)
- Node* input = r.GetJSCallInput(0);
- Node* value = graph()->NewNode(simplified()->ObjectIsMinusZero(), input);
- return Replace(value);
- } else if (r.InputsMatchTwo(Type::NaN(), Type::Any())) {
- // Object.is(x:NaN,y) => ObjectIsNaN(y)
- Node* input = r.GetJSCallInput(1);
- Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
- return Replace(value);
- } else if (r.InputsMatchTwo(Type::Any(), Type::NaN())) {
- // Object.is(x,y:NaN) => ObjectIsNaN(x)
- Node* input = r.GetJSCallInput(0);
- Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
- return Replace(value);
- } else if (r.InputsMatchTwo(Type::String(), Type::String())) {
- // Object.is(x:String,y:String) => StringEqual(x,y)
- Node* left = r.GetJSCallInput(0);
- Node* right = r.GetJSCallInput(1);
- Node* value = graph()->NewNode(simplified()->StringEqual(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
JSCallReduction r(node);
@@ -2602,8 +2402,12 @@ Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
// Return the character from the {receiver} as single character string.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(
+ simplified()->MaskIndexWithBound(), index, receiver_length);
+
Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- index, if_true);
+ masked_index, if_true);
// Return the empty string otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -2656,8 +2460,12 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
// Load the character from the {receiver}.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(
+ simplified()->MaskIndexWithBound(), index, receiver_length);
+
Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(),
- receiver, index, if_true);
+ receiver, masked_index, if_true);
// Return NaN otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -2744,32 +2552,20 @@ Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
Node* map = jsgraph()->HeapConstant(
handle(native_context()->string_iterator_map(), isolate()));
- // allocate new iterator
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- Node* value = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
- jsgraph()->Constant(JSStringIterator::kSize), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- value, map, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectPropertiesOrHash()),
- value, jsgraph()->EmptyFixedArrayConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
- jsgraph()->EmptyFixedArrayConstant(), effect, control);
-
- // attach the iterator to this string
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSStringIteratorString()),
- value, receiver, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
- value, jsgraph()->SmiConstant(0), effect, control);
-
- value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
-
- // replace it
+ // Allocate new iterator and attach the iterator to this string.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(), map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSStringIteratorString(), receiver);
+ a.Store(AccessBuilder::ForJSStringIteratorIndex(),
+ jsgraph()->SmiConstant(0));
+ Node* value = effect = a.Finish();
+
+ // Replace it.
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -2916,6 +2712,60 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
return NoChange();
}
+// ES section #sec-string.prototype.slice
+Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ Node* start = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Type* start_type = NodeProperties::GetType(start);
+ Node* end = node->op()->ValueInputCount() >= 4
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Type* end_type = NodeProperties::GetType(end);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (start_type->Is(type_cache_.kSingletonMinusOne) &&
+ end_type->Is(Type::Undefined())) {
+ Node* receiver_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
+
+ Node* check =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_length,
+ jsgraph()->ZeroConstant());
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->EmptyStringConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse;
+ {
+ // We need to convince TurboFan that {receiver_length}-1 is a valid
+ // Unsigned32 value, so we just apply NumberToUint32 to the result
+ // of the subtraction, which is a no-op and merely acts as a marker.
+ Node* index =
+ graph()->NewNode(simplified()->NumberSubtract(), receiver_length,
+ jsgraph()->OneConstant());
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ vfalse = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
+ if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
+
Reduction JSBuiltinReducer::ReduceStringToLowerCaseIntl(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
RelaxEffectsAndControls(node);
@@ -3015,8 +2865,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceDateNow(node);
case kDateGetTime:
return ReduceDateGetTime(node);
- case kFunctionBind:
- return ReduceFunctionBind(node);
case kGlobalIsFinite:
reduction = ReduceGlobalIsFinite(node);
break;
@@ -3041,9 +2889,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceCollectionIterator(node, JS_MAP_TYPE,
Context::MAP_VALUE_ITERATOR_MAP_INDEX);
case kMapIteratorNext:
- return ReduceCollectionIteratorNext(node, OrderedHashMap::kEntrySize,
- FIRST_MAP_ITERATOR_TYPE,
- LAST_MAP_ITERATOR_TYPE);
+ return ReduceCollectionIteratorNext(
+ node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
+ FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
case kMathAbs:
reduction = ReduceMathAbs(node);
break;
@@ -3161,9 +3009,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kObjectCreate:
reduction = ReduceObjectCreate(node);
break;
- case kObjectIs:
- reduction = ReduceObjectIs(node);
- break;
case kSetEntries:
return ReduceCollectionIterator(
node, JS_SET_TYPE, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX);
@@ -3173,9 +3018,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceCollectionIterator(node, JS_SET_TYPE,
Context::SET_VALUE_ITERATOR_MAP_INDEX);
case kSetIteratorNext:
- return ReduceCollectionIteratorNext(node, OrderedHashSet::kEntrySize,
- FIRST_SET_ITERATOR_TYPE,
- LAST_SET_ITERATOR_TYPE);
+ return ReduceCollectionIteratorNext(
+ node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
+ FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
@@ -3191,6 +3036,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringIterator(node);
case kStringIteratorNext:
return ReduceStringIteratorNext(node);
+ case kStringSlice:
+ return ReduceStringSlice(node);
case kStringToLowerCaseIntl:
return ReduceStringToLowerCaseIntl(node);
case kStringToUpperCaseIntl:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index d5bddcede5..2b22b0ce7c 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -62,12 +62,11 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceCollectionSize(Node* node,
InstanceType collection_instance_type);
Reduction ReduceCollectionIteratorNext(
- Node* node, int entry_size,
+ Node* node, int entry_size, Handle<HeapObject> empty_collection,
InstanceType collection_iterator_instance_type_first,
InstanceType collection_iterator_instance_type_last);
Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
- Reduction ReduceFunctionBind(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
Reduction ReduceGlobalIsNaN(Node* node);
Reduction ReduceMapHas(Node* node);
@@ -111,7 +110,6 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceObjectIs(Node* node);
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringConcat(Node* node);
@@ -119,6 +117,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
+ Reduction ReduceStringSlice(Node* node);
Reduction ReduceStringToLowerCaseIntl(Node* node);
Reduction ReduceStringToUpperCaseIntl(Node* node);
Reduction ReduceArrayBufferIsView(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d8fcf4553a..c595b360d5 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -9,6 +9,7 @@
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -21,62 +22,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-bool CanBePrimitive(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateEmptyLiteralArray:
- case IrOpcode::kJSCreateEmptyLiteralObject:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateKeyValueArray:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSConstructForwardVarargs:
- case IrOpcode::kJSConstruct:
- case IrOpcode::kJSConstructWithArrayLike:
- case IrOpcode::kJSConstructWithSpread:
- case IrOpcode::kJSConvertReceiver:
- case IrOpcode::kJSGetSuperConstructor:
- case IrOpcode::kJSToObject:
- return false;
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = HeapObjectMatcher(node).Value();
- return value->IsPrimitive();
- }
- default:
- return true;
- }
-}
-
-bool CanBeNullOrUndefined(Node* node) {
- if (CanBePrimitive(node)) {
- switch (node->opcode()) {
- case IrOpcode::kJSToBoolean:
- case IrOpcode::kJSToInteger:
- case IrOpcode::kJSToLength:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSToNumber:
- case IrOpcode::kJSToString:
- return false;
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = HeapObjectMatcher(node).Value();
- Isolate* const isolate = value->GetIsolate();
- return value->IsNullOrUndefined(isolate);
- }
- default:
- return true;
- }
- }
- return false;
-}
-
-} // namespace
-
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -136,13 +81,11 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- // Replace the {node} with a proper {JSToBoolean} operator.
+ // Replace the {node} with a proper {ToBoolean} operator.
DCHECK_LE(2u, p.arity());
Node* value = (p.arity() == 2) ? jsgraph()->UndefinedConstant()
: NodeProperties::GetValueInput(node, 2);
- Node* context = NodeProperties::GetContextInput(node);
- value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), value,
- context);
+ value = graph()->NewNode(simplified()->ToBoolean(), value);
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -168,10 +111,11 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
if (p.arity() < 3) return NoChange();
Node* value = (p.arity() >= 3) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
// We can fold away the Object(x) call if |x| is definitely not a primitive.
- if (CanBePrimitive(value)) {
- if (!CanBeNullOrUndefined(value)) {
+ if (NodeProperties::CanBePrimitive(value, effect)) {
+ if (!NodeProperties::CanBeNullOrUndefined(value, effect)) {
// Turn the {node} into a {JSToObject} call if we know that
// the {value} cannot be null or undefined.
NodeProperties::ReplaceValueInputs(node, value);
@@ -212,7 +156,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// If {arguments_list} cannot be null or undefined, we don't need
// to expand this {node} to control-flow.
- if (!CanBeNullOrUndefined(arguments_list)) {
+ if (!NodeProperties::CanBeNullOrUndefined(arguments_list, effect)) {
// Massage the value inputs appropriately.
node->ReplaceInput(0, target);
node->ReplaceInput(1, this_argument);
@@ -301,6 +245,106 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+// ES section #sec-function.prototype.bind
+Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ // Value inputs to the {node} are as follows:
+ //
+ // - target, which is Function.prototype.bind JSFunction
+ // - receiver, which is the [[BoundTargetFunction]]
+ // - bound_this (optional), which is the [[BoundThis]]
+ // - and all the remaining value inouts are [[BoundArguments]]
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* bound_this = (node->op()->ValueInputCount() < 3)
+ ? jsgraph()->UndefinedConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Ensure that the {receiver} is known to be a JSBoundFunction or
+ // a JSFunction with the same [[Prototype]], and all maps we've
+ // seen for the {receiver} so far indicate that {receiver} is
+ // definitely a constructor or not a constructor.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+ bool const is_constructor = receiver_maps[0]->is_constructor();
+ Handle<Object> const prototype(receiver_maps[0]->prototype(), isolate());
+ for (Handle<Map> const receiver_map : receiver_maps) {
+ // Check for consistency among the {receiver_maps}.
+ STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
+ if (receiver_map->prototype() != *prototype) return NoChange();
+ if (receiver_map->is_constructor() != is_constructor) return NoChange();
+ if (receiver_map->instance_type() < FIRST_FUNCTION_TYPE) return NoChange();
+
+ // Disallow binding of slow-mode functions. We need to figure out
+ // whether the length and name property are in the original state.
+ if (receiver_map->is_dictionary_map()) return NoChange();
+
+ // Check whether the length and name properties are still present
+ // as AccessorInfo objects. In that case, their values can be
+ // recomputed even if the actual value of the object changes.
+ // This mirrors the checks done in builtins-function-gen.cc at
+ // runtime otherwise.
+ Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(),
+ isolate());
+ if (descriptors->length() < 2) return NoChange();
+ if (descriptors->GetKey(JSFunction::kLengthDescriptorIndex) !=
+ isolate()->heap()->length_string()) {
+ return NoChange();
+ }
+ if (!descriptors->GetValue(JSFunction::kLengthDescriptorIndex)
+ ->IsAccessorInfo()) {
+ return NoChange();
+ }
+ if (descriptors->GetKey(JSFunction::kNameDescriptorIndex) !=
+ isolate()->heap()->name_string()) {
+ return NoChange();
+ }
+ if (!descriptors->GetValue(JSFunction::kNameDescriptorIndex)
+ ->IsAccessorInfo()) {
+ return NoChange();
+ }
+ }
+
+ // Setup the map for the resulting JSBoundFunction with the
+ // correct instance {prototype}.
+ Handle<Map> map(
+ is_constructor
+ ? native_context()->bound_function_with_constructor_map()
+ : native_context()->bound_function_without_constructor_map(),
+ isolate());
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype);
+ }
+
+ // Make sure we can rely on the {receiver_maps}.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+ }
+
+ // Replace the {node} with a JSCreateBoundFunction.
+ int const arity = std::max(0, node->op()->ValueInputCount() - 3);
+ int const input_count = 2 + arity + 3;
+ Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
+ inputs[0] = receiver;
+ inputs[1] = bound_this;
+ for (int i = 0; i < arity; ++i) {
+ inputs[2 + i] = NodeProperties::GetValueInput(node, 3 + i);
+ }
+ inputs[2 + arity + 0] = context;
+ inputs[2 + arity + 1] = effect;
+ inputs[2 + arity + 2] = control;
+ Node* value = effect = graph()->NewNode(
+ javascript()->CreateBoundFunction(arity, map), input_count, inputs);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
@@ -418,6 +462,20 @@ Reduction JSCallReducer::ReduceObjectGetPrototypeOf(Node* node) {
return ReduceObjectGetPrototype(node, object);
}
+// ES section #sec-object.is
+Reduction JSCallReducer::ReduceObjectIs(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& params = CallParametersOf(node->op());
+ int const argc = static_cast<int>(params.arity() - 2);
+ Node* lhs = (argc >= 1) ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* rhs = (argc >= 2) ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* value = graph()->NewNode(simplified()->SameValue(), lhs, rhs);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
// ES6 section B.2.2.1.1 get Object.prototype.__proto__
Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -490,8 +548,9 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
receiver, effect, control);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ control);
}
Node* value = jsgraph()->TrueConstant();
ReplaceWithValue(node, value, effect, control);
@@ -589,6 +648,150 @@ Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
return ReduceObjectGetPrototype(node, target);
}
+// ES section #sec-reflect.get
+Reduction JSCallReducer::ReduceReflectGet(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ if (arity != 2) return NoChange();
+ Node* target = NodeProperties::GetValueInput(node, 2);
+ Node* key = NodeProperties::GetValueInput(node, 3);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check whether {target} is a JSReceiver.
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), target);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Throw an appropriate TypeError if the {target} is not a JSReceiver.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ if_false = efalse = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kCalledOnNonObject),
+ jsgraph()->HeapConstant(
+ factory()->NewStringFromAsciiChecked("Reflect.get")),
+ context, frame_state, efalse, if_false);
+ }
+
+ // Otherwise just use the existing GetPropertyStub.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ Callable callable = CodeFactory::GetProperty(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ vtrue = etrue = if_true =
+ graph()->NewNode(common()->Call(desc), stub_code, target, key, context,
+ frame_state, etrue, if_true);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* extrue = graph()->NewNode(common()->IfException(), etrue, if_true);
+ if_true = graph()->NewNode(common()->IfSuccess(), if_true);
+ Node* exfalse = graph()->NewNode(common()->IfException(), efalse, if_false);
+ if_false = graph()->NewNode(common()->IfSuccess(), if_false);
+
+ // Join the exception edges.
+ Node* merge = graph()->NewNode(common()->Merge(2), extrue, exfalse);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(2), extrue, exfalse, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ extrue, exfalse, merge);
+ ReplaceWithValue(on_exception, phi, ephi, merge);
+ }
+
+ // Connect the throwing path to end.
+ if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
+ NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+
+ // Continue on the regular path.
+ ReplaceWithValue(node, vtrue, etrue, if_true);
+ return Changed(vtrue);
+}
+
+// ES section #sec-reflect.has
+Reduction JSCallReducer::ReduceReflectHas(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ DCHECK_LE(0, arity);
+ Node* target = (arity >= 1) ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* key = (arity >= 2) ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check whether {target} is a JSReceiver.
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), target);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Throw an appropriate TypeError if the {target} is not a JSReceiver.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ if_false = efalse = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kCalledOnNonObject),
+ jsgraph()->HeapConstant(
+ factory()->NewStringFromAsciiChecked("Reflect.has")),
+ context, frame_state, efalse, if_false);
+ }
+
+ // Otherwise just use the existing {JSHasProperty} logic.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ vtrue = etrue = if_true =
+ graph()->NewNode(javascript()->HasProperty(), key, target, context,
+ frame_state, etrue, if_true);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* extrue = graph()->NewNode(common()->IfException(), etrue, if_true);
+ if_true = graph()->NewNode(common()->IfSuccess(), if_true);
+ Node* exfalse = graph()->NewNode(common()->IfException(), efalse, if_false);
+ if_false = graph()->NewNode(common()->IfSuccess(), if_false);
+
+ // Join the exception edges.
+ Node* merge = graph()->NewNode(common()->Merge(2), extrue, exfalse);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(2), extrue, exfalse, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ extrue, exfalse, merge);
+ ReplaceWithValue(on_exception, phi, ephi, merge);
+ }
+
+ // Connect the throwing path to end.
+ if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
+ NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+
+ // Continue on the regular path.
+ ReplaceWithValue(node, vtrue, etrue, if_true);
+ return Changed(vtrue);
+}
+
bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
Isolate* const isolate = receiver_map->GetIsolate();
if (!receiver_map->prototype()->IsJSArray()) return false;
@@ -597,7 +800,7 @@ bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
return receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
(!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
- isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsNoElementsProtectorIntact() &&
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
@@ -649,7 +852,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
Node* k = jsgraph()->ZeroConstant();
@@ -663,24 +866,21 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
- Node* check = graph()->NewNode(simplified()->ObjectIsCallable(), fncallback);
- Node* check_branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* check_fail = graph()->NewNode(common()->IfFalse(), check_branch);
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
- Node* check_throw = check_fail = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kCalledNonCallable), fncallback,
- context, check_frame_state, effect, check_fail);
- control = graph()->NewNode(common()->IfTrue(), check_branch);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
// Start the loop.
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
Node* vloop = k = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
checkpoint_params[3] = k;
@@ -710,24 +910,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
effect, control);
- // Make sure that the access is still in bounds, since the callback could have
- // changed the array's size.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
- receiver, effect, control);
- k = effect =
- graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
-
- // Reload the elements pointer before calling the callback, since the previous
- // callback might have resized the array causing the elements buffer to be
- // re-allocated.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
-
- Node* element = effect = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, k, effect, control);
+ Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->Constant(1));
@@ -767,23 +950,8 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// Rewire potential exception edges.
Node* on_exception = nullptr;
if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- // Create appropriate {IfException} and {IfSuccess} nodes.
- Node* if_exception0 =
- graph()->NewNode(common()->IfException(), check_throw, check_fail);
- check_fail = graph()->NewNode(common()->IfSuccess(), check_fail);
- Node* if_exception1 =
- graph()->NewNode(common()->IfException(), effect, control);
- control = graph()->NewNode(common()->IfSuccess(), control);
-
- // Join the exception edges.
- Node* merge =
- graph()->NewNode(common()->Merge(2), if_exception0, if_exception1);
- Node* ephi = graph()->NewNode(common()->EffectPhi(2), if_exception0,
- if_exception1, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- if_exception0, if_exception1, merge);
- ReplaceWithValue(on_exception, phi, ephi, merge);
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
}
if (IsHoleyElementsKind(kind)) {
@@ -806,12 +974,13 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_false;
effect = eloop;
- // The above %ThrowTypeError runtime call is an unconditional throw, making
- // it impossible to return a successful completion in this case. We simply
- // connect the successful completion to the graph end.
- Node* terminate =
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
graph()->NewNode(common()->Throw(), check_throw, check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
return Replace(jsgraph()->UndefinedConstant());
@@ -899,24 +1068,21 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
- Node* check = graph()->NewNode(simplified()->ObjectIsCallable(), fncallback);
- Node* check_branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* check_fail = graph()->NewNode(common()->IfFalse(), check_branch);
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
- Node* check_throw = check_fail = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kCalledNonCallable), fncallback,
- context, check_frame_state, effect, check_fail);
- control = graph()->NewNode(common()->IfTrue(), check_branch);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
// Start the loop.
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
Node* vloop = k = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
checkpoint_params[4] = k;
@@ -946,24 +1112,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
effect, control);
- // Make sure that the access is still in bounds, since the callback could have
- // changed the array's size.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
- k = effect =
- graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
-
- // Reload the elements pointer before calling the callback, since the previous
- // callback might have resized the array causing the elements buffer to be
- // re-allocated.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
-
- Node* element = effect = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, k, effect, control);
+ Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
@@ -982,23 +1131,8 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
// Rewire potential exception edges.
Node* on_exception = nullptr;
if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- // Create appropriate {IfException} and {IfSuccess} nodes.
- Node* if_exception0 =
- graph()->NewNode(common()->IfException(), check_throw, check_fail);
- check_fail = graph()->NewNode(common()->IfSuccess(), check_fail);
- Node* if_exception1 =
- graph()->NewNode(common()->IfException(), effect, control);
- control = graph()->NewNode(common()->IfSuccess(), control);
-
- // Join the exception edges.
- Node* merge =
- graph()->NewNode(common()->Merge(2), if_exception0, if_exception1);
- Node* ephi = graph()->NewNode(common()->EffectPhi(2), if_exception0,
- if_exception1, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- if_exception0, if_exception1, merge);
- ReplaceWithValue(on_exception, phi, ephi, merge);
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
}
Handle<Map> double_map(Map::cast(
@@ -1018,19 +1152,370 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
control = if_false;
effect = eloop;
- // The above %ThrowTypeError runtime call is an unconditional throw, making
- // it impossible to return a successful completion in this case. We simply
- // connect the successful completion to the graph end.
- Node* terminate =
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, a, effect, control);
+ return Replace(a);
+}
+
+Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) {
+ return NoChange();
+ }
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ if (receiver_maps.size() == 0) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(danno): Handle holey elements kinds.
+ if (!IsFastPackedElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ // We can handle different maps, as long as their elements kind are the
+ // same.
+ if (receiver_map->elements_kind() != kind) {
+ return NoChange();
+ }
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ Handle<Map> initial_map(
+ Map::cast(native_context()->GetInitialJSArrayMap(kind)));
+
+ Node* k = jsgraph()->ZeroConstant();
+ Node* to = jsgraph()->ZeroConstant();
+
+ // Make sure the map hasn't changed before we construct the output array.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* a; // Construct the output array.
+ {
+ AllocationBuilder ab(jsgraph(), effect, control);
+ ab.Allocate(initial_map->instance_size(), NOT_TENURED, Type::Array());
+ ab.Store(AccessBuilder::ForMap(), initial_map);
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
+ ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ ab.Store(AccessBuilder::ForJSArrayLength(kind), jsgraph()->ZeroConstant());
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ ab.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ a = effect = ab.Finish();
+ }
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+ // This frame state doesn't ever call the deopt continuation, it's only
+ // necessary to specifiy a continuation in order to handle the exceptional
+ // case. We don't have all the values available to completely fill out
+ // checkpoint_params yet, but that's okay because it'll never be called.
+ // Therefore, "to" is mentioned twice, once standing in for the k_value
+ // value.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, a, k, original_length, to, to});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* v_to_loop = to = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTaggedSigned, 2), to, to, loop);
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, a, k, original_length, to});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayFilterLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* callback_value = nullptr;
+ {
+ // This frame state is dealt with by hand in
+ // Builtins::kArrayFilterLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, a, k, original_length, element, to});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We need an eager frame state for right after the callback function
+ // returned, just in case an attempt to grow the output array fails.
+ //
+ // Note that we are intentionally reusing the
+ // Builtins::kArrayFilterLoopLazyDeoptContinuation as an *eager* entry
+ // point in this case. This is safe, because re-evaluating a [ToBoolean]
+ // coercion is safe.
+ {
+ std::vector<Node*> checkpoint_params({receiver, fncallback, this_arg, a, k,
+ original_length, element, to,
+ callback_value});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // We have to coerce callback_value to boolean, and only store the element in
+ // a if it's true. The checkpoint above protects against the case that
+ // growing {a} fails.
+ to = DoFilterPostCallbackWork(kind, &control, &effect, a, to, element,
+ callback_value);
+ k = next_k;
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ v_to_loop->ReplaceInput(1, to);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
ReplaceWithValue(node, a, effect, control);
return Replace(a);
}
-Reduction JSCallReducer::ReduceCallApiFunction(
- Node* node, Handle<FunctionTemplateInfo> function_template_info) {
+Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
+ Node** effect, Node* a, Node* to,
+ Node* element,
+ Node* callback_value) {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_boolean_result, *control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ Node* etrue = *effect;
+ Node* vtrue;
+ {
+ // Load the elements backing store of the {receiver}.
+ Node* elements = etrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), a, etrue,
+ if_true);
+
+ // We know that {to} is in Unsigned31 range here, being smaller than
+ // {original_length} at all times.
+ Node* checked_to =
+ graph()->NewNode(common()->TypeGuard(Type::Unsigned31()), to, if_true);
+ Node* elements_length = etrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
+ etrue, if_true);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = etrue =
+ graph()->NewNode(simplified()->MaybeGrowFastElements(mode), a, elements,
+ checked_to, elements_length, etrue, if_true);
+
+ // Update the length of {a}.
+ Node* new_length_a = graph()->NewNode(simplified()->NumberAdd(), checked_to,
+ jsgraph()->OneConstant());
+
+ etrue = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), a,
+ new_length_a, etrue, if_true);
+
+ // Append the value to the {elements}.
+ etrue = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, checked_to, element, etrue, if_true);
+
+ vtrue = new_length_a;
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ Node* efalse = *effect;
+ Node* vfalse = to;
+
+ *control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ *effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, *control);
+ to = graph()->NewNode(common()->Phi(MachineRepresentation::kTaggedSigned, 2),
+ vtrue, vfalse, *control);
+ return to;
+}
+
+void JSCallReducer::WireInCallbackIsCallableCheck(
+ Node* fncallback, Node* context, Node* check_frame_state, Node* effect,
+ Node** control, Node** check_fail, Node** check_throw) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsCallable(), fncallback);
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
+ *check_fail = graph()->NewNode(common()->IfFalse(), check_branch);
+ *check_throw = *check_fail = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kCalledNonCallable), fncallback,
+ context, check_frame_state, effect, *check_fail);
+ *control = graph()->NewNode(common()->IfTrue(), check_branch);
+}
+
+void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw,
+ Node* on_exception,
+ Node* effect,
+ Node** check_fail,
+ Node** control) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* if_exception0 =
+ graph()->NewNode(common()->IfException(), check_throw, *check_fail);
+ *check_fail = graph()->NewNode(common()->IfSuccess(), *check_fail);
+ Node* if_exception1 =
+ graph()->NewNode(common()->IfException(), effect, *control);
+ *control = graph()->NewNode(common()->IfSuccess(), *control);
+
+ // Join the exception edges.
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_exception0, if_exception1);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), if_exception0,
+ if_exception1, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_exception0, if_exception1, merge);
+ ReplaceWithValue(on_exception, phi, ephi, merge);
+}
+
+Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
+ Node* control, Node** effect, Node** k) {
+ // Make sure that the access is still in bounds, since the callback could have
+ // changed the array's size.
+ Node* length = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ *effect, control);
+ *k = *effect = graph()->NewNode(simplified()->CheckBounds(), *k, length,
+ *effect, control);
+
+ // Reload the elements pointer before calling the callback, since the previous
+ // callback might have resized the array causing the elements buffer to be
+ // re-allocated.
+ Node* elements = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ *effect, control);
+
+ Node* masked_index =
+ graph()->NewNode(simplified()->MaskIndexWithBound(), *k, length);
+
+ Node* element = *effect = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, masked_index, *effect, control);
+ return element;
+}
+
+Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
+ Handle<JSFunction> function) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
@@ -1039,6 +1524,10 @@ Reduction JSCallReducer::ReduceCallApiFunction(
: NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
+ Handle<FunctionTemplateInfo> function_template_info(
+ FunctionTemplateInfo::cast(function->shared()->function_data()));
+ Handle<Context> context(function->context());
+
// CallApiCallbackStub expects the target in a register, so we count it out,
// and counts the receiver as an implicit argument, so we count the receiver
// out too.
@@ -1094,14 +1583,13 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Handle<CallHandlerInfo> call_handler_info(
CallHandlerInfo::cast(function_template_info->call_code()), isolate());
Handle<Object> data(call_handler_info->data(), isolate());
- CallApiCallbackStub stub(isolate(), argc, false);
+ CallApiCallbackStub stub(isolate(), argc);
CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), cid,
- cid.GetStackParameterCount() + argc +
- 2 /* implicit receiver + accessor_holder */,
+ cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1);
+ MachineType::AnyTagged(), 1, Linkage::kNoContext);
ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
Node* holder = lookup == CallOptimization::kHolderFound
? jsgraph()->HeapConstant(api_holder)
@@ -1110,12 +1598,14 @@ Reduction JSCallReducer::ReduceCallApiFunction(
&api_function, ExternalReference::DIRECT_API_CALL, isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(stub.GetCode()));
+ node->ReplaceInput(1, jsgraph()->Constant(context));
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
node->InsertInput(graph()->zone(), 3, holder);
node->InsertInput(graph()->zone(), 4,
jsgraph()->ExternalConstant(function_reference));
- node->InsertInput(graph()->zone(), 5, holder /* as accessor_holder */);
- node->ReplaceInput(6, receiver);
+ node->ReplaceInput(5, receiver);
+ // Remove context input.
+ node->RemoveInput(6 + argc);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
@@ -1344,9 +1834,9 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// The above %ThrowTypeError runtime call is an unconditional throw, making
// it impossible to return a successful completion in this case. We simply
// connect the successful completion to the graph end.
- Node* terminate =
+ Node* throw_node =
graph()->NewNode(common()->Throw(), check_throw, check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
Reduction const reduction = ReduceJSConstruct(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -1385,6 +1875,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
@@ -1413,6 +1905,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceBooleanConstructor(node);
case Builtins::kFunctionPrototypeApply:
return ReduceFunctionPrototypeApply(node);
+ case Builtins::kFastFunctionPrototypeBind:
+ return ReduceFunctionPrototypeBind(node);
case Builtins::kFunctionPrototypeCall:
return ReduceFunctionPrototypeCall(node);
case Builtins::kFunctionPrototypeHasInstance:
@@ -1423,6 +1917,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceObjectConstructor(node);
case Builtins::kObjectGetPrototypeOf:
return ReduceObjectGetPrototypeOf(node);
+ case Builtins::kObjectIs:
+ return ReduceObjectIs(node);
case Builtins::kObjectPrototypeGetProto:
return ReduceObjectPrototypeGetProto(node);
case Builtins::kObjectPrototypeHasOwnProperty:
@@ -1433,12 +1929,18 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceReflectApply(node);
case Builtins::kReflectConstruct:
return ReduceReflectConstruct(node);
+ case Builtins::kReflectGet:
+ return ReduceReflectGet(node);
case Builtins::kReflectGetPrototypeOf:
return ReduceReflectGetPrototypeOf(node);
+ case Builtins::kReflectHas:
+ return ReduceReflectHas(node);
case Builtins::kArrayForEach:
return ReduceArrayForEach(function, node);
case Builtins::kArrayMap:
return ReduceArrayMap(function, node);
+ case Builtins::kArrayFilter:
+ return ReduceArrayFilter(function, node);
case Builtins::kReturnReceiver:
return ReduceReturnReceiver(node);
default:
@@ -1446,9 +1948,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
}
if (!FLAG_runtime_stats && shared->IsApiFunction()) {
- Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(shared->function_data()), isolate());
- return ReduceCallApiFunction(node, function_template_info);
+ return ReduceCallApiFunction(node, function);
}
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
@@ -1458,13 +1958,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
Handle<Object> bound_this(function->bound_this(), isolate());
Handle<FixedArray> bound_arguments(function->bound_arguments(),
isolate());
- CallParameters const& p = CallParametersOf(node->op());
ConvertReceiverMode const convert_mode =
(bound_this->IsNullOrUndefined(isolate()))
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
node, jsgraph()->Constant(bound_target_function), 0);
@@ -1490,6 +1987,40 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+ // If {target} is the result of a JSCreateBoundFunction operation,
+ // we can just fold the construction and call the bound target
+ // function directly instead.
+ if (target->opcode() == IrOpcode::kJSCreateBoundFunction) {
+ Node* bound_target_function = NodeProperties::GetValueInput(target, 0);
+ Node* bound_this = NodeProperties::GetValueInput(target, 1);
+ int const bound_arguments_length =
+ static_cast<int>(CreateBoundFunctionParametersOf(target->op()).arity());
+
+ // Patch the {node} to use [[BoundTargetFunction]] and [[BoundThis]].
+ NodeProperties::ReplaceValueInput(node, bound_target_function, 0);
+ NodeProperties::ReplaceValueInput(node, bound_this, 1);
+
+ // Insert the [[BoundArguments]] for {node}.
+ for (int i = 0; i < bound_arguments_length; ++i) {
+ Node* value = NodeProperties::GetValueInput(target, 2 + i);
+ node->InsertInput(graph()->zone(), 2 + i, value);
+ arity++;
+ }
+
+ // Update the JSCall operator on {node}.
+ ConvertReceiverMode const convert_mode =
+ NodeProperties::CanBeNullOrUndefined(bound_this, effect)
+ ? ConvertReceiverMode::kAny
+ : ConvertReceiverMode::kNotNullOrUndefined;
+ NodeProperties::ChangeOp(
+ node, javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+ convert_mode));
+
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
// Extract feedback from the {node} using the CallICNexus.
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
@@ -1508,7 +2039,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (!ShouldUseCallICFeedback(target)) return NoChange();
Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
- if (cell->value()->IsJSFunction()) {
+ if (cell->value()->IsCallable()) {
Node* target_function =
jsgraph()->Constant(handle(cell->value(), isolate()));
@@ -1516,7 +2047,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
target_function);
effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
// Specialize the JSCall node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -1553,7 +2085,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
DCHECK_LE(2u, p.arity());
- int const arity = static_cast<int>(p.arity() - 2);
+ int arity = static_cast<int>(p.arity() - 2);
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1588,7 +2120,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
array_function);
effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
@@ -1610,7 +2143,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
new_target, new_target_feedback);
effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
// Specialize the JSConstruct node to the {new_target_feedback}.
NodeProperties::ReplaceValueInput(node, new_target_feedback, arity + 1);
@@ -1629,18 +2163,18 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Try to specialize JSConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
+ // Raise a TypeError if the {target} is not a constructor.
+ if (!m.Value()->IsConstructor()) {
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(node,
+ javascript()->CallRuntime(
+ Runtime::kThrowConstructedNonConstructable));
+ return Changed(node);
+ }
+
if (m.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- // Raise a TypeError if the {target} is not a constructor.
- if (!function->IsConstructor()) {
- NodeProperties::ReplaceValueInputs(node, target);
- NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(
- Runtime::kThrowConstructedNonConstructable));
- return Changed(node);
- }
-
// Don't inline cross native context.
if (function->native_context() != *native_context()) return NoChange();
@@ -1678,9 +2212,86 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return Changed(node);
}
}
+ } else if (m.Value()->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> function =
+ Handle<JSBoundFunction>::cast(m.Value());
+ Handle<JSReceiver> bound_target_function(
+ function->bound_target_function(), isolate());
+ Handle<FixedArray> bound_arguments(function->bound_arguments(),
+ isolate());
+
+ // Patch {node} to use [[BoundTargetFunction]].
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->Constant(bound_target_function), 0);
+
+ // Patch {node} to use [[BoundTargetFunction]]
+ // as new.target if {new_target} equals {target}.
+ NodeProperties::ReplaceValueInput(
+ node,
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(),
+ target, new_target),
+ jsgraph()->Constant(bound_target_function),
+ new_target),
+ arity + 1);
+
+ // Insert the [[BoundArguments]] for {node}.
+ for (int i = 0; i < bound_arguments->length(); ++i) {
+ node->InsertInput(
+ graph()->zone(), i + 1,
+ jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
+ arity++;
+ }
+
+ // Update the JSConstruct operator on {node}.
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->Construct(arity + 2, p.frequency(), VectorSlotPair()));
+
+ // Try to further reduce the JSConstruct {node}.
+ Reduction const reduction = ReduceJSConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
- // TODO(bmeurer): Also support optimizing bound functions and proxies here.
+ // TODO(bmeurer): Also support optimizing proxies here.
+ }
+
+ // If {target} is the result of a JSCreateBoundFunction operation,
+ // we can just fold the construction and construct the bound target
+ // function directly instead.
+ if (target->opcode() == IrOpcode::kJSCreateBoundFunction) {
+ Node* bound_target_function = NodeProperties::GetValueInput(target, 0);
+ int const bound_arguments_length =
+ static_cast<int>(CreateBoundFunctionParametersOf(target->op()).arity());
+
+ // Patch the {node} to use [[BoundTargetFunction]].
+ NodeProperties::ReplaceValueInput(node, bound_target_function, 0);
+
+ // Patch {node} to use [[BoundTargetFunction]]
+ // as new.target if {new_target} equals {target}.
+ NodeProperties::ReplaceValueInput(
+ node,
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(),
+ target, new_target),
+ bound_target_function, new_target),
+ arity + 1);
+
+ // Insert the [[BoundArguments]] for {node}.
+ for (int i = 0; i < bound_arguments_length; ++i) {
+ Node* value = NodeProperties::GetValueInput(target, 2 + i);
+ node->InsertInput(graph()->zone(), 1 + i, value);
+ arity++;
+ }
+
+ // Update the JSConstruct operator on {node}.
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->Construct(arity + 2, p.frequency(), VectorSlotPair()));
+
+ // Try to further reduce the JSConstruct {node}.
+ Reduction const reduction = ReduceJSConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
return NoChange();
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 3fce912fde..6e2353c4c1 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -54,23 +54,27 @@ class JSCallReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
- Reduction ReduceCallApiFunction(
- Node* node, Handle<FunctionTemplateInfo> function_template_info);
+ Reduction ReduceCallApiFunction(Node* node, Handle<JSFunction> function);
Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
+ Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
Reduction ReduceFunctionPrototypeHasInstance(Node* node);
Reduction ReduceObjectConstructor(Node* node);
Reduction ReduceObjectGetPrototype(Node* node, Node* object);
Reduction ReduceObjectGetPrototypeOf(Node* node);
+ Reduction ReduceObjectIs(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
Reduction ReduceObjectPrototypeHasOwnProperty(Node* node);
Reduction ReduceObjectPrototypeIsPrototypeOf(Node* node);
Reduction ReduceReflectApply(Node* node);
Reduction ReduceReflectConstruct(Node* node);
+ Reduction ReduceReflectGet(Node* node);
Reduction ReduceReflectGetPrototypeOf(Node* node);
+ Reduction ReduceReflectHas(Node* node);
Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayFilter(Handle<JSFunction> function, Node* node);
Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
Node* node, int arity, CallFrequency const& frequency,
VectorSlotPair const& feedback);
@@ -84,6 +88,30 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+ // Returns the updated {to} node, and updates control and effect along the
+ // way.
+ Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
+ Node** effect, Node* a, Node* to,
+ Node* element, Node* callback_value);
+
+ // If {fncallback} is not callable, throw a TypeError.
+ // {control} is altered, and new nodes {check_fail} and {check_throw} are
+ // returned. {check_fail} is the control branch where IsCallable failed,
+ // and {check_throw} is the call to throw a TypeError in that
+ // branch.
+ void WireInCallbackIsCallableCheck(Node* fncallback, Node* context,
+ Node* check_frame_state, Node* effect,
+ Node** control, Node** check_fail,
+ Node** check_throw);
+ void RewirePostCallbackExceptionEdges(Node* check_throw, Node* on_exception,
+ Node* effect, Node** check_fail,
+ Node** control);
+
+ // Load receiver[k], first bounding k by receiver array length.
+ // k is thusly changed, and the effect is changed as well.
+ Node* SafeLoadElement(ElementsKind kind, Node* receiver, Node* control,
+ Node** effect, Node** k);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 95d32bc3fd..ca7bcdfb66 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -102,8 +102,11 @@ bool IsContextParameter(Node* node) {
MaybeHandle<Context> GetSpecializationContext(Node* node, size_t* distance,
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
- case IrOpcode::kHeapConstant:
- return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kHeapConstant: {
+ Handle<Object> object = OpParameter<Handle<HeapObject>>(node);
+ if (object->IsContext()) return Handle<Context>::cast(object);
+ break;
+ }
case IrOpcode::kParameter: {
OuterContext outer;
if (maybe_outer.To(&outer) && IsContextParameter(node) &&
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index bd4f1069ab..9b0601f8f1 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -8,6 +8,7 @@
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/allocation-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
@@ -25,84 +26,6 @@ namespace compiler {
namespace {
-// A helper class to construct inline allocations on the simplified operator
-// level. This keeps track of the effect chain for initial stores on a newly
-// allocated object and also provides helpers for commonly allocated objects.
-class AllocationBuilder final {
- public:
- AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
- : jsgraph_(jsgraph),
- allocation_(nullptr),
- effect_(effect),
- control_(control) {}
-
- // Primitive allocation of static size.
- void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
- Type* type = Type::Any()) {
- DCHECK_LE(size, kMaxRegularHeapObjectSize);
- effect_ = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect_);
- allocation_ =
- graph()->NewNode(simplified()->Allocate(type, pretenure),
- jsgraph()->Constant(size), effect_, control_);
- effect_ = allocation_;
- }
-
- // Primitive store into a field.
- void Store(const FieldAccess& access, Node* value) {
- effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
- value, effect_, control_);
- }
-
- // Primitive store into an element.
- void Store(ElementAccess const& access, Node* index, Node* value) {
- effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
- index, value, effect_, control_);
- }
-
- // Compound allocation of a FixedArray.
- void AllocateArray(int length, Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED) {
- DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
- map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
- int size = (map->instance_type() == FIXED_ARRAY_TYPE)
- ? FixedArray::SizeFor(length)
- : FixedDoubleArray::SizeFor(length);
- Allocate(size, pretenure, Type::OtherInternal());
- Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
- }
-
- // Compound store of a constant into a field.
- void Store(const FieldAccess& access, Handle<Object> value) {
- Store(access, jsgraph()->Constant(value));
- }
-
- void FinishAndChange(Node* node) {
- NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
- node->ReplaceInput(0, allocation_);
- node->ReplaceInput(1, effect_);
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->FinishRegion());
- }
-
- Node* Finish() {
- return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
- }
-
- protected:
- JSGraph* jsgraph() { return jsgraph_; }
- Graph* graph() { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() { return jsgraph_->common(); }
- SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
-
- private:
- JSGraph* const jsgraph_;
- Node* allocation_;
- Node* effect_;
- Node* control_;
-};
-
// Retrieves the frame state holding actual argument values.
Node* GetArgumentsFrameState(Node* frame_state) {
Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state);
@@ -214,6 +137,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateArray:
return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateBoundFunction:
+ return ReduceJSCreateBoundFunction(node);
case IrOpcode::kJSCreateClosure:
return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
@@ -926,6 +851,46 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
return ReduceNewArrayToStubCall(node, site);
}
+Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateBoundFunction, node->opcode());
+ CreateBoundFunctionParameters const& p =
+ CreateBoundFunctionParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity());
+ Handle<Map> const map = p.map();
+ Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
+ Node* bound_this = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Create the [[BoundArguments]] for the result.
+ Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
+ if (arity > 0) {
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(arity, factory()->fixed_array_map());
+ for (int i = 0; i < arity; ++i) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i),
+ NodeProperties::GetValueInput(node, 2 + i));
+ }
+ bound_arguments = effect = a.Finish();
+ }
+
+ // Create the JSBoundFunction result.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSBoundFunction::kSize, NOT_TENURED, Type::BoundFunction());
+ a.Store(AccessBuilder::ForMap(), map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSBoundFunctionBoundTargetFunction(),
+ bound_target_function);
+ a.Store(AccessBuilder::ForJSBoundFunctionBoundThis(), bound_this);
+ a.Store(AccessBuilder::ForJSBoundFunctionBoundArguments(), bound_arguments);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
@@ -955,13 +920,16 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
- jsgraph()->TheHoleConstant());
a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
a.Store(AccessBuilder::ForJSFunctionContext(), context);
a.Store(AccessBuilder::ForJSFunctionFeedbackVector(), vector_cell);
a.Store(AccessBuilder::ForJSFunctionCode(), lazy_compile_builtin);
- STATIC_ASSERT(JSFunction::kSize == 8 * kPointerSize);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ if (function_map->has_prototype_slot()) {
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
+ jsgraph()->TheHoleConstant());
+ STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
+ }
for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
jsgraph()->UndefinedConstant());
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 00c2ba573c..42b4740dd0 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -16,7 +16,7 @@ namespace internal {
class AllocationSiteUsageContext;
class CompilationDependencies;
class Factory;
-
+class JSRegExp;
namespace compiler {
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateBoundFunction(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateKeyValueArray(Node* node);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 4d7b7972a9..d06717717d 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -60,6 +60,7 @@ REPLACE_STUB_CALL(Subtract)
REPLACE_STUB_CALL(Multiply)
REPLACE_STUB_CALL(Divide)
REPLACE_STUB_CALL(Modulus)
+REPLACE_STUB_CALL(Exponentiate)
REPLACE_STUB_CALL(BitwiseAnd)
REPLACE_STUB_CALL(BitwiseOr)
REPLACE_STUB_CALL(BitwiseXor)
@@ -70,11 +71,16 @@ REPLACE_STUB_CALL(LessThan)
REPLACE_STUB_CALL(LessThanOrEqual)
REPLACE_STUB_CALL(GreaterThan)
REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(BitwiseNot)
+REPLACE_STUB_CALL(Decrement)
+REPLACE_STUB_CALL(Increment)
+REPLACE_STUB_CALL(Negate)
REPLACE_STUB_CALL(HasProperty)
REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
+REPLACE_STUB_CALL(ToNumeric)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
@@ -126,34 +132,6 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) {
Operator::kEliminatable);
}
-void JSGenericLowering::LowerJSToBoolean(Node* node) {
- // The ToBoolean conversion doesn't need the current context.
- NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
- node->AppendInput(zone(), graph()->start());
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
- Operator::kEliminatable);
-}
-
-void JSGenericLowering::LowerJSClassOf(Node* node) {
- // The %_ClassOf intrinsic doesn't need the current context.
- NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kClassOf);
- node->AppendInput(zone(), graph()->start());
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
- Operator::kEliminatable);
-}
-
-void JSGenericLowering::LowerJSTypeOf(Node* node) {
- // The typeof operator doesn't need the current context.
- NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
- node->AppendInput(zone(), graph()->start());
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
- Operator::kEliminatable);
-}
-
-
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
@@ -397,6 +375,9 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSCreateBoundFunction(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
@@ -542,12 +523,6 @@ void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
}
-void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
- ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
-}
-
void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
ConstructForwardVarargsParameters p =
ConstructForwardVarargsParametersOf(node->op());
@@ -700,10 +675,6 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
-void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kConvertReceiver);
-}
-
void JSGenericLowering::LowerJSForInNext(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
@@ -745,7 +716,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* limit = graph()->NewNode(
+ Node* limit = effect = graph()->NewNode(
machine()->Load(MachineType::Pointer()),
jsgraph()->ExternalConstant(
ExternalReference::address_of_stack_limit(isolate())),
@@ -761,6 +732,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
NodeProperties::ReplaceControlInput(node, if_false);
+ NodeProperties::ReplaceEffectInput(node, effect);
Node* efalse = if_false = node;
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 1a8102da59..8cd89fcb26 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -1,7 +1,6 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
#define V8_COMPILER_JS_GENERIC_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 1060b81b97..12c610da56 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -305,6 +305,9 @@ Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
+Node* JSGraph::DeadValue() {
+ return CACHED(kDeadValue, graph()->NewNode(common()->DeadValue()));
+}
void JSGraph::GetCachedNodes(NodeVector* nodes) {
cache_.GetCachedNodes(nodes);
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index a4eb9a9061..a685fd69a8 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -67,6 +67,9 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
Node* NaNConstant();
Node* MinusOneConstant();
+ // Used for padding frames.
+ Node* PaddingConstant() { return TheHoleConstant(); }
+
// Creates a HeapConstant node, possibly canonicalized, and may access the
// heap to inspect the object.
Node* HeapConstant(Handle<HeapObject> value);
@@ -152,6 +155,9 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
+ // Sentinel for a value resulting from unreachable computations.
+ Node* DeadValue();
+
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
@@ -193,6 +199,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kEmptyStateValues,
kSingleDeadTypedStateValues,
kDead,
+ kDeadValue,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index b784c6ef97..9cff51985a 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -139,24 +139,6 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
if (!can_inline) return NoChange();
- // Stop inlining once the maximum allowed level is reached.
- int level = 0;
- for (Node* frame_state = NodeProperties::GetFrameStateInput(node);
- frame_state->opcode() == IrOpcode::kFrameState;
- frame_state = NodeProperties::GetFrameStateInput(frame_state)) {
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- if (FrameStateFunctionInfo::IsJSFunctionType(frame_info.type())) {
- if (++level > FLAG_max_inlining_levels) {
- TRACE(
- "Not considering call site #%d:%s, because inlining depth "
- "%d exceeds maximum allowed level %d\n",
- node->id(), node->op()->mnemonic(), level,
- FLAG_max_inlining_levels);
- return NoChange();
- }
- }
- }
-
// Gather feedback on how often this call site has been hit before.
if (node->opcode() == IrOpcode::kJSCall) {
CallParameters const p = CallParametersOf(node->op());
@@ -188,7 +170,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Forcibly inline small functions here. In the case of polymorphic inlining
// small_inline is set only when all functions are small.
- if (small_inline) {
+ if (small_inline &&
+ cumulative_count_ < FLAG_max_inlined_bytecode_size_absolute) {
TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
node->op()->mnemonic());
return InlineCandidate(candidate, true);
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 6943aab250..add2b2c478 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -258,54 +258,6 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
namespace {
-// TODO(bmeurer): Unify this with the witness helper functions in the
-// js-builtin-reducer.cc once we have a better understanding of the
-// map tracking we want to do, and eventually changed the CheckMaps
-// operator to carry map constants on the operator instead of inputs.
-// I.e. if the CheckMaps has some kind of SmallMapSet as operator
-// parameter, then this could be changed to call a generic
-//
-// SmallMapSet NodeProperties::CollectMapWitness(receiver, effect)
-//
-// function, which either returns the map set from the CheckMaps or
-// a singleton set from a StoreField.
-bool NeedsConvertReceiver(Node* receiver, Node* effect) {
- // Check if the {receiver} is already a JSReceiver.
- switch (receiver->opcode()) {
- case IrOpcode::kJSConstruct:
- case IrOpcode::kJSConstructWithSpread:
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateKeyValueArray:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSConvertReceiver:
- case IrOpcode::kJSGetSuperConstructor:
- case IrOpcode::kJSToObject: {
- return false;
- }
- default: {
- // We don't really care about the exact maps here, just the instance
- // types, which don't change across potential side-effecting operations.
- ZoneHandleSet<Map> maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &maps);
- if (result != NodeProperties::kNoReceiverMaps) {
- // Check if all {maps} are actually JSReceiver maps.
- for (size_t i = 0; i < maps.size(); ++i) {
- if (!maps[i]->IsJSReceiverMap()) return true;
- }
- return false;
- }
- return true;
- }
- }
-}
-
// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
DisallowHeapAllocation no_gc;
@@ -707,12 +659,14 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
if (node->opcode() == IrOpcode::kJSCall &&
is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
Node* effect = NodeProperties::GetEffectInput(node);
- if (NeedsConvertReceiver(call.receiver(), effect)) {
- const CallParameters& p = CallParametersOf(node->op());
- Node* convert = effect =
- graph()->NewNode(javascript()->ConvertReceiver(p.convert_mode()),
- call.receiver(), context, effect, start);
- NodeProperties::ReplaceValueInput(node, convert, 1);
+ if (NodeProperties::CanBePrimitive(call.receiver(), effect)) {
+ CallParameters const& p = CallParametersOf(node->op());
+ Node* global_proxy = jsgraph()->HeapConstant(
+ handle(info_->native_context()->global_proxy()));
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
+ call.receiver(), global_proxy, effect, start);
+ NodeProperties::ReplaceValueInput(node, receiver, 1);
NodeProperties::ReplaceEffectInput(node, effect);
}
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 3ed50acaeb..2322b8ac3a 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -69,8 +69,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
- case Runtime::kInlineSubString:
- return ReduceSubString(node);
case Runtime::kInlineToInteger:
return ReduceToInteger(node);
case Runtime::kInlineToLength:
@@ -241,19 +239,22 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
Node* vtrue = jsgraph()->FalseConstant();
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ Node* efalse = effect;
+ Node* map = efalse =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), value,
- effect, if_false),
- effect, if_false);
- Node* vfalse = graph()->NewNode(simplified()->NumberEqual(), efalse,
- jsgraph()->Constant(instance_type));
+ efalse, if_false);
+ Node* map_instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), map, efalse,
+ if_false);
+ Node* vfalse =
+ graph()->NewNode(simplified()->NumberEqual(), map_instance_type,
+ jsgraph()->Constant(instance_type));
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
// Replace all effect uses of {node} with the {ephi}.
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
- ReplaceWithValue(node, node, ephi);
+ ReplaceWithValue(node, node, ephi, merge);
// Turn the {node} into a Phi.
return Change(node, common()->Phi(MachineRepresentation::kTagged, 2), vtrue,
@@ -281,11 +282,6 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
- return Change(node, CodeFactory::SubString(isolate()), 3);
-}
-
-
Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToInteger());
return Changed(node);
@@ -389,8 +385,11 @@ Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
Reduction JSIntrinsicLowering::ReduceClassOf(Node* node) {
RelaxEffectsAndControls(node);
+ // The ClassOf operator has a single value input and control input.
+ Node* control_input = NodeProperties::GetControlInput(node, 0);
node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, javascript()->ClassOf());
+ node->ReplaceInput(1, control_input);
+ NodeProperties::ChangeOp(node, simplified()->ClassOf());
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 0226ae56f5..81cf5467d5 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -54,7 +54,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
- Reduction ReduceSubString(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToNumber(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 06f059e24e..a6786da157 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -10,6 +10,7 @@
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
@@ -151,6 +152,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
Node* object = NodeProperties::GetValueInput(node, 0);
Node* constructor = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
@@ -158,10 +160,18 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Check if the right hand side is a known {receiver}.
+ // Check if the right hand side is a known {receiver}, or
+ // we have feedback from the InstanceOfIC.
+ Handle<JSObject> receiver;
HeapObjectMatcher m(constructor);
- if (!m.HasValue() || !m.Value()->IsJSObject()) return NoChange();
- Handle<JSObject> receiver = Handle<JSObject>::cast(m.Value());
+ if (m.HasValue() && m.Value()->IsJSObject()) {
+ receiver = Handle<JSObject>::cast(m.Value());
+ } else if (p.feedback().IsValid()) {
+ InstanceOfICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (!nexus.GetConstructorFeedback().ToHandle(&receiver)) return NoChange();
+ } else {
+ return NoChange();
+ }
Handle<Map> receiver_map(receiver->map(), isolate());
// Compute property access info for @@hasInstance on {receiver}.
@@ -187,6 +197,10 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
native_context(), access_info.receiver_maps(), holder);
}
+ // Check that {constructor} is actually {receiver}.
+ constructor = access_builder.BuildCheckValue(constructor, &effect,
+ control, receiver);
+
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
access_info.receiver_maps());
@@ -225,6 +239,10 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
DCHECK(constant->IsCallable());
+ // Check that {constructor} is actually {receiver}.
+ constructor =
+ access_builder.BuildCheckValue(constructor, &effect, control, receiver);
+
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
access_info.receiver_maps());
@@ -251,8 +269,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
ConvertReceiverMode::kNotNullOrUndefined));
// Rewire the value uses of {node} to ToBoolean conversion of the result.
- Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- node, context);
+ Node* value = graph()->NewNode(simplified()->ToBoolean(), node);
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsValueEdge(edge) && edge.from() != value) {
edge.UpdateTo(value);
@@ -359,7 +376,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
NodeProperties::ReplaceValueInput(node, object, 0);
NodeProperties::ReplaceValueInput(
node, jsgraph()->HeapConstant(bound_target_function), 1);
- NodeProperties::ChangeOp(node, javascript()->InstanceOf());
+ NodeProperties::ChangeOp(node, javascript()->InstanceOf(VectorSlotPair()));
Reduction const reduction = ReduceJSInstanceOf(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -368,7 +385,8 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
if (m.Value()->IsJSFunction()) {
// Check if the {function} is a constructor and has an instance "prototype".
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- if (function->IsConstructor() && function->has_instance_prototype() &&
+ if (function->IsConstructor() && function->has_prototype_slot() &&
+ function->has_instance_prototype() &&
function->prototype()->IsJSReceiver()) {
// Ensure that the {function} has a valid initial map, so we can
// depend on that for the prototype constant-folding below.
@@ -462,9 +480,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Ensure that {index} matches the specified {name} (if {index} is given).
if (index != nullptr) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
- jsgraph()->HeapConstant(name));
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = BuildCheckEqualsName(name, index, effect, control);
}
// Check if we have a {receiver} to validate. If so, we need to check that
@@ -473,7 +489,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
if (receiver != nullptr) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
jsgraph()->HeapConstant(global_proxy()));
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kReceiverNotAGlobalProxy),
+ check, effect, control);
}
if (access_mode == AccessMode::kLoad) {
@@ -544,8 +562,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value,
jsgraph()->Constant(property_cell_value));
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kValueMismatch), check,
+ effect, control);
break;
}
case PropertyCellType::kConstantType: {
@@ -699,9 +718,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Ensure that {index} matches the specified {name} (if {index} is given).
if (index != nullptr) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
- jsgraph()->HeapConstant(name));
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = BuildCheckEqualsName(name, index, effect, control);
}
// Collect call nodes to rewire exception edges.
@@ -723,10 +740,32 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
&receiver, &effect, control) &&
!access_builder.TryBuildNumberCheck(access_info.receiver_maps(),
&receiver, &effect, control)) {
- receiver =
- access_builder.BuildCheckHeapObject(receiver, &effect, control);
- access_builder.BuildCheckMaps(receiver, &effect, control,
- access_info.receiver_maps());
+ if (HasNumberMaps(access_info.receiver_maps())) {
+ // We need to also let Smi {receiver}s through in this case, so
+ // we construct a diamond, guarded by the Sminess of the {receiver}
+ // and if {receiver} is not a Smi just emit a sequence of map checks.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ access_builder.BuildCheckMaps(receiver, &efalse, if_false,
+ access_info.receiver_maps());
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ } else {
+ receiver =
+ access_builder.BuildCheckHeapObject(receiver, &effect, control);
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
+ }
}
// Generate the actual property access.
@@ -831,7 +870,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
for (auto receiver_map : receiver_maps) {
maps.insert(receiver_map, graph()->zone());
}
- this_effect = graph()->NewNode(common()->MapGuard(maps), receiver,
+ this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
this_effect, this_control);
}
}
@@ -1007,7 +1046,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value, MapHandles const& receiver_maps,
- AccessMode access_mode, KeyedAccessStoreMode store_mode) {
+ AccessMode access_mode, KeyedAccessLoadMode load_mode,
+ KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1029,13 +1069,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
effect, control);
- // Ensure that {index} is less than {receiver} length.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
-
- // Return the character from the {receiver} as single character string.
- value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
- control);
+ // Load the single character string from {receiver} or yield undefined
+ // if the {index} is out of bounds (depending on the {load_mode}).
+ value = BuildIndexedStringLoad(receiver, index, length, &effect, &control,
+ load_mode);
} else {
// Retrieve the native context from the given {node}.
// Compute element access infos for the receiver maps.
@@ -1128,7 +1165,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Access the actual element.
ValueEffectControl continuation =
BuildElementAccess(receiver, index, value, effect, control,
- access_info, access_mode, store_mode);
+ access_info, access_mode, load_mode, store_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1164,11 +1201,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
receiver, this_effect, this_control);
}
- // Load the {receiver} map.
- Node* receiver_map = this_effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, this_effect, this_control);
-
// Perform map check(s) on {receiver}.
MapHandles const& receiver_maps = access_info.receiver_maps();
if (j == access_infos.size() - 1) {
@@ -1178,40 +1210,28 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
receiver_maps);
fallthrough_control = nullptr;
} else {
- ZoneVector<Node*> this_controls(zone());
- ZoneVector<Node*> this_effects(zone());
+ // Explicitly branch on the {receiver_maps}.
+ ZoneHandleSet<Map> maps;
for (Handle<Map> map : receiver_maps) {
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
- jsgraph()->Constant(map));
- Node* branch = graph()->NewNode(common()->Branch(), check,
- fallthrough_control);
- this_controls.push_back(
- graph()->NewNode(common()->IfTrue(), branch));
- this_effects.push_back(this_effect);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ maps.insert(map, graph()->zone());
}
+ Node* check = this_effect =
+ graph()->NewNode(simplified()->CompareMaps(maps), receiver,
+ this_effect, fallthrough_control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
- // Create single chokepoint for the control.
- int const this_control_count = static_cast<int>(this_controls.size());
- if (this_control_count == 1) {
- this_control = this_controls.front();
- this_effect = this_effects.front();
- } else {
- this_control =
- graph()->NewNode(common()->Merge(this_control_count),
- this_control_count, &this_controls.front());
- this_effects.push_back(this_control);
- this_effect =
- graph()->NewNode(common()->EffectPhi(this_control_count),
- this_control_count + 1, &this_effects.front());
- }
+ // Introduce a MapGuard to learn from this on the effect chain.
+ this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
+ this_effect, this_control);
}
// Access the actual element.
ValueEffectControl continuation = BuildElementAccess(
this_receiver, this_index, this_value, this_effect, this_control,
- access_info, access_mode, store_mode);
+ access_info, access_mode, load_mode, store_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -1248,7 +1268,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
template <typename KeyedICNexus>
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
Node* node, Node* index, Node* value, KeyedICNexus const& nexus,
- AccessMode access_mode, KeyedAccessStoreMode store_mode) {
+ AccessMode access_mode, KeyedAccessLoadMode load_mode,
+ KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1299,8 +1320,10 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), elements,
jsgraph()->HeapConstant(array_elements));
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
- control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(
+ DeoptimizeReason::kCowArrayElementsChanged),
+ check, effect, control);
value = jsgraph()->Constant(it.GetDataValue());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1320,13 +1343,11 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
// Ensure that {index} is less than {receiver} length.
Node* length = jsgraph()->Constant(string->length());
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
- // Return the character from the {receiver} as single character
- // string.
- value = graph()->NewNode(simplified()->StringCharAt(), receiver,
- index, control);
+ // Load the single character string from {receiver} or yield undefined
+ // if the {index} is out of bounds (depending on the {load_mode}).
+ value = BuildIndexedStringLoad(receiver, index, length, &effect,
+ &control, load_mode);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1396,7 +1417,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
// Try to lower the element access based on the {receiver_maps}.
return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
- store_mode);
+ load_mode, store_mode);
}
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
@@ -1482,8 +1503,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
receiver, effect, control);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, enumerator);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ control);
}
// Load the enum cache indices from the {cache_type}.
@@ -1504,7 +1526,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
jsgraph()->EmptyFixedArrayConstant()));
effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
// Determine the index from the {enum_indices}.
index = effect = graph()->NewNode(
@@ -1525,9 +1548,12 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
if (!p.feedback().IsValid()) return NoChange();
KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ // Extract the keyed access load mode from the keyed load IC.
+ KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
+
// Try to lower the keyed access based on the {nexus}.
return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad,
- STANDARD_STORE);
+ load_mode, STANDARD_STORE);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
@@ -1545,7 +1571,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
// Try to lower the keyed access based on the {nexus}.
return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kStore,
- store_mode);
+ STANDARD_LOAD, store_mode);
}
Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
@@ -1556,26 +1582,13 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
Handle<SharedFunctionInfo> shared_info =
frame_info.shared_info().ToHandleChecked();
- // We need a FrameState for the getter stub to restore the correct
- // context before returning to fullcodegen.
- FrameStateFunctionInfo const* frame_info0 =
- common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub, 1, 0,
- shared_info);
- Node* frame_state0 = graph()->NewNode(
- common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
- frame_info0),
- graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
- receiver),
- jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(), context,
- target, frame_state);
-
// Introduce the call to the getter function.
Node* value;
if (access_info.constant()->IsJSFunction()) {
value = *effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, context, frame_state0, *effect, *control);
+ target, receiver, context, frame_state, *effect, *control);
} else {
DCHECK(access_info.constant()->IsFunctionTemplateInfo());
Handle<FunctionTemplateInfo> function_template_info(
@@ -1585,9 +1598,8 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
access_info.holder().is_null()
? receiver
: jsgraph()->Constant(access_info.holder().ToHandleChecked());
- value =
- InlineApiCall(receiver, holder, context, target, frame_state0, nullptr,
- effect, control, shared_info, function_template_info);
+ value = InlineApiCall(receiver, holder, frame_state, nullptr, effect,
+ control, shared_info, function_template_info);
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -1601,7 +1613,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
return value;
}
-Node* JSNativeContextSpecialization::InlinePropertySetterCall(
+void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* receiver, Node* value, Node* context, Node* frame_state,
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
@@ -1609,25 +1621,12 @@ Node* JSNativeContextSpecialization::InlinePropertySetterCall(
FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
Handle<SharedFunctionInfo> shared_info =
frame_info.shared_info().ToHandleChecked();
- // We need a FrameState for the setter stub to restore the correct
- // context and return the appropriate value to fullcodegen.
- FrameStateFunctionInfo const* frame_info0 =
- common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub, 2, 0,
- shared_info);
- Node* frame_state0 = graph()->NewNode(
- common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
- frame_info0),
- graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
- receiver, value),
- jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(), context,
- target, frame_state);
-
// Introduce the call to the setter function.
if (access_info.constant()->IsJSFunction()) {
*effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, value, context, frame_state0, *effect, *control);
+ target, receiver, value, context, frame_state, *effect, *control);
} else {
DCHECK(access_info.constant()->IsFunctionTemplateInfo());
Handle<FunctionTemplateInfo> function_template_info(
@@ -1637,9 +1636,8 @@ Node* JSNativeContextSpecialization::InlinePropertySetterCall(
access_info.holder().is_null()
? receiver
: jsgraph()->Constant(access_info.holder().ToHandleChecked());
- value =
- InlineApiCall(receiver, holder, context, target, frame_state0, value,
- effect, control, shared_info, function_template_info);
+ InlineApiCall(receiver, holder, frame_state, value, effect, control,
+ shared_info, function_template_info);
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -1650,13 +1648,11 @@ Node* JSNativeContextSpecialization::InlinePropertySetterCall(
if_exceptions->push_back(if_exception);
*control = if_success;
}
- return value;
}
Node* JSNativeContextSpecialization::InlineApiCall(
- Node* receiver, Node* holder, Node* context, Node* target,
- Node* frame_state, Node* value, Node** effect, Node** control,
- Handle<SharedFunctionInfo> shared_info,
+ Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
+ Node** control, Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info) {
Handle<CallHandlerInfo> call_handler_info = handle(
CallHandlerInfo::cast(function_template_info->call_code()), isolate());
@@ -1665,17 +1661,15 @@ Node* JSNativeContextSpecialization::InlineApiCall(
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
- CallApiCallbackStub stub(
- isolate(), argc,
- true /* FunctionTemplateInfo doesn't have an associated context. */);
+ CallApiCallbackStub stub(isolate(), argc);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), call_interface_descriptor,
call_interface_descriptor.GetStackParameterCount() + argc +
- 1 /* implicit receiver */ + 1 /* accessor holder */,
+ 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1);
+ MachineType::AnyTagged(), 1, Linkage::kNoContext);
Node* data = jsgraph()->Constant(call_data_object);
ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
@@ -1685,17 +1679,17 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* code = jsgraph()->HeapConstant(stub.GetCode());
// Add CallApiCallbackStub's register argument as well.
- Node* inputs[12] = {code, target, data, holder, function_reference,
- holder, receiver};
- int index = 7 + argc;
- inputs[index++] = context;
+ Node* context = jsgraph()->Constant(native_context());
+ Node* inputs[10] = {code, context, data, holder, function_reference,
+ receiver};
+ int index = 6 + argc;
inputs[index++] = frame_state;
inputs[index++] = *effect;
inputs[index++] = *control;
// This needs to stay here because of the edge case described in
// http://crbug.com/675648.
if (value != nullptr) {
- inputs[7] = value;
+ inputs[6] = value;
}
return *effect = *control =
@@ -1780,12 +1774,13 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* constant_value = jsgraph()->Constant(access_info.constant());
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
value = constant_value;
} else if (access_info.IsAccessorConstant()) {
- value =
- InlinePropertySetterCall(receiver, value, context, frame_state, &effect,
- &control, if_exceptions, access_info);
+ InlinePropertySetterCall(receiver, value, context, frame_state, &effect,
+ &control, if_exceptions, access_info);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
FieldIndex const field_index = access_info.field_index();
@@ -1820,21 +1815,12 @@ JSNativeContextSpecialization::BuildPropertyStore(
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
// Allocate a MutableHeapNumber for the new property.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable),
- effect);
- Node* box = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), box,
- jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- box, value, effect, control);
- value = effect =
- graph()->NewNode(common()->FinishRegion(), box, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(HeapNumber::kSize, NOT_TENURED, Type::OtherInternal());
+ a.Store(AccessBuilder::ForMap(),
+ factory()->mutable_heap_number_map());
+ a.Store(AccessBuilder::ForHeapNumberValue(), value);
+ value = effect = a.Finish();
field_access.type = Type::Any();
field_access.machine_type = MachineType::TaggedPointer();
@@ -1865,8 +1851,9 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->NumberEqual(),
current_value, value);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ control);
return ValueEffectControl(value, effect, control);
}
break;
@@ -1883,8 +1870,9 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
current_value, value);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ control);
return ValueEffectControl(value, effect, control);
}
@@ -1929,7 +1917,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
// with this transitioning store.
Handle<Map> original_map(Map::cast(transition_map->GetBackPointer()),
isolate());
- if (original_map->unused_property_fields() == 0) {
+ if (original_map->UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
// Reallocate the properties {storage}.
@@ -2019,7 +2007,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* name = NodeProperties::GetValueInput(node, 1);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
jsgraph()->HeapConstant(cached_name));
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
Node* value = NodeProperties::GetValueInput(node, 2);
Node* context = NodeProperties::GetContextInput(node);
@@ -2058,7 +2047,7 @@ JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessStoreMode store_mode) {
+ KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
// TODO(bmeurer): We currently specialize based on elements kind. We should
@@ -2133,7 +2122,8 @@ JSNativeContextSpecialization::BuildElementAccess(
check, jsgraph()->ZeroConstant(), length);
}
- if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
// bounds for the {receiver}.
@@ -2151,9 +2141,49 @@ JSNativeContextSpecialization::BuildElementAccess(
GetArrayTypeFromElementsKind(elements_kind);
switch (access_mode) {
case AccessMode::kLoad: {
- value = effect = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type), buffer,
- base_pointer, external_pointer, index, effect, control);
+ // Check if we can return undefined for out-of-bounds loads.
+ if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ Node* masked_index = graph()->NewNode(
+ simplified()->MaskIndexWithBound(), index, length);
+
+ // Perform the actual load
+ vtrue = etrue = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type), buffer,
+ base_pointer, external_pointer, masked_index, etrue, if_true);
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // Materialize undefined for out-of-bounds loads.
+ vfalse = jsgraph()->UndefinedConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ } else {
+ Node* masked_index = graph()->NewNode(
+ simplified()->MaskIndexWithBound(), index, length);
+
+ // Perform the actual load.
+ value = effect = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type), buffer,
+ base_pointer, external_pointer, masked_index, effect, control);
+ }
break;
}
case AccessMode::kStoreInLiteral:
@@ -2244,6 +2274,14 @@ JSNativeContextSpecialization::BuildElementAccess(
if (IsGrowStoreMode(store_mode)) {
// For growing stores we validate the {index} below.
DCHECK_EQ(AccessMode::kStore, access_mode);
+ } else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ CanTreatHoleAsUndefined(receiver_maps)) {
+ // Check that the {index} is a valid array index, we do the actual
+ // bounds check below and just skip the store below if it's out of
+ // bounds for the {receiver}.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ jsgraph()->Constant(Smi::kMaxValue),
+ effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
@@ -2276,34 +2314,94 @@ JSNativeContextSpecialization::BuildElementAccess(
elements_kind == HOLEY_SMI_ELEMENTS) {
element_access.machine_type = MachineType::AnyTagged();
}
- // Perform the actual backing store access.
- value = effect =
- graph()->NewNode(simplified()->LoadElement(element_access), elements,
- index, effect, control);
- // Handle loading from holey backing stores correctly, by either mapping
- // the hole to undefined if possible, or deoptimizing otherwise.
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Check if we are allowed to turn the hole into undefined.
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- // Turn the hole into undefined.
- value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
- value);
- } else {
- // Bailout if we see the hole.
- value = effect = graph()->NewNode(simplified()->CheckNotTaggedHole(),
- value, effect, control);
+
+ // Check if we can return undefined for out-of-bounds loads.
+ if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ CanTreatHoleAsUndefined(receiver_maps)) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ Node* masked_index = graph()->NewNode(
+ simplified()->MaskIndexWithBound(), index, length);
+
+ // Perform the actual load
+ vtrue = etrue =
+ graph()->NewNode(simplified()->LoadElement(element_access),
+ elements, masked_index, etrue, if_true);
+
+ // Handle loading from holey backing stores correctly, by either
+ // mapping the hole to undefined if possible, or deoptimizing
+ // otherwise.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Turn the hole into undefined.
+ vtrue = graph()->NewNode(
+ simplified()->ConvertTaggedHoleToUndefined(), vtrue);
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // Return the signaling NaN hole directly if all uses are
+ // truncating.
+ vtrue = etrue =
+ graph()->NewNode(simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kAllowReturnHole),
+ vtrue, etrue, if_true);
+ }
}
- } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
- // Perform the hole check on the result.
- CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
- // Check if we are allowed to return the hole directly.
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- // Return the signaling NaN hole directly if all uses are truncating.
- mode = CheckFloat64HoleMode::kAllowReturnHole;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // Materialize undefined for out-of-bounds loads.
+ vfalse = jsgraph()->UndefinedConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ } else {
+ Node* masked_index =
+ graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
+
+ // Perform the actual load.
+ value = effect =
+ graph()->NewNode(simplified()->LoadElement(element_access),
+ elements, masked_index, effect, control);
+
+ // Handle loading from holey backing stores correctly, by either mapping
+ // the hole to undefined if possible, or deoptimizing otherwise.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Check if we are allowed to turn the hole into undefined.
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ // Turn the hole into undefined.
+ value = graph()->NewNode(
+ simplified()->ConvertTaggedHoleToUndefined(), value);
+ } else {
+ // Bailout if we see the hole.
+ value = effect = graph()->NewNode(
+ simplified()->CheckNotTaggedHole(), value, effect, control);
+ }
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
+ // Check if we are allowed to return the hole directly.
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ // Return the signaling NaN hole directly if all uses are
+ // truncating.
+ mode = CheckFloat64HoleMode::kAllowReturnHole;
+ }
+ value = effect = graph()->NewNode(
+ simplified()->CheckFloat64Hole(mode), value, effect, control);
}
- value = effect = graph()->NewNode(simplified()->CheckFloat64Hole(mode),
- value, effect, control);
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
@@ -2398,6 +2496,53 @@ JSNativeContextSpecialization::BuildElementAccess(
return ValueEffectControl(value, effect, control);
}
+Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
+ Node* receiver, Node* index, Node* length, Node** effect, Node** control,
+ KeyedAccessLoadMode load_mode) {
+ if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ isolate()->IsNoElementsProtectorIntact()) {
+ // Add a code dependency on the "no elements" protector.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // Ensure that the {index} is a valid String length.
+ index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ jsgraph()->Constant(String::kMaxLength),
+ *effect, *control);
+
+ // Load the single character string from {receiver} or yield
+ // undefined if the {index} is not within the valid bounds.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
+
+ Node* masked_index =
+ graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ masked_index, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->UndefinedConstant();
+
+ *control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, *control);
+ } else {
+ // Ensure that {index} is less than {receiver} length.
+ index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, *effect, *control);
+
+ Node* masked_index =
+ graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
+
+ // Return the character from the {receiver} as single character string.
+ return graph()->NewNode(simplified()->StringCharAt(), receiver,
+ masked_index, *control);
+ }
+}
+
Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
Handle<Map> map, Node* properties, Node* effect, Node* control) {
// TODO(bmeurer/jkummerow): Property deletions can undo map transitions
@@ -2409,7 +2554,7 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
// difficult for escape analysis to get rid of the backing stores used
// for intermediate states of chains of property additions. That makes
// it unclear what the best approach is here.
- DCHECK_EQ(0, map->unused_property_fields());
+ DCHECK_EQ(0, map->UnusedPropertyFields());
// Compute the length of the old {properties} and the new properties.
int length = map->NextFreePropertyIndex() - map->GetInObjectProperties();
int new_length = length + JSObject::kFieldsAdded;
@@ -2427,11 +2572,9 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
values.push_back(jsgraph()->UndefinedConstant());
}
- // Allocate and initialize the new properties.
+ // Compute new length and hash.
Node* hash;
if (length == 0) {
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
hash = graph()->NewNode(
common()->Select(MachineRepresentation::kTaggedSigned),
graph()->NewNode(simplified()->ObjectIsSmi(), properties), properties,
@@ -2445,30 +2588,35 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
hash = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForPropertyArrayLengthAndHash()),
properties, effect, control);
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
hash =
graph()->NewNode(simplified()->NumberBitwiseAnd(), hash,
jsgraph()->Constant(PropertyArray::HashField::kMask));
}
-
Node* new_length_and_hash = graph()->NewNode(
simplified()->NumberBitwiseOr(), jsgraph()->Constant(new_length), hash);
- Node* new_properties = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(PropertyArray::SizeFor(new_length)), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), new_properties,
- jsgraph()->PropertyArrayMapConstant(), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForPropertyArrayLengthAndHash()),
- new_properties, new_length_and_hash, effect, control);
+
+ // Allocate and initialize the new properties.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(PropertyArray::SizeFor(new_length), NOT_TENURED,
+ Type::OtherInternal());
+ a.Store(AccessBuilder::ForMap(), jsgraph()->PropertyArrayMapConstant());
+ a.Store(AccessBuilder::ForPropertyArrayLengthAndHash(), new_length_and_hash);
for (int i = 0; i < new_length; ++i) {
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)),
- new_properties, values[i], effect, control);
+ a.Store(AccessBuilder::ForFixedArraySlot(i), values[i]);
}
- return graph()->NewNode(common()->FinishRegion(), new_properties, effect);
+ return a.Finish();
+}
+
+Node* JSNativeContextSpecialization::BuildCheckEqualsName(Handle<Name> name,
+ Node* value,
+ Node* effect,
+ Node* control) {
+ DCHECK(name->IsUniqueName());
+ Operator const* const op =
+ name->IsSymbol() ? simplified()->CheckEqualsSymbol()
+ : simplified()->CheckEqualsInternalizedString();
+ return graph()->NewNode(op, jsgraph()->HeapConstant(name), value, effect,
+ control);
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
@@ -2488,10 +2636,10 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
}
// Check if the array prototype chain is intact.
- if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
+ if (!isolate()->IsNoElementsProtectorIntact()) return false;
// Install code dependency on the array protector cell.
- dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
return true;
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index f7e9439e29..879203c1dd 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -75,10 +75,12 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandles const& receiver_maps,
AccessMode access_mode,
+ KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
template <typename KeyedICNexus>
Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
KeyedICNexus const& nexus, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
FeedbackNexus const& nexus,
@@ -140,28 +142,36 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Node** control,
ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info);
- Node* InlinePropertySetterCall(Node* receiver, Node* value, Node* context,
- Node* frame_state, Node** effect,
- Node** control,
- ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info);
- Node* InlineApiCall(Node* receiver, Node* holder, Node* context, Node* target,
- Node* frame_state, Node* value, Node** effect,
- Node** control, Handle<SharedFunctionInfo> shared_info,
+ void InlinePropertySetterCall(Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node** effect,
+ Node** control,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info);
+ Node* InlineApiCall(Node* receiver, Node* holder, Node* frame_state,
+ Node* value, Node** effect, Node** control,
+ Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info);
// Construct the appropriate subgraph for element access.
- ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
- Node* value, Node* effect,
- Node* control,
- ElementAccessInfo const& access_info,
- AccessMode access_mode,
- KeyedAccessStoreMode store_mode);
+ ValueEffectControl BuildElementAccess(
+ Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+ ElementAccessInfo const& access_info, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode);
+
+ // Construct appropriate subgraph to load from a String.
+ Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length,
+ Node** effect, Node** control,
+ KeyedAccessLoadMode load_mode);
// Construct appropriate subgraph to extend properties backing store.
Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
Node* effect, Node* control);
+ // Construct appropriate subgraph to check that the {value} matches
+ // the previously recorded {name} feedback.
+ Node* BuildCheckEqualsName(Handle<Name> name, Node* value, Node* effect,
+ Node* control);
+
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 7b1df6e5a9..5b5e6589d2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -52,17 +52,6 @@ size_t hash_value(VectorSlotPair const& p) {
}
-ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSConvertReceiver, op->opcode());
- return OpParameter<ConvertReceiverMode>(op);
-}
-
-
-ToBooleanHints ToBooleanHintsOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSToBoolean, op->opcode());
- return OpParameter<ToBooleanHints>(op);
-}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
return os << p.arity() << ", " << p.start_index();
@@ -291,6 +280,7 @@ std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
+ op->opcode() == IrOpcode::kJSInstanceOf ||
op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
return OpParameter<FeedbackParameter>(op);
}
@@ -450,6 +440,33 @@ const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
return OpParameter<CreateArrayParameters>(op);
}
+bool operator==(CreateBoundFunctionParameters const& lhs,
+ CreateBoundFunctionParameters const& rhs) {
+ return lhs.arity() == rhs.arity() &&
+ lhs.map().location() == rhs.map().location();
+}
+
+bool operator!=(CreateBoundFunctionParameters const& lhs,
+ CreateBoundFunctionParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CreateBoundFunctionParameters const& p) {
+ return base::hash_combine(p.arity(), p.map().location());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CreateBoundFunctionParameters const& p) {
+ os << p.arity();
+ if (!p.map().is_null()) os << ", " << Brief(*p.map());
+ return os;
+}
+
+const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateBoundFunction, op->opcode());
+ return OpParameter<CreateBoundFunctionParameters>(op);
+}
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
@@ -560,20 +577,23 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(Multiply, Operator::kNoProperties, 2, 1) \
V(Divide, Operator::kNoProperties, 2, 1) \
V(Modulus, Operator::kNoProperties, 2, 1) \
+ V(Exponentiate, Operator::kNoProperties, 2, 1) \
+ V(BitwiseNot, Operator::kNoProperties, 1, 1) \
+ V(Decrement, Operator::kNoProperties, 1, 1) \
+ V(Increment, Operator::kNoProperties, 1, 1) \
+ V(Negate, Operator::kNoProperties, 1, 1) \
V(ToInteger, Operator::kNoProperties, 1, 1) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToNumeric, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kNoProperties, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(ClassOf, Operator::kPure, 1, 1) \
- V(TypeOf, Operator::kPure, 1, 1) \
V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
@@ -731,15 +751,6 @@ const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
parameters); // parameter
}
-const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<ToBooleanHints>( //--
- IrOpcode::kJSToBoolean, Operator::kPure, // opcode
- "JSToBoolean", // name
- 1, 0, 0, 1, 0, 0, // inputs/outputs
- hints); // parameter
-}
-
const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
uint32_t start_index) {
CallForwardVarargsParameters parameters(arity, start_index);
@@ -845,18 +856,9 @@ const Operator* JSOperatorBuilder::ConstructWithSpread(
parameters); // parameter
}
-const Operator* JSOperatorBuilder::ConvertReceiver(
- ConvertReceiverMode convert_mode) {
- return new (zone()) Operator1<ConvertReceiverMode>( // --
- IrOpcode::kJSConvertReceiver, Operator::kEliminatable, // opcode
- "JSConvertReceiver", // name
- 1, 1, 1, 1, 1, 0, // counts
- convert_mode); // parameter
-}
-
const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
const VectorSlotPair& feedback) {
- NamedAccess access(SLOPPY, name, feedback);
+ NamedAccess access(LanguageMode::kSloppy, name, feedback);
return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
@@ -866,7 +868,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
const Operator* JSOperatorBuilder::LoadProperty(
VectorSlotPair const& feedback) {
- PropertyAccess access(SLOPPY, feedback);
+ PropertyAccess access(LanguageMode::kSloppy, feedback);
return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
@@ -874,6 +876,15 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
+const Operator* JSOperatorBuilder::InstanceOf(VectorSlotPair const& feedback) {
+ FeedbackParameter parameter(feedback);
+ return new (zone()) Operator1<FeedbackParameter>( // --
+ IrOpcode::kJSInstanceOf, Operator::kNoProperties, // opcode
+ "JSInstanceOf", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ parameter); // parameter
+}
+
const Operator* JSOperatorBuilder::ForInNext(ForInMode mode) {
return new (zone()) Operator1<ForInMode>( // --
IrOpcode::kJSForInNext, Operator::kNoProperties, // opcode
@@ -1037,6 +1048,18 @@ const Operator* JSOperatorBuilder::CreateArray(size_t arity,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
+ Handle<Map> map) {
+ // bound_target_function, bound_this, arg1, ..., argN
+ int const value_input_count = static_cast<int>(arity) + 2;
+ CreateBoundFunctionParameters parameters(arity, map);
+ return new (zone()) Operator1<CreateBoundFunctionParameters>( // --
+ IrOpcode::kJSCreateBoundFunction, Operator::kEliminatable, // opcode
+ "JSCreateBoundFunction", // name
+ value_input_count, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
PretenureFlag pretenure) {
@@ -1145,15 +1168,6 @@ const Operator* JSOperatorBuilder::CreateBlockContext(
scope_info); // parameter
}
-const Operator* JSOperatorBuilder::CreateScriptContext(
- const Handle<ScopeInfo>& scope_info) {
- return new (zone()) Operator1<Handle<ScopeInfo>>( // --
- IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
- "JSCreateScriptContext", // name
- 1, 1, 1, 1, 1, 2, // counts
- scope_info); // parameter
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 0d47005824..94a9b1fdb6 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -85,13 +85,6 @@ bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
size_t hash_value(VectorSlotPair const&);
-// The ConvertReceiverMode is used as parameter by JSConvertReceiver operators.
-ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
-
-
-// The ToBooleanHints are used as parameter by JSToBoolean operators.
-ToBooleanHints ToBooleanHintsOf(Operator const* op);
-
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSConstructForwardVarargs operators.
class ConstructForwardVarargsParameters final {
@@ -527,6 +520,32 @@ std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
+// Defines shared information for the bound function that should be created.
+// This is used as parameter by JSCreateBoundFunction operators.
+class CreateBoundFunctionParameters final {
+ public:
+ CreateBoundFunctionParameters(size_t arity, Handle<Map> map)
+ : arity_(arity), map_(map) {}
+
+ size_t arity() const { return arity_; }
+ Handle<Map> map() const { return map_; }
+
+ private:
+ size_t const arity_;
+ Handle<Map> const map_;
+};
+
+bool operator==(CreateBoundFunctionParameters const&,
+ CreateBoundFunctionParameters const&);
+bool operator!=(CreateBoundFunctionParameters const&,
+ CreateBoundFunctionParameters const&);
+
+size_t hash_value(CreateBoundFunctionParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateBoundFunctionParameters const&);
+
+const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
+ const Operator* op);
// Defines shared information for the closure that should be created. This is
// used as a parameter by JSCreateClosure operators.
@@ -632,18 +651,25 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Multiply();
const Operator* Divide();
const Operator* Modulus();
+ const Operator* Exponentiate();
+
+ const Operator* BitwiseNot();
+ const Operator* Decrement();
+ const Operator* Increment();
+ const Operator* Negate();
- const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToInteger();
const Operator* ToLength();
const Operator* ToName();
const Operator* ToNumber();
+ const Operator* ToNumeric();
const Operator* ToObject();
const Operator* ToString();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
+ const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
VectorSlotPair const& feedback,
PretenureFlag pretenure);
@@ -685,8 +711,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
uint32_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
- const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
-
const Operator* LoadProperty(VectorSlotPair const& feedback);
const Operator* LoadNamed(Handle<Name> name, VectorSlotPair const& feedback);
@@ -721,9 +745,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreModule(int32_t cell_index);
const Operator* ClassOf();
- const Operator* TypeOf();
const Operator* HasInPrototypeChain();
- const Operator* InstanceOf();
+ const Operator* InstanceOf(const VectorSlotPair& feedback);
const Operator* OrdinaryHasInstance();
const Operator* ForInEnumerate();
@@ -749,8 +772,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Handle<ScopeInfo>& scope_info);
const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
const Operator* CreateBlockContext(const Handle<ScopeInfo>& scpope_info);
- const Operator* CreateModuleContext();
- const Operator* CreateScriptContext(const Handle<ScopeInfo>& scpope_info);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 98b336ce97..a7ce12cdb4 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -212,6 +212,68 @@ JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
Flags flags)
: jsgraph_(jsgraph), flags_(flags), feedback_vector_(feedback_vector) {}
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
+ const Operator* op, Node* operand, Node* effect, Node* control,
+ FeedbackSlot slot) const {
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForUnaryOperation)) {
+ return LoweringResult::Exit(node);
+ }
+
+ Node* node;
+ switch (op->opcode()) {
+ case IrOpcode::kJSBitwiseNot: {
+ // Lower to a speculative xor with -1 if we have some kind of Number
+ // feedback.
+ JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->BitwiseXor(),
+ operand, jsgraph()->SmiConstant(-1), effect,
+ control, slot);
+ node = b.TryBuildNumberBinop();
+ break;
+ }
+ case IrOpcode::kJSDecrement: {
+ // Lower to a speculative subtraction of 1 if we have some kind of Number
+ // feedback.
+ JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Subtract(),
+ operand, jsgraph()->SmiConstant(1), effect,
+ control, slot);
+ node = b.TryBuildNumberBinop();
+ break;
+ }
+ case IrOpcode::kJSIncrement: {
+ // Lower to a speculative addition of 1 if we have some kind of Number
+ // feedback.
+ BinaryOperationHint hint = BinaryOperationHint::kAny; // Dummy.
+ JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Add(hint),
+ operand, jsgraph()->SmiConstant(1), effect,
+ control, slot);
+ node = b.TryBuildNumberBinop();
+ break;
+ }
+ case IrOpcode::kJSNegate: {
+ // Lower to a speculative multiplication with -1 if we have some kind of
+ // Number feedback.
+ JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Multiply(),
+ operand, jsgraph()->SmiConstant(-1), effect,
+ control, slot);
+ node = b.TryBuildNumberBinop();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ if (node != nullptr) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ } else {
+ return LoweringResult::NoChange();
+ }
+}
+
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
const Operator* op, Node* left, Node* right, Node* effect, Node* control,
FeedbackSlot slot) const {
@@ -246,6 +308,18 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
break;
}
+ case IrOpcode::kJSInstanceOf: {
+ DCHECK(!slot.IsInvalid());
+ InstanceOfICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
+ return LoweringResult::Exit(node);
+ }
+ // TODO(turbofan): Should we generally support early lowering of
+ // JSInstanceOf operators here?
+ break;
+ }
case IrOpcode::kJSBitwiseOr:
case IrOpcode::kJSBitwiseXor:
case IrOpcode::kJSBitwiseAnd:
@@ -270,6 +344,10 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
break;
}
+ case IrOpcode::kJSExponentiate: {
+ // TODO(neis): Introduce a SpeculativeNumberPow operator?
+ break;
+ }
default:
UNREACHABLE();
break;
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index f6cc65c602..90686e5248 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -99,6 +99,11 @@ class JSTypeHintLowering {
Node* control_;
};
+ // Potential reduction of unary operations (e.g. negation).
+ LoweringResult ReduceUnaryOperation(const Operator* op, Node* operand,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
// Potential reduction of binary (arithmetic, logical, shift and relational
// comparison) operations.
LoweringResult ReduceBinaryOperation(const Operator* op, Node* left,
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 6f50ba15a3..2380c7c0f4 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -278,6 +279,8 @@ class JSBinopReduction final {
return simplified()->NumberDivide();
case IrOpcode::kJSModulus:
return simplified()->NumberModulus();
+ case IrOpcode::kJSExponentiate:
+ return simplified()->NumberPow();
case IrOpcode::kJSBitwiseAnd:
return simplified()->NumberBitwiseAnd();
case IrOpcode::kJSBitwiseOr:
@@ -363,7 +366,7 @@ class JSBinopReduction final {
Node* ConvertPlainPrimitiveToNumber(Node* node) {
DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
// Avoid inserting too many eager ToNumber() operations.
- Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
+ Reduction const reduction = lowering_->ReduceJSToNumberOrNumericInput(node);
if (reduction.Changed()) return reduction.replacement();
if (NodeProperties::GetType(node)->Is(Type::Number())) {
return node;
@@ -425,11 +428,70 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSBitwiseNot(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::PlainPrimitive())) {
+ // JSBitwiseNot(x) => NumberBitwiseXor(ToInt32(x), -1)
+ node->InsertInput(graph()->zone(), 1, jsgraph()->SmiConstant(-1));
+ NodeProperties::ChangeOp(node, javascript()->BitwiseXor());
+ JSBinopReduction r(this, node);
+ r.ConvertInputsToNumber();
+ r.ConvertInputsToUI32(kSigned, kSigned);
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Signed32());
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSDecrement(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::PlainPrimitive())) {
+ // JSDecrement(x) => NumberSubtract(ToNumber(x), 1)
+ node->InsertInput(graph()->zone(), 1, jsgraph()->OneConstant());
+ NodeProperties::ChangeOp(node, javascript()->Subtract());
+ JSBinopReduction r(this, node);
+ r.ConvertInputsToNumber();
+ DCHECK_EQ(simplified()->NumberSubtract(), r.NumberOp());
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSIncrement(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::PlainPrimitive())) {
+ // JSIncrement(x) => NumberAdd(ToNumber(x), 1)
+ node->InsertInput(graph()->zone(), 1, jsgraph()->OneConstant());
+ BinaryOperationHint hint = BinaryOperationHint::kAny; // Dummy.
+ NodeProperties::ChangeOp(node, javascript()->Add(hint));
+ JSBinopReduction r(this, node);
+ r.ConvertInputsToNumber();
+ DCHECK_EQ(simplified()->NumberAdd(), r.NumberOp());
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSNegate(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::PlainPrimitive())) {
+ // JSNegate(x) => NumberMultiply(ToNumber(x), -1)
+ node->InsertInput(graph()->zone(), 1, jsgraph()->SmiConstant(-1));
+ NodeProperties::ChangeOp(node, javascript()->Multiply());
+ JSBinopReduction r(this, node);
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
+ }
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
- r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
if (r.BothInputsAre(Type::PlainPrimitive()) &&
@@ -531,7 +593,7 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
NumberOperationHint hint = NumberOperationHintOf(node->op());
if ((hint == NumberOperationHint::kNumber ||
hint == NumberOperationHint::kNumberOrOddball) &&
- r.BothInputsAre(Type::NumberOrOddball())) {
+ r.BothInputsAre(Type::NumberOrUndefinedOrNullOrBoolean())) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
Type::Number());
@@ -646,33 +708,18 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
Node* value_map = jsgraph()->HeapConstant(factory()->cons_string_map());
// Allocate the resulting ConsString.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- Node* value = effect =
- graph()->NewNode(simplified()->Allocate(Type::OtherString(), NOT_TENURED),
- jsgraph()->Constant(ConsString::kSize), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- value, value_map, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), value,
- jsgraph()->Constant(Name::kEmptyHashField), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), value, length,
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForConsStringFirst()), value,
- first, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForConsStringSecond()), value,
- second, effect, control);
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(ConsString::kSize, NOT_TENURED, Type::OtherString());
+ a.Store(AccessBuilder::ForMap(), value_map);
+ a.Store(AccessBuilder::ForNameHashField(),
+ jsgraph()->Constant(Name::kEmptyHashField));
+ a.Store(AccessBuilder::ForStringLength(), length);
+ a.Store(AccessBuilder::ForConsStringFirst(), first);
+ a.Store(AccessBuilder::ForConsStringSecond(), second);
// Morph the {node} into a {FinishRegion}.
ReplaceWithValue(node, node, node, control);
- NodeProperties::SetType(value, NodeProperties::GetType(node));
- node->ReplaceInput(0, value);
- node->ReplaceInput(1, effect);
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->FinishRegion());
+ a.FinishAndChange(node);
return Changed(node);
}
@@ -764,32 +811,6 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
return r.ChangeToPureOperator(comparison);
}
-Reduction JSTypedLowering::ReduceJSTypeOf(Node* node) {
- Node* const input = node->InputAt(0);
- Type* type = NodeProperties::GetType(input);
- Factory* const f = factory();
- if (type->Is(Type::Boolean())) {
- return Replace(jsgraph()->Constant(f->boolean_string()));
- } else if (type->Is(Type::Number())) {
- return Replace(jsgraph()->Constant(f->number_string()));
- } else if (type->Is(Type::String())) {
- return Replace(jsgraph()->Constant(f->string_string()));
- } else if (type->Is(Type::Symbol())) {
- return Replace(jsgraph()->Constant(f->symbol_string()));
- } else if (type->Is(Type::OtherUndetectableOrUndefined())) {
- return Replace(jsgraph()->Constant(f->undefined_string()));
- } else if (type->Is(Type::NonCallableOrNull())) {
- return Replace(jsgraph()->Constant(f->object_string()));
- } else if (type->Is(Type::Function())) {
- return Replace(jsgraph()->Constant(f->function_string()));
- } else if (type->IsHeapConstant()) {
- return Replace(jsgraph()->Constant(
- Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
- }
-
- return NoChange();
-}
-
Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
JSBinopReduction r(this, node);
@@ -895,52 +916,6 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
- Node* const input = node->InputAt(0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(Type::Boolean())) {
- // JSToBoolean(x:boolean) => x
- return Replace(input);
- } else if (input_type->Is(Type::OrderedNumber())) {
- // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
- node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
- jsgraph()->ZeroConstant()));
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- } else if (input_type->Is(Type::Number())) {
- // JSToBoolean(x:number) => NumberToBoolean(x)
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
- return Changed(node);
- } else if (input_type->Is(Type::DetectableReceiverOrNull())) {
- // JSToBoolean(x:detectable receiver \/ null)
- // => BooleanNot(ReferenceEqual(x,#null))
- node->ReplaceInput(0, graph()->NewNode(simplified()->ReferenceEqual(),
- input, jsgraph()->NullConstant()));
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- } else if (input_type->Is(Type::ReceiverOrNullOrUndefined())) {
- // JSToBoolean(x:receiver \/ null \/ undefined)
- // => BooleanNot(ObjectIsUndetectable(x))
- node->ReplaceInput(
- 0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- } else if (input_type->Is(Type::String())) {
- // JSToBoolean(x:string) => BooleanNot(ReferenceEqual(x,""))
- node->ReplaceInput(0,
- graph()->NewNode(simplified()->ReferenceEqual(), input,
- jsgraph()->EmptyStringConstant()));
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- }
- return NoChange();
-}
-
Reduction JSTypedLowering::ReduceJSToInteger(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type* const input_type = NodeProperties::GetType(input);
@@ -967,7 +942,7 @@ Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
Node* input = NodeProperties::GetValueInput(node, 0);
Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
- if (input_type->Max() <= 0.0) {
+ if (input_type->IsNone() || input_type->Max() <= 0.0) {
input = jsgraph()->ZeroConstant();
} else if (input_type->Min() >= kMaxSafeInteger) {
input = jsgraph()->Constant(kMaxSafeInteger);
@@ -987,8 +962,9 @@ Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
- // Try constant-folding of JSToNumber with constant inputs.
+Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
+ // Try constant-folding of JSToNumber/JSToNumeric with constant inputs. Here
+ // we only cover cases where ToNumber and ToNumeric coincide.
Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::String())) {
HeapObjectMatcher m(input);
@@ -1020,10 +996,10 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
+Reduction JSTypedLowering::ReduceJSToNumberOrNumeric(Node* node) {
// Try to reduce the input first.
Node* const input = node->InputAt(0);
- Reduction reduction = ReduceJSToNumberInput(input);
+ Reduction reduction = ReduceJSToNumberOrNumericInput(input);
if (reduction.Changed()) {
ReplaceWithValue(node, reduction.replacement());
return reduction;
@@ -1032,6 +1008,10 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
if (input_type->Is(Type::PlainPrimitive())) {
RelaxEffectsAndControls(node);
node->TrimInputCount(1);
+ // For a PlainPrimitive, ToNumeric is the same as ToNumber.
+ Type* node_type = NodeProperties::GetType(node);
+ NodeProperties::SetType(
+ node, Type::Intersect(node_type, Type::Number(), graph()->zone()));
NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
return Changed(node);
}
@@ -1200,6 +1180,8 @@ Reduction JSTypedLowering::ReduceJSHasInPrototypeChain(Node* node) {
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
Node* vloop = value = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), value, value, loop);
NodeProperties::SetType(vloop, Type::NonInternal());
@@ -1448,165 +1430,6 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
return Changed(value);
}
-Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
- DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
- ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Type* receiver_type = NodeProperties::GetType(receiver);
- Node* context = NodeProperties::GetContextInput(node);
- Type* context_type = NodeProperties::GetType(context);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Check if {receiver} is known to be a receiver.
- if (receiver_type->Is(Type::Receiver())) {
- ReplaceWithValue(node, receiver, effect, control);
- return Replace(receiver);
- }
-
- // If the {receiver} is known to be null or undefined, we can just replace it
- // with the global proxy unconditionally.
- if (receiver_type->Is(Type::NullOrUndefined()) ||
- mode == ConvertReceiverMode::kNullOrUndefined) {
- if (context_type->IsHeapConstant()) {
- Handle<JSObject> global_proxy(
- Handle<Context>::cast(context_type->AsHeapConstant()->Value())
- ->global_proxy(),
- isolate());
- receiver = jsgraph()->Constant(global_proxy);
- } else {
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, effect);
- receiver = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, effect);
- }
- ReplaceWithValue(node, receiver, effect, control);
- return Replace(receiver);
- }
-
- // If {receiver} cannot be null or undefined we can skip a few checks.
- if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
- mode == ConvertReceiverMode::kNotNullOrUndefined) {
- Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* rtrue = receiver;
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* rfalse;
- {
- // Convert {receiver} using the ToObjectStub. The call does not require a
- // frame-state in this case, because neither null nor undefined is passed.
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, node->op()->properties());
- rfalse = efalse = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, efalse);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
- // Morph the {node} into an appropriate Phi.
- ReplaceWithValue(node, node, effect, control);
- node->ReplaceInput(0, rtrue);
- node->ReplaceInput(1, rfalse);
- node->ReplaceInput(2, control);
- node->TrimInputCount(3);
- NodeProperties::ChangeOp(node,
- common()->Phi(MachineRepresentation::kTagged, 2));
- return Changed(node);
- }
-
- // Check if {receiver} is already a JSReceiver.
- Node* check0 = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-
- // Check {receiver} for undefined.
- Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
- jsgraph()->UndefinedConstant());
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_false0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-
- // Check {receiver} for null.
- Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
- jsgraph()->NullConstant());
- Node* branch2 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check2, if_false1);
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-
- // We just use {receiver} directly.
- Node* if_noop = if_true0;
- Node* enoop = effect;
- Node* rnoop = receiver;
-
- // Convert {receiver} using ToObject.
- Node* if_convert = if_false2;
- Node* econvert = effect;
- Node* rconvert;
- {
- // Convert {receiver} using the ToObjectStub. The call does not require a
- // frame-state in this case, because neither null nor undefined is passed.
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, node->op()->properties());
- rconvert = econvert = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, econvert);
- }
-
- // Replace {receiver} with global proxy of {context}.
- Node* if_global = graph()->NewNode(common()->Merge(2), if_true1, if_true2);
- Node* eglobal = effect;
- Node* rglobal;
- {
- if (context_type->IsHeapConstant()) {
- Handle<JSObject> global_proxy(
- Handle<Context>::cast(context_type->AsHeapConstant()->Value())
- ->global_proxy(),
- isolate());
- rglobal = jsgraph()->Constant(global_proxy);
- } else {
- Node* native_context = eglobal = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, eglobal);
- rglobal = eglobal = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, eglobal);
- }
- }
-
- control =
- graph()->NewNode(common()->Merge(3), if_noop, if_convert, if_global);
- effect = graph()->NewNode(common()->EffectPhi(3), enoop, econvert, eglobal,
- control);
- // Morph the {node} into an appropriate Phi.
- ReplaceWithValue(node, node, effect, control);
- node->ReplaceInput(0, rnoop);
- node->ReplaceInput(1, rconvert);
- node->ReplaceInput(2, rglobal);
- node->ReplaceInput(3, control);
- node->TrimInputCount(4);
- NodeProperties::ChangeOp(node,
- common()->Phi(MachineRepresentation::kTagged, 3));
- return Changed(node);
-}
-
namespace {
void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
@@ -1632,7 +1455,6 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
const bool is_construct = (node->opcode() == IrOpcode::kJSConstruct);
DCHECK(Builtins::HasCppImplementation(builtin_index));
- DCHECK_EQ(0, flags & CallDescriptor::kSupportsTailCalls);
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = is_construct
@@ -1661,6 +1483,7 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
static const int kStubAndReceiver = 2;
int cursor = arity + kStubAndReceiver;
+ node->InsertInput(zone, cursor++, jsgraph->PaddingConstant());
node->InsertInput(zone, cursor++, argc_node);
node->InsertInput(zone, cursor++, target);
node->InsertInput(zone, cursor++, new_target);
@@ -1866,9 +1689,11 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Check if we need to convert the {receiver}.
if (is_sloppy(shared->language_mode()) && !shared->native() &&
!receiver_type->Is(Type::Receiver())) {
+ Node* global_proxy =
+ jsgraph()->HeapConstant(handle(function->global_proxy()));
receiver = effect =
- graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
- receiver, context, effect, control);
+ graph()->NewNode(simplified()->ConvertReceiver(convert_mode),
+ receiver, global_proxy, effect, control);
NodeProperties::ReplaceValueInput(node, receiver, 1);
}
@@ -1893,8 +1718,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node, common()->Call(Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(),
1 + arity, flags)));
- } else if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
- ((flags & CallDescriptor::kSupportsTailCalls) == 0)) {
+ } else if (is_builtin && Builtins::HasCppImplementation(builtin_index)) {
// Patch {node} to a direct CEntryStub call.
ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
} else {
@@ -1959,9 +1783,12 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, effect, control);
- ReplaceWithValue(node, node, effect, control);
+ // Since the change to LoadElement() below is effectful, we connect
+ // node to all effect uses.
+ ReplaceWithValue(node, node, node, control);
// Morph the {node} into a LoadElement.
node->ReplaceInput(0, cache_array);
@@ -2082,7 +1909,7 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
}
case ForInMode::kGeneric: {
// Check if the {enumerator} is a Map or a FixedArray.
- Node* check = graph()->NewNode(
+ Node* check = effect = graph()->NewNode(
simplified()->CompareMaps(ZoneHandleSet<Map>(factory()->meta_map())),
enumerator, effect, control);
Node* branch =
@@ -2297,13 +2124,20 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSMultiply:
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
+ case IrOpcode::kJSExponentiate:
return ReduceNumberBinop(node);
+ case IrOpcode::kJSBitwiseNot:
+ return ReduceJSBitwiseNot(node);
+ case IrOpcode::kJSDecrement:
+ return ReduceJSDecrement(node);
+ case IrOpcode::kJSIncrement:
+ return ReduceJSIncrement(node);
+ case IrOpcode::kJSNegate:
+ return ReduceJSNegate(node);
case IrOpcode::kJSHasInPrototypeChain:
return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
- case IrOpcode::kJSToBoolean:
- return ReduceJSToBoolean(node);
case IrOpcode::kJSToInteger:
return ReduceJSToInteger(node);
case IrOpcode::kJSToLength:
@@ -2311,13 +2145,12 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSToName:
return ReduceJSToName(node);
case IrOpcode::kJSToNumber:
- return ReduceJSToNumber(node);
+ case IrOpcode::kJSToNumeric:
+ return ReduceJSToNumberOrNumeric(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
case IrOpcode::kJSToObject:
return ReduceJSToObject(node);
- case IrOpcode::kJSTypeOf:
- return ReduceJSTypeOf(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadContext:
@@ -2328,8 +2161,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSLoadModule(node);
case IrOpcode::kJSStoreModule:
return ReduceJSStoreModule(node);
- case IrOpcode::kJSConvertReceiver:
- return ReduceJSConvertReceiver(node);
case IrOpcode::kJSConstructForwardVarargs:
return ReduceJSConstructForwardVarargs(node);
case IrOpcode::kJSConstruct:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 80f818e0a4..8b00c1d32c 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -40,6 +40,10 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
friend class JSBinopReduction;
Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSBitwiseNot(Node* node);
+ Reduction ReduceJSDecrement(Node* node);
+ Reduction ReduceJSIncrement(Node* node);
+ Reduction ReduceJSNegate(Node* node);
Reduction ReduceJSComparison(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSHasInPrototypeChain(Node* node);
@@ -50,16 +54,14 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSStoreModule(Node* node);
Reduction ReduceJSEqual(Node* node);
Reduction ReduceJSStrictEqual(Node* node);
- Reduction ReduceJSToBoolean(Node* node);
Reduction ReduceJSToInteger(Node* node);
Reduction ReduceJSToLength(Node* node);
Reduction ReduceJSToName(Node* node);
- Reduction ReduceJSToNumberInput(Node* input);
- Reduction ReduceJSToNumber(Node* node);
+ Reduction ReduceJSToNumberOrNumericInput(Node* input);
+ Reduction ReduceJSToNumberOrNumeric(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
- Reduction ReduceJSConvertReceiver(Node* node);
Reduction ReduceJSConstructForwardVarargs(Node* node);
Reduction ReduceJSConstruct(Node* node);
Reduction ReduceJSCallForwardVarargs(Node* node);
@@ -71,7 +73,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
- Reduction ReduceJSTypeOf(Node* node);
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 679771f56e..03b8074f0f 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -37,6 +37,9 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallAddress:
os << "Addr";
break;
+ case CallDescriptor::kCallWasmFunction:
+ os << "Wasm";
+ break;
}
return os;
}
@@ -46,7 +49,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
// TODO(svenpanne) Output properties etc. and be less cryptic.
return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
<< "s" << d.StackParameterCount() << "i" << d.InputCount() << "f"
- << d.FrameStateCount() << "t" << d.SupportsTailCalls();
+ << d.FrameStateCount();
}
MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
@@ -117,6 +120,7 @@ int CallDescriptor::CalculateFixedFrameSize() const {
CommonFrameConstants::kCPSlotCount;
break;
case kCallCodeObject:
+ case kCallWasmFunction:
return TypedFrameConstants::kFixedSlotCount;
}
UNREACHABLE();
@@ -144,7 +148,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
// deoptimize are whitelisted here and can be called without a FrameState.
case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
- case Runtime::kConvertReceiver:
case Runtime::kCreateIterResultObject:
case Runtime::kGeneratorGetContinuation:
case Runtime::kIncBlockCounter:
@@ -155,7 +158,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
- case Runtime::kStringCompare:
case Runtime::kStringEqual:
case Runtime::kStringNotEqual:
case Runtime::kStringLessThan:
@@ -451,17 +453,16 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
// The target for interpreter dispatches is a code entry address.
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallAddress, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- stack_parameter_count, // stack_parameter_count
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kCanUseRoots | // flags
- CallDescriptor::kSupportsTailCalls, // flags
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ stack_parameter_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots, // flags
descriptor.DebugName(isolate));
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 0f85fc994f..9e79a9af00 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -166,28 +166,28 @@ class V8_EXPORT_PRIVATE CallDescriptor final
public:
// Describes the kind of this call, which determines the target.
enum Kind {
- kCallCodeObject, // target is a Code object
- kCallJSFunction, // target is a JSFunction object
- kCallAddress // target is a machine pointer
+ kCallCodeObject, // target is a Code object
+ kCallJSFunction, // target is a JSFunction object
+ kCallAddress, // target is a machine pointer
+ kCallWasmFunction // target is a wasm function
};
enum Flag {
kNoFlags = 0u,
kNeedsFrameState = 1u << 0,
kHasExceptionHandler = 1u << 1,
- kSupportsTailCalls = 1u << 2,
- kCanUseRoots = 1u << 3,
+ kCanUseRoots = 1u << 2,
// (arm64 only) native stack should be used for arguments.
- kUseNativeStack = 1u << 4,
+ kUseNativeStack = 1u << 3,
// (arm64 only) call instruction has to restore JSSP or CSP.
- kRestoreJSSP = 1u << 5,
- kRestoreCSP = 1u << 6,
+ kRestoreJSSP = 1u << 4,
+ kRestoreCSP = 1u << 5,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 7,
+ kInitializeRootRegister = 1u << 6,
// Does not ever try to allocate space on our heap.
- kNoAllocate = 1u << 8,
+ kNoAllocate = 1u << 7,
// Push argument count as part of function prologue.
- kPushArgumentCount = 1u << 9
+ kPushArgumentCount = 1u << 8
};
typedef base::Flags<Flag> Flags;
@@ -248,7 +248,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
Flags flags() const { return flags_; }
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
- bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
bool UseNativeStack() const { return flags() & kUseNativeStack; }
bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
bool InitializeRootRegister() const {
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index d3b9879919..0313e57909 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -17,12 +17,27 @@ namespace compiler {
namespace {
-enum Aliasing { kNoAlias, kMayAlias, kMustAlias };
+bool IsRename(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kFinishRegion:
+ case IrOpcode::kTypeGuard:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Node* ResolveRenames(Node* node) {
+ while (IsRename(node)) {
+ node = node->InputAt(0);
+ }
+ return node;
+}
-Aliasing QueryAlias(Node* a, Node* b) {
- if (a == b) return kMustAlias;
+bool MayAlias(Node* a, Node* b) {
+ if (a == b) return true;
if (!NodeProperties::GetType(a)->Maybe(NodeProperties::GetType(b))) {
- return kNoAlias;
+ return false;
}
switch (b->opcode()) {
case IrOpcode::kAllocate: {
@@ -30,7 +45,7 @@ Aliasing QueryAlias(Node* a, Node* b) {
case IrOpcode::kAllocate:
case IrOpcode::kHeapConstant:
case IrOpcode::kParameter:
- return kNoAlias;
+ return false;
default:
break;
}
@@ -38,7 +53,7 @@ Aliasing QueryAlias(Node* a, Node* b) {
}
case IrOpcode::kFinishRegion:
case IrOpcode::kTypeGuard:
- return QueryAlias(a, b->InputAt(0));
+ return MayAlias(a, b->InputAt(0));
default:
break;
}
@@ -47,7 +62,7 @@ Aliasing QueryAlias(Node* a, Node* b) {
switch (b->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kParameter:
- return kNoAlias;
+ return false;
default:
break;
}
@@ -55,16 +70,16 @@ Aliasing QueryAlias(Node* a, Node* b) {
}
case IrOpcode::kFinishRegion:
case IrOpcode::kTypeGuard:
- return QueryAlias(a->InputAt(0), b);
+ return MayAlias(a->InputAt(0), b);
default:
break;
}
- return kMayAlias;
+ return true;
}
-bool MayAlias(Node* a, Node* b) { return QueryAlias(a, b) != kNoAlias; }
-
-bool MustAlias(Node* a, Node* b) { return QueryAlias(a, b) == kMustAlias; }
+bool MustAlias(Node* a, Node* b) {
+ return ResolveRenames(a) == ResolveRenames(b);
+}
} // namespace
@@ -371,15 +386,22 @@ void LoadElimination::AbstractField::Print() const {
}
}
+LoadElimination::AbstractMaps::AbstractMaps(Zone* zone)
+ : info_for_node_(zone) {}
+
+LoadElimination::AbstractMaps::AbstractMaps(Node* object,
+ ZoneHandleSet<Map> maps, Zone* zone)
+ : info_for_node_(zone) {
+ object = ResolveRenames(object);
+ info_for_node_.insert(std::make_pair(object, maps));
+}
+
bool LoadElimination::AbstractMaps::Lookup(
Node* object, ZoneHandleSet<Map>* object_maps) const {
- for (auto pair : info_for_node_) {
- if (MustAlias(object, pair.first)) {
- *object_maps = pair.second;
- return true;
- }
- }
- return false;
+ auto it = info_for_node_.find(ResolveRenames(object));
+ if (it == info_for_node_.end()) return false;
+ *object_maps = it->second;
+ return true;
}
LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
@@ -415,7 +437,8 @@ LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Extend(
Node* object, ZoneHandleSet<Map> maps, Zone* zone) const {
AbstractMaps* that = new (zone) AbstractMaps(zone);
that->info_for_node_ = this->info_for_node_;
- that->info_for_node_.insert(std::make_pair(object, maps));
+ object = ResolveRenames(object);
+ that->info_for_node_[object] = maps;
return that;
}
@@ -516,7 +539,7 @@ bool LoadElimination::AbstractState::LookupMaps(
return this->maps_ && this->maps_->Lookup(object, object_map);
}
-LoadElimination::AbstractState const* LoadElimination::AbstractState::AddMaps(
+LoadElimination::AbstractState const* LoadElimination::AbstractState::SetMaps(
Node* object, ZoneHandleSet<Map> maps, Zone* zone) const {
AbstractState* that = new (zone) AbstractState(*this);
if (that->maps_) {
@@ -650,9 +673,11 @@ Node* LoadElimination::AbstractState::LookupField(Node* object,
}
bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const {
- if (QueryAlias(object_, other) == kNoAlias) {
+ // Decide aliasing based on the node kinds.
+ if (!compiler::MayAlias(object_, other)) {
return false;
}
+ // Decide aliasing based on maps (if available).
Handle<Map> map;
if (map_.ToHandle(&map)) {
ZoneHandleSet<Map> other_maps;
@@ -713,7 +738,7 @@ Reduction LoadElimination::ReduceArrayBufferWasNeutered(Node* node) {
}
Reduction LoadElimination::ReduceMapGuard(Node* node) {
- ZoneHandleSet<Map> const maps = MapGuardMapsOf(node->op());
+ ZoneHandleSet<Map> const maps = MapGuardMapsOf(node->op()).maps();
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
@@ -721,10 +746,9 @@ Reduction LoadElimination::ReduceMapGuard(Node* node) {
ZoneHandleSet<Map> object_maps;
if (state->LookupMaps(object, &object_maps)) {
if (maps.contains(object_maps)) return Replace(effect);
- state = state->KillMaps(object, zone());
// TODO(turbofan): Compute the intersection.
}
- state = state->AddMaps(object, maps, zone());
+ state = state->SetMaps(object, maps, zone());
return UpdateState(node, state);
}
@@ -737,15 +761,14 @@ Reduction LoadElimination::ReduceCheckMaps(Node* node) {
ZoneHandleSet<Map> object_maps;
if (state->LookupMaps(object, &object_maps)) {
if (maps.contains(object_maps)) return Replace(effect);
- state = state->KillMaps(object, zone());
// TODO(turbofan): Compute the intersection.
}
- state = state->AddMaps(object, maps, zone());
+ state = state->SetMaps(object, maps, zone());
return UpdateState(node, state);
}
Reduction LoadElimination::ReduceCompareMaps(Node* node) {
- ZoneHandleSet<Map> const maps = CompareMapsParametersOf(node->op());
+ ZoneHandleSet<Map> const maps = CompareMapsParametersOf(node->op()).maps();
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
@@ -777,7 +800,7 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
return Replace(elements);
}
// We know that the resulting elements have the fixed array map.
- state = state->AddMaps(node, fixed_array_maps, zone());
+ state = state->SetMaps(node, fixed_array_maps, zone());
// Kill the previous elements on {object}.
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
MaybeHandle<Name>(), zone());
@@ -795,11 +818,11 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
if (state == nullptr) return NoChange();
if (mode == GrowFastElementsMode::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
- state = state->AddMaps(
+ state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
} else {
// We know that the resulting elements have the fixed array map.
- state = state->AddMaps(
+ state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
}
// Kill the previous elements on {object}.
@@ -842,7 +865,7 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
object_maps.insert(target_map, zone());
AliasStateInfo alias_info(state, object, source_map);
state = state->KillMaps(alias_info, zone());
- state = state->AddMaps(object, object_maps, zone());
+ state = state->SetMaps(object, object_maps, zone());
}
} else {
AliasStateInfo alias_info(state, object, source_map);
@@ -867,7 +890,7 @@ Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
object_maps.insert(double_map, zone());
object_maps.insert(fast_map, zone());
state = state->KillMaps(object, zone());
- state = state->AddMaps(object, object_maps, zone());
+ state = state->SetMaps(object, object_maps, zone());
}
// Kill the elements as well.
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
@@ -911,7 +934,7 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
}
Handle<Map> field_map;
if (access.map.ToHandle(&field_map)) {
- state = state->AddMaps(node, ZoneHandleSet<Map>(field_map), zone());
+ state = state->SetMaps(node, ZoneHandleSet<Map>(field_map), zone());
}
return UpdateState(node, state);
}
@@ -933,7 +956,7 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
// Record the new {object} map information.
ZoneHandleSet<Map> object_maps(
bit_cast<Handle<Map>>(new_value_type->AsHeapConstant()->Value()));
- state = state->AddMaps(object, object_maps, zone());
+ state = state->SetMaps(object, object_maps, zone());
}
} else {
int field_index = FieldIndexOf(access);
@@ -1067,7 +1090,7 @@ LoadElimination::AbstractState const* LoadElimination::UpdateStateForPhi(
if (!input_state->LookupMaps(phi->InputAt(i), &input_maps)) return state;
if (input_maps != object_maps) return state;
}
- return state->AddMaps(phi, object_maps, zone());
+ return state->SetMaps(phi, object_maps, zone());
}
Reduction LoadElimination::ReduceEffectPhi(Node* node) {
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 5080d7980a..60ae16f152 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -192,11 +192,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
// effect paths through the graph.
class AbstractMaps final : public ZoneObject {
public:
- explicit AbstractMaps(Zone* zone) : info_for_node_(zone) {}
- AbstractMaps(Node* object, ZoneHandleSet<Map> maps, Zone* zone)
- : info_for_node_(zone) {
- info_for_node_.insert(std::make_pair(object, maps));
- }
+ explicit AbstractMaps(Zone* zone);
+ AbstractMaps(Node* object, ZoneHandleSet<Map> maps, Zone* zone);
AbstractMaps const* Extend(Node* object, ZoneHandleSet<Map> maps,
Zone* zone) const;
@@ -225,7 +222,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
bool Equals(AbstractState const* that) const;
void Merge(AbstractState const* that, Zone* zone);
- AbstractState const* AddMaps(Node* object, ZoneHandleSet<Map> maps,
+ AbstractState const* SetMaps(Node* object, ZoneHandleSet<Map> maps,
Zone* zone) const;
AbstractState const* KillMaps(Node* object, Zone* zone) const;
AbstractState const* KillMaps(const AliasStateInfo& alias_info,
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 23591b2411..a9cd46d975 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -387,6 +387,7 @@ class LoopFinderImpl {
// Search the marks word by word.
for (int i = 0; i < width_; i++) {
uint32_t marks = backward_[pos + i] & forward_[pos + i];
+
for (int j = 0; j < 32; j++) {
if (marks & (1u << j)) {
int loop_num = i * 32 + j;
@@ -401,6 +402,10 @@ class LoopFinderImpl {
}
}
if (innermost == nullptr) continue;
+
+ // Return statements should never be found by forward or backward walk.
+ CHECK(ni.node->opcode() != IrOpcode::kReturn);
+
AddNodeToLoop(&ni, innermost, innermost_index);
count++;
}
@@ -421,6 +426,10 @@ class LoopFinderImpl {
size_t count = 0;
for (NodeInfo& ni : info_) {
if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;
+
+ // Return statements should never be found by forward or backward walk.
+ CHECK(ni.node->opcode() != IrOpcode::kReturn);
+
AddNodeToLoop(&ni, li, 1);
count++;
}
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index d50237ad6e..069c86414c 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -318,6 +318,7 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
}
// TODO(jarin) Support both sides.
+ // XXX
if (arith->InputAt(0) != phi) {
if ((arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber &&
arith->InputAt(0)->opcode() != IrOpcode::kSpeculativeToNumber) ||
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 8590c942d3..e589f0cbd8 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -7,7 +7,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
-#include "src/codegen.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
@@ -122,7 +121,7 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
DCHECK_LT(0u, divisor);
// If the divisor is even, we can avoid using the expensive fixup by shifting
// the dividend upfront.
- unsigned const shift = base::bits::CountTrailingZeros32(divisor);
+ unsigned const shift = base::bits::CountTrailingZeros(divisor);
dividend = Word32Shr(dividend, shift);
divisor >>= shift;
// Compute the magic number for the (shifted) divisor.
@@ -465,7 +464,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) { // K % K => K
- return ReplaceFloat64(modulo(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(Modulo(m.left().Value(), m.right().Value()));
}
break;
}
@@ -1182,7 +1181,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
(mleft.right().Value() & 0x1f) >=
- base::bits::CountTrailingZeros32(mask)) {
+ base::bits::CountTrailingZeros(mask)) {
// (x << L) & (-1 << K) => x << L iff L >= K
return Replace(mleft.node());
}
@@ -1223,7 +1222,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (mleft.left().IsWord32Shl()) {
Int32BinopMatcher mleftleft(mleft.left().node());
- if (mleftleft.right().Is(base::bits::CountTrailingZeros32(mask))) {
+ if (mleftleft.right().Is(base::bits::CountTrailingZeros(mask))) {
// (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
node->ReplaceInput(0,
Word32And(mleft.right().node(), m.right().node()));
@@ -1235,7 +1234,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (mleft.right().IsWord32Shl()) {
Int32BinopMatcher mleftright(mleft.right().node());
- if (mleftright.right().Is(base::bits::CountTrailingZeros32(mask))) {
+ if (mleftright.right().Is(base::bits::CountTrailingZeros(mask))) {
// (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
node->ReplaceInput(0,
Word32And(mleft.left().node(), m.right().node()));
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 425b04e215..3b6634c8bc 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -250,6 +250,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Word64ReverseBytes();
const OptionalOperator Int32AbsWithOverflow();
const OptionalOperator Int64AbsWithOverflow();
+
+ // Return true if the target's Word32 shift implementation is directly
+ // compatible with JavaScript's specification. Otherwise, we have to manually
+ // generate a mask with 0x1f on the amount ahead of generating the shift.
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 946931f855..767ada506a 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -75,7 +75,11 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
case IrOpcode::kAllocate:
- return VisitAllocate(node, state);
+ // Allocate nodes were purged from the graph in effect-control
+ // linearization.
+ UNREACHABLE();
+ case IrOpcode::kAllocateRaw:
+ return VisitAllocateRaw(node, state);
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kCallWithCallerSavedRegisters:
@@ -100,6 +104,7 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
case IrOpcode::kDebugBreak:
+ case IrOpcode::kUnreachable:
return VisitOtherEffect(node, state);
default:
break;
@@ -109,8 +114,9 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
#define __ gasm()->
-void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
- DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
+void MemoryOptimizer::VisitAllocateRaw(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
@@ -129,7 +135,7 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* const child = user->InputAt(1);
- if (child->opcode() == IrOpcode::kAllocate &&
+ if (child->opcode() == IrOpcode::kAllocateRaw &&
PretenureFlagOf(child->op()) == NOT_TENURED) {
NodeProperties::ChangeOp(child, node->op());
break;
@@ -142,7 +148,7 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* const parent = user->InputAt(0);
- if (parent->opcode() == IrOpcode::kAllocate &&
+ if (parent->opcode() == IrOpcode::kAllocateRaw &&
PretenureFlagOf(parent->op()) == TENURED) {
pretenure = TENURED;
break;
@@ -297,7 +303,6 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
- USE(control); // Floating control, dropped on the floor.
// Replace all effect uses of {node} with the {effect}, enqueue the
// effect uses for further processing, and replace all value uses of
@@ -306,9 +311,11 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
if (NodeProperties::IsEffectEdge(edge)) {
EnqueueUse(edge.from(), edge.index(), state);
edge.UpdateTo(effect);
- } else {
- DCHECK(NodeProperties::IsValueEdge(edge));
+ } else if (NodeProperties::IsValueEdge(edge)) {
edge.UpdateTo(value);
+ } else {
+ DCHECK(NodeProperties::IsControlEdge(edge));
+ edge.UpdateTo(control);
}
}
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 982fc6146e..e229f2b0be 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -106,7 +106,7 @@ class MemoryOptimizer final {
};
void VisitNode(Node*, AllocationState const*);
- void VisitAllocate(Node*, AllocationState const*);
+ void VisitAllocateRaw(Node*, AllocationState const*);
void VisitCall(Node*, AllocationState const*);
void VisitCallWithCallerSavedRegisters(Node*, AllocationState const*);
void VisitLoadElement(Node*, AllocationState const*);
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index e6264bc2b4..b7301749cf 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -273,14 +273,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
if (must_save_lr_) {
__ Pop(ra);
}
@@ -745,7 +739,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -759,9 +753,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ nop();
int pc = __ pc_offset();
__ bind(&current);
- int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
__ lw(a2, MemOperand(ra, offset));
__ pop(ra);
+ __ lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -785,6 +780,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ : RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ Call(at, i.InputRegister(0), 0);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
@@ -801,6 +809,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ : RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ Jump(at, i.InputRegister(0), 0);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
@@ -2572,9 +2593,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
+ unsigned lane = shuffle & 0xff;
+ if (FLAG_debug_code) {
+ // range of all four lanes, for unary instruction,
+ // should belong to the same range, which can be one of these:
+ // [0, 3] or [4, 7]
+ if (lane >= 4) {
+ int32_t shuffle_helper = shuffle;
+ for (int i = 0; i < 4; ++i) {
+ lane = shuffle_helper & 0xff;
+ CHECK_GE(lane, 4);
+ shuffle_helper >>= 8;
+ }
+ }
+ }
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- int lane = shuffle & 0xff;
+ lane = shuffle & 0xff;
+ if (lane >= 4) {
+ lane -= 4;
+ }
DCHECK_GT(4, lane);
i8 |= lane << (2 * i);
shuffle >>= 8;
@@ -3365,7 +3403,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
if (saves_fpu != 0) {
- int count = base::bits::CountPopulation32(saves_fpu);
+ int count = base::bits::CountPopulation(saves_fpu);
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kPointerSize));
@@ -3373,7 +3411,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
- int count = base::bits::CountPopulation32(saves);
+ int count = base::bits::CountPopulation(saves);
DCHECK_EQ(kNumCalleeSaved, count + 1);
frame->AllocateSavedCalleeRegisterSlots(count);
}
@@ -3411,7 +3449,12 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
+ const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+
+ // Skip callee-saved slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
if (shrink_slots > 0) {
__ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3421,11 +3464,10 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPushFPU(saves_fpu);
}
- const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation32(saves) + 1);
+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
}
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 5bb112f77e..1053763f0d 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -421,7 +421,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint32_t mask = m.right().Value();
- uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
// The mask must be contiguous, and occupy the least-significant bits.
@@ -454,7 +454,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
if (m.right().HasValue()) {
uint32_t mask = m.right().Value();
- uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
// Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
@@ -507,7 +507,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
// contiguous, and the shift immediate non-zero.
if (mleft.right().HasValue()) {
uint32_t mask = mleft.right().Value();
- uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
uint32_t shift = m.right().Value();
@@ -537,7 +537,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
- unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
MipsOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 3f8fbfc7c8..3fce7dd688 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 6542f0d099..6d43750b1c 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -273,14 +273,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
if (must_save_lr_) {
__ Pop(ra);
}
@@ -788,7 +782,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -802,9 +796,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ nop();
int pc = __ pc_offset();
__ bind(&current);
- int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
- __ Lw(a2, MemOperand(ra, offset));
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
+ __ Ld(a2, MemOperand(ra, offset));
__ pop(ra);
+ __ Lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -829,6 +824,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+ __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ : RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ daddiu(at, i.InputRegister(0), 0);
+ __ Jump(at);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
@@ -846,6 +860,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+ __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ : RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ daddiu(at, i.InputRegister(0), 0);
+ __ Jump(at);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
@@ -2861,9 +2889,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
+ unsigned lane = shuffle & 0xff;
+ if (FLAG_debug_code) {
+ // range of all four lanes, for unary instruction,
+ // should belong to the same range, which can be one of these:
+ // [0, 3] or [4, 7]
+ if (lane >= 4) {
+ int32_t shuffle_helper = shuffle;
+ for (int i = 0; i < 4; ++i) {
+ lane = shuffle_helper & 0xff;
+ CHECK_GE(lane, 4);
+ shuffle_helper >>= 8;
+ }
+ }
+ }
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- int lane = shuffle & 0xff;
+ lane = shuffle & 0xff;
+ if (lane >= 4) {
+ lane -= 4;
+ }
DCHECK_GT(4, lane);
i8 |= lane << (2 * i);
shuffle >>= 8;
@@ -3666,7 +3711,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
- int count = base::bits::CountPopulation32(saves_fpu);
+ int count = base::bits::CountPopulation(saves_fpu);
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kPointerSize));
@@ -3674,7 +3719,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
- int count = base::bits::CountPopulation32(saves);
+ int count = base::bits::CountPopulation(saves);
DCHECK_EQ(kNumCalleeSaved, count + 1);
frame->AllocateSavedCalleeRegisterSlots(count);
}
@@ -3713,22 +3758,26 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+
+ // Skip callee-saved slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= base::bits::CountPopulation(saves_fpu);
if (shrink_slots > 0) {
__ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
}
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation32(saves_fpu));
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation32(saves) + 1);
+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
}
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index a8dc8a19db..0b490c7d77 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -529,7 +529,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint32_t mask = m.right().Value();
- uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
// The mask must be contiguous, and occupy the least-significant bits.
@@ -558,7 +558,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
if (m.right().HasValue()) {
uint32_t mask = m.right().Value();
- uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
// Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
@@ -579,7 +579,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint64_t mask = m.right().Value();
- uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
// The mask must be contiguous, and occupy the least-significant bits.
@@ -612,7 +612,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
}
if (m.right().HasValue()) {
uint64_t mask = m.right().Value();
- uint32_t shift = base::bits::CountPopulation64(~mask);
+ uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros64(~mask);
if (shift != 0 && shift < 32 && msb + shift == 64) {
// Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
@@ -696,7 +696,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
// contiguous, and the shift immediate non-zero.
if (mleft.right().HasValue()) {
uint32_t mask = mleft.right().Value();
- uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
uint32_t shift = m.right().Value();
@@ -726,7 +726,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
- unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
Mips64OperandGenerator g(this);
@@ -788,7 +788,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint64_t mask = mleft.right().Value();
- uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
uint64_t shift = m.right().Value();
@@ -819,7 +819,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
- unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
Mips64OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index d84c27e86d..e312dc4354 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -277,6 +277,23 @@ Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
// static
+void NodeProperties::CollectValueProjections(Node* node, Node** projections,
+ size_t projection_count) {
+#ifdef DEBUG
+ for (size_t index = 0; index < projection_count; ++index) {
+ DCHECK_NULL(projections[index]);
+ }
+#endif
+ for (Edge const edge : node->use_edges()) {
+ if (!IsValueEdge(edge)) continue;
+ Node* use = edge.from();
+ DCHECK_EQ(IrOpcode::kProjection, use->opcode());
+ projections[ProjectionIndexOf(use->op())] = use;
+ }
+}
+
+
+// static
void NodeProperties::CollectControlProjections(Node* node, Node** projections,
size_t projection_count) {
#ifdef DEBUG
@@ -348,12 +365,26 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
- Handle<Map> receiver_map(m.Value()->map());
- if (receiver_map->is_stable()) {
- // The {receiver_map} is only reliable when we install a stability
- // code dependency.
- *maps_return = ZoneHandleSet<Map>(receiver_map);
- return kUnreliableReceiverMaps;
+ Handle<HeapObject> receiver = m.Value();
+ Isolate* const isolate = m.Value()->GetIsolate();
+ // We don't use ICs for the Array.prototype and the Object.prototype
+ // because the runtime has to be able to intercept them properly, so
+ // we better make sure that TurboFan doesn't outsmart the system here
+ // by storing to elements of either prototype directly.
+ //
+ // TODO(bmeurer): This can be removed once the Array.prototype and
+ // Object.prototype have NO_ELEMENTS elements kind.
+ if (!isolate->IsInAnyContext(*receiver,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) &&
+ !isolate->IsInAnyContext(*receiver,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
+ Handle<Map> receiver_map(receiver->map(), isolate);
+ if (receiver_map->is_stable()) {
+ // The {receiver_map} is only reliable when we install a stability
+ // code dependency.
+ *maps_return = ZoneHandleSet<Map>(receiver_map);
+ return kUnreliableReceiverMaps;
+ }
}
}
InferReceiverMapsResult result = kReliableReceiverMaps;
@@ -362,7 +393,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
case IrOpcode::kMapGuard: {
Node* const object = GetValueInput(effect, 0);
if (IsSame(receiver, object)) {
- *maps_return = MapGuardMapsOf(effect->op());
+ *maps_return = MapGuardMapsOf(effect->op()).maps();
return result;
}
break;
@@ -379,7 +410,8 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
if (IsSame(receiver, effect)) {
HeapObjectMatcher mtarget(GetValueInput(effect, 0));
HeapObjectMatcher mnewtarget(GetValueInput(effect, 1));
- if (mtarget.HasValue() && mnewtarget.HasValue()) {
+ if (mtarget.HasValue() && mnewtarget.HasValue() &&
+ mnewtarget.Value()->IsJSFunction()) {
Handle<JSFunction> original_constructor =
Handle<JSFunction>::cast(mnewtarget.Value());
if (original_constructor->has_initial_map()) {
@@ -470,6 +502,69 @@ bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
}
// static
+bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
+ switch (receiver->opcode()) {
+#define CASE(Opcode) case IrOpcode::k##Opcode:
+ JS_CONSTRUCT_OP_LIST(CASE)
+ JS_CREATE_OP_LIST(CASE)
+#undef CASE
+ case IrOpcode::kCheckReceiver:
+ case IrOpcode::kConvertReceiver:
+ case IrOpcode::kJSGetSuperConstructor:
+ case IrOpcode::kJSToObject:
+ return false;
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
+ return value->IsPrimitive();
+ }
+ default: {
+ // We don't really care about the exact maps here,
+ // just the instance types, which don't change
+ // across potential side-effecting operations.
+ ZoneHandleSet<Map> maps;
+ if (InferReceiverMaps(receiver, effect, &maps) != kNoReceiverMaps) {
+ // Check if all {maps} are actually JSReceiver maps.
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (!maps[i]->IsJSReceiverMap()) return true;
+ }
+ return false;
+ }
+ return true;
+ }
+ }
+}
+
+// static
+bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
+ if (CanBePrimitive(receiver, effect)) {
+ switch (receiver->opcode()) {
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckSymbol:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckSeqString:
+ case IrOpcode::kCheckInternalizedString:
+ case IrOpcode::kToBoolean:
+ case IrOpcode::kJSToInteger:
+ case IrOpcode::kJSToLength:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumeric:
+ case IrOpcode::kJSToString:
+ return false;
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
+ Isolate* const isolate = value->GetIsolate();
+ return value->IsNullOrUndefined(isolate);
+ }
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+// static
Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
Node* context = NodeProperties::GetContextInput(node);
while (*depth > 0 &&
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 6bc1fe7078..5ccc15c1ab 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -122,6 +122,9 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// Collect the output-value projection for the given output index.
static Node* FindProjection(Node* node, size_t projection_index);
+ // Collect the value projections from a node.
+ static void CollectValueProjections(Node* node, Node** proj, size_t count);
+
// Collect the branch-related projections from a node, such as IfTrue,
// IfFalse, IfSuccess, IfException, IfValue and IfDefault.
// - Branch: [ IfTrue, IfFalse ]
@@ -155,6 +158,15 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// in the effect chain.
static bool NoObservableSideEffectBetween(Node* effect, Node* dominator);
+ // Returns true if the {receiver} can be a primitive value (i.e. is not
+ // definitely a JavaScript object); might walk up the {effect} chain to
+ // find map checks on {receiver}.
+ static bool CanBePrimitive(Node* receiver, Node* effect);
+
+ // Returns true if the {receiver} can be null or undefined. Might walk
+ // up the {effect} chain to find map checks for {receiver}.
+ static bool CanBeNullOrUndefined(Node* receiver, Node* effect);
+
// ---------------------------------------------------------------------------
// Context.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 97f91b8cac..3c3650b8f4 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -79,6 +79,8 @@
#define COMMON_OP_LIST(V) \
CONSTANT_OP_LIST(V) \
INNER_OP_LIST(V) \
+ V(Unreachable) \
+ V(DeadValue) \
V(Dead)
// Opcodes for JavaScript operators.
@@ -103,7 +105,8 @@
V(JSSubtract) \
V(JSMultiply) \
V(JSDivide) \
- V(JSModulus)
+ V(JSModulus) \
+ V(JSExponentiate)
#define JS_SIMPLE_BINOP_LIST(V) \
JS_COMPARE_BINOP_LIST(V) \
@@ -114,34 +117,38 @@
V(JSOrdinaryHasInstance)
#define JS_CONVERSION_UNOP_LIST(V) \
- V(JSToBoolean) \
V(JSToInteger) \
V(JSToLength) \
V(JSToName) \
V(JSToNumber) \
+ V(JSToNumeric) \
V(JSToObject) \
V(JSToString)
-#define JS_OTHER_UNOP_LIST(V) \
- V(JSClassOf) \
- V(JSTypeOf)
-
#define JS_SIMPLE_UNOP_LIST(V) \
JS_CONVERSION_UNOP_LIST(V) \
- JS_OTHER_UNOP_LIST(V)
+ V(JSBitwiseNot) \
+ V(JSDecrement) \
+ V(JSIncrement) \
+ V(JSNegate)
+
+#define JS_CREATE_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateBoundFunction) \
+ V(JSCreateClosure) \
+ V(JSCreateGeneratorObject) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateKeyValueArray) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateEmptyLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateEmptyLiteralObject) \
+ V(JSCreateLiteralRegExp)
#define JS_OBJECT_OP_LIST(V) \
- V(JSCreate) \
- V(JSCreateArguments) \
- V(JSCreateArray) \
- V(JSCreateClosure) \
- V(JSCreateIterResultObject) \
- V(JSCreateKeyValueArray) \
- V(JSCreateLiteralArray) \
- V(JSCreateEmptyLiteralArray) \
- V(JSCreateLiteralObject) \
- V(JSCreateEmptyLiteralObject) \
- V(JSCreateLiteralRegExp) \
+ JS_CREATE_OP_LIST(V) \
V(JSLoadProperty) \
V(JSLoadNamed) \
V(JSLoadGlobal) \
@@ -152,7 +159,6 @@
V(JSStoreDataPropertyInLiteral) \
V(JSDeleteProperty) \
V(JSHasProperty) \
- V(JSCreateGeneratorObject) \
V(JSGetSuperConstructor)
#define JS_CONTEXT_OP_LIST(V) \
@@ -161,20 +167,21 @@
V(JSCreateFunctionContext) \
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
- V(JSCreateBlockContext) \
- V(JSCreateScriptContext)
+ V(JSCreateBlockContext)
+
+#define JS_CONSTRUCT_OP_LIST(V) \
+ V(JSConstructForwardVarargs) \
+ V(JSConstruct) \
+ V(JSConstructWithArrayLike) \
+ V(JSConstructWithSpread)
#define JS_OTHER_OP_LIST(V) \
- V(JSConstructForwardVarargs) \
- V(JSConstruct) \
- V(JSConstructWithArrayLike) \
- V(JSConstructWithSpread) \
+ JS_CONSTRUCT_OP_LIST(V) \
V(JSCallForwardVarargs) \
V(JSCall) \
V(JSCallWithArrayLike) \
V(JSCallWithSpread) \
V(JSCallRuntime) \
- V(JSConvertReceiver) \
V(JSForInEnumerate) \
V(JSForInNext) \
V(JSForInPrepare) \
@@ -241,6 +248,7 @@
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual) \
V(ReferenceEqual) \
+ V(SameValue) \
V(StringEqual) \
V(StringLessThan) \
V(StringLessThanOrEqual)
@@ -320,6 +328,7 @@
V(PlainPrimitiveToWord32) \
V(PlainPrimitiveToFloat64) \
V(BooleanNot) \
+ V(StringToNumber) \
V(StringCharAt) \
V(StringCharCodeAt) \
V(SeqStringCharCodeAt) \
@@ -341,9 +350,15 @@
V(CheckHeapObject) \
V(CheckFloat64Hole) \
V(CheckNotTaggedHole) \
+ V(CheckEqualsInternalizedString) \
+ V(CheckEqualsSymbol) \
V(CompareMaps) \
+ V(ConvertReceiver) \
V(ConvertTaggedHoleToUndefined) \
+ V(TypeOf) \
+ V(ClassOf) \
V(Allocate) \
+ V(AllocateRaw) \
V(LoadFieldByIndex) \
V(LoadField) \
V(LoadElement) \
@@ -353,7 +368,11 @@
V(StoreTypedElement) \
V(StoreSignedSmallElement) \
V(TransitionAndStoreElement) \
+ V(TransitionAndStoreNumberElement) \
+ V(TransitionAndStoreNonNumberElement) \
+ V(ToBoolean) \
V(ObjectIsArrayBufferView) \
+ V(ObjectIsBigInt) \
V(ObjectIsCallable) \
V(ObjectIsConstructor) \
V(ObjectIsDetectableCallable) \
@@ -377,6 +396,7 @@
V(TransitionElementsKind) \
V(FindOrderedHashMapEntry) \
V(FindOrderedHashMapEntryForInt32Key) \
+ V(MaskIndexWithBound) \
V(RuntimeAbort)
#define SIMPLIFIED_OP_LIST(V) \
@@ -815,7 +835,7 @@ class V8_EXPORT_PRIVATE IrOpcode {
}
static bool IsContextChainExtendingOpcode(Value value) {
- return kJSCreateFunctionContext <= value && value <= kJSCreateScriptContext;
+ return kJSCreateFunctionContext <= value && value <= kJSCreateBlockContext;
}
};
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 0cc98a0ef1..46d6557b21 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -116,15 +116,7 @@ Type* OperationTyper::Rangify(Type* type) {
if (!type->Is(cache_.kInteger)) {
return type; // Give up on non-integer types.
}
- double min = type->Min();
- double max = type->Max();
- // Handle the degenerate case of empty bitset types (such as
- // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
- if (std::isnan(min)) {
- DCHECK(std::isnan(max));
- return type;
- }
- return Type::Range(min, max, zone());
+ return Type::Range(type->Min(), type->Max(), zone());
}
namespace {
@@ -241,7 +233,19 @@ Type* OperationTyper::MultiplyRanger(Type* lhs, Type* rhs) {
: range;
}
-Type* OperationTyper::ToNumber(Type* type) {
+Type* OperationTyper::ConvertReceiver(Type* type) {
+ if (type->Is(Type::Receiver())) return type;
+ bool const maybe_primitive = type->Maybe(Type::Primitive());
+ type = Type::Intersect(type, Type::Receiver(), zone());
+ if (maybe_primitive) {
+ // ConvertReceiver maps null and undefined to the JSGlobalProxy of the
+ // target function, and all other primitives are wrapped into a JSValue.
+ type = Type::Union(type, Type::OtherObject(), zone());
+ }
+ return type;
+}
+
+Type* OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type* type) {
if (type->Is(Type::Number())) return type;
if (type->Is(Type::NullOrUndefined())) {
if (type->Is(Type::Null())) return cache_.kSingletonZero;
@@ -265,28 +269,42 @@ Type* OperationTyper::ToNumber(Type* type) {
}
return Type::Intersect(type, Type::Number(), zone());
}
- return Type::Number();
+ if (type->Is(Type::BigInt())) {
+ return mode == Object::Conversion::kToNumber ? Type::None() : type;
+ }
+ return mode == Object::Conversion::kToNumber ? Type::Number()
+ : Type::Numeric();
+}
+
+Type* OperationTyper::ToNumber(Type* type) {
+ return ToNumberOrNumeric(Object::Conversion::kToNumber, type);
+}
+
+Type* OperationTyper::ToNumeric(Type* type) {
+ return ToNumberOrNumeric(Object::Conversion::kToNumeric, type);
}
Type* OperationTyper::NumberAbs(Type* type) {
DCHECK(type->Is(Type::Number()));
-
- if (!type->IsInhabited()) {
- return Type::None();
- }
+ if (type->IsNone()) return type;
bool const maybe_nan = type->Maybe(Type::NaN());
bool const maybe_minuszero = type->Maybe(Type::MinusZero());
+
type = Type::Intersect(type, Type::PlainNumber(), zone());
- double const max = type->Max();
- double const min = type->Min();
- if (min < 0) {
- if (type->Is(cache_.kInteger)) {
- type = Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), zone());
- } else {
- type = Type::PlainNumber();
+ if (!type->IsNone()) {
+ double const max = type->Max();
+ double const min = type->Min();
+ if (min < 0) {
+ if (type->Is(cache_.kInteger)) {
+ type =
+ Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), zone());
+ } else {
+ type = Type::PlainNumber();
+ }
}
}
+
if (maybe_minuszero) {
type = Type::Union(type, cache_.kSingletonZero, zone());
}
@@ -334,8 +352,9 @@ Type* OperationTyper::NumberCbrt(Type* type) {
Type* OperationTyper::NumberCeil(Type* type) {
DCHECK(type->Is(Type::Number()));
if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return cache_.kIntegerOrMinusZeroOrNaN;
+ type = Type::Intersect(type, Type::NaN(), zone());
+ type = Type::Union(type, cache_.kIntegerOrMinusZero, zone());
+ return type;
}
Type* OperationTyper::NumberClz32(Type* type) {
@@ -399,8 +418,9 @@ Type* OperationTyper::NumberLog10(Type* type) {
Type* OperationTyper::NumberRound(Type* type) {
DCHECK(type->Is(Type::Number()));
if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return cache_.kIntegerOrMinusZeroOrNaN;
+ type = Type::Intersect(type, Type::NaN(), zone());
+ type = Type::Union(type, cache_.kIntegerOrMinusZero, zone());
+ return type;
}
Type* OperationTyper::NumberSign(Type* type) {
@@ -409,7 +429,9 @@ Type* OperationTyper::NumberSign(Type* type) {
bool maybe_minuszero = type->Maybe(Type::MinusZero());
bool maybe_nan = type->Maybe(Type::NaN());
type = Type::Intersect(type, Type::PlainNumber(), zone());
- if (type->Max() < 0.0) {
+ if (type->IsNone()) {
+ // Do nothing.
+ } else if (type->Max() < 0.0) {
type = cache_.kSingletonMinusOne;
} else if (type->Max() <= 0.0) {
type = cache_.kMinusOneOrZero;
@@ -422,6 +444,7 @@ Type* OperationTyper::NumberSign(Type* type) {
}
if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ DCHECK(!type->IsNone());
return type;
}
@@ -453,13 +476,14 @@ Type* OperationTyper::NumberTanh(Type* type) {
Type* OperationTyper::NumberTrunc(Type* type) {
DCHECK(type->Is(Type::Number()));
if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return cache_.kIntegerOrMinusZeroOrNaN;
+ type = Type::Intersect(type, Type::NaN(), zone());
+ type = Type::Union(type, cache_.kIntegerOrMinusZero, zone());
+ return type;
}
Type* OperationTyper::NumberToBoolean(Type* type) {
DCHECK(type->Is(Type::Number()));
- if (!type->IsInhabited()) return Type::None();
+ if (type->IsNone()) return type;
if (type->Is(cache_.kZeroish)) return singleton_false_;
if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
return singleton_true_; // Ruled out nan, -0 and +0.
@@ -511,9 +535,7 @@ Type* OperationTyper::NumberAdd(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
- return Type::None();
- }
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
// Addition can return NaN if either input can be NaN or we try to compute
// the sum of two infinities of opposite sign.
@@ -536,7 +558,7 @@ Type* OperationTyper::NumberAdd(Type* lhs, Type* rhs) {
Type* type = Type::None();
lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
- if (lhs->IsInhabited() && rhs->IsInhabited()) {
+ if (!lhs->IsNone() && !rhs->IsNone()) {
if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
type = AddRanger(lhs->Min(), lhs->Max(), rhs->Min(), rhs->Max());
} else {
@@ -558,9 +580,7 @@ Type* OperationTyper::NumberSubtract(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
- return Type::None();
- }
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
// Subtraction can return NaN if either input can be NaN or we try to
// compute the sum of two infinities of opposite sign.
@@ -581,7 +601,7 @@ Type* OperationTyper::NumberSubtract(Type* lhs, Type* rhs) {
Type* type = Type::None();
lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
- if (lhs->IsInhabited() && rhs->IsInhabited()) {
+ if (!lhs->IsNone() && !rhs->IsNone()) {
if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
type = SubtractRanger(lhs->Min(), lhs->Max(), rhs->Min(), rhs->Max());
} else {
@@ -606,7 +626,7 @@ Type* OperationTyper::SpeculativeSafeIntegerAdd(Type* lhs, Type* rhs) {
// In either case the result will be in the safe integer range, so we
// can bake in the type here. This needs to be in sync with
// SimplifiedLowering::VisitSpeculativeAdditiveOp.
- return Type::Intersect(result, cache_.kSafeInteger, zone());
+ return Type::Intersect(result, cache_.kSafeIntegerOrMinusZero, zone());
}
Type* OperationTyper::SpeculativeSafeIntegerSubtract(Type* lhs, Type* rhs) {
@@ -623,9 +643,7 @@ Type* OperationTyper::NumberMultiply(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
- return Type::None();
- }
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
lhs = Rangify(lhs);
rhs = Rangify(rhs);
@@ -640,10 +658,7 @@ Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
- return Type::None();
- }
-
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
// Division is tricky, so all we do is try ruling out -0 and NaN.
@@ -652,7 +667,9 @@ Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
(rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ DCHECK(!lhs->IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+ DCHECK(!rhs->IsNone());
// Try to rule out -0.
bool maybe_minuszero =
@@ -671,6 +688,8 @@ Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
// Modulus can yield NaN if either {lhs} or {rhs} are NaN, or
// {lhs} is not finite, or the {rhs} is a zero value.
bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
@@ -693,7 +712,7 @@ Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
// We can only derive a meaningful type if both {lhs} and {rhs} are inhabited,
// and the {rhs} is not 0, otherwise the result is NaN independent of {lhs}.
- if (lhs->IsInhabited() && !rhs->Is(cache_.kSingletonZero)) {
+ if (!lhs->IsNone() && !rhs->Is(cache_.kSingletonZero)) {
// Determine the bounds of {lhs} and {rhs}.
double const lmin = lhs->Min();
double const lmax = lhs->Max();
@@ -738,11 +757,11 @@ Type* OperationTyper::NumberBitwiseOr(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
-
lhs = NumberToInt32(lhs);
rhs = NumberToInt32(rhs);
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
double lmin = lhs->Min();
double rmin = rhs->Min();
double lmax = lhs->Max();
@@ -775,11 +794,11 @@ Type* OperationTyper::NumberBitwiseAnd(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
-
lhs = NumberToInt32(lhs);
rhs = NumberToInt32(rhs);
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
double lmin = lhs->Min();
double rmin = rhs->Min();
double lmax = lhs->Max();
@@ -806,11 +825,11 @@ Type* OperationTyper::NumberBitwiseXor(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
-
lhs = NumberToInt32(lhs);
rhs = NumberToInt32(rhs);
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
double lmin = lhs->Min();
double rmin = rhs->Min();
double lmax = lhs->Max();
@@ -831,11 +850,11 @@ Type* OperationTyper::NumberShiftLeft(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
-
lhs = NumberToInt32(lhs);
rhs = NumberToUint32(rhs);
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
int32_t min_lhs = lhs->Min();
int32_t max_lhs = lhs->Max();
uint32_t min_rhs = rhs->Min();
@@ -866,11 +885,11 @@ Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
-
lhs = NumberToInt32(lhs);
rhs = NumberToUint32(rhs);
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
int32_t min_lhs = lhs->Min();
int32_t max_lhs = lhs->Max();
uint32_t min_rhs = rhs->Min();
@@ -891,11 +910,11 @@ Type* OperationTyper::NumberShiftRightLogical(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
-
lhs = NumberToUint32(lhs);
rhs = NumberToUint32(rhs);
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+
uint32_t min_lhs = lhs->Min();
uint32_t max_lhs = lhs->Max();
uint32_t min_rhs = rhs->Min();
@@ -932,19 +951,19 @@ Type* OperationTyper::NumberImul(Type* lhs, Type* rhs) {
Type* OperationTyper::NumberMax(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
- return Type::None();
- }
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
- return Type::NaN();
- }
+
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+
Type* type = Type::None();
// TODO(turbofan): Improve minus zero handling here.
if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
type = Type::Union(type, Type::NaN(), zone());
}
lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ DCHECK(!lhs->IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+ DCHECK(!rhs->IsNone());
if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
double max = std::max(lhs->Max(), rhs->Max());
double min = std::max(lhs->Min(), rhs->Min());
@@ -958,19 +977,19 @@ Type* OperationTyper::NumberMax(Type* lhs, Type* rhs) {
Type* OperationTyper::NumberMin(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
- return Type::None();
- }
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
- return Type::NaN();
- }
+
+ if (lhs->IsNone() || rhs->IsNone()) return Type::None();
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+
Type* type = Type::None();
// TODO(turbofan): Improve minus zero handling here.
if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
type = Type::Union(type, Type::NaN(), zone());
}
lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ DCHECK(!lhs->IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+ DCHECK(!rhs->IsNone());
if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
double max = std::min(lhs->Max(), rhs->Max());
double min = std::min(lhs->Min(), rhs->Min());
@@ -1012,7 +1031,7 @@ Type* OperationTyper::SpeculativeToNumber(Type* type) {
}
Type* OperationTyper::ToPrimitive(Type* type) {
- if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
+ if (type->Is(Type::Primitive())) {
return type;
}
return Type::Primitive();
@@ -1020,7 +1039,7 @@ Type* OperationTyper::ToPrimitive(Type* type) {
Type* OperationTyper::Invert(Type* type) {
DCHECK(type->Is(Type::Boolean()));
- DCHECK(type->IsInhabited());
+ DCHECK(!type->IsNone());
if (type->Is(singleton_false())) return singleton_true();
if (type->Is(singleton_true())) return singleton_false();
return type;
@@ -1046,6 +1065,61 @@ Type* OperationTyper::FalsifyUndefined(ComparisonOutcome outcome) {
return singleton_true();
}
+namespace {
+
+Type* JSType(Type* type) {
+ if (type->Is(Type::Boolean())) return Type::Boolean();
+ if (type->Is(Type::String())) return Type::String();
+ if (type->Is(Type::Number())) return Type::Number();
+ if (type->Is(Type::BigInt())) return Type::BigInt();
+ if (type->Is(Type::Undefined())) return Type::Undefined();
+ if (type->Is(Type::Null())) return Type::Null();
+ if (type->Is(Type::Symbol())) return Type::Symbol();
+ if (type->Is(Type::Receiver())) return Type::Receiver(); // JS "Object"
+ return Type::Any();
+}
+
+} // namespace
+
+Type* OperationTyper::SameValue(Type* lhs, Type* rhs) {
+ if (!JSType(lhs)->Maybe(JSType(rhs))) return singleton_false();
+ if (lhs->Is(Type::NaN())) {
+ if (rhs->Is(Type::NaN())) return singleton_true();
+ if (!rhs->Maybe(Type::NaN())) return singleton_false();
+ } else if (rhs->Is(Type::NaN())) {
+ if (!lhs->Maybe(Type::NaN())) return singleton_false();
+ }
+ if (lhs->Is(Type::MinusZero())) {
+ if (rhs->Is(Type::MinusZero())) return singleton_true();
+ if (!rhs->Maybe(Type::MinusZero())) return singleton_false();
+ } else if (rhs->Is(Type::MinusZero())) {
+ if (!lhs->Maybe(Type::MinusZero())) return singleton_false();
+ }
+ if (lhs->Is(Type::OrderedNumber()) && rhs->Is(Type::OrderedNumber()) &&
+ (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
+ return singleton_false();
+ }
+ return Type::Boolean();
+}
+
+Type* OperationTyper::StrictEqual(Type* lhs, Type* rhs) {
+ if (!JSType(lhs)->Maybe(JSType(rhs))) return singleton_false();
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return singleton_false();
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
+ (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
+ return singleton_false();
+ }
+ if ((lhs->Is(Type::Hole()) || rhs->Is(Type::Hole())) && !lhs->Maybe(rhs)) {
+ return singleton_false();
+ }
+ if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
+ // Types are equal and are inhabited only by a single semantic value,
+ // which is not nan due to the earlier check.
+ return singleton_true();
+ }
+ return Type::Boolean();
+}
+
Type* OperationTyper::CheckFloat64Hole(Type* type) {
if (type->Maybe(Type::Hole())) {
// Turn "the hole" into undefined.
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index c1d05645d7..4a9c4ffb08 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/opcodes.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -31,15 +32,16 @@ class V8_EXPORT_PRIVATE OperationTyper {
Type* Merge(Type* left, Type* right);
Type* ToPrimitive(Type* type);
-
- // Helpers for number operation typing.
Type* ToNumber(Type* type);
+ Type* ToNumeric(Type* type);
+
Type* WeakenRange(Type* current_range, Type* previous_range);
// Number unary operators.
#define DECLARE_METHOD(Name) Type* Name(Type* type);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ DECLARE_METHOD(ConvertReceiver)
#undef DECLARE_METHOD
// Number binary operators.
@@ -48,6 +50,10 @@ class V8_EXPORT_PRIVATE OperationTyper {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
+ // Comparison operators.
+ Type* SameValue(Type* lhs, Type* rhs);
+ Type* StrictEqual(Type* lhs, Type* rhs);
+
// Check operators.
Type* CheckFloat64Hole(Type* type);
Type* CheckNumber(Type* type);
@@ -68,6 +74,8 @@ class V8_EXPORT_PRIVATE OperationTyper {
private:
typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
+ Type* ToNumberOrNumeric(Object::Conversion mode, Type* type);
+
ComparisonOutcome Invert(ComparisonOutcome);
Type* Invert(Type*);
Type* FalsifyUndefined(ComparisonOutcome);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 5f45a79bcc..d786bb3ee5 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -44,6 +44,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSMultiply:
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
+ case IrOpcode::kJSExponentiate:
// Bitwise operations
case IrOpcode::kJSBitwiseOr:
@@ -85,14 +86,12 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStoreDataPropertyInLiteral:
case IrOpcode::kJSDeleteProperty:
- // Context operations
- case IrOpcode::kJSCreateScriptContext:
-
// Conversions
case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
@@ -112,6 +111,10 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStackCheck:
case IrOpcode::kJSDebugger:
case IrOpcode::kJSGetSuperConstructor:
+ case IrOpcode::kJSBitwiseNot:
+ case IrOpcode::kJSDecrement:
+ case IrOpcode::kJSIncrement:
+ case IrOpcode::kJSNegate:
return true;
default:
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index 2da48ca887..d505a533b7 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -31,8 +31,8 @@ Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
opcode_(opcode),
properties_(properties),
value_in_(CheckRange<uint32_t>(value_in)),
- effect_in_(CheckRange<uint16_t>(effect_in)),
- control_in_(CheckRange<uint16_t>(control_in)),
+ effect_in_(CheckRange<uint32_t>(effect_in)),
+ control_in_(CheckRange<uint32_t>(control_in)),
value_out_(CheckRange<uint32_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
control_out_(CheckRange<uint32_t>(control_out)) {}
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 99e8461c86..e436ec09f4 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -137,8 +137,8 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Opcode opcode_;
Properties properties_;
uint32_t value_in_;
- uint16_t effect_in_;
- uint16_t control_in_;
+ uint32_t effect_in_;
+ uint32_t control_in_;
uint32_t value_out_;
uint8_t effect_out_;
uint32_t control_out_;
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 99ef25f457..6019b344bf 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -45,9 +45,9 @@ void PipelineStatistics::CommonStats::End(
timer_.Stop();
}
-PipelineStatistics::PipelineStatistics(CompilationInfo* info,
+PipelineStatistics::PipelineStatistics(CompilationInfo* info, Isolate* isolate,
ZoneStats* zone_stats)
- : isolate_(info->isolate()),
+ : isolate_(isolate),
outer_zone_(info->zone()),
zone_stats_(zone_stats),
compilation_stats_(isolate_->GetTurboStatistics()),
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index b09e2363d6..b2bf3ac76a 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -20,7 +20,8 @@ class PhaseScope;
class PipelineStatistics : public Malloced {
public:
- PipelineStatistics(CompilationInfo* info, ZoneStats* zone_stats);
+ PipelineStatistics(CompilationInfo* info, Isolate* isolate,
+ ZoneStats* zone_stats);
~PipelineStatistics();
void BeginPhaseKind(const char* phase_kind_name);
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 9ad3763403..045d695ecf 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -12,6 +12,7 @@
#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/bootstrapper.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/basic-block-instrumentor.h"
@@ -71,19 +72,23 @@
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
+
+namespace trap_handler {
+struct ProtectedInstructionData;
+} // namespace trap_handler
+
namespace compiler {
class PipelineData {
public:
// For main entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
+ PipelineData(ZoneStats* zone_stats, Isolate* isolate, CompilationInfo* info,
PipelineStatistics* pipeline_statistics)
- : isolate_(info->isolate()),
+ : isolate_(isolate),
info_(info),
debug_name_(info_->GetDebugName()),
may_have_unverifiable_graph_(false),
@@ -112,12 +117,12 @@ class PipelineData {
}
// For WebAssembly compile entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
- PipelineStatistics* pipeline_statistics,
+ PipelineData(ZoneStats* zone_stats, Isolate* isolate, CompilationInfo* info,
+ JSGraph* jsgraph, PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
- ZoneVector<trap_handler::ProtectedInstructionData>*
+ std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions)
- : isolate_(info->isolate()),
+ : isolate_(isolate),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@@ -135,14 +140,14 @@ class PipelineData {
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- protected_instructions_(protected_instructions) {
- }
+ protected_instructions_(protected_instructions) {}
// For machine graph testing entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
- Schedule* schedule, SourcePositionTable* source_positions,
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Isolate* isolate,
+ Graph* graph, Schedule* schedule,
+ SourcePositionTable* source_positions,
JumpOptimizationInfo* jump_opt)
- : isolate_(info->isolate()),
+ : isolate_(isolate),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@@ -157,10 +162,11 @@ class PipelineData {
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
jump_optimization_info_(jump_opt) {}
+
// For register allocation testing entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Isolate* isolate,
InstructionSequence* sequence)
- : isolate_(info->isolate()),
+ : isolate_(isolate),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@@ -171,8 +177,7 @@ class PipelineData {
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {
- }
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
~PipelineData() {
delete code_generator_; // Must happen before zones are destroyed.
@@ -248,7 +253,7 @@ class PipelineData {
source_position_output_ = source_position_output;
}
- ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions()
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions()
const {
return protected_instructions_;
}
@@ -297,8 +302,8 @@ class PipelineData {
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
- sequence_ = new (instruction_zone()) InstructionSequence(
- info()->isolate(), instruction_zone(), instruction_blocks);
+ sequence_ = new (instruction_zone())
+ InstructionSequence(isolate(), instruction_zone(), instruction_blocks);
if (descriptor && descriptor->RequiresFrameAsIncoming()) {
sequence_->instruction_blocks()[0]->mark_needs_frame();
} else {
@@ -336,9 +341,10 @@ class PipelineData {
void InitializeCodeGenerator(Linkage* linkage) {
DCHECK_NULL(code_generator_);
- code_generator_ = new CodeGenerator(
- codegen_zone(), frame(), linkage, sequence(), info(), osr_helper_,
- start_source_position_, jump_optimization_info_);
+ code_generator_ =
+ new CodeGenerator(codegen_zone(), frame(), linkage, sequence(), info(),
+ isolate(), osr_helper_, start_source_position_,
+ jump_optimization_info_, protected_instructions_);
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -409,7 +415,7 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
- ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
nullptr;
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
@@ -455,6 +461,151 @@ class PipelineImpl final {
namespace {
+// Print function's source if it was not printed before.
+// Return a sequential id under which this function was printed.
+int PrintFunctionSource(CompilationInfo* info, Isolate* isolate,
+ std::vector<Handle<SharedFunctionInfo>>* printed,
+ int inlining_id, Handle<SharedFunctionInfo> shared) {
+ // Outermost function has source id -1 and inlined functions take
+ // source ids starting from 0.
+ int source_id = -1;
+ if (inlining_id != SourcePosition::kNotInlined) {
+ for (unsigned i = 0; i < printed->size(); i++) {
+ if (printed->at(i).is_identical_to(shared)) {
+ return i;
+ }
+ }
+ source_id = static_cast<int>(printed->size());
+ printed->push_back(shared);
+ }
+
+ if (!shared->script()->IsUndefined(isolate)) {
+ Handle<Script> script(Script::cast(shared->script()), isolate);
+
+ if (!script->source()->IsUndefined(isolate)) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ Object* source_name = script->name();
+ OFStream os(tracing_scope.file());
+ os << "--- FUNCTION SOURCE (";
+ if (source_name->IsString()) {
+ os << String::cast(source_name)->ToCString().get() << ":";
+ }
+ os << shared->DebugName()->ToCString().get() << ") id{";
+ os << info->optimization_id() << "," << source_id << "} start{";
+ os << shared->start_position() << "} ---\n";
+ {
+ DisallowHeapAllocation no_allocation;
+ int start = shared->start_position();
+ int len = shared->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ os << AsReversiblyEscapedUC16(c);
+ }
+ }
+
+ os << "\n--- END ---\n";
+ }
+ }
+
+ return source_id;
+}
+
+// Print information for the given inlining: which function was inlined and
+// where the inlining occurred.
+void PrintInlinedFunctionInfo(CompilationInfo* info, Isolate* isolate,
+ int source_id, int inlining_id,
+ const CompilationInfo::InlinedFunctionHolder& h) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
+ << info->optimization_id() << "," << source_id << "} AS " << inlining_id
+ << " AT ";
+ const SourcePosition position = h.position.position;
+ if (position.IsKnown()) {
+ os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
+ } else {
+ os << "<?>";
+ }
+ os << std::endl;
+}
+
+// Print the source of all functions that participated in this optimizing
+// compilation. For inlined functions print source position of their inlining.
+void DumpParticipatingSource(CompilationInfo* info, Isolate* isolate) {
+ AllowDeferredHandleDereference allow_deference_for_print_code;
+
+ std::vector<Handle<SharedFunctionInfo>> printed;
+ printed.reserve(info->inlined_functions().size());
+
+ PrintFunctionSource(info, isolate, &printed, SourcePosition::kNotInlined,
+ info->shared_info());
+ const auto& inlined = info->inlined_functions();
+ for (unsigned id = 0; id < inlined.size(); id++) {
+ const int source_id = PrintFunctionSource(info, isolate, &printed, id,
+ inlined[id].shared_info);
+ PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
+ }
+}
+
+// Print the code after compiling it.
+void PrintCode(Handle<Code> code, CompilationInfo* info) {
+ Isolate* isolate = code->GetIsolate();
+ if (FLAG_print_opt_source && info->IsOptimizing()) {
+ DumpParticipatingSource(info, isolate);
+ }
+
+#ifdef ENABLE_DISASSEMBLER
+ AllowDeferredHandleDereference allow_deference_for_print_code;
+ bool print_code =
+ isolate->bootstrapper()->IsActive()
+ ? FLAG_print_builtin_code
+ : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
+ (info->IsOptimizing() && FLAG_print_opt_code &&
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
+ (info->IsWasm() && FLAG_print_wasm_code));
+ if (print_code) {
+ std::unique_ptr<char[]> debug_name = info->GetDebugName();
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+
+ // Print the source code if available.
+ bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
+ if (print_source) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (shared->script()->IsScript() &&
+ !Script::cast(shared->script())->source()->IsUndefined(isolate)) {
+ os << "--- Raw source ---\n";
+ StringCharacterStream stream(
+ String::cast(Script::cast(shared->script())->source()),
+ shared->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len = shared->end_position() - shared->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) {
+ os << AsReversiblyEscapedUC16(stream.GetNext());
+ }
+ }
+ os << "\n\n";
+ }
+ }
+ if (info->IsOptimizing()) {
+ os << "--- Optimized code ---\n"
+ << "optimization_id = " << info->optimization_id() << "\n";
+ } else {
+ os << "--- Code ---\n";
+ }
+ if (print_source) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ os << "source_position = " << shared->start_position() << "\n";
+ }
+ code->Disassemble(debug_name.get(), os);
+ os << "--- End code ---\n";
+ }
+#endif // ENABLE_DISASSEMBLER
+}
+
struct TurboCfgFile : public std::ofstream {
explicit TurboCfgFile(Isolate* isolate)
: std::ofstream(isolate->GetTurboCfgFileName().c_str(),
@@ -467,7 +618,8 @@ struct TurboJsonFile : public std::ofstream {
mode) {}
};
-void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
+void TraceSchedule(CompilationInfo* info, Isolate* isolate,
+ Schedule* schedule) {
if (FLAG_trace_turbo) {
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
@@ -482,7 +634,7 @@ void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
}
if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "-- Schedule --------------------------------------\n" << *schedule;
}
@@ -551,11 +703,12 @@ class PipelineRunScope {
PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
CompilationInfo* info,
+ Isolate* isolate,
ZoneStats* zone_stats) {
PipelineStatistics* pipeline_statistics = nullptr;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics = new PipelineStatistics(info, zone_stats);
+ pipeline_statistics = new PipelineStatistics(info, isolate, zone_stats);
pipeline_statistics->BeginPhaseKind("initializing");
}
@@ -565,7 +718,6 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
int pos = info->IsStub() ? 0 : info->shared_info()->start_position();
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
- Isolate* isolate = info->isolate();
if (!script.is_null() && !script->source()->IsUndefined(isolate)) {
DisallowHeapAllocation no_allocation;
int start = info->shared_info()->start_position();
@@ -590,25 +742,27 @@ class PipelineCompilationJob final : public CompilationJob {
Handle<JSFunction> function)
// Note that the CompilationInfo is not initialized at the time we pass it
// to the CompilationJob constructor, but it is not dereferenced there.
- : CompilationJob(function->GetIsolate(), parse_info, &compilation_info_,
- "TurboFan"),
+ : CompilationJob(parse_info->stack_limit(), parse_info,
+ &compilation_info_, "TurboFan"),
parse_info_(parse_info),
zone_stats_(function->GetIsolate()->allocator()),
compilation_info_(parse_info_.get()->zone(), function->GetIsolate(),
shared_info, function),
- pipeline_statistics_(CreatePipelineStatistics(
- parse_info_->script(), compilation_info(), &zone_stats_)),
- data_(&zone_stats_, compilation_info(), pipeline_statistics_.get()),
+ pipeline_statistics_(
+ CreatePipelineStatistics(parse_info_->script(), compilation_info(),
+ function->GetIsolate(), &zone_stats_)),
+ data_(&zone_stats_, function->GetIsolate(), compilation_info(),
+ pipeline_statistics_.get()),
pipeline_(&data_),
linkage_(nullptr) {}
protected:
- Status PrepareJobImpl() final;
+ Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
- Status FinalizeJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
// Registers weak object to optimized code dependencies.
- void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code, Isolate* isolate);
private:
std::unique_ptr<ParseInfo> parse_info_;
@@ -622,7 +776,8 @@ class PipelineCompilationJob final : public CompilationJob {
DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};
-PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
+PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
if (!FLAG_always_opt) {
compilation_info()->MarkAsBailoutOnUninitialized();
}
@@ -636,7 +791,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
compilation_info()->MarkAsAccessorInliningEnabled();
}
if (compilation_info()->closure()->feedback_vector_cell()->map() ==
- isolate()->heap()->one_closure_cell_map()) {
+ isolate->heap()->one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
@@ -647,7 +802,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));
if (!pipeline_.CreateGraph()) {
- if (isolate()->has_pending_exception()) return FAILED; // Stack overflowed.
+ if (isolate->has_pending_exception()) return FAILED; // Stack overflowed.
return AbortOptimization(kGraphBuildingFailed);
}
@@ -656,7 +811,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
// Make sure that we have generated the maximal number of deopt entries.
// This is in order to avoid triggering the generation of deopt entries later
// during code assembly.
- Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate());
+ Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
return SUCCEEDED;
}
@@ -667,7 +822,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
return SUCCEEDED;
}
-PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
+PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
if (compilation_info()->bailout_reason() == kNoReason) {
@@ -679,7 +835,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
compilation_info()->SetCode(code);
compilation_info()->context()->native_context()->AddOptimizedCode(*code);
- RegisterWeakObjectsInOptimizedCode(code);
+ RegisterWeakObjectsInOptimizedCode(code, isolate);
return SUCCEEDED;
}
@@ -702,7 +858,7 @@ void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
} // namespace
void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
- Handle<Code> code) {
+ Handle<Code> code, Isolate* isolate) {
DCHECK(code->is_optimized_code());
std::vector<Handle<Map>> maps;
std::vector<Handle<HeapObject>> objects;
@@ -714,7 +870,7 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
if (mode == RelocInfo::EMBEDDED_OBJECT &&
code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
- isolate());
+ isolate);
if (object->IsMap()) {
maps.push_back(Handle<Map>::cast(object));
} else {
@@ -725,12 +881,12 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
}
for (Handle<Map> map : maps) {
if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
- isolate()->heap()->AddRetainedMap(map);
+ isolate->heap()->AddRetainedMap(map);
}
Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
}
for (Handle<HeapObject> object : objects) {
- AddWeakObjectToCodeDependency(isolate(), object, code);
+ AddWeakObjectToCodeDependency(isolate, object, code);
}
code->set_can_have_weak_objects(true);
}
@@ -738,25 +894,25 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
class PipelineWasmCompilationJob final : public CompilationJob {
public:
explicit PipelineWasmCompilationJob(
- CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions,
- ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
+ CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
+ CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ std::vector<trap_handler::ProtectedInstructionData>* protected_insts,
bool asmjs_origin)
- : CompilationJob(info->isolate(), nullptr, info, "TurboFan",
- State::kReadyToExecute),
- zone_stats_(info->isolate()->allocator()),
- pipeline_statistics_(CreatePipelineStatistics(Handle<Script>::null(),
- info, &zone_stats_)),
- data_(&zone_stats_, info, jsgraph, pipeline_statistics_.get(),
+ : CompilationJob(isolate->stack_guard()->real_climit(), nullptr, info,
+ "TurboFan", State::kReadyToExecute),
+ zone_stats_(isolate->allocator()),
+ pipeline_statistics_(CreatePipelineStatistics(
+ Handle<Script>::null(), info, isolate, &zone_stats_)),
+ data_(&zone_stats_, isolate, info, jsgraph, pipeline_statistics_.get(),
source_positions, protected_insts),
pipeline_(&data_),
linkage_(descriptor),
asmjs_origin_(asmjs_origin) {}
protected:
- Status PrepareJobImpl() final;
+ Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
- Status FinalizeJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
private:
size_t AllocatedMemory() const override;
@@ -775,8 +931,8 @@ class PipelineWasmCompilationJob final : public CompilationJob {
bool asmjs_origin_;
};
-PipelineWasmCompilationJob::Status
-PipelineWasmCompilationJob::PrepareJobImpl() {
+PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
UNREACHABLE(); // Prepare should always be skipped for WasmCompilationJob.
return SUCCEEDED;
}
@@ -795,7 +951,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
PipelineRunScope scope(data, "Wasm optimization");
JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->common(), scope.zone());
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph(), asmjs_origin_);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -817,10 +973,24 @@ size_t PipelineWasmCompilationJob::AllocatedMemory() const {
return pipeline_.data_->zone_stats()->GetCurrentAllocatedBytes();
}
-PipelineWasmCompilationJob::Status
-PipelineWasmCompilationJob::FinalizeJobImpl() {
- pipeline_.FinalizeCode();
- ValidateImmovableEmbeddedObjects();
+PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
+ if (!FLAG_wasm_jit_to_native) {
+ pipeline_.FinalizeCode();
+ ValidateImmovableEmbeddedObjects();
+ } else {
+ CodeGenerator* code_generator = pipeline_.data_->code_generator();
+ CompilationInfo::WasmCodeDesc* wasm_code_desc =
+ compilation_info()->wasm_code_desc();
+ code_generator->tasm()->GetCode(isolate, &wasm_code_desc->code_desc);
+ wasm_code_desc->safepoint_table_offset =
+ code_generator->GetSafepointTableOffset();
+ wasm_code_desc->frame_slot_count =
+ code_generator->frame()->GetTotalFrameSlotCount();
+ wasm_code_desc->source_positions_table =
+ code_generator->GetSourcePositionTable();
+ wasm_code_desc->handler_table = code_generator->GetHandlerTable();
+ }
return SUCCEEDED;
}
@@ -844,7 +1014,7 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
case RelocInfo::CODE_TARGET:
// this would be either one of the stubs or builtins, because
// we didn't link yet.
- target = reinterpret_cast<Object*>(it.rinfo()->target_address());
+ target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
break;
case RelocInfo::EMBEDDED_OBJECT:
target = it.rinfo()->target_object();
@@ -855,7 +1025,18 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
CHECK_NOT_NULL(target);
bool is_immovable =
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
- CHECK(is_immovable);
+ bool is_wasm = target->IsCode() &&
+ (Code::cast(target)->kind() == Code::WASM_FUNCTION ||
+ Code::cast(target)->kind() == Code::WASM_TO_JS_FUNCTION ||
+ Code::cast(target)->kind() == Code::WASM_TO_WASM_FUNCTION);
+ bool is_allowed_stub = false;
+ if (target->IsCode()) {
+ Code* code = Code::cast(target);
+ is_allowed_stub =
+ code->kind() == Code::STUB &&
+ CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
+ }
+ CHECK(is_immovable || is_wasm || is_allowed_stub);
}
}
@@ -929,7 +1110,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
@@ -1023,7 +1204,7 @@ struct TypedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->common(), temp_zone);
JSBuiltinReducer builtin_reducer(
&graph_reducer, data->jsgraph(),
data->info()->dependencies(), data->native_context());
@@ -1136,7 +1317,7 @@ struct EarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->common(), temp_zone);
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
@@ -1167,50 +1348,53 @@ struct EffectControlLinearizationPhase {
static const char* phase_name() { return "effect linearization"; }
void Run(PipelineData* data, Zone* temp_zone) {
- // The scheduler requires the graphs to be trimmed, so trim now.
- // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
- // graphs.
- GraphTrimmer trimmer(temp_zone, data->graph());
- NodeVector roots(temp_zone);
- data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
-
- // Schedule the graph without node splitting so that we can
- // fix the effect and control flow for nodes with low-level side
- // effects (such as changing representation to tagged or
- // 'floating' allocation regions.)
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kTempSchedule);
- if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
- TraceSchedule(data->info(), schedule);
-
- // Post-pass for wiring the control/effects
- // - connect allocating representation changes into the control&effect
- // chains and lower them,
- // - get rid of the region markers,
- // - introduce effect phis and rewire effects to get SSA again.
- EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
- data->source_positions());
- linearizer.Run();
- }
-};
-
-// The store-store elimination greatly benefits from doing a common operator
-// reducer and dead code elimination just before it, to eliminate conditional
-// deopts with a constant condition.
-
-struct DeadCodeEliminationPhase {
- static const char* phase_name() { return "dead code elimination"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
- AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &common_reducer);
- graph_reducer.ReduceGraph();
+ {
+ // The scheduler requires the graphs to be trimmed, so trim now.
+ // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
+ // graphs.
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ // Schedule the graph without node splitting so that we can
+ // fix the effect and control flow for nodes with low-level side
+ // effects (such as changing representation to tagged or
+ // 'floating' allocation regions.)
+ Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
+ Scheduler::kTempSchedule);
+ if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
+ TraceSchedule(data->info(), data->isolate(), schedule);
+
+ // Post-pass for wiring the control/effects
+ // - connect allocating representation changes into the control&effect
+ // chains and lower them,
+ // - get rid of the region markers,
+ // - introduce effect phis and rewire effects to get SSA again.
+ EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
+ data->info()->has_untrusted_code_mitigations()
+ ? EffectControlLinearizer::kMaskArrayIndex
+ : EffectControlLinearizer::kDoNotMaskArrayIndex;
+ EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
+ data->source_positions(),
+ mask_array_index);
+ linearizer.Run();
+ }
+ {
+ // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
+ // run {DeadCodeElimination} to prune these parts of the graph.
+ // Also, the following store-store elimination phase greatly benefits from
+ // doing a common operator reducer and dead code elimination just before
+ // it, to eliminate conditional deopts with a constant condition.
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ }
}
};
@@ -1235,7 +1419,7 @@ struct LoadEliminationPhase {
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->common(), temp_zone);
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
@@ -1278,7 +1462,7 @@ struct LateOptimizationPhase {
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->common(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -1347,7 +1531,7 @@ struct InstructionSelectionPhase {
FLAG_turbo_instruction_scheduling
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
- data->info()->will_serialize()
+ data->isolate()->serializer_enabled()
? InstructionSelector::kEnableSerialization
: InstructionSelector::kDisableSerialization);
if (!selector.SelectInstructions()) {
@@ -1562,13 +1746,13 @@ struct PrintGraphPhase {
}
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsScheduledGraph(schedule);
} else if (FLAG_trace_turbo_graph) { // Simple textual RPO.
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
@@ -1582,8 +1766,22 @@ struct VerifyGraphPhase {
void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
bool values_only = false) {
+ Verifier::CodeType code_type;
+ switch (data->info()->code_kind()) {
+ case Code::WASM_FUNCTION:
+ case Code::WASM_TO_JS_FUNCTION:
+ case Code::WASM_TO_WASM_FUNCTION:
+ case Code::JS_TO_WASM_FUNCTION:
+ case Code::WASM_INTERPRETER_ENTRY:
+ case Code::C_WASM_ENTRY:
+ code_type = Verifier::kWasm;
+ break;
+ default:
+ code_type = Verifier::kDefault;
+ }
Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
- values_only ? Verifier::kValuesOnly : Verifier::kAll);
+ values_only ? Verifier::kValuesOnly : Verifier::kAll,
+ code_type);
}
};
@@ -1720,9 +1918,6 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized", true);
- Run<DeadCodeEliminationPhase>();
- RunPrintAndVerify("Dead code elimination", true);
-
if (FLAG_turbo_store_elimination) {
Run<StoreStoreEliminationPhase>();
RunPrintAndVerify("Store-store elimination", true);
@@ -1749,24 +1944,24 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
return ScheduleAndSelectInstructions(linkage, true);
}
-Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
- CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule,
- Code::Kind kind,
- const char* debug_name,
- JumpOptimizationInfo* jump_opt) {
- CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), kind);
- if (isolate->serializer_enabled()) info.MarkAsSerializing();
+Handle<Code> Pipeline::GenerateCodeForCodeStub(
+ Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule, Code::Kind kind, const char* debug_name,
+ uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt) {
+ CompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
+ info.set_builtin_index(builtin_index);
+ info.set_stub_key(stub_key);
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
SourcePositionTable source_positions(graph);
- PipelineData data(&zone_stats, &info, graph, schedule, &source_positions,
- jump_opt);
+ PipelineData data(&zone_stats, &info, isolate, graph, schedule,
+ &source_positions, jump_opt);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
+ pipeline_statistics.reset(
+ new PipelineStatistics(&info, isolate, &zone_stats));
pipeline_statistics->BeginPhaseKind("stub codegen");
}
@@ -1791,14 +1986,17 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
- ZoneStats zone_stats(info->isolate()->allocator());
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+ Isolate* isolate) {
+ ZoneStats zone_stats(isolate->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
- CreatePipelineStatistics(Handle<Script>::null(), info, &zone_stats));
- PipelineData data(&zone_stats, info, pipeline_statistics.get());
+ CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
+ &zone_stats));
+ PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
+ Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
if (!pipeline.CreateGraph()) return Handle<Code>::null();
if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
@@ -1808,28 +2006,30 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
// static
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
- Graph* graph,
+ Isolate* isolate, Graph* graph,
Schedule* schedule) {
CallDescriptor* call_descriptor =
Linkage::ComputeIncoming(info->zone(), info);
- return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
+ return GenerateCodeForTesting(info, isolate, call_descriptor, graph,
+ schedule);
}
// static
Handle<Code> Pipeline::GenerateCodeForTesting(
- CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, SourcePositionTable* source_positions) {
+ CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule, SourcePositionTable* source_positions) {
// Construct a pipeline for scheduling and code generation.
- ZoneStats zone_stats(info->isolate()->allocator());
+ ZoneStats zone_stats(isolate->allocator());
// TODO(wasm): Refactor code generation to check for non-existing source
// table, then remove this conditional allocation.
if (!source_positions)
source_positions = new (info->zone()) SourcePositionTable(graph);
- PipelineData data(&zone_stats, info, graph, schedule, source_positions,
- nullptr);
+ PipelineData data(&zone_stats, info, isolate, graph, schedule,
+ source_positions, nullptr);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
+ pipeline_statistics.reset(
+ new PipelineStatistics(info, isolate, &zone_stats));
pipeline_statistics->BeginPhaseKind("test codegen");
}
@@ -1861,11 +2061,11 @@ CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
// static
CompilationJob* Pipeline::NewWasmCompilationJob(
- CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions,
- ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
+ CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
+ CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
wasm::ModuleOrigin asmjs_origin) {
- return new PipelineWasmCompilationJob(info, jsgraph, descriptor,
+ return new PipelineWasmCompilationJob(info, isolate, jsgraph, descriptor,
source_positions,
protected_instructions, asmjs_origin);
}
@@ -1873,10 +2073,9 @@ CompilationJob* Pipeline::NewWasmCompilationJob(
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
- sequence->zone(), Code::STUB);
+ CompilationInfo info(ArrayVector("testing"), sequence->zone(), Code::STUB);
ZoneStats zone_stats(sequence->isolate()->allocator());
- PipelineData data(&zone_stats, &info, sequence);
+ PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
PipelineImpl pipeline(&data);
pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
@@ -1895,11 +2094,11 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
RunPrintAndVerify("Late trimmed", true);
}
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
- TraceSchedule(data->info(), data->schedule());
+ TraceSchedule(data->info(), data->isolate(), data->schedule());
if (FLAG_turbo_profiling) {
data->set_profiler_data(BasicBlockInstrumentor::Instrument(
- info(), data->graph(), data->schedule()));
+ info(), data->graph(), data->schedule(), data->isolate()));
}
bool verify_stub_graph = data->verify_graph();
@@ -1917,8 +2116,7 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
!strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
if (FLAG_trace_verify_csa) {
AllowHandleDereference allow_deref;
- CompilationInfo* info = data->info();
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "--------------------------------------------------\n"
<< "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
@@ -2019,7 +2217,7 @@ Handle<Code> PipelineImpl::FinalizeCode() {
}
info()->SetCode(code);
- v8::internal::CodeGenerator::PrintCode(code, info());
+ PrintCode(code, info());
if (FLAG_trace_turbo) {
TurboJsonFile json_of(info(), std::ios_base::app);
@@ -2152,7 +2350,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
CompilationInfo* PipelineImpl::info() const { return data_->info(); }
-Isolate* PipelineImpl::isolate() const { return info()->isolate(); }
+Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 7b7a8b5336..2dca7794eb 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -9,6 +9,7 @@
// Do not include anything from src/compiler here!
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/code.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -44,29 +45,28 @@ class Pipeline : public AllStatic {
// Returns a new compilation job for the WebAssembly compilation info.
static CompilationJob* NewWasmCompilationJob(
- CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions,
- ZoneVector<trap_handler::ProtectedInstructionData>*
+ CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
+ CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions,
wasm::ModuleOrigin wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
- static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
- CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule,
- Code::Kind kind,
- const char* debug_name,
- JumpOptimizationInfo* jump_opt);
+ static Handle<Code> GenerateCodeForCodeStub(
+ Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule, Code::Kind kind, const char* debug_name,
+ uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt);
// Run the entire pipeline and generate a handle to a code object suitable for
// testing.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info);
+ static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+ Isolate* isolate);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
- Graph* graph,
+ Isolate* isolate, Graph* graph,
Schedule* schedule = nullptr);
// Run just the register allocator phases.
@@ -77,8 +77,8 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
static Handle<Code> GenerateCodeForTesting(
- CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule = nullptr,
+ CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule = nullptr,
SourcePositionTable* source_positions = nullptr);
private:
diff --git a/deps/v8/src/compiler/ppc/OWNERS b/deps/v8/src/compiler/ppc/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/compiler/ppc/OWNERS
+++ b/deps/v8/src/compiler/ppc/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 4e96e19ae5..11fde27fc9 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -214,6 +214,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
@@ -236,21 +237,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ mflr(scratch0_);
__ Push(scratch0_);
}
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
- } else {
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
- }
-#endif
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Pop(scratch0_);
@@ -966,7 +954,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -974,9 +962,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ mov_label_addr(r11, &current);
int pc_offset = __ pc_offset();
__ bind(&current);
- int offset =
- Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset);
- __ LoadWordArith(r11, MemOperand(r11, offset));
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
+ __ LoadP(r11, MemOperand(r11, offset));
+ __ LoadWordArith(
+ r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -1005,6 +994,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
+ if (info()->IsWasm()) {
+ rmode = RelocInfo::WASM_CALL;
+ }
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Call(wasm_code, rmode);
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (opcode == kArchTailCallCodeObjectFromJSFunction) {
@@ -1027,6 +1036,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
+ if (info()->IsWasm()) {
+ rmode = RelocInfo::WASM_CALL;
+ }
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Jump(wasm_code, rmode);
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
@@ -1721,10 +1750,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_Push:
if (instr->InputAt(0)->IsFPRegister()) {
- __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDoubleU(i.InputDoubleRegister(0),
+ MemOperand(sp, -kDoubleSize), r0);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ StoreSingleU(i.InputDoubleRegister(0),
+ MemOperand(sp, -kPointerSize), r0);
+ frame_access_state()->IncreaseSPDelta(1);
+ }
} else {
- __ Push(i.InputRegister(0));
+ __ StorePU(i.InputRegister(0), MemOperand(sp, -kPointerSize), r0);
frame_access_state()->IncreaseSPDelta(1);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -2295,7 +2333,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
if (double_saves != 0) {
frame->AlignSavedCalleeRegisterSlots();
DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation32(double_saves));
+ base::bits::CountPopulation(double_saves));
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kPointerSize));
}
@@ -2308,7 +2346,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
// register save area does not include the fp or constant pool pointer.
const int num_saves =
kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
- DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ DCHECK(num_saves == base::bits::CountPopulation(saves));
frame->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
@@ -2364,7 +2402,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (double_saves != 0) {
__ MultiPushDoubles(double_saves);
DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation32(double_saves));
+ base::bits::CountPopulation(double_saves));
}
// Save callee-saved registers.
@@ -2415,6 +2453,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
AssembleDeconstructFrame();
}
}
+ // Constant pool is unavailable since the frame has been destructed
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
if (pop->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
@@ -2425,7 +2465,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() {}
+void CodeGenerator::FinishCode() { __ EmitConstantPool(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2532,8 +2572,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: Double(src.ToFloat64());
#endif
__ LoadDoubleLiteral(dst, value, kScratchReg);
- if (destination->IsFPStackSlot()) {
+ if (destination->IsDoubleStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination), r0);
+ } else if (destination->IsFloatStackSlot()) {
+ __ StoreSingle(dst, g.ToMemOperand(destination), r0);
}
}
} else if (source->IsFPRegister()) {
@@ -2576,10 +2618,60 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
-
+// Swaping contents in source and destination.
+// source and destination could be:
+// Register,
+// FloatRegister,
+// DoubleRegister,
+// StackSlot,
+// FloatStackSlot,
+// or DoubleStackSlot
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
PPCOperandConverter g(this, nullptr);
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ SwapP(src, g.ToRegister(destination), kScratchReg);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+ __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
+ r0);
+ } else if (source->IsFloatRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFloatRegister()) {
+ __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
+ } else {
+ DCHECK(destination->IsFloatStackSlot());
+ __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
+ }
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
+ }
+ } else if (source->IsFloatStackSlot()) {
+ DCHECK(destination->IsFloatStackSlot());
+ __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
+ kScratchDoubleReg, d0);
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
+ kScratchDoubleReg, d0);
+ } else if (source->IsSimd128Register()) {
+ UNREACHABLE();
+ } else {
+ UNREACHABLE();
+ }
+
+ return;
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 2b8e4bc57c..8454590ee2 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -509,7 +509,7 @@ static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
- int mask_width = base::bits::CountPopulation32(value);
+ int mask_width = base::bits::CountPopulation(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
int mask_lsb = base::bits::CountTrailingZeros32(value);
if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
@@ -522,7 +522,7 @@ static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
#if V8_TARGET_ARCH_PPC64
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
- int mask_width = base::bits::CountPopulation64(value);
+ int mask_width = base::bits::CountPopulation(value);
int mask_msb = base::bits::CountLeadingZeros64(value);
int mask_lsb = base::bits::CountTrailingZeros64(value);
if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
@@ -1995,21 +1995,10 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
- int num_slots = static_cast<int>(descriptor->StackParameterCount());
- int slot = 0;
- for (PushParameter input : (*arguments)) {
- if (slot == 0) {
- DCHECK(input.node());
- Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
- g.TempImmediate(num_slots));
- } else {
- // Skip any alignment holes in pushed nodes.
- if (input.node()) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
- g.TempImmediate(slot));
- }
- }
- ++slot;
+ for (PushParameter input : base::Reversed(*arguments)) {
+ // Skip any alignment holes in pushed nodes.
+ if (input.node() == nullptr) continue;
+ Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node()));
}
}
}
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 8f24d9205b..5e79cbdfec 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -93,6 +93,7 @@ namespace {
bool NeedsCheckHeapObject(Node* receiver) {
switch (receiver->opcode()) {
+ case IrOpcode::kConvertReceiver:
case IrOpcode::kHeapConstant:
case IrOpcode::kJSCreate:
case IrOpcode::kJSCreateArguments:
@@ -105,7 +106,6 @@ bool NeedsCheckHeapObject(Node* receiver) {
case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
case IrOpcode::kJSCreateGeneratorObject:
- case IrOpcode::kJSConvertReceiver:
case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
case IrOpcode::kJSConstructWithArrayLike:
@@ -113,7 +113,7 @@ bool NeedsCheckHeapObject(Node* receiver) {
case IrOpcode::kJSToName:
case IrOpcode::kJSToString:
case IrOpcode::kJSToObject:
- case IrOpcode::kJSTypeOf:
+ case IrOpcode::kTypeOf:
case IrOpcode::kJSGetSuperConstructor:
return false;
case IrOpcode::kPhi: {
@@ -167,6 +167,19 @@ void PropertyAccessBuilder::BuildCheckMaps(
*effect, control);
}
+Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Node** effect,
+ Node* control,
+ Handle<HeapObject> value) {
+ HeapObjectMatcher m(receiver);
+ if (m.Is(value)) return receiver;
+ Node* expected = jsgraph()->HeapConstant(value);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), receiver, expected);
+ *effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ check, *effect, control);
+ return expected;
+}
+
void PropertyAccessBuilder::AssumePrototypesStable(
Handle<Context> native_context,
std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 6ae3a7bd0f..13a0f0b46f 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -41,6 +41,8 @@ class PropertyAccessBuilder {
Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
void BuildCheckMaps(Node* receiver, Node** effect, Node* control,
std::vector<Handle<Map>> const& receiver_maps);
+ Node* BuildCheckValue(Node* receiver, Node** effect, Node* control,
+ Handle<HeapObject> value);
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index b685fc5d66..bed2f628d9 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -440,6 +440,8 @@ void RawMachineAssembler::PrintCurrentBlock(std::ostream& os) {
os << CurrentBlock();
}
+bool RawMachineAssembler::InsideBlock() { return current_block_ != nullptr; }
+
void RawMachineAssembler::SetInitialDebugInformation(
AssemblerDebugInfo debug_info) {
CurrentBlock()->set_debug_info(debug_info);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 3ee91a1ff9..9fc3590875 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -842,6 +842,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
void SetInitialDebugInformation(AssemblerDebugInfo info);
void PrintCurrentBlock(std::ostream& os);
+ bool InsideBlock();
#endif // DEBUG
// Add success / exception successor blocks and ends the current block ending
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 321b9c6687..b0a345a57f 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -137,8 +137,7 @@ bool IsWord(MachineRepresentation rep) {
Node* RepresentationChanger::GetRepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
Node* use_node, UseInfo use_info) {
- if (output_rep == MachineRepresentation::kNone &&
- output_type->IsInhabited()) {
+ if (output_rep == MachineRepresentation::kNone && !output_type->IsNone()) {
// The output representation should be set if the type is inhabited (i.e.,
// if the value is possible).
return TypeError(node, output_rep, output_type, use_info.representation());
@@ -217,8 +216,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->Constant(0);
+ return jsgraph()->DeadValue();
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed31())) {
op = simplified()->ChangeInt31ToTaggedSigned();
@@ -336,8 +334,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
Operator const* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->TheHoleConstant();
+ return jsgraph()->DeadValue();
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -414,8 +411,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->TheHoleConstant();
+ return jsgraph()->DeadValue();
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -493,8 +489,7 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->Float32Constant(0.0f);
+ return jsgraph()->DeadValue();
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
// int32 -> float64 -> float32
@@ -554,8 +549,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->Float64Constant(0.0);
+ return jsgraph()->DeadValue();
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
@@ -632,8 +626,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->Int32Constant(0);
+ return jsgraph()->DeadValue();
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
@@ -769,8 +762,7 @@ Node* RepresentationChanger::GetBitRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->Int32Constant(0);
+ return jsgraph()->DeadValue();
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
if (output_type->Is(Type::BooleanOrNullOrUndefined())) {
@@ -815,8 +807,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- // We just provide a dummy value here.
- return jsgraph()->Int64Constant(0);
+ return jsgraph()->DeadValue();
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
}
diff --git a/deps/v8/src/compiler/s390/OWNERS b/deps/v8/src/compiler/s390/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/compiler/s390/OWNERS
+++ b/deps/v8/src/compiler/s390/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 8aeab0ac0d..f49a8e540c 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -96,7 +96,7 @@ class S390OperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
- MemOperand MemoryOperand(AddressingMode* mode = NULL,
+ MemOperand MemoryOperand(AddressingMode* mode = nullptr,
size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
@@ -269,14 +269,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
@@ -1174,7 +1168,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -1182,9 +1176,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ larl(r1, &current);
int pc_offset = __ pc_offset();
__ bind(&current);
- int offset =
- Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset);
- __ LoadW(ip, MemOperand(r1, offset));
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
+ __ LoadP(ip, MemOperand(r1, offset));
+ __ LoadW(ip,
+ FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -1215,6 +1210,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
+ if (info()->IsWasm()) {
+ rmode = RelocInfo::WASM_CALL;
+ }
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Call(wasm_code, rmode);
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (opcode == kArchTailCallCodeObjectFromJSFunction) {
@@ -1236,6 +1250,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
+ if (info()->IsWasm()) {
+ rmode = RelocInfo::WASM_CALL;
+ }
+
+ if (instr->InputAt(0)->IsImmediate()) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt32());
+ __ Jump(wasm_code, rmode);
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
@@ -1964,11 +1997,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CanonicalizeNaN(result, value);
break;
}
+ case kS390_StackClaim: {
+ int num_slots = i.InputInt32(0);
+ __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(num_slots);
+ break;
+ }
case kS390_Push:
if (instr->InputAt(0)->IsFPRegister()) {
- __ lay(sp, MemOperand(sp, -kDoubleSize));
- __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ lay(sp, MemOperand(sp, -kDoubleSize));
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ lay(sp, MemOperand(sp, -kPointerSize));
+ __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
} else {
__ Push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -2353,13 +2400,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadlW(i.OutputRegister(), i.MemoryOperand());
break;
case kAtomicStoreWord8:
- __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ __ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
case kAtomicStoreWord16:
- __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
case kAtomicStoreWord32:
- __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ __ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
// 0x aa bb cc dd
// index = 3..2..1..0
@@ -2677,7 +2724,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
if (double_saves != 0) {
frame->AlignSavedCalleeRegisterSlots();
DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation32(double_saves));
+ base::bits::CountPopulation(double_saves));
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kPointerSize));
}
@@ -2686,7 +2733,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
if (saves != 0) {
// register save area does not include the fp or constant pool pointer.
const int num_saves = kNumCalleeSaved - 1;
- DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ DCHECK(num_saves == base::bits::CountPopulation(saves));
frame->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
@@ -2735,7 +2782,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (double_saves != 0) {
__ MultiPushDoubles(double_saves);
DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation32(double_saves));
+ base::bits::CountPopulation(double_saves));
}
// Save callee-saved registers.
@@ -2883,7 +2930,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ LoadDoubleLiteral(dst, value, kScratchReg);
}
- if (destination->IsFPStackSlot()) {
+ if (destination->IsFloatStackSlot()) {
+ __ StoreFloat32(dst, g.ToMemOperand(destination));
+ } else if (destination->IsDoubleStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination));
}
}
@@ -2927,71 +2976,56 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
+// Swapping contents in source and destination.
+// source and destination could be:
+// Register,
+// FloatRegister,
+// DoubleRegister,
+// StackSlot,
+// FloatStackSlot,
+// or DoubleStackSlot
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
S390OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ LoadRR(temp, src);
- __ LoadRR(src, dst);
- __ LoadRR(dst, temp);
+ __ SwapP(src, g.ToRegister(destination), kScratchReg);
} else {
DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ LoadRR(temp, src);
- __ LoadP(src, dst);
- __ StoreP(temp, dst);
+ __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
}
-#if V8_TARGET_ARCH_S390X
- } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
-#else
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
-#endif
- Register temp_0 = kScratchReg;
- Register temp_1 = r0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ LoadP(temp_0, src);
- __ LoadP(temp_1, dst);
- __ StoreP(temp_0, dst);
- __ StoreP(temp_1, src);
- } else if (source->IsFPRegister()) {
- DoubleRegister temp = kScratchDoubleReg;
+ __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
+ r0);
+ } else if (source->IsFloatRegister()) {
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DoubleRegister dst = g.ToDoubleRegister(destination);
- __ ldr(temp, src);
- __ ldr(src, dst);
- __ ldr(dst, temp);
+ if (destination->IsFloatRegister()) {
+ __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ ldr(temp, src);
- __ LoadDouble(src, dst);
- __ StoreDouble(temp, dst);
+ DCHECK(destination->IsFloatStackSlot());
+ __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
}
-#if !V8_TARGET_ARCH_S390X
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- DoubleRegister temp_0 = kScratchDoubleReg;
- DoubleRegister temp_1 = d0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- // TODO(joransiu): MVC opportunity
- __ LoadDouble(temp_0, src);
- __ LoadDouble(temp_1, dst);
- __ StoreDouble(temp_0, dst);
- __ StoreDouble(temp_1, src);
-#endif
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
+ }
+ } else if (source->IsFloatStackSlot()) {
+ DCHECK(destination->IsFloatStackSlot());
+ __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
+ kScratchDoubleReg, d0);
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
+ kScratchDoubleReg, d0);
+ } else if (source->IsSimd128Register()) {
+ UNREACHABLE();
} else {
- // No other combinations are possible.
UNREACHABLE();
}
}
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index d415de6587..cb94da2ec7 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -97,6 +97,7 @@ namespace compiler {
V(S390_Tst64) \
V(S390_Push) \
V(S390_PushFrame) \
+ V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
V(S390_ExtendSignWord8) \
V(S390_ExtendSignWord16) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 350f84b4bd..1850830f6e 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -164,6 +164,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
+ case kS390_StackClaim:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index d8c7c64d83..54f5a0c68b 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -945,7 +945,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
- int mask_width = base::bits::CountPopulation32(value);
+ int mask_width = base::bits::CountPopulation(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
int mask_lsb = base::bits::CountTrailingZeros32(value);
if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
@@ -958,7 +958,7 @@ static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
#if V8_TARGET_ARCH_S390X
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
- int mask_width = base::bits::CountPopulation64(value);
+ int mask_width = base::bits::CountPopulation(value);
int mask_msb = base::bits::CountLeadingZeros64(value);
int mask_lsb = base::bits::CountTrailingZeros64(value);
if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
@@ -1406,8 +1406,8 @@ static inline bool TryMatchDoubleConstructFromInsert(
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Node* lo32 = NULL;
- Node* hi32 = NULL;
+ Node* lo32 = nullptr;
+ Node* hi32 = nullptr;
if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) {
lo32 = right;
@@ -2357,22 +2357,28 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
- int num_slots = static_cast<int>(descriptor->StackParameterCount());
+ int num_slots = 0;
int slot = 0;
- for (PushParameter input : (*arguments)) {
- if (slot == 0) {
- DCHECK(input.node());
- Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
- g.TempImmediate(num_slots));
- } else {
- // Skip any alignment holes in pushed nodes.
- if (input.node()) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(),
- g.UseRegister(input.node()), g.TempImmediate(slot));
- }
+
+ for (PushParameter input : *arguments) {
+ if (input.node() == nullptr) continue;
+ num_slots +=
+ input.type().representation() == MachineRepresentation::kFloat64
+ ? kDoubleSize / kPointerSize
+ : 1;
+ }
+ Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
+ for (PushParameter input : *arguments) {
+ // Skip any alignment holes in pushed nodes.
+ if (input.node()) {
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot));
+ slot += input.type().representation() == MachineRepresentation::kFloat64
+ ? (kDoubleSize / kPointerSize)
+ : 1;
}
- ++slot;
}
+ DCHECK(num_slots == slot);
}
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 28634b8c9f..9bdb7cfbaf 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -209,11 +209,11 @@ class InputUseInfos {
bool CanOverflowSigned32(const Operator* op, Type* left, Type* right,
Zone* type_zone) {
// We assume the inputs are checked Signed32 (or known statically
- // to be Signed32). Technically, theinputs could also be minus zero, but
+ // to be Signed32). Technically, the inputs could also be minus zero, but
// that cannot cause overflow.
left = Type::Intersect(left, Type::Signed32(), type_zone);
right = Type::Intersect(right, Type::Signed32(), type_zone);
- if (!left->IsInhabited() || !right->IsInhabited()) return false;
+ if (left->IsNone() || right->IsNone()) return false;
switch (op->opcode()) {
case IrOpcode::kSpeculativeSafeIntegerAdd:
return (left->Max() + right->Max() > kMaxInt) ||
@@ -229,6 +229,10 @@ bool CanOverflowSigned32(const Operator* op, Type* left, Type* right,
return true;
}
+bool IsSomePositiveOrderedNumber(Type* type) {
+ return type->Is(Type::OrderedNumber()) && !type->IsNone() && type->Min() > 0;
+}
+
} // namespace
class RepresentationSelector {
@@ -426,6 +430,7 @@ class RepresentationSelector {
break; \
}
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ DECLARE_CASE(SameValue)
#undef DECLARE_CASE
#define DECLARE_CASE(Name) \
@@ -457,6 +462,10 @@ class RepresentationSelector {
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
+ case IrOpcode::kConvertReceiver:
+ new_type = op_typer_.ConvertReceiver(FeedbackTypeOf(node->InputAt(0)));
+ break;
+
case IrOpcode::kPlainPrimitiveToNumber:
new_type = op_typer_.ToNumber(FeedbackTypeOf(node->InputAt(0)));
break;
@@ -552,8 +561,10 @@ class RepresentationSelector {
Type* current_integer =
Type::Intersect(current_type, integer, graph_zone());
+ DCHECK(!current_integer->IsNone());
Type* previous_integer =
Type::Intersect(previous_type, integer, graph_zone());
+ DCHECK(!previous_integer->IsNone());
// Once we start weakening a node, we should always weaken.
if (!GetInfo(node)->weakened()) {
@@ -1047,7 +1058,7 @@ class RepresentationSelector {
}
static MachineType DeoptMachineTypeOf(MachineRepresentation rep, Type* type) {
- if (!type->IsInhabited()) {
+ if (type->IsNone()) {
return MachineType::None();
}
// TODO(turbofan): Special treatment for ExternalPointer here,
@@ -1242,10 +1253,8 @@ class RepresentationSelector {
// there is no need to return -0.
CheckForMinusZeroMode mz_mode =
truncation.IdentifiesZeroAndMinusZero() ||
- (input0_type->Is(Type::OrderedNumber()) &&
- input0_type->Min() > 0) ||
- (input1_type->Is(Type::OrderedNumber()) &&
- input1_type->Min() > 0)
+ IsSomePositiveOrderedNumber(input0_type) ||
+ IsSomePositiveOrderedNumber(input1_type)
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
@@ -1264,16 +1273,13 @@ class RepresentationSelector {
SimplifiedLowering* lowering) {
Type* left_upper = GetUpperBound(node->InputAt(0));
Type* right_upper = GetUpperBound(node->InputAt(1));
- // Only eliminate eliminate the node if the ToNumber conversion cannot
- // cause any observable side-effect and if we know for sure that it
- // is a number addition (we must exclude strings).
- if (left_upper->Is(Type::NumberOrOddball()) &&
- right_upper->Is(Type::NumberOrOddball())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (left_upper->Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
right_upper->Is(type_cache_.kAdditiveSafeIntegerOrMinusZero)) {
+ // Only eliminate the node if its typing rule can be satisfied, namely
+ // that a safe integer is produced.
+ if (truncation.IsUnused()) return VisitUnused(node);
+
// If we know how to interpret the result or if the users only care
// about the low 32-bits, we can truncate to Word32 do a wrapping
// addition.
@@ -1541,10 +1547,9 @@ class RepresentationSelector {
//------------------------------------------------------------------
// JavaScript operators.
//------------------------------------------------------------------
- case IrOpcode::kJSToBoolean: {
+ case IrOpcode::kToBoolean: {
if (truncation.IsUsedAsBool()) {
ProcessInput(node, 0, UseInfo::Bool());
- ProcessInput(node, 1, UseInfo::None());
SetOutput(node, MachineRepresentation::kBit);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
@@ -1553,15 +1558,18 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kJSToNumber: {
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumeric: {
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
if (truncation.IsUsedAsWord32()) {
SetOutput(node, MachineRepresentation::kWord32);
- if (lower()) lowering->DoJSToNumberTruncatesToWord32(node, this);
+ if (lower())
+ lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this);
} else if (truncation.IsUsedAsFloat64()) {
SetOutput(node, MachineRepresentation::kFloat64);
- if (lower()) lowering->DoJSToNumberTruncatesToFloat64(node, this);
+ if (lower())
+ lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this);
} else {
SetOutput(node, MachineRepresentation::kTagged);
}
@@ -1583,7 +1591,7 @@ class RepresentationSelector {
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
} else {
- DCHECK(!TypeOf(node->InputAt(0))->IsInhabited());
+ DCHECK(TypeOf(node->InputAt(0))->IsNone());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else {
@@ -2080,8 +2088,8 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberShiftRightLogical: {
// ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
// can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
+ // that the inputs are PlainPrimitive, which excludes everything that
+ // might have side effects or throw during a ToNumber conversion.
if (BothInputsAre(node, Type::PlainPrimitive())) {
if (truncation.IsUnused()) return VisitUnused(node);
}
@@ -2346,6 +2354,17 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kSameValue: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
+ case IrOpcode::kClassOf:
+ case IrOpcode::kTypeOf: {
+ return VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ }
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
@@ -2405,8 +2424,9 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) {
- if (index_type->Min() >= 0.0 &&
- index_type->Max() < length_type->Min()) {
+ if (index_type->IsNone() || length_type->IsNone() ||
+ (index_type->Min() >= 0.0 &&
+ index_type->Max() < length_type->Min())) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
DeferReplacement(node, node->InputAt(0));
@@ -2419,6 +2439,11 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kMaskIndexWithBound: {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ return;
+ }
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
VisitUnop(node, UseInfo::AnyTagged(),
@@ -2512,12 +2537,6 @@ class RepresentationSelector {
MachineRepresentation field_representation =
access.machine_type.representation();
- // Make sure we convert to Smi if possible. This should help write
- // barrier elimination.
- if (field_representation == MachineRepresentation::kTagged &&
- TypeOf(value_node)->Is(Type::SignedSmall())) {
- field_representation = MachineRepresentation::kTaggedSigned;
- }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, field_representation, access.offset,
access.type, input_info->representation(), value_node);
@@ -2551,12 +2570,6 @@ class RepresentationSelector {
MachineRepresentation element_representation =
access.machine_type.representation();
- // Make sure we convert to Smi if possible. This should help write
- // barrier elimination.
- if (element_representation == MachineRepresentation::kTagged &&
- TypeOf(value_node)->Is(Type::SignedSmall())) {
- element_representation = MachineRepresentation::kTaggedSigned;
- }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, element_representation, access.type,
input_info->representation(), value_node);
@@ -2581,13 +2594,31 @@ class RepresentationSelector {
ProcessInput(node, 0, UseInfo::AnyTagged()); // array
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
- ProcessInput(node, 2, UseInfo::AnyTagged()); // value
if (value_type->Is(Type::SignedSmall())) {
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // value
if (lower()) {
NodeProperties::ChangeOp(node,
simplified()->StoreSignedSmallElement());
}
+ } else if (value_type->Is(Type::Number())) {
+ ProcessInput(node, 2, UseInfo::TruncatingFloat64()); // value
+ if (lower()) {
+ Handle<Map> double_map = DoubleMapParameterOf(node->op());
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->TransitionAndStoreNumberElement(double_map));
+ }
+ } else if (value_type->Is(Type::NonNumber())) {
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // value
+ if (lower()) {
+ Handle<Map> fast_map = FastMapParameterOf(node->op());
+ NodeProperties::ChangeOp(
+ node, simplified()->TransitionAndStoreNonNumberElement(
+ fast_map, value_type));
+ }
+ } else {
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // value
}
ProcessRemainingInputs(node, 3);
@@ -2618,13 +2649,33 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kNone);
return;
}
+ case IrOpcode::kConvertReceiver: {
+ Type* input_type = TypeOf(node->InputAt(0));
+ VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ // Try to optimize the {node} based on the input type.
+ if (input_type->Is(Type::Receiver())) {
+ DeferReplacement(node, node->InputAt(0));
+ } else if (input_type->Is(Type::NullOrUndefined())) {
+ DeferReplacement(node, node->InputAt(1));
+ } else if (!input_type->Maybe(Type::NullOrUndefined())) {
+ NodeProperties::ChangeOp(
+ node, lowering->simplified()->ConvertReceiver(
+ ConvertReceiverMode::kNotNullOrUndefined));
+ }
+ }
+ return;
+ }
case IrOpcode::kPlainPrimitiveToNumber: {
if (InputIs(node, Type::Boolean())) {
VisitUnop(node, UseInfo::Bool(), MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::String())) {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) lowering->DoStringToNumber(node);
+ if (lower()) {
+ NodeProperties::ChangeOp(node, simplified()->StringToNumber());
+ }
} else if (truncation.IsUsedAsWord32()) {
if (InputIs(node, Type::NumberOrOddball())) {
VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2679,6 +2730,10 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
}
+ case IrOpcode::kObjectIsBigInt: {
+ VisitObjectIs(node, Type::BigInt(), lowering);
+ return;
+ }
case IrOpcode::kObjectIsCallable: {
VisitObjectIs(node, Type::Callable(), lowering);
return;
@@ -2864,6 +2919,10 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kCheckEqualsSymbol:
+ case IrOpcode::kCheckEqualsInternalizedString:
+ return VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kNone);
case IrOpcode::kMapGuard:
// Eliminate MapGuard nodes here.
return VisitUnused(node);
@@ -2933,8 +2992,19 @@ class RepresentationSelector {
return SetOutput(node, MachineRepresentation::kTagged);
case IrOpcode::kFindOrderedHashMapEntry: {
- VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedSigned);
+ Type* const key_type = TypeOf(node->InputAt(1));
+ if (key_type->Is(Type::Signed32OrMinusZero())) {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ NodeProperties::ChangeOp(
+ node,
+ lowering->simplified()->FindOrderedHashMapEntryForInt32Key());
+ }
+ } else {
+ VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedSigned);
+ }
return;
}
@@ -2959,15 +3029,19 @@ class RepresentationSelector {
case IrOpcode::kOsrValue:
case IrOpcode::kArgumentsElementsState:
case IrOpcode::kArgumentsLengthState:
+ case IrOpcode::kUnreachable:
case IrOpcode::kRuntimeAbort:
// All JavaScript operators except JSToNumber have uniform handling.
#define OPCODE_CASE(name) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
- JS_OTHER_UNOP_LIST(OPCODE_CASE)
JS_OBJECT_OP_LIST(OPCODE_CASE)
JS_CONTEXT_OP_LIST(OPCODE_CASE)
JS_OTHER_OP_LIST(OPCODE_CASE)
#undef OPCODE_CASE
+ case IrOpcode::kJSBitwiseNot:
+ case IrOpcode::kJSDecrement:
+ case IrOpcode::kJSIncrement:
+ case IrOpcode::kJSNegate:
case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
@@ -2976,7 +3050,8 @@ class RepresentationSelector {
VisitInputs(node);
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
-
+ case IrOpcode::kDeadValue:
+ return SetOutput(node, MachineRepresentation::kNone);
default:
V8_Fatal(
__FILE__, __LINE__,
@@ -3106,9 +3181,10 @@ void SimplifiedLowering::LowerAllNodes() {
selector.Run(this);
}
-void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
+void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
Node* node, RepresentationSelector* selector) {
- DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+ DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
+ node->opcode() == IrOpcode::kJSToNumeric);
Node* value = node->InputAt(0);
Node* context = node->InputAt(1);
Node* frame_state = node->InputAt(2);
@@ -3131,12 +3207,16 @@ void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
Node* efalse0 = effect;
Node* vfalse0;
{
- vfalse0 = efalse0 = if_false0 =
- graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
- frame_state, efalse0, if_false0);
+ Operator const* op = node->opcode() == IrOpcode::kJSToNumber
+ ? ToNumberOperator()
+ : ToNumericOperator();
+ Node* code = node->opcode() == IrOpcode::kJSToNumber ? ToNumberCode()
+ : ToNumericCode();
+ vfalse0 = efalse0 = if_false0 = graph()->NewNode(
+ op, code, value, context, frame_state, efalse0, if_false0);
// Update potential {IfException} uses of {node} to point to the above
- // {ToNumber} stub call node instead.
+ // stub call node instead.
Node* on_exception = nullptr;
if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
NodeProperties::ReplaceControlInput(on_exception, vfalse0);
@@ -3196,9 +3276,10 @@ void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
selector->DeferReplacement(node, value);
}
-void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
+void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
Node* node, RepresentationSelector* selector) {
- DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+ DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
+ node->opcode() == IrOpcode::kJSToNumeric);
Node* value = node->InputAt(0);
Node* context = node->InputAt(1);
Node* frame_state = node->InputAt(2);
@@ -3218,12 +3299,16 @@ void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
Node* efalse0 = effect;
Node* vfalse0;
{
- vfalse0 = efalse0 = if_false0 =
- graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
- frame_state, efalse0, if_false0);
+ Operator const* op = node->opcode() == IrOpcode::kJSToNumber
+ ? ToNumberOperator()
+ : ToNumericOperator();
+ Node* code = node->opcode() == IrOpcode::kJSToNumber ? ToNumberCode()
+ : ToNumericCode();
+ vfalse0 = efalse0 = if_false0 = graph()->NewNode(
+ op, code, value, context, frame_state, efalse0, if_false0);
// Update potential {IfException} uses of {node} to point to the above
- // {ToNumber} stub call node instead.
+ // stub call node instead.
Node* on_exception = nullptr;
if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
NodeProperties::ReplaceControlInput(on_exception, vfalse0);
@@ -3622,20 +3707,6 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
ChangeToPureOp(node, op);
}
-void SimplifiedLowering::DoStringToNumber(Node* node) {
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStringToNumber);
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
- node->AppendInput(graph()->zone(), graph()->start());
- NodeProperties::ChangeOp(node, common()->Call(desc));
-}
-
void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
Node* const input = node->InputAt(0);
Node* const zero = jsgraph()->Int32Constant(0);
@@ -3737,6 +3808,14 @@ Node* SimplifiedLowering::ToNumberCode() {
return to_number_code_.get();
}
+Node* SimplifiedLowering::ToNumericCode() {
+ if (!to_numeric_code_.is_set()) {
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
+    to_numeric_code_.set(jsgraph()->HeapConstant(callable.code()));
+ }
+ return to_numeric_code_.get();
+}
+
Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
@@ -3749,6 +3828,18 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
return to_number_operator_.get();
}
+Operator const* SimplifiedLowering::ToNumericOperator() {
+ if (!to_numeric_operator_.is_set()) {
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kNoProperties);
+ to_numeric_operator_.set(common()->Call(desc));
+ }
+ return to_numeric_operator_.get();
+}
+
#undef TRACE
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 22fb77f8fa..eaa148ee04 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -30,12 +30,11 @@ class SimplifiedLowering final {
void DoMax(Node* node, Operator const* op, MachineRepresentation rep);
void DoMin(Node* node, Operator const* op, MachineRepresentation rep);
- void DoJSToNumberTruncatesToFloat64(Node* node,
- RepresentationSelector* selector);
- void DoJSToNumberTruncatesToWord32(Node* node,
- RepresentationSelector* selector);
+ void DoJSToNumberOrNumericTruncatesToFloat64(
+ Node* node, RepresentationSelector* selector);
+ void DoJSToNumberOrNumericTruncatesToWord32(Node* node,
+ RepresentationSelector* selector);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
- void DoStringToNumber(Node* node);
void DoIntegral32ToBit(Node* node);
void DoOrderedNumberToBit(Node* node);
void DoNumberToBit(Node* node);
@@ -49,7 +48,9 @@ class SimplifiedLowering final {
Zone* const zone_;
TypeCache const& type_cache_;
SetOncePointer<Node> to_number_code_;
+ SetOncePointer<Node> to_numeric_code_;
SetOncePointer<Operator const> to_number_operator_;
+ SetOncePointer<Operator const> to_numeric_operator_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
@@ -68,7 +69,9 @@ class SimplifiedLowering final {
Node* Uint32Mod(Node* const node);
Node* ToNumberCode();
+ Node* ToNumericCode();
Operator const* ToNumberOperator();
+ Operator const* ToNumericOperator();
friend class RepresentationSelector;
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 0d2333e126..04bbc7bba8 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -40,12 +40,6 @@ bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
lhs.machine_type == rhs.machine_type;
}
-
-bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs) {
- return !(lhs == rhs);
-}
-
-
size_t hash_value(FieldAccess const& access) {
// On purpose we don't include the write barrier kind here, as this method is
// really only relevant for eliminating loads and they don't care about the
@@ -60,7 +54,7 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
#ifdef OBJECT_PRINT
Handle<Name> name;
if (access.name.ToHandle(&name)) {
- name->Print(os);
+ name->NamePrint(os);
os << ", ";
}
Handle<Map> map;
@@ -92,12 +86,6 @@ bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
lhs.machine_type == rhs.machine_type;
}
-
-bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
- return !(lhs == rhs);
-}
-
-
size_t hash_value(ElementAccess const& access) {
// On purpose we don't include the write barrier kind here, as this method is
// really only relevant for eliminating loads and they don't care about the
@@ -114,7 +102,6 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
return os;
}
-
const FieldAccess& FieldAccessOf(const Operator* op) {
DCHECK_NOT_NULL(op);
DCHECK(op->opcode() == IrOpcode::kLoadField ||
@@ -136,6 +123,11 @@ ExternalArrayType ExternalArrayTypeOf(const Operator* op) {
return OpParameter<ExternalArrayType>(op);
}
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kConvertReceiver, op->opcode());
+ return OpParameter<ConvertReceiverMode>(op);
+}
+
size_t hash_value(CheckFloat64HoleMode mode) {
return static_cast<size_t>(mode);
}
@@ -187,27 +179,51 @@ std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
return os;
}
-bool operator==(CheckMapsParameters const& lhs,
- CheckMapsParameters const& rhs) {
- return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+MapsParameterInfo::MapsParameterInfo(ZoneHandleSet<Map> const& maps)
+ : maps_(maps), instance_type_(Nothing<InstanceType>()) {
+ DCHECK_LT(0, maps.size());
+ instance_type_ = Just(maps.at(0)->instance_type());
+ for (size_t i = 1; i < maps.size(); ++i) {
+ if (instance_type_.FromJust() != maps.at(i)->instance_type()) {
+ instance_type_ = Nothing<InstanceType>();
+ break;
+ }
+ }
}
-bool operator!=(CheckMapsParameters const& lhs,
- CheckMapsParameters const& rhs) {
+std::ostream& operator<<(std::ostream& os, MapsParameterInfo const& p) {
+ ZoneHandleSet<Map> const& maps = p.maps();
+ InstanceType instance_type;
+ if (p.instance_type().To(&instance_type)) {
+ os << ", " << instance_type;
+ }
+ for (size_t i = 0; i < maps.size(); ++i) {
+ os << ", " << Brief(*maps[i]);
+ }
+ return os;
+}
+
+bool operator==(MapsParameterInfo const& lhs, MapsParameterInfo const& rhs) {
+ return lhs.maps() == rhs.maps();
+}
+
+bool operator!=(MapsParameterInfo const& lhs, MapsParameterInfo const& rhs) {
return !(lhs == rhs);
}
+size_t hash_value(MapsParameterInfo const& p) { return hash_value(p.maps()); }
+
+bool operator==(CheckMapsParameters const& lhs,
+ CheckMapsParameters const& rhs) {
+ return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+}
+
size_t hash_value(CheckMapsParameters const& p) {
return base::hash_combine(p.flags(), p.maps());
}
std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
- ZoneHandleSet<Map> const& maps = p.maps();
- os << p.flags();
- for (size_t i = 0; i < maps.size(); ++i) {
- os << ", " << Brief(*maps[i]);
- }
- return os;
+ return os << p.flags() << p.maps_info();
}
CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
@@ -215,9 +231,14 @@ CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
return OpParameter<CheckMapsParameters>(op);
}
-ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const* op) {
+MapsParameterInfo const& CompareMapsParametersOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kCompareMaps, op->opcode());
- return OpParameter<ZoneHandleSet<Map>>(op);
+ return OpParameter<MapsParameterInfo>(op);
+}
+
+MapsParameterInfo const& MapGuardMapsOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kMapGuard, op->opcode());
+ return OpParameter<MapsParameterInfo>(op);
}
size_t hash_value(CheckTaggedInputMode mode) {
@@ -261,10 +282,6 @@ bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
lhs.target().address() == rhs.target().address();
}
-bool operator!=(ElementsTransition const& lhs, ElementsTransition const& rhs) {
- return !(lhs == rhs);
-}
-
size_t hash_value(ElementsTransition transition) {
return base::hash_combine(static_cast<uint8_t>(transition.mode()),
transition.source().address(),
@@ -327,14 +344,105 @@ std::ostream& operator<<(std::ostream& os,
} // namespace
+namespace {
+
+// Parameters for the TransitionAndStoreNonNumberElement opcode.
+class TransitionAndStoreNonNumberElementParameters final {
+ public:
+ TransitionAndStoreNonNumberElementParameters(Handle<Map> fast_map,
+ Type* value_type);
+
+ Handle<Map> fast_map() const { return fast_map_; }
+ Type* value_type() const { return value_type_; }
+
+ private:
+ Handle<Map> const fast_map_;
+ Type* value_type_;
+};
+
+TransitionAndStoreNonNumberElementParameters::
+ TransitionAndStoreNonNumberElementParameters(Handle<Map> fast_map,
+ Type* value_type)
+ : fast_map_(fast_map), value_type_(value_type) {}
+
+bool operator==(TransitionAndStoreNonNumberElementParameters const& lhs,
+ TransitionAndStoreNonNumberElementParameters const& rhs) {
+ return lhs.fast_map().address() == rhs.fast_map().address() &&
+ lhs.value_type() == rhs.value_type();
+}
+
+size_t hash_value(TransitionAndStoreNonNumberElementParameters parameters) {
+ return base::hash_combine(parameters.fast_map().address(),
+ parameters.value_type());
+}
+
+std::ostream& operator<<(
+ std::ostream& os, TransitionAndStoreNonNumberElementParameters parameters) {
+ parameters.value_type()->PrintTo(os);
+ return os << ", fast-map" << Brief(*parameters.fast_map());
+}
+
+} // namespace
+
+namespace {
+
+// Parameters for the TransitionAndStoreNumberElement opcode.
+class TransitionAndStoreNumberElementParameters final {
+ public:
+ explicit TransitionAndStoreNumberElementParameters(Handle<Map> double_map);
+
+ Handle<Map> double_map() const { return double_map_; }
+
+ private:
+ Handle<Map> const double_map_;
+};
+
+TransitionAndStoreNumberElementParameters::
+ TransitionAndStoreNumberElementParameters(Handle<Map> double_map)
+ : double_map_(double_map) {}
+
+bool operator==(TransitionAndStoreNumberElementParameters const& lhs,
+ TransitionAndStoreNumberElementParameters const& rhs) {
+ return lhs.double_map().address() == rhs.double_map().address();
+}
+
+size_t hash_value(TransitionAndStoreNumberElementParameters parameters) {
+ return base::hash_combine(parameters.double_map().address());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ TransitionAndStoreNumberElementParameters parameters) {
+ return os << "double-map" << Brief(*parameters.double_map());
+}
+
+} // namespace
+
Handle<Map> DoubleMapParameterOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kTransitionAndStoreElement, op->opcode());
- return OpParameter<TransitionAndStoreElementParameters>(op).double_map();
+ if (op->opcode() == IrOpcode::kTransitionAndStoreElement) {
+ return OpParameter<TransitionAndStoreElementParameters>(op).double_map();
+ } else if (op->opcode() == IrOpcode::kTransitionAndStoreNumberElement) {
+ return OpParameter<TransitionAndStoreNumberElementParameters>(op)
+ .double_map();
+ }
+ UNREACHABLE();
+ return Handle<Map>::null();
+}
+
+Type* ValueTypeParameterOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kTransitionAndStoreNonNumberElement, op->opcode());
+ return OpParameter<TransitionAndStoreNonNumberElementParameters>(op)
+ .value_type();
}
Handle<Map> FastMapParameterOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kTransitionAndStoreElement, op->opcode());
- return OpParameter<TransitionAndStoreElementParameters>(op).fast_map();
+ if (op->opcode() == IrOpcode::kTransitionAndStoreElement) {
+ return OpParameter<TransitionAndStoreElementParameters>(op).fast_map();
+ } else if (op->opcode() == IrOpcode::kTransitionAndStoreNonNumberElement) {
+ return OpParameter<TransitionAndStoreNonNumberElementParameters>(op)
+ .fast_map();
+ }
+ UNREACHABLE();
+ return Handle<Map>::null();
}
std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
@@ -392,16 +500,13 @@ bool operator==(AllocateParameters const& lhs, AllocateParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() && lhs.type() == rhs.type();
}
-bool operator!=(AllocateParameters const& lhs, AllocateParameters const& rhs) {
- return !(lhs == rhs);
-}
-
PretenureFlag PretenureFlagOf(const Operator* op) {
if (op->opcode() == IrOpcode::kNewDoubleElements ||
op->opcode() == IrOpcode::kNewSmiOrObjectElements) {
return OpParameter<PretenureFlag>(op);
}
- DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kAllocate ||
+ op->opcode() == IrOpcode::kAllocateRaw);
return OpParameter<AllocateParameters>(op).pretenure();
}
@@ -420,6 +525,11 @@ BailoutReason BailoutReasonOf(const Operator* op) {
return OpParameter<BailoutReason>(op);
}
+DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kCheckIf, op->opcode());
+ return OpParameter<DeoptimizeReason>(op);
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1, 0) \
V(NumberEqual, Operator::kCommutative, 2, 0) \
@@ -474,6 +584,7 @@ BailoutReason BailoutReasonOf(const Operator* op) {
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(StringToNumber, Operator::kNoProperties, 1, 0) \
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
@@ -481,6 +592,8 @@ BailoutReason BailoutReasonOf(const Operator* op) {
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(TypeOf, Operator::kNoProperties, 1, 1) \
+ V(ClassOf, Operator::kNoProperties, 1, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
@@ -500,6 +613,7 @@ BailoutReason BailoutReasonOf(const Operator* op) {
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
@@ -513,10 +627,13 @@ BailoutReason BailoutReasonOf(const Operator* op) {
V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(SameValue, Operator::kCommutative, 2, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
V(StringEqual, Operator::kCommutative, 2, 0) \
V(StringLessThan, Operator::kNoProperties, 2, 0) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0)
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -524,29 +641,30 @@ BailoutReason BailoutReasonOf(const Operator* op) {
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual)
-#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
- V(CheckHeapObject, 1, 1) \
- V(CheckIf, 1, 0) \
- V(CheckInternalizedString, 1, 1) \
- V(CheckNumber, 1, 1) \
- V(CheckReceiver, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
- V(CheckSeqString, 1, 1) \
- V(CheckSymbol, 1, 1) \
- V(CheckNotTaggedHole, 1, 1) \
- V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
- V(CheckedInt32Div, 2, 1) \
- V(CheckedInt32Mod, 2, 1) \
- V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
- V(CheckedInt32ToTaggedSigned, 1, 1) \
- V(CheckedTaggedSignedToInt32, 1, 1) \
- V(CheckedTaggedToTaggedSigned, 1, 1) \
+#define CHECKED_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckHeapObject, 1, 1) \
+ V(CheckInternalizedString, 1, 1) \
+ V(CheckNumber, 1, 1) \
+ V(CheckReceiver, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
+ V(CheckSeqString, 1, 1) \
+ V(CheckSymbol, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
+ V(CheckEqualsInternalizedString, 2, 0) \
+ V(CheckEqualsSymbol, 2, 0) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
+ V(CheckedUint32Mod, 2, 1) \
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedInt32ToTaggedSigned, 1, 1) \
+ V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedSigned, 1, 1) \
V(CheckedTaggedToTaggedPointer, 1, 1)
struct SimplifiedOperatorGlobalCache final {
@@ -571,6 +689,18 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_OP_LIST(CHECKED)
#undef CHECKED
+ template <DeoptimizeReason kDeoptimizeReason>
+ struct CheckIfOperator final : public Operator1<DeoptimizeReason> {
+ CheckIfOperator()
+ : Operator1<DeoptimizeReason>(
+ IrOpcode::kCheckIf, Operator::kFoldable | Operator::kNoThrow,
+ "CheckIf", 1, 1, 1, 0, 1, 0, kDeoptimizeReason) {}
+ };
+#define CHECK_IF(Name, message) \
+ CheckIfOperator<DeoptimizeReason::k##Name> kCheckIf##Name;
+ DEOPTIMIZE_REASON_LIST(CHECK_IF)
+#undef CHECK_IF
+
template <UnicodeEncoding kEncoding>
struct StringFromCodePointOperator final : public Operator1<UnicodeEncoding> {
StringFromCodePointOperator()
@@ -696,6 +826,23 @@ struct SimplifiedOperatorGlobalCache final {
CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumberOrOddball>
kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ template <ConvertReceiverMode kMode>
+ struct ConvertReceiverOperator final : public Operator1<ConvertReceiverMode> {
+ ConvertReceiverOperator()
+ : Operator1<ConvertReceiverMode>( // --
+ IrOpcode::kConvertReceiver, // opcode
+ Operator::kEliminatable, // flags
+ "ConvertReceiver", // name
+ 2, 1, 1, 1, 1, 0, // counts
+ kMode) {} // param
+ };
+ ConvertReceiverOperator<ConvertReceiverMode::kAny>
+ kConvertReceiverAnyOperator;
+ ConvertReceiverOperator<ConvertReceiverMode::kNullOrUndefined>
+ kConvertReceiverNullOrUndefinedOperator;
+ ConvertReceiverOperator<ConvertReceiverMode::kNotNullOrUndefined>
+ kConvertReceiverNotNullOrUndefinedOperator;
+
template <CheckFloat64HoleMode kMode>
struct CheckFloat64HoleNaNOperator final
: public Operator1<CheckFloat64HoleMode> {
@@ -796,6 +943,17 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(BailoutReason reason) {
reason); // parameter
}
+const Operator* SimplifiedOperatorBuilder::CheckIf(DeoptimizeReason reason) {
+ switch (reason) {
+#define CHECK_IF(Name, message) \
+ case DeoptimizeReason::k##Name: \
+ return &cache_.kCheckIf##Name;
+ DEOPTIMIZE_REASON_LIST(CHECK_IF)
+#undef CHECK_IF
+ }
+ UNREACHABLE();
+}
+
const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
CheckForMinusZeroMode mode) {
switch (mode) {
@@ -873,14 +1031,36 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
parameters); // parameter
}
+const Operator* SimplifiedOperatorBuilder::MapGuard(ZoneHandleSet<Map> maps) {
+ return new (zone()) Operator1<MapsParameterInfo>( // --
+ IrOpcode::kMapGuard, Operator::kEliminatable, // opcode
+ "MapGuard", // name
+ 1, 1, 1, 0, 1, 0, // counts
+ MapsParameterInfo(maps)); // parameter
+}
+
const Operator* SimplifiedOperatorBuilder::CompareMaps(
ZoneHandleSet<Map> maps) {
- return new (zone()) Operator1<ZoneHandleSet<Map>>( // --
- IrOpcode::kCompareMaps, // opcode
- Operator::kEliminatable, // flags
- "CompareMaps", // name
- 1, 1, 1, 1, 1, 0, // counts
- maps); // parameter
+ return new (zone()) Operator1<MapsParameterInfo>( // --
+ IrOpcode::kCompareMaps, // opcode
+ Operator::kEliminatable, // flags
+ "CompareMaps", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ MapsParameterInfo(maps)); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::ConvertReceiver(
+ ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kAny:
+ return &cache_.kConvertReceiverAnyOperator;
+ case ConvertReceiverMode::kNullOrUndefined:
+ return &cache_.kConvertReceiverNullOrUndefinedOperator;
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return &cache_.kConvertReceiverNotNullOrUndefinedOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
@@ -1018,6 +1198,14 @@ const Operator* SimplifiedOperatorBuilder::Allocate(Type* type,
1, 1, 1, 1, 1, 0, AllocateParameters(type, pretenure));
}
+const Operator* SimplifiedOperatorBuilder::AllocateRaw(
+ Type* type, PretenureFlag pretenure) {
+ return new (zone()) Operator1<AllocateParameters>(
+ IrOpcode::kAllocateRaw,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ "AllocateRaw", 1, 1, 1, 1, 1, 1, AllocateParameters(type, pretenure));
+}
+
const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
UnicodeEncoding encoding) {
switch (encoding) {
@@ -1084,6 +1272,29 @@ const Operator* SimplifiedOperatorBuilder::StoreSignedSmallElement() {
"StoreSignedSmallElement", 3, 1, 1, 0, 1, 0);
}
+const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNumberElement(
+ Handle<Map> double_map) {
+ TransitionAndStoreNumberElementParameters parameters(double_map);
+ return new (zone()) Operator1<TransitionAndStoreNumberElementParameters>(
+ IrOpcode::kTransitionAndStoreNumberElement,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "TransitionAndStoreNumberElement", 3, 1, 1, 0, 1, 0, parameters);
+}
+
+const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
+ Handle<Map> fast_map, Type* value_type) {
+ TransitionAndStoreNonNumberElementParameters parameters(fast_map, value_type);
+ return new (zone()) Operator1<TransitionAndStoreNonNumberElementParameters>(
+ IrOpcode::kTransitionAndStoreNonNumberElement,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "TransitionAndStoreNonNumberElement", 3, 1, 1, 0, 1, 0, parameters);
+}
+
+#undef PURE_OP_LIST
+#undef SPECULATIVE_NUMBER_BINOP_LIST
+#undef CHECKED_OP_LIST
+#undef ACCESS_OP_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 6d43bcac50..0ed46b0e7a 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -10,10 +10,12 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
+#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
+#include "src/type-hints.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -50,7 +52,6 @@ struct FieldAccess {
};
V8_EXPORT_PRIVATE bool operator==(FieldAccess const&, FieldAccess const&);
-bool operator!=(FieldAccess const&, FieldAccess const&);
size_t hash_value(FieldAccess const&);
@@ -77,7 +78,6 @@ struct ElementAccess {
};
V8_EXPORT_PRIVATE bool operator==(ElementAccess const&, ElementAccess const&);
-bool operator!=(ElementAccess const&, ElementAccess const&);
size_t hash_value(ElementAccess const&);
@@ -88,6 +88,9 @@ V8_EXPORT_PRIVATE ElementAccess const& ElementAccessOf(const Operator* op)
ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
+// The ConvertReceiverMode is used as parameter by ConvertReceiver operators.
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+
enum class CheckFloat64HoleMode : uint8_t {
kNeverReturnHole, // Never return the hole (deoptimize instead).
kAllowReturnHole // Allow to return the hole (signaling NaN).
@@ -133,22 +136,41 @@ DEFINE_OPERATORS_FOR_FLAGS(CheckMapsFlags)
std::ostream& operator<<(std::ostream&, CheckMapsFlags);
+class MapsParameterInfo {
+ public:
+ explicit MapsParameterInfo(ZoneHandleSet<Map> const& maps);
+
+ Maybe<InstanceType> instance_type() const { return instance_type_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_; }
+
+ private:
+ ZoneHandleSet<Map> const maps_;
+ Maybe<InstanceType> instance_type_;
+};
+
+std::ostream& operator<<(std::ostream&, MapsParameterInfo const&);
+
+bool operator==(MapsParameterInfo const&, MapsParameterInfo const&);
+bool operator!=(MapsParameterInfo const&, MapsParameterInfo const&);
+
+size_t hash_value(MapsParameterInfo const&);
+
// A descriptor for map checks.
class CheckMapsParameters final {
public:
CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
- : flags_(flags), maps_(maps) {}
+ : flags_(flags), maps_info_(maps) {}
CheckMapsFlags flags() const { return flags_; }
- ZoneHandleSet<Map> const& maps() const { return maps_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_info_.maps(); }
+ MapsParameterInfo const& maps_info() const { return maps_info_; }
private:
CheckMapsFlags const flags_;
- ZoneHandleSet<Map> const maps_;
+ MapsParameterInfo const maps_info_;
};
bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
-bool operator!=(CheckMapsParameters const&, CheckMapsParameters const&);
size_t hash_value(CheckMapsParameters const&);
@@ -157,8 +179,10 @@ std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
WARN_UNUSED_RESULT;
+MapsParameterInfo const& MapGuardMapsOf(Operator const*) WARN_UNUSED_RESULT;
+
// Parameters for CompareMaps operator.
-ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const*)
+MapsParameterInfo const& CompareMapsParametersOf(Operator const*)
WARN_UNUSED_RESULT;
// A descriptor for growing elements backing stores.
@@ -197,7 +221,6 @@ class ElementsTransition final {
};
bool operator==(ElementsTransition const&, ElementsTransition const&);
-bool operator!=(ElementsTransition const&, ElementsTransition const&);
size_t hash_value(ElementsTransition);
@@ -206,10 +229,15 @@ std::ostream& operator<<(std::ostream&, ElementsTransition);
ElementsTransition const& ElementsTransitionOf(const Operator* op)
WARN_UNUSED_RESULT;
-// Parameters for TransitionAndStoreElement.
+// Parameters for TransitionAndStoreElement, or
+// TransitionAndStoreNonNumberElement, or
+// TransitionAndStoreNumberElement.
Handle<Map> DoubleMapParameterOf(const Operator* op);
Handle<Map> FastMapParameterOf(const Operator* op);
+// Parameters for TransitionAndStoreNonNumberElement.
+Type* ValueTypeParameterOf(const Operator* op);
+
// A hint for speculative number operations.
enum class NumberOperationHint : uint8_t {
kSignedSmall, // Inputs were Smi, output was in Smi.
@@ -247,7 +275,6 @@ size_t hash_value(AllocateParameters);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
bool operator==(AllocateParameters const&, AllocateParameters const&);
-bool operator!=(AllocateParameters const&, AllocateParameters const&);
PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -257,6 +284,8 @@ UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
BailoutReason BailoutReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+DeoptimizeReason DeoptimizeReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
// indexing into objects and arrays, etc.
@@ -360,6 +389,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeNumberEqual(NumberOperationHint hint);
const Operator* ReferenceEqual();
+ const Operator* SameValue();
+
+ const Operator* TypeOf();
+ const Operator* ClassOf();
+
+ const Operator* ToBoolean();
const Operator* StringEqual();
const Operator* StringLessThan();
@@ -378,6 +413,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeToNumber(NumberOperationHint hint);
+ const Operator* StringToNumber();
const Operator* PlainPrimitiveToNumber();
const Operator* PlainPrimitiveToWord32();
const Operator* PlainPrimitiveToFloat64();
@@ -399,10 +435,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* CheckIf();
+ const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
const Operator* CheckBounds();
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
+ const Operator* MaskIndexWithBound();
const Operator* CompareMaps(ZoneHandleSet<Map>);
+ const Operator* MapGuard(ZoneHandleSet<Map> maps);
const Operator* CheckHeapObject();
const Operator* CheckInternalizedString();
@@ -431,11 +469,17 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedTaggedToTaggedPointer();
const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode);
+ const Operator* ConvertReceiver(ConvertReceiverMode);
+
const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
+ const Operator* CheckEqualsInternalizedString();
+ const Operator* CheckEqualsSymbol();
+
const Operator* ObjectIsArrayBufferView();
+ const Operator* ObjectIsBigInt();
const Operator* ObjectIsCallable();
const Operator* ObjectIsConstructor();
const Operator* ObjectIsDetectableCallable();
@@ -472,6 +516,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TransitionElementsKind(ElementsTransition transition);
const Operator* Allocate(Type* type, PretenureFlag pretenure = NOT_TENURED);
+ const Operator* AllocateRaw(Type* type,
+ PretenureFlag pretenure = NOT_TENURED);
const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
@@ -489,6 +535,13 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// store-element [base + index], smi value, only with fast arrays.
const Operator* StoreSignedSmallElement();
+ // store-element [base + index], double value, only with fast arrays.
+ const Operator* TransitionAndStoreNumberElement(Handle<Map> double_map);
+
+ // store-element [base + index], object value, only with fast arrays.
+ const Operator* TransitionAndStoreNonNumberElement(Handle<Map> fast_map,
+ Type* value_type);
+
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 8fb7b35020..1ed12d245b 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -79,7 +79,6 @@ struct UnobservableStore {
StoreOffset offset_;
bool operator==(const UnobservableStore) const;
- bool operator!=(const UnobservableStore) const;
bool operator<(const UnobservableStore) const;
};
@@ -140,7 +139,6 @@ class RedundantStoreFinder final {
void Visit(Node* node);
private:
- static bool IsEffectful(Node* node);
void VisitEffectfulNode(Node* node);
UnobservablesSet RecomputeUseIntersection(Node* node);
UnobservablesSet RecomputeSet(Node* node, UnobservablesSet uses);
@@ -251,10 +249,6 @@ void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
}
}
-bool RedundantStoreFinder::IsEffectful(Node* node) {
- return (node->op()->EffectInputCount() >= 1);
-}
-
// Recompute unobservables-set for a node. Will also mark superfluous nodes
// as to be removed.
@@ -552,9 +546,6 @@ bool UnobservableStore::operator==(const UnobservableStore other) const {
return (id_ == other.id_) && (offset_ == other.offset_);
}
-bool UnobservableStore::operator!=(const UnobservableStore other) const {
- return !(*this == other);
-}
bool UnobservableStore::operator<(const UnobservableStore other) const {
return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 5ac9072174..346aa47bfc 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -7,6 +7,7 @@
#include "src/compiler/types.h"
#include "src/date.h"
+#include "src/objects/code.h"
#include "src/objects/string.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index abb3a2bc50..49a4cdfdb3 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -42,7 +42,7 @@ Reduction TypedOptimization::Reduce(Node* node) {
// eager deoptimization exit (i.e. {node} has an operator that doesn't have
// the Operator::kNoDeopt property).
Type* upper = NodeProperties::GetType(node);
- if (upper->IsInhabited()) {
+ if (!upper->IsNone()) {
if (upper->IsHeapConstant()) {
Node* replacement =
jsgraph()->Constant(upper->AsHeapConstant()->Value());
@@ -73,6 +73,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
}
}
switch (node->opcode()) {
+ case IrOpcode::kConvertReceiver:
+ return ReduceConvertReceiver(node);
case IrOpcode::kCheckHeapObject:
return ReduceCheckHeapObject(node);
case IrOpcode::kCheckNotTaggedHole:
@@ -85,6 +87,10 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceCheckString(node);
case IrOpcode::kCheckSeqString:
return ReduceCheckSeqString(node);
+ case IrOpcode::kCheckEqualsInternalizedString:
+ return ReduceCheckEqualsInternalizedString(node);
+ case IrOpcode::kCheckEqualsSymbol:
+ return ReduceCheckEqualsSymbol(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kNumberCeil:
@@ -99,8 +105,14 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReducePhi(node);
case IrOpcode::kReferenceEqual:
return ReduceReferenceEqual(node);
+ case IrOpcode::kSameValue:
+ return ReduceSameValue(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kTypeOf:
+ return ReduceTypeOf(node);
+ case IrOpcode::kToBoolean:
+ return ReduceToBoolean(node);
case IrOpcode::kSpeculativeToNumber:
return ReduceSpeculativeToNumber(node);
default:
@@ -121,6 +133,20 @@ MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
} // namespace
+Reduction TypedOptimization::ReduceConvertReceiver(Node* node) {
+ Node* const value = NodeProperties::GetValueInput(node, 0);
+ Type* const value_type = NodeProperties::GetType(value);
+ Node* const global_proxy = NodeProperties::GetValueInput(node, 1);
+ if (value_type->Is(Type::Receiver())) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ } else if (value_type->Is(Type::NullOrUndefined())) {
+ ReplaceWithValue(node, global_proxy);
+ return Replace(global_proxy);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceCheckHeapObject(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type* const input_type = NodeProperties::GetType(input);
@@ -197,6 +223,28 @@ Reduction TypedOptimization::ReduceCheckSeqString(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceCheckEqualsInternalizedString(Node* node) {
+ Node* const exp = NodeProperties::GetValueInput(node, 0);
+ Type* const exp_type = NodeProperties::GetType(exp);
+ Node* const val = NodeProperties::GetValueInput(node, 1);
+ Type* const val_type = NodeProperties::GetType(val);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ if (val_type->Is(exp_type)) return Replace(effect);
+ // TODO(turbofan): Should we also try to optimize the
+ // non-internalized String case for {val} here?
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceCheckEqualsSymbol(Node* node) {
+ Node* const exp = NodeProperties::GetValueInput(node, 0);
+ Type* const exp_type = NodeProperties::GetType(exp);
+ Node* const val = NodeProperties::GetValueInput(node, 1);
+ Type* const val_type = NodeProperties::GetType(val);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ if (val_type->Is(exp_type)) return Replace(effect);
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceLoadField(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Type* const object_type = NodeProperties::GetType(object);
@@ -311,6 +359,52 @@ Reduction TypedOptimization::ReduceReferenceEqual(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceSameValue(Node* node) {
+ DCHECK_EQ(IrOpcode::kSameValue, node->opcode());
+ Node* const lhs = NodeProperties::GetValueInput(node, 0);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type* const lhs_type = NodeProperties::GetType(lhs);
+ Type* const rhs_type = NodeProperties::GetType(rhs);
+ if (lhs == rhs) {
+ // SameValue(x,x) => #true
+ return Replace(jsgraph()->TrueConstant());
+ } else if (lhs_type->Is(Type::Unique()) && rhs_type->Is(Type::Unique())) {
+ // SameValue(x:unique,y:unique) => ReferenceEqual(x,y)
+ NodeProperties::ChangeOp(node, simplified()->ReferenceEqual());
+ return Changed(node);
+ } else if (lhs_type->Is(Type::String()) && rhs_type->Is(Type::String())) {
+ // SameValue(x:string,y:string) => StringEqual(x,y)
+ NodeProperties::ChangeOp(node, simplified()->StringEqual());
+ return Changed(node);
+ } else if (lhs_type->Is(Type::MinusZero())) {
+ // SameValue(x:minus-zero,y) => ObjectIsMinusZero(y)
+ node->RemoveInput(0);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsMinusZero());
+ return Changed(node);
+ } else if (rhs_type->Is(Type::MinusZero())) {
+ // SameValue(x,y:minus-zero) => ObjectIsMinusZero(x)
+ node->RemoveInput(1);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsMinusZero());
+ return Changed(node);
+ } else if (lhs_type->Is(Type::NaN())) {
+ // SameValue(x:nan,y) => ObjectIsNaN(y)
+ node->RemoveInput(0);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsNaN());
+ return Changed(node);
+ } else if (rhs_type->Is(Type::NaN())) {
+ // SameValue(x,y:nan) => ObjectIsNaN(x)
+ node->RemoveInput(1);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsNaN());
+ return Changed(node);
+ } else if (lhs_type->Is(Type::PlainNumber()) &&
+ rhs_type->Is(Type::PlainNumber())) {
+ // SameValue(x:plain-number,y:plain-number) => NumberEqual(x,y)
+ NodeProperties::ChangeOp(node, simplified()->NumberEqual());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceSelect(Node* node) {
DCHECK_EQ(IrOpcode::kSelect, node->opcode());
Node* const condition = NodeProperties::GetValueInput(node, 0);
@@ -361,6 +455,80 @@ Reduction TypedOptimization::ReduceSpeculativeToNumber(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceTypeOf(Node* node) {
+ Node* const input = node->InputAt(0);
+ Type* const type = NodeProperties::GetType(input);
+ Factory* const f = factory();
+ if (type->Is(Type::Boolean())) {
+ return Replace(jsgraph()->Constant(f->boolean_string()));
+ } else if (type->Is(Type::Number())) {
+ return Replace(jsgraph()->Constant(f->number_string()));
+ } else if (type->Is(Type::String())) {
+ return Replace(jsgraph()->Constant(f->string_string()));
+ } else if (type->Is(Type::BigInt())) {
+ return Replace(jsgraph()->Constant(f->bigint_string()));
+ } else if (type->Is(Type::Symbol())) {
+ return Replace(jsgraph()->Constant(f->symbol_string()));
+ } else if (type->Is(Type::OtherUndetectableOrUndefined())) {
+ return Replace(jsgraph()->Constant(f->undefined_string()));
+ } else if (type->Is(Type::NonCallableOrNull())) {
+ return Replace(jsgraph()->Constant(f->object_string()));
+ } else if (type->Is(Type::Function())) {
+ return Replace(jsgraph()->Constant(f->function_string()));
+ } else if (type->IsHeapConstant()) {
+ return Replace(jsgraph()->Constant(
+ Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
+ }
+
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceToBoolean(Node* node) {
+ Node* const input = node->InputAt(0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Boolean())) {
+ // ToBoolean(x:boolean) => x
+ return Replace(input);
+ } else if (input_type->Is(Type::OrderedNumber())) {
+ // ToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
+ node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
+ jsgraph()->ZeroConstant()));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ } else if (input_type->Is(Type::Number())) {
+ // ToBoolean(x:number) => NumberToBoolean(x)
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
+ return Changed(node);
+ } else if (input_type->Is(Type::DetectableReceiverOrNull())) {
+ // ToBoolean(x:detectable receiver \/ null)
+ // => BooleanNot(ReferenceEqual(x,#null))
+ node->ReplaceInput(0, graph()->NewNode(simplified()->ReferenceEqual(),
+ input, jsgraph()->NullConstant()));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ } else if (input_type->Is(Type::ReceiverOrNullOrUndefined())) {
+ // ToBoolean(x:receiver \/ null \/ undefined)
+ // => BooleanNot(ObjectIsUndetectable(x))
+ node->ReplaceInput(
+ 0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ } else if (input_type->Is(Type::String())) {
+ // ToBoolean(x:string) => BooleanNot(ReferenceEqual(x,""))
+ node->ReplaceInput(0,
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->EmptyStringConstant()));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Factory* TypedOptimization::factory() const { return isolate()->factory(); }
Graph* TypedOptimization::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index e4fbe6e6f9..75de75b143 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -36,20 +36,26 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceConvertReceiver(Node* node);
Reduction ReduceCheckHeapObject(Node* node);
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckNumber(Node* node);
Reduction ReduceCheckString(Node* node);
Reduction ReduceCheckSeqString(Node* node);
+ Reduction ReduceCheckEqualsInternalizedString(Node* node);
+ Reduction ReduceCheckEqualsSymbol(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceNumberFloor(Node* node);
Reduction ReduceNumberRoundop(Node* node);
Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
Reduction ReduceReferenceEqual(Node* node);
+ Reduction ReduceSameValue(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceSpeculativeToNumber(Node* node);
Reduction ReduceCheckNotTaggedHole(Node* node);
+ Reduction ReduceTypeOf(Node* node);
+ Reduction ReduceToBoolean(Node* node);
CompilationDependencies* dependencies() const { return dependencies_; }
Factory* factory() const;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 2590342d2e..605a96c944 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -264,12 +264,18 @@ class Typer::Visitor : public Reducer {
static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
+ static Type* BitwiseNot(Type*, Typer*);
+ static Type* Decrement(Type*, Typer*);
+ static Type* Increment(Type*, Typer*);
+ static Type* Negate(Type*, Typer*);
+
static Type* ToPrimitive(Type*, Typer*);
static Type* ToBoolean(Type*, Typer*);
static Type* ToInteger(Type*, Typer*);
static Type* ToLength(Type*, Typer*);
static Type* ToName(Type*, Typer*);
static Type* ToNumber(Type*, Typer*);
+ static Type* ToNumeric(Type*, Typer*);
static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
#define DECLARE_METHOD(Name) \
@@ -288,6 +294,7 @@ class Typer::Visitor : public Reducer {
#undef DECLARE_METHOD
static Type* ObjectIsArrayBufferView(Type*, Typer*);
+ static Type* ObjectIsBigInt(Type*, Typer*);
static Type* ObjectIsCallable(Type*, Typer*);
static Type* ObjectIsConstructor(Type*, Typer*);
static Type* ObjectIsDetectableCallable(Type*, Typer*);
@@ -314,6 +321,7 @@ class Typer::Visitor : public Reducer {
static Type* NumberLessThanTyper(Type*, Type*, Typer*);
static Type* NumberLessThanOrEqualTyper(Type*, Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+ static Type* SameValueTyper(Type*, Type*, Typer*);
static Type* StringFromCharCodeTyper(Type*, Typer*);
static Type* StringFromCodePointTyper(Type*, Typer*);
@@ -388,15 +396,15 @@ void Typer::Decorator::Decorate(Node* node) {
Type* Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
Type* input = Operand(node, 0);
- return input->IsInhabited() ? f(input, typer_) : Type::None();
+ return input->IsNone() ? Type::None() : f(input, typer_);
}
Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Type* left = Operand(node, 0);
Type* right = Operand(node, 1);
- return left->IsInhabited() && right->IsInhabited() ? f(left, right, typer_)
- : Type::None();
+ return left->IsNone() || right->IsNone() ? Type::None()
+ : f(left, right, typer_);
}
@@ -421,6 +429,38 @@ Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
return t->singleton_true_;
}
+Type* Typer::Visitor::BitwiseNot(Type* type, Typer* t) {
+ type = ToNumeric(type, t);
+ if (type->Is(Type::Number())) {
+ return NumberBitwiseXor(type, t->cache_.kSingletonMinusOne, t);
+ }
+ return Type::Numeric();
+}
+
+Type* Typer::Visitor::Decrement(Type* type, Typer* t) {
+ type = ToNumeric(type, t);
+ if (type->Is(Type::Number())) {
+ return NumberSubtract(type, t->cache_.kSingletonOne, t);
+ }
+ return Type::Numeric();
+}
+
+Type* Typer::Visitor::Increment(Type* type, Typer* t) {
+ type = ToNumeric(type, t);
+ if (type->Is(Type::Number())) {
+ return NumberAdd(type, t->cache_.kSingletonOne, t);
+ }
+ return Type::Numeric();
+}
+
+Type* Typer::Visitor::Negate(Type* type, Typer* t) {
+ type = ToNumeric(type, t);
+ if (type->Is(Type::Number())) {
+ return NumberMultiply(type, t->cache_.kSingletonMinusOne, t);
+ }
+ return Type::Numeric();
+}
+
// Type conversion.
Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
@@ -460,6 +500,7 @@ Type* Typer::Visitor::ToInteger(Type* type, Typer* t) {
Type* Typer::Visitor::ToLength(Type* type, Typer* t) {
// ES6 section 7.1.15 ToLength ( argument )
type = ToInteger(type, t);
+ if (type->IsNone()) return type;
double min = type->Min();
double max = type->Max();
if (max <= 0.0) {
@@ -489,6 +530,10 @@ Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
return t->operation_typer_.ToNumber(type);
}
+// static
+Type* Typer::Visitor::ToNumeric(Type* type, Typer* t) {
+ return t->operation_typer_.ToNumeric(type);
+}
// static
Type* Typer::Visitor::ToObject(Type* type, Typer* t) {
@@ -518,6 +563,12 @@ Type* Typer::Visitor::ObjectIsArrayBufferView(Type* type, Typer* t) {
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsBigInt(Type* type, Typer* t) {
+ if (type->Is(Type::BigInt())) return t->singleton_true_;
+ if (!type->Maybe(Type::BigInt())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
if (type->Is(Type::Callable())) return t->singleton_true_;
if (!type->Maybe(Type::Callable())) return t->singleton_false_;
@@ -720,7 +771,7 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
}
// If we do not have enough type information for the initial value or
// the increment, just return the initial value's type.
- if (!initial_type->IsInhabited() ||
+ if (initial_type->IsNone() ||
increment_type->Is(typer_->cache_.kSingletonZero)) {
return initial_type;
}
@@ -754,7 +805,7 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
// If the type is not an integer, just skip the bound.
if (!bound_type->Is(typer_->cache_.kInteger)) continue;
// If the type is not inhabited, then we can take the initial value.
- if (!bound_type->IsInhabited()) {
+ if (bound_type->IsNone()) {
max = initial_type->Max();
break;
}
@@ -774,7 +825,7 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
// If the type is not an integer, just skip the bound.
if (!bound_type->Is(typer_->cache_.kInteger)) continue;
// If the type is not inhabited, then we can take the initial value.
- if (!bound_type->IsInhabited()) {
+ if (bound_type->IsNone()) {
min = initial_type->Min();
break;
}
@@ -894,6 +945,10 @@ Type* Typer::Visitor::TypeTypeGuard(Node* node) {
Type* Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
+Type* Typer::Visitor::TypeDeadValue(Node* node) { return Type::None(); }
+
+Type* Typer::Visitor::TypeUnreachable(Node* node) { UNREACHABLE(); }
+
// JS comparison operators.
@@ -914,38 +969,10 @@ Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
return Type::Boolean();
}
-
-static Type* JSType(Type* type) {
- if (type->Is(Type::Boolean())) return Type::Boolean();
- if (type->Is(Type::String())) return Type::String();
- if (type->Is(Type::Number())) return Type::Number();
- if (type->Is(Type::Undefined())) return Type::Undefined();
- if (type->Is(Type::Null())) return Type::Null();
- if (type->Is(Type::Symbol())) return Type::Symbol();
- if (type->Is(Type::Receiver())) return Type::Receiver(); // JS "Object"
- return Type::Any();
-}
-
-
Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- if (!JSType(lhs)->Maybe(JSType(rhs))) return t->singleton_false_;
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false_;
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
- (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
- return t->singleton_false_;
- }
- if ((lhs->Is(Type::Hole()) || rhs->Is(Type::Hole())) && !lhs->Maybe(rhs)) {
- return t->singleton_false_;
- }
- if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
- // Types are equal and are inhabited only by a single semantic value,
- // which is not nan due to the earlier check.
- return t->singleton_true_;
- }
- return Type::Boolean();
+ return t->operation_typer()->StrictEqual(lhs, rhs);
}
-
// The EcmaScript specification defines the four relational comparison operators
// (<, <=, >=, >) with the help of a single abstract one. It behaves like <
// but returns undefined when the inputs cannot be compared.
@@ -959,12 +986,22 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::JSCompareTyper(Type* lhs,
return ComparisonOutcome(kComparisonTrue) |
ComparisonOutcome(kComparisonFalse);
}
- return NumberCompareTyper(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberCompareTyper(lhs, rhs, t);
+ }
+ return ComparisonOutcome(kComparisonTrue) |
+ ComparisonOutcome(kComparisonFalse) |
+ ComparisonOutcome(kComparisonUndefined);
}
Typer::Visitor::ComparisonOutcome Typer::Visitor::NumberCompareTyper(Type* lhs,
Type* rhs,
Typer* t) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
// Shortcut for NaNs.
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return kComparisonUndefined;
@@ -1015,27 +1052,53 @@ Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberBitwiseOr(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberBitwiseOr(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberBitwiseAnd(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberBitwiseAnd(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberBitwiseXor(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberBitwiseXor(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberShiftLeft(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberShiftLeft(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberShiftRight(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberShiftRight(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
@@ -1053,45 +1116,88 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
if (lhs->Is(Type::String()) || rhs->Is(Type::String())) {
return Type::String();
} else {
- return Type::NumberOrString();
+ return Type::NumericOrString();
}
}
// The addition must be numeric.
- return NumberAdd(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberAdd(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberSubtract(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberSubtract(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberMultiply(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberMultiply(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberDivide(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberDivide(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberModulus(ToNumber(lhs, t), ToNumber(rhs, t), t);
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
+ return NumberModulus(lhs, rhs, t);
+ }
+ return Type::Numeric();
}
+Type* Typer::Visitor::JSExponentiateTyper(Type* lhs, Type* rhs, Typer* t) {
+ return Type::Numeric();
+}
// JS unary operators.
-Type* Typer::Visitor::TypeJSClassOf(Node* node) {
+Type* Typer::Visitor::TypeJSBitwiseNot(Node* node) {
+ return TypeUnaryOp(node, BitwiseNot);
+}
+
+Type* Typer::Visitor::TypeJSDecrement(Node* node) {
+ return TypeUnaryOp(node, Decrement);
+}
+
+Type* Typer::Visitor::TypeJSIncrement(Node* node) {
+ return TypeUnaryOp(node, Increment);
+}
+
+Type* Typer::Visitor::TypeJSNegate(Node* node) {
+ return TypeUnaryOp(node, Negate);
+}
+
+Type* Typer::Visitor::TypeClassOf(Node* node) {
return Type::InternalizedStringOrNull();
}
-Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
+Type* Typer::Visitor::TypeTypeOf(Node* node) {
return Type::InternalizedString();
}
// JS conversion operators.
-
-Type* Typer::Visitor::TypeJSToBoolean(Node* node) {
+Type* Typer::Visitor::TypeToBoolean(Node* node) {
return TypeUnaryOp(node, ToBoolean);
}
@@ -1111,6 +1217,10 @@ Type* Typer::Visitor::TypeJSToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
+Type* Typer::Visitor::TypeJSToNumeric(Node* node) {
+ return TypeUnaryOp(node, ToNumeric);
+}
+
Type* Typer::Visitor::TypeJSToObject(Node* node) {
return TypeUnaryOp(node, ToObject);
}
@@ -1138,6 +1248,10 @@ Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
Type* Typer::Visitor::TypeJSCreateArray(Node* node) { return Type::Array(); }
+Type* Typer::Visitor::TypeJSCreateBoundFunction(Node* node) {
+ return Type::BoundFunction();
+}
+
Type* Typer::Visitor::TypeJSCreateGeneratorObject(Node* node) {
return Type::OtherObject();
}
@@ -1356,10 +1470,6 @@ Type* Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
return Type::OtherInternal();
}
-Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
- return Type::OtherInternal();
-}
-
// JS other operators.
Type* Typer::Visitor::TypeJSConstructForwardVarargs(Node* node) {
@@ -1670,7 +1780,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return Type::Boolean();
case Runtime::kInlineCreateIterResultObject:
return Type::OtherObject();
- case Runtime::kInlineSubString:
case Runtime::kInlineStringCharFromCode:
return Type::String();
case Runtime::kInlineToInteger:
@@ -1697,10 +1806,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
}
-Type* Typer::Visitor::TypeJSConvertReceiver(Node* node) {
- return Type::Receiver();
-}
-
Type* Typer::Visitor::TypeJSForInEnumerate(Node* node) {
return Type::OtherInternal();
}
@@ -1795,6 +1900,10 @@ Type* Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
return TypeBinaryOp(node, NumberLessThanOrEqualTyper);
}
+Type* Typer::Visitor::TypeStringToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
+
Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
@@ -1820,6 +1929,15 @@ Type* Typer::Visitor::TypeReferenceEqual(Node* node) {
return TypeBinaryOp(node, ReferenceEqualTyper);
}
+// static
+Type* Typer::Visitor::SameValueTyper(Type* lhs, Type* rhs, Typer* t) {
+ return t->operation_typer()->SameValue(lhs, rhs);
+}
+
+Type* Typer::Visitor::TypeSameValue(Node* node) {
+ return TypeBinaryOp(node, SameValueTyper);
+}
+
Type* Typer::Visitor::TypeStringEqual(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeStringLessThan(Node* node) { return Type::Boolean(); }
@@ -1858,18 +1976,21 @@ Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromCodePointTyper);
}
-Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
- return Type::Range(-1.0, String::kMaxLength - 1.0, zone());
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) { UNREACHABLE(); }
+
+Type* Typer::Visitor::TypeMaskIndexWithBound(Node* node) {
+ return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
}
Type* Typer::Visitor::TypeCheckBounds(Node* node) {
Type* index = Operand(node, 0);
Type* length = Operand(node, 1);
+ DCHECK(length->Is(Type::Unsigned31()));
if (index->Maybe(Type::MinusZero())) {
index = Type::Union(index, typer_->cache_.kSingletonZero, zone());
}
index = Type::Intersect(index, Type::Integral32(), zone());
- if (!index->IsInhabited() || !length->IsInhabited()) return Type::None();
+ if (index->IsNone() || length->IsNone()) return Type::None();
double min = std::max(index->Min(), 0.0);
double max = std::min(index->Max(), length->Max() - 1);
if (max < min) return Type::None();
@@ -1935,15 +2056,28 @@ Type* Typer::Visitor::TypeCheckNotTaggedHole(Node* node) {
return type;
}
+Type* Typer::Visitor::TypeConvertReceiver(Node* node) {
+ Type* arg = Operand(node, 0);
+ return typer_->operation_typer_.ConvertReceiver(arg);
+}
+
Type* Typer::Visitor::TypeConvertTaggedHoleToUndefined(Node* node) {
Type* type = Operand(node, 0);
return typer_->operation_typer()->ConvertTaggedHoleToUndefined(type);
}
+Type* Typer::Visitor::TypeCheckEqualsInternalizedString(Node* node) {
+ UNREACHABLE();
+}
+
+Type* Typer::Visitor::TypeCheckEqualsSymbol(Node* node) { UNREACHABLE(); }
+
Type* Typer::Visitor::TypeAllocate(Node* node) {
return AllocateTypeOf(node->op());
}
+Type* Typer::Visitor::TypeAllocateRaw(Node* node) { UNREACHABLE(); }
+
Type* Typer::Visitor::TypeLoadFieldByIndex(Node* node) {
return Type::NonInternal();
}
@@ -1979,6 +2113,14 @@ Type* Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
UNREACHABLE();
}
+Type* Typer::Visitor::TypeTransitionAndStoreNumberElement(Node* node) {
+ UNREACHABLE();
+}
+
+Type* Typer::Visitor::TypeTransitionAndStoreNonNumberElement(Node* node) {
+ UNREACHABLE();
+}
+
Type* Typer::Visitor::TypeStoreSignedSmallElement(Node* node) { UNREACHABLE(); }
Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
@@ -1989,6 +2131,10 @@ Type* Typer::Visitor::TypeObjectIsArrayBufferView(Node* node) {
return TypeUnaryOp(node, ObjectIsArrayBufferView);
}
+Type* Typer::Visitor::TypeObjectIsBigInt(Node* node) {
+ return TypeUnaryOp(node, ObjectIsBigInt);
+}
+
Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
return TypeUnaryOp(node, ObjectIsCallable);
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 568c606b2c..1b6ca6b53f 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -68,34 +68,38 @@ bool Type::Contains(RangeType* range, i::Object* val) {
double Type::Min() {
DCHECK(this->Is(Number()));
+ DCHECK(!this->Is(NaN()));
if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
if (this->IsUnion()) {
double min = +V8_INFINITY;
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ for (int i = 1, n = this->AsUnion()->Length(); i < n; ++i) {
min = std::min(min, this->AsUnion()->Get(i)->Min());
}
+ Type* bitset = this->AsUnion()->Get(0);
+ if (!bitset->Is(NaN())) min = std::min(min, bitset->Min());
return min;
}
if (this->IsRange()) return this->AsRange()->Min();
- if (this->IsOtherNumberConstant())
- return this->AsOtherNumberConstant()->Value();
- UNREACHABLE();
+ DCHECK(this->IsOtherNumberConstant());
+ return this->AsOtherNumberConstant()->Value();
}
double Type::Max() {
DCHECK(this->Is(Number()));
+ DCHECK(!this->Is(NaN()));
if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
if (this->IsUnion()) {
double max = -V8_INFINITY;
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ for (int i = 1, n = this->AsUnion()->Length(); i < n; ++i) {
max = std::max(max, this->AsUnion()->Get(i)->Max());
}
+ Type* bitset = this->AsUnion()->Get(0);
+ if (!bitset->Is(NaN())) max = std::max(max, bitset->Max());
return max;
}
if (this->IsRange()) return this->AsRange()->Max();
- if (this->IsOtherNumberConstant())
- return this->AsOtherNumberConstant()->Value();
- UNREACHABLE();
+ DCHECK(this->IsOtherNumberConstant());
+ return this->AsOtherNumberConstant()->Value();
}
// -----------------------------------------------------------------------------
@@ -173,6 +177,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
return kInternalizedSeqString;
case SYMBOL_TYPE:
return kSymbol;
+ case BIGINT_TYPE:
+ return kBigInt;
case ODDBALL_TYPE: {
Heap* heap = map->GetHeap();
if (map == heap->undefined_map()) return kUndefined;
@@ -236,7 +242,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- case PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case WASM_MODULE_TYPE:
case WASM_INSTANCE_TYPE:
@@ -266,8 +271,9 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case FIXED_DOUBLE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
- case FEEDBACK_VECTOR_TYPE:
+ case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
+ case FEEDBACK_VECTOR_TYPE:
case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCRIPT_TYPE:
@@ -276,7 +282,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
case CELL_TYPE:
- case BIGINT_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -306,6 +311,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
+ case CODE_DATA_CONTAINER_TYPE:
UNREACHABLE();
}
UNREACHABLE();
@@ -394,6 +400,7 @@ Type::bitset BitsetType::Glb(double min, double max) {
double BitsetType::Min(bitset bits) {
DisallowHeapAllocation no_allocation;
DCHECK(Is(bits, kNumber));
+ DCHECK(!Is(bits, kNaN));
const Boundary* mins = Boundaries();
bool mz = bits & kMinusZero;
for (size_t i = 0; i < BoundariesSize(); ++i) {
@@ -401,13 +408,14 @@ double BitsetType::Min(bitset bits) {
return mz ? std::min(0.0, mins[i].min) : mins[i].min;
}
}
- if (mz) return 0;
- return std::numeric_limits<double>::quiet_NaN();
+ DCHECK(mz);
+ return 0;
}
double BitsetType::Max(bitset bits) {
DisallowHeapAllocation no_allocation;
DCHECK(Is(bits, kNumber));
+ DCHECK(!Is(bits, kNaN));
const Boundary* mins = Boundaries();
bool mz = bits & kMinusZero;
if (BitsetType::Is(mins[BoundariesSize() - 1].internal, bits)) {
@@ -418,8 +426,8 @@ double BitsetType::Max(bitset bits) {
return mz ? std::max(0.0, mins[i + 1].min - 1) : mins[i + 1].min - 1;
}
}
- if (mz) return 0;
- return std::numeric_limits<double>::quiet_NaN();
+ DCHECK(mz);
+ return 0;
}
// static
@@ -517,8 +525,7 @@ bool Type::SlowIs(Type* that) {
bool Type::Maybe(Type* that) {
DisallowHeapAllocation no_allocation;
- if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
- return false;
+ if (BitsetType::IsNone(this->BitsetLub() & that->BitsetLub())) return false;
// (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
if (this->IsUnion()) {
@@ -561,14 +568,14 @@ bool Type::Maybe(Type* that) {
return this->SimplyEquals(that);
}
-// Return the range in [this], or [NULL].
+// Return the range in [this], or [nullptr].
Type* Type::GetRange() {
DisallowHeapAllocation no_allocation;
if (this->IsRange()) return this;
if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
return this->AsUnion()->Get(1);
}
- return NULL;
+ return nullptr;
}
bool UnionType::Wellformed() {
@@ -715,9 +722,7 @@ int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
return size;
}
- if (!BitsetType::IsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
- return size;
- }
+ if (BitsetType::IsNone(lhs->BitsetLub() & rhs->BitsetLub())) return size;
if (lhs->IsRange()) {
if (rhs->IsBitset()) {
@@ -852,15 +857,15 @@ Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
Type* range = None();
Type* range1 = type1->GetRange();
Type* range2 = type2->GetRange();
- if (range1 != NULL && range2 != NULL) {
+ if (range1 != nullptr && range2 != nullptr) {
RangeType::Limits lims =
RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
RangeType::Limits(range2->AsRange()));
Type* union_range = RangeType::New(lims, zone);
range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
- } else if (range1 != NULL) {
+ } else if (range1 != nullptr) {
range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
- } else if (range2 != NULL) {
+ } else if (range2 != nullptr) {
range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
}
Type* bits = BitsetType::New(new_bitset);
@@ -938,7 +943,7 @@ const char* BitsetType::Name(bitset bits) {
#undef RETURN_NAMED_TYPE
default:
- return NULL;
+ return nullptr;
}
}
@@ -946,7 +951,7 @@ void BitsetType::Print(std::ostream& os, // NOLINT
bitset bits) {
DisallowHeapAllocation no_allocation;
const char* name = Name(bits);
- if (name != NULL) {
+ if (name != nullptr) {
os << name;
return;
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index aeb0f05e85..d791ec25c5 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -129,6 +129,7 @@ namespace compiler {
V(OtherInternal, 1u << 25) \
V(ExternalPointer, 1u << 26) \
V(Array, 1u << 27) \
+ V(BigInt, 1u << 28) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -148,6 +149,7 @@ namespace compiler {
V(OrderedNumber, kPlainNumber | kMinusZero) \
V(MinusZeroOrNaN, kMinusZero | kNaN) \
V(Number, kOrderedNumber | kNaN) \
+ V(Numeric, kNumber | kBigInt) \
V(InternalizedString, kInternalizedNonSeqString | \
kInternalizedSeqString) \
V(OtherString, kOtherNonSeqString | kOtherSeqString) \
@@ -169,10 +171,13 @@ namespace compiler {
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | \
kHole) \
V(NumberOrString, kNumber | kString) \
+ V(NumericOrString, kNumeric | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
+ V(NumberOrUndefinedOrNullOrBoolean, \
+ kNumber | kNullOrUndefined | kBoolean) \
V(PlainPrimitive, kNumberOrString | kBoolean | \
kNullOrUndefined) \
- V(Primitive, kSymbol | kPlainPrimitive) \
+ V(Primitive, kSymbol | kBigInt | kPlainPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
V(Proxy, kCallableProxy | kOtherProxy) \
V(ArrayOrOtherObject, kArray | kOtherObject) \
@@ -244,7 +249,7 @@ class V8_EXPORT_PRIVATE BitsetType {
return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
}
- static bool IsInhabited(bitset bits) { return bits != kNone; }
+ static bool IsNone(bitset bits) { return bits == kNone; }
static bool Is(bitset bits1, bitset bits2) {
return (bits1 | bits2) == bits2;
@@ -579,7 +584,7 @@ class V8_EXPORT_PRIVATE Type {
static Type* For(i::Handle<i::Map> map) { return For(*map); }
// Predicates.
- bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
+ bool IsNone() { return this == None(); }
bool Is(Type* that) { return this == that || this->SlowIs(that); }
bool Maybe(Type* that);
@@ -601,14 +606,14 @@ class V8_EXPORT_PRIVATE Type {
TupleType* AsTuple() { return TupleType::cast(this); }
// Minimum and maximum of a numeric type.
- // These functions do not distinguish between -0 and +0. If the type equals
- // kNaN, they return NaN; otherwise kNaN is ignored. Only call these
- // functions on subtypes of Number.
+ // These functions do not distinguish between -0 and +0. NaN is ignored.
+ // Only call them on subtypes of Number whose intersection with OrderedNumber
+ // is not empty.
double Min();
double Max();
// Extracts a range from the type: if the type is a range or a union
- // containing a range, that range is returned; otherwise, NULL is returned.
+ // containing a range, that range is returned; otherwise, nullptr is returned.
Type* GetRange();
static bool IsInteger(i::Object* x);
@@ -642,7 +647,6 @@ class V8_EXPORT_PRIVATE Type {
// Internal inspection.
bool IsKind(TypeBase::Kind kind) { return TypeBase::IsKind(this, kind); }
- bool IsNone() { return this == None(); }
bool IsAny() { return this == Any(); }
bool IsBitset() { return BitsetType::IsBitset(this); }
bool IsUnion() { return IsKind(TypeBase::kUnion); }
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 5869a0d491..e0c40df63b 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -32,14 +32,18 @@ namespace compiler {
class Verifier::Visitor {
public:
- Visitor(Zone* z, Typing typed, CheckInputs check_inputs)
- : zone(z), typing(typed), check_inputs(check_inputs) {}
+ Visitor(Zone* z, Typing typed, CheckInputs check_inputs, CodeType code_type)
+ : zone(z),
+ typing(typed),
+ check_inputs(check_inputs),
+ code_type(code_type) {}
- void Check(Node* node);
+ void Check(Node* node, const AllNodes& all);
Zone* zone;
Typing typing;
CheckInputs check_inputs;
+ CodeType code_type;
private:
void CheckNotTyped(Node* node) {
@@ -96,8 +100,7 @@ class Verifier::Visitor {
}
};
-
-void Verifier::Visitor::Check(Node* node) {
+void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
int value_count = node->op()->ValueInputCount();
int context_count = OperatorProperties::GetContextInputCount(node->op());
int frame_state_count =
@@ -112,6 +115,20 @@ void Verifier::Visitor::Check(Node* node) {
}
CHECK_EQ(input_count, node->InputCount());
+ // If this node has any effect outputs, make sure that it is
+ // consumed as an effect input somewhere else.
+ // TODO(mvstanton): support this kind of verification for WASM
+ // compiles, too.
+ if (code_type != kWasm && node->op()->EffectOutputCount() > 0) {
+ int effect_edges = 0;
+ for (Edge edge : node->use_edges()) {
+ if (all.IsLive(edge.from()) && NodeProperties::IsEffectEdge(edge)) {
+ effect_edges++;
+ }
+ }
+ DCHECK_GT(effect_edges, 0);
+ }
+
// Verify that frame state has been inserted for the nodes that need it.
for (int i = 0; i < frame_state_count; i++) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
@@ -218,13 +235,18 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kDead:
// Dead is never connected to the graph.
UNREACHABLE();
+ case IrOpcode::kDeadValue:
+ CheckTypeIs(node, Type::None());
+ break;
+ case IrOpcode::kUnreachable:
+ CheckNotTyped(node);
break;
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
int count_true = 0, count_false = 0;
for (const Node* use : node->uses()) {
- CHECK(use->opcode() == IrOpcode::kIfTrue ||
- use->opcode() == IrOpcode::kIfFalse);
+ CHECK(all.IsLive(use) && (use->opcode() == IrOpcode::kIfTrue ||
+ use->opcode() == IrOpcode::kIfFalse));
if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
if (use->opcode() == IrOpcode::kIfFalse) ++count_false;
}
@@ -263,6 +285,7 @@ void Verifier::Visitor::Check(Node* node) {
// Switch uses are Case and Default.
int count_case = 0, count_default = 0;
for (const Node* use : node->uses()) {
+ CHECK(all.IsLive(use));
switch (use->opcode()) {
case IrOpcode::kIfValue: {
for (const Node* user : node->uses()) {
@@ -298,7 +321,22 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
- case IrOpcode::kLoop:
+ case IrOpcode::kLoop: {
+ CHECK_EQ(control_count, input_count);
+ // Type is empty.
+ CheckNotTyped(node);
+ // All loops need to be connected to a {Terminate} node to ensure they
+ // stay connected to the graph end.
+ bool has_terminate = false;
+ for (const Node* use : node->uses()) {
+ if (all.IsLive(use) && use->opcode() == IrOpcode::kTerminate) {
+ has_terminate = true;
+ break;
+ }
+ }
+ CHECK(has_terminate);
+ break;
+ }
case IrOpcode::kMerge:
CHECK_EQ(control_count, input_count);
// Type is empty.
@@ -319,7 +357,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kThrow:
// Deoptimize, Return and Throw uses are End.
for (const Node* use : node->uses()) {
- CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ if (all.IsLive(use)) {
+ CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ }
}
// Type is empty.
CheckNotTyped(node);
@@ -333,7 +373,9 @@ void Verifier::Visitor::Check(Node* node) {
NodeProperties::GetControlInput(node)->opcode());
// Terminate uses are End.
for (const Node* use : node->uses()) {
- CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ if (all.IsLive(use)) {
+ CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ }
}
// Type is empty.
CheckNotTyped(node);
@@ -450,6 +492,18 @@ void Verifier::Visitor::Check(Node* node) {
Node* control = NodeProperties::GetControlInput(node, 0);
CHECK_EQ(effect_count, control->op()->ControlInputCount());
CHECK_EQ(input_count, 1 + effect_count);
+ // If the control input is a Merge, then make sure that at least one
+ // of it's usages is non-phi.
+ if (control->opcode() == IrOpcode::kMerge) {
+ bool non_phi_use_found = false;
+ for (Node* use : control->uses()) {
+ if (all.IsLive(use) && use->opcode() != IrOpcode::kEffectPhi &&
+ use->opcode() != IrOpcode::kPhi) {
+ non_phi_use_found = true;
+ }
+ }
+ CHECK(non_phi_use_found);
+ }
break;
}
case IrOpcode::kLoopExit: {
@@ -537,28 +591,28 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kJSAdd:
+ CheckTypeIs(node, Type::NumericOrString());
+ break;
case IrOpcode::kJSBitwiseOr:
case IrOpcode::kJSBitwiseXor:
case IrOpcode::kJSBitwiseAnd:
case IrOpcode::kJSShiftLeft:
case IrOpcode::kJSShiftRight:
case IrOpcode::kJSShiftRightLogical:
- // Type is 32 bit integral.
- CheckTypeIs(node, Type::Integral32());
- break;
- case IrOpcode::kJSAdd:
- // Type is Number or String.
- CheckTypeIs(node, Type::NumberOrString());
- break;
case IrOpcode::kJSSubtract:
case IrOpcode::kJSMultiply:
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
- // Type is Number.
- CheckTypeIs(node, Type::Number());
+ case IrOpcode::kJSExponentiate:
+ case IrOpcode::kJSBitwiseNot:
+ case IrOpcode::kJSDecrement:
+ case IrOpcode::kJSIncrement:
+ case IrOpcode::kJSNegate:
+ CheckTypeIs(node, Type::Numeric());
break;
- case IrOpcode::kJSToBoolean:
+ case IrOpcode::kToBoolean:
// Type is Boolean.
CheckTypeIs(node, Type::Boolean());
break;
@@ -577,6 +631,10 @@ void Verifier::Visitor::Check(Node* node) {
// Type is Number.
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kJSToNumeric:
+ // Type is Numeric.
+ CheckTypeIs(node, Type::Numeric());
+ break;
case IrOpcode::kJSToString:
// Type is String.
CheckTypeIs(node, Type::String());
@@ -598,6 +656,10 @@ void Verifier::Visitor::Check(Node* node) {
// Type is Array.
CheckTypeIs(node, Type::Array());
break;
+ case IrOpcode::kJSCreateBoundFunction:
+ // Type is BoundFunction.
+ CheckTypeIs(node, Type::BoundFunction());
+ break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
CheckTypeIs(node, Type::Function());
@@ -671,11 +733,11 @@ void Verifier::Visitor::Check(Node* node) {
// Type is Boolean.
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kJSClassOf:
+ case IrOpcode::kClassOf:
// Type is InternaliedString \/ Null.
CheckTypeIs(node, Type::InternalizedStringOrNull());
break;
- case IrOpcode::kJSTypeOf:
+ case IrOpcode::kTypeOf:
// Type is InternalizedString.
CheckTypeIs(node, Type::InternalizedString());
break;
@@ -698,8 +760,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSCreateFunctionContext:
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateWithContext:
- case IrOpcode::kJSCreateBlockContext:
- case IrOpcode::kJSCreateScriptContext: {
+ case IrOpcode::kJSCreateBlockContext: {
// Type is Context, and operand is Internal.
Node* context = NodeProperties::GetContextInput(node);
// TODO(bmeurer): This should say CheckTypeIs, but we don't have type
@@ -713,7 +774,6 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSConstruct:
case IrOpcode::kJSConstructWithArrayLike:
case IrOpcode::kJSConstructWithSpread:
- case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckTypeIs(node, Type::Receiver());
break;
@@ -958,6 +1018,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::String());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kStringToNumber:
+ // String -> Number
+ CheckValueInputIs(node, 0, Type::String());
+ CheckTypeIs(node, Type::Number());
+ break;
case IrOpcode::kStringCharAt:
// (String, Unsigned32) -> String
CheckValueInputIs(node, 0, Type::String());
@@ -1004,8 +1069,15 @@ void Verifier::Visitor::Check(Node* node) {
// (Any, Unique) -> Boolean
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kSameValue:
+ // (Any, Any) -> Boolean
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kObjectIsArrayBufferView:
+ case IrOpcode::kObjectIsBigInt:
case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsConstructor:
case IrOpcode::kObjectIsDetectableCallable:
@@ -1053,6 +1125,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
+ case IrOpcode::kAllocateRaw:
+ // CheckValueInputIs(node, 0, Type::PlainNumber());
+ break;
case IrOpcode::kEnsureWritableFastElements:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Internal());
@@ -1192,6 +1267,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Unsigned31());
CheckTypeIs(node, Type::Unsigned31());
break;
+ case IrOpcode::kMaskIndexWithBound:
+ CheckValueInputIs(node, 0, Type::Unsigned32());
+ CheckValueInputIs(node, 1, Type::Unsigned31());
+ CheckTypeIs(node, Type::Unsigned32());
+ break;
case IrOpcode::kCheckHeapObject:
CheckValueInputIs(node, 0, Type::Any());
break;
@@ -1233,6 +1313,14 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kCheckSymbol:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Symbol());
+ break;
+
+ case IrOpcode::kConvertReceiver:
+ // (Any, Any) -> Receiver
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Receiver());
+ break;
case IrOpcode::kCheckedInt32Add:
case IrOpcode::kCheckedInt32Sub:
@@ -1266,6 +1354,17 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::NonInternal());
break;
+ case IrOpcode::kCheckEqualsInternalizedString:
+ CheckValueInputIs(node, 0, Type::InternalizedString());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckNotTyped(node);
+ break;
+ case IrOpcode::kCheckEqualsSymbol:
+ CheckValueInputIs(node, 0, Type::Symbol());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckNotTyped(node);
+ break;
+
case IrOpcode::kLoadFieldByIndex:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::SignedSmall());
@@ -1302,8 +1401,13 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kTransitionAndStoreElement:
CheckNotTyped(node);
break;
+ case IrOpcode::kTransitionAndStoreNumberElement:
+ CheckNotTyped(node);
+ break;
+ case IrOpcode::kTransitionAndStoreNonNumberElement:
+ CheckNotTyped(node);
+ break;
case IrOpcode::kStoreSignedSmallElement:
- CheckValueInputIs(node, 1, Type::SignedSmall());
CheckNotTyped(node);
break;
case IrOpcode::kStoreTypedElement:
@@ -1508,13 +1612,14 @@ void Verifier::Visitor::Check(Node* node) {
}
} // NOLINT(readability/fn_size)
-void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
+void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs,
+ CodeType code_type) {
CHECK_NOT_NULL(graph->start());
CHECK_NOT_NULL(graph->end());
Zone zone(graph->zone()->allocator(), ZONE_NAME);
- Visitor visitor(&zone, typing, check_inputs);
+ Visitor visitor(&zone, typing, check_inputs, code_type);
AllNodes all(&zone, graph);
- for (Node* node : all.reachable) visitor.Check(node);
+ for (Node* node : all.reachable) visitor.Check(node, all);
// Check the uniqueness of projections.
for (Node* proj : all.reachable) {
@@ -1796,12 +1901,14 @@ void Verifier::VerifyNode(Node* node) {
}
}
}
+
// Frame state input should be a frame state (or sentinel).
if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
Node* input = NodeProperties::GetFrameStateInput(node);
DCHECK(input->opcode() == IrOpcode::kFrameState ||
input->opcode() == IrOpcode::kStart ||
- input->opcode() == IrOpcode::kDead);
+ input->opcode() == IrOpcode::kDead ||
+ input->opcode() == IrOpcode::kDeadValue);
}
// Effect inputs should be effect-producing nodes (or sentinels).
for (int i = 0; i < node->op()->EffectInputCount(); i++) {
@@ -1826,7 +1933,9 @@ void Verifier::VerifyEdgeInputReplacement(const Edge& edge,
DCHECK(!NodeProperties::IsEffectEdge(edge) ||
replacement->op()->EffectOutputCount() > 0);
DCHECK(!NodeProperties::IsFrameStateEdge(edge) ||
- replacement->opcode() == IrOpcode::kFrameState);
+ replacement->opcode() == IrOpcode::kFrameState ||
+ replacement->opcode() == IrOpcode::kDead ||
+ replacement->opcode() == IrOpcode::kDeadValue);
}
#endif // DEBUG
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index db0f4538b8..2cf851cadd 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -23,9 +23,11 @@ class Verifier {
public:
enum Typing { TYPED, UNTYPED };
enum CheckInputs { kValuesOnly, kAll };
+ enum CodeType { kDefault, kWasm };
static void Run(Graph* graph, Typing typing = TYPED,
- CheckInputs check_inputs = kAll);
+ CheckInputs check_inputs = kAll,
+ CodeType code_type = kDefault);
#ifdef DEBUG
// Verifies consistency of node inputs and uses:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index bc731b2bb8..a04c7b3e5d 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -13,6 +13,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/diamond.h"
@@ -31,6 +32,7 @@
#include "src/factory.h"
#include "src/isolate-inl.h"
#include "src/log-inl.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -82,6 +84,7 @@ WasmGraphBuilder::WasmGraphBuilder(
function_table_sizes_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
+ untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
runtime_exception_support_(exception_support),
sig_(sig),
source_position_table_(source_position_table) {
@@ -118,10 +121,6 @@ Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
return terminate;
}
-unsigned WasmGraphBuilder::InputCount(Node* node) {
- return static_cast<unsigned>(node->InputCount());
-}
-
bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
NodeProperties::GetControlInput(phi) == merge;
@@ -2363,15 +2362,21 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
-
- // Add code object as constant.
- Handle<Code> code = index < env_->function_code.size()
- ? env_->function_code[index]
- : env_->default_function_code;
-
- DCHECK(!code.is_null());
- args[0] = HeapConstant(code);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
+ if (FLAG_wasm_jit_to_native) {
+ // Simply encode the index of the target.
+ Address code = reinterpret_cast<Address>(index);
+ args[0] = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
+ } else {
+ // Add code object as constant.
+ Handle<Code> code = index < env_->function_code.size()
+ ? env_->function_code[index]
+ : env_->default_function_code;
+
+ DCHECK(!code.is_null());
+ args[0] = HeapConstant(code);
+ }
return BuildWasmCall(sig, args, rets, position);
}
@@ -2415,23 +2420,31 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Int32Constant(kPointerSizeLog2)),
Int32Constant(fixed_offset)),
*effect_, *control_);
- auto map = env_->signature_maps[table_index];
- Node* sig_match = graph()->NewNode(
- machine->WordEqual(), load_sig,
- jsgraph()->SmiConstant(static_cast<int>(map->FindOrInsert(sig))));
+ int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
+ CHECK_GE(sig_index, 0);
+ Node* sig_match =
+ graph()->NewNode(machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(canonical_sig_num));
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
- // Load code object from the table.
- Node* load_code = graph()->NewNode(
+ // Load code object from the table. It is held by a Foreign.
+ Node* entry = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(),
graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2)),
Uint32Constant(fixed_offset)),
*effect_, *control_);
-
- args[0] = load_code;
+ if (FLAG_wasm_jit_to_native) {
+ Node* address = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), entry,
+ Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
+ *effect_, *control_);
+ args[0] = address;
+ } else {
+ args[0] = entry;
+ }
return BuildWasmCall(sig, args, rets, position);
}
@@ -2765,7 +2778,7 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
-void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
+void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
Address wasm_context_address) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
const int count =
@@ -2791,18 +2804,28 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
reinterpret_cast<uintptr_t>(wasm_context_address),
RelocInfo::WASM_CONTEXT_REFERENCE);
+ Node* wasm_code_node = nullptr;
+ if (!wasm_code.IsCodeObject()) {
+ const wasm::WasmCode* code = wasm_code.GetWasmCode();
+ Address instr_start =
+ code == nullptr ? nullptr : code->instructions().start();
+ wasm_code_node = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(instr_start), RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ wasm_code_node = HeapConstant(wasm_code.GetCode());
+ }
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the js_context of the calling javascript function
// (passed as a parameter), such that the generated code is js_context
// independent.
- BuildCallToRuntimeWithContextFromJS(Runtime::kWasmThrowTypeError,
- js_context, nullptr, 0);
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ nullptr, 0);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
// the wasm function could not be re-imported into another wasm module.
int pos = 0;
- args[pos++] = HeapConstant(wasm_code);
+ args[pos++] = wasm_code_node;
args[pos++] = wasm_context_;
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2817,7 +2840,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
}
int pos = 0;
- args[pos++] = HeapConstant(wasm_code);
+ args[pos++] = wasm_code_node;
args[pos++] = wasm_context_;
// Convert JS parameters to wasm numbers.
@@ -3016,7 +3039,7 @@ bool HasInt64ParamOrReturn(wasm::FunctionSig* sig) {
}
} // namespace
-void WasmGraphBuilder::BuildWasmToWasmWrapper(Handle<Code> target,
+void WasmGraphBuilder::BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code,
Address new_context_address) {
int wasm_count = static_cast<int>(sig_->parameter_count());
int count = wasm_count + 4; // wasm_code, wasm_context, effect, and control.
@@ -3029,7 +3052,15 @@ void WasmGraphBuilder::BuildWasmToWasmWrapper(Handle<Code> target,
int pos = 0;
// Add the wasm code target.
- args[pos++] = jsgraph()->HeapConstant(target);
+ if (!wasm_code.IsCodeObject()) {
+ const wasm::WasmCode* code = wasm_code.GetWasmCode();
+ Address instr_start =
+ code == nullptr ? nullptr : code->instructions().start();
+ args[pos++] = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(instr_start), RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ args[pos++] = jsgraph()->HeapConstant(wasm_code.GetCode());
+ }
// Add the wasm_context of the other instance.
args[pos++] = jsgraph()->IntPtrConstant(
reinterpret_cast<uintptr_t>(new_context_address));
@@ -3041,16 +3072,14 @@ void WasmGraphBuilder::BuildWasmToWasmWrapper(Handle<Code> target,
args[pos++] = *effect_;
args[pos++] = *control_;
- // Call the wasm code.
+ // Tail-call the wasm code.
CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
- *effect_ = call;
- Node* retval = sig_->return_count() == 0 ? jsgraph()->Int32Constant(0) : call;
- Return(retval);
+ Node* tail_call =
+ graph()->NewNode(jsgraph()->common()->TailCall(desc), count, args);
+ MergeControlToEnd(jsgraph(), tail_call);
}
-void WasmGraphBuilder::BuildWasmInterpreterEntry(
- uint32_t function_index, Handle<WasmInstanceObject> instance) {
+void WasmGraphBuilder::BuildWasmInterpreterEntry(uint32_t func_index) {
int param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -3095,9 +3124,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
// like a Smi (lowest bit not set). In the runtime function however, don't
// call Smi::value on it, but just cast it to a byte pointer.
Node* parameters[] = {
- jsgraph()->HeapConstant(instance), // wasm instance
- jsgraph()->SmiConstant(function_index), // function index
- arg_buffer, // argument buffer
+ jsgraph()->SmiConstant(func_index), // function index
+ arg_buffer, // argument buffer
};
BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters,
arraysize(parameters));
@@ -3130,12 +3158,19 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
reinterpret_cast<uintptr_t>(wasm_context_address));
// Create parameter nodes (offset by 1 for the receiver parameter).
- Node* code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
+ Node* code_obj = nullptr;
+ if (FLAG_wasm_jit_to_native) {
+ Node* foreign_code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ code_obj = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), foreign_code_obj,
+ Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
+ *effect_, *control_);
+ } else {
+ code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
+ }
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
- // Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true);
-
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
int arg_count = wasm_arg_count + 4; // code, wasm_context, control, effect
Node** args = Buffer(arg_count);
@@ -3165,9 +3200,6 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
graph()->NewNode(jsgraph()->common()->Call(desc), arg_count, args);
*effect_ = call;
- // Clear the ThreadInWasmFlag
- BuildModifyThreadInWasmFlag(false);
-
// Store the return value.
DCHECK_GE(1, sig_->return_count());
if (sig_->return_count() == 1) {
@@ -3193,53 +3225,167 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
}
}
-// This function is used by WasmFullDecoder to create a node that loads the
-// mem_start variable from the WasmContext. It should not be used directly by
-// the WasmGraphBuilder. The WasmGraphBuilder should directly use mem_start_,
-// which will always contain the correct node (stored in the SsaEnv).
-Node* WasmGraphBuilder::LoadMemStart() {
+void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
DCHECK_NOT_NULL(wasm_context_);
- Node* mem_buffer = graph()->NewNode(
+ DCHECK_NOT_NULL(*control_);
+ DCHECK_NOT_NULL(*effect_);
+
+ // Load the memory start.
+ Node* mem_start = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_start))),
*effect_, *control_);
- *effect_ = mem_buffer;
- return mem_buffer;
-}
+ *effect_ = mem_start;
+ context_cache->mem_start = mem_start;
-// This function is used by WasmFullDecoder to create a node that loads the
-// mem_size variable from the WasmContext. It should not be used directly by
-// the WasmGraphBuilder. The WasmGraphBuilder should directly use mem_size_,
-// which will always contain the correct node (stored in the SsaEnv).
-Node* WasmGraphBuilder::LoadMemSize() {
- // Load mem_size from the memory_context location at runtime.
- DCHECK_NOT_NULL(wasm_context_);
+ // Load the memory size.
Node* mem_size = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_size))),
*effect_, *control_);
*effect_ = mem_size;
- if (jsgraph()->machine()->Is64()) {
- mem_size = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
- mem_size);
+ context_cache->mem_size = mem_size;
+
+ if (untrusted_code_mitigations_) {
+ // Load the memory mask.
+ Node* mem_mask = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
+ *effect_, *control_);
+ *effect_ = mem_mask;
+ context_cache->mem_mask = mem_mask;
+ } else {
+ // Explicitly set to nullptr to ensure a SEGV when we try to use it.
+ context_cache->mem_mask = nullptr;
+ }
+}
+
+void WasmGraphBuilder::PrepareContextCacheForLoop(
+ WasmContextCacheNodes* context_cache, Node* control) {
+#define INTRODUCE_PHI(field, rep) \
+ context_cache->field = Phi(rep, 1, &context_cache->field, control);
+
+ INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
+ INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
+ if (untrusted_code_mitigations_) {
+ INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
+ }
+
+#undef INTRODUCE_PHI
+}
+
+void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
+ WasmContextCacheNodes* from,
+ Node* merge) {
+#define INTRODUCE_PHI(field, rep) \
+ if (to->field != from->field) { \
+ Node* vals[] = {to->field, from->field}; \
+ to->field = Phi(rep, 2, vals, merge); \
+ }
+
+ INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
+ INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
+ if (untrusted_code_mitigations_) {
+ INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
+ }
+
+#undef INTRODUCE_PHI
+}
+
+void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
+ WasmContextCacheNodes* from,
+ Node* merge) {
+ to->mem_size = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
+ to->mem_size, from->mem_size);
+ to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
+ merge, to->mem_start, from->mem_start);
+ if (untrusted_code_mitigations_) {
+ to->mem_mask = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
+ to->mem_mask, from->mem_mask);
+ }
+}
+
+Node* WasmGraphBuilder::CreateOrMergeIntoPhi(wasm::ValueType type, Node* merge,
+ Node* tnode, Node* fnode) {
+ if (IsPhiWithMerge(tnode, merge)) {
+ AppendToPhi(tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = merge->InputCount();
+ Node** vals = Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
+ vals[count - 1] = fnode;
+ return Phi(type, count, vals, merge);
+ }
+ return tnode;
+}
+
+Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
+ Node* fnode) {
+ if (IsPhiWithMerge(tnode, merge)) {
+ AppendToPhi(tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = merge->InputCount();
+ Node** effects = Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ effects[j] = tnode;
+ }
+ effects[count - 1] = fnode;
+ tnode = EffectPhi(count, effects, merge);
+ }
+ return tnode;
+}
+
+void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
+ uint32_t offset, Node** base_node,
+ Node** offset_node) {
+ DCHECK_NOT_NULL(wasm_context_);
+ if (globals_start_ == nullptr) {
+ // Load globals_start from the WasmContext at runtime.
+ // TODO(wasm): we currently generate only one load of the {globals_start}
+ // start per graph, which means it can be placed anywhere by the scheduler.
+ // This is legal because the globals_start should never change.
+ // However, in some cases (e.g. if the WasmContext is already in a
+ // register), it is slightly more efficient to reload this value from the
+ // WasmContext. Since this depends on register allocation, it is not
+ // possible to express in the graph, and would essentially constitute a
+ // "mem2reg" optimization in TurboFan.
+ globals_start_ = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, globals_start))),
+ graph()->start(), graph()->start());
+ }
+ *base_node = globals_start_;
+ *offset_node = jsgraph()->Int32Constant(offset);
+
+ if (mem_type == MachineType::Simd128() && offset != 0) {
+ // TODO(titzer,bbudge): code generation for SIMD memory offsets is broken.
+ *base_node =
+ graph()->NewNode(kPointerSize == 4 ? jsgraph()->machine()->Int32Add()
+ : jsgraph()->machine()->Int64Add(),
+ *base_node, *offset_node);
+ *offset_node = jsgraph()->Int32Constant(0);
}
- return mem_size;
}
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
- DCHECK_NOT_NULL(*mem_start_);
- if (offset == 0) return *mem_start_;
- return graph()->NewNode(jsgraph()->machine()->IntAdd(), *mem_start_,
+ DCHECK_NOT_NULL(context_cache_);
+ Node* mem_start = context_cache_->mem_start;
+ DCHECK_NOT_NULL(mem_start);
+ if (offset == 0) return mem_start;
+ return graph()->NewNode(jsgraph()->machine()->IntAdd(), mem_start,
jsgraph()->IntPtrConstant(offset));
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
// CurrentMemoryPages can not be called from asm.js.
DCHECK_EQ(wasm::kWasmOrigin, env_->module->origin());
- DCHECK_NOT_NULL(*mem_size_);
- Node* mem_size = *mem_size_;
+ DCHECK_NOT_NULL(context_cache_);
+ Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(mem_size);
if (jsgraph()->machine()->Is64()) {
mem_size = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
mem_size);
@@ -3296,23 +3442,6 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
Node* js_context,
Node** parameters,
int parameter_count) {
- // We're leaving Wasm code, so clear the flag.
- *control_ = BuildModifyThreadInWasmFlag(false);
- // Since the thread-in-wasm flag is clear, it is as if we are calling from JS.
- Node* call = BuildCallToRuntimeWithContextFromJS(f, js_context, parameters,
- parameter_count);
-
- // Restore the thread-in-wasm flag, since we have returned to Wasm.
- *control_ = BuildModifyThreadInWasmFlag(true);
-
- return call;
-}
-
-// This version of BuildCallToRuntime does not clear and set the thread-in-wasm
-// flag.
-Node* WasmGraphBuilder::BuildCallToRuntimeWithContextFromJS(
- Runtime::FunctionId f, Node* js_context, Node* const* parameters,
- int parameter_count) {
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -3354,13 +3483,12 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
MachineType mem_type =
wasm::WasmOpcodes::MachineTypeFor(env_->module->globals[index].type);
- uintptr_t global_addr =
- env_->globals_start + env_->module->globals[index].offset;
- Node* addr = jsgraph()->RelocatableIntPtrConstant(
- global_addr, RelocInfo::WASM_GLOBAL_REFERENCE);
- const Operator* op = jsgraph()->machine()->Load(mem_type);
- Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
- *control_);
+ Node* base = nullptr;
+ Node* offset = nullptr;
+ GetGlobalBaseAndOffset(mem_type, env_->module->globals[index].offset, &base,
+ &offset);
+ Node* node = graph()->NewNode(jsgraph()->machine()->Load(mem_type), base,
+ offset, *effect_, *control_);
*effect_ = node;
return node;
}
@@ -3368,23 +3496,33 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
MachineType mem_type =
wasm::WasmOpcodes::MachineTypeFor(env_->module->globals[index].type);
- uintptr_t global_addr =
- env_->globals_start + env_->module->globals[index].offset;
- Node* addr = jsgraph()->RelocatableIntPtrConstant(
- global_addr, RelocInfo::WASM_GLOBAL_REFERENCE);
+ Node* base = nullptr;
+ Node* offset = nullptr;
+ GetGlobalBaseAndOffset(mem_type, env_->module->globals[index].offset, &base,
+ &offset);
const Operator* op = jsgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
- Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
- *effect_, *control_);
+ Node* node = graph()->NewNode(op, base, offset, val, *effect_, *control_);
*effect_ = node;
return node;
}
-void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
- uint32_t offset,
- wasm::WasmCodePosition position) {
- if (FLAG_wasm_no_bounds_checks) return;
- DCHECK_NOT_NULL(*mem_size_);
+Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+ uint32_t offset,
+ wasm::WasmCodePosition position,
+ EnforceBoundsCheck enforce_check) {
+ if (FLAG_wasm_no_bounds_checks) return index;
+ DCHECK_NOT_NULL(context_cache_);
+ Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(mem_size);
+
+ auto m = jsgraph()->machine();
+ if (trap_handler::UseTrapHandler() && enforce_check == kCanOmitBoundsCheck) {
+ // Simply zero out the 32-bits on 64-bit targets and let the trap handler
+ // do its job.
+ return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
+ : index;
+ }
uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
uint32_t max_size =
@@ -3398,7 +3536,7 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
position);
- return;
+ return jsgraph()->IntPtrConstant(0);
}
uint32_t end_offset = offset + access_size;
@@ -3406,48 +3544,44 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the actual memory size, which
// is not known at compile time.
- Node* cond;
- if (jsgraph()->machine()->Is32()) {
- cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(),
- jsgraph()->Int32Constant(end_offset), *mem_size_);
- } else {
- cond = graph()->NewNode(
- jsgraph()->machine()->Uint64LessThanOrEqual(),
- jsgraph()->Int64Constant(static_cast<int64_t>(end_offset)),
- *mem_size_);
- }
+ Node* cond =
+ graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(end_offset), mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is within the bounds of the smallest memory, so only
// one check is required. Check to see if the index is also a constant.
- UintPtrMatcher m(index);
- if (m.HasValue()) {
- uint64_t index_val = m.Value();
- if ((index_val + offset + access_size) <= min_size) {
+ Uint32Matcher match(index);
+ if (match.HasValue()) {
+ uint32_t index_val = match.Value();
+ if (index_val <= min_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
- return;
+ return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
+ : index;
}
}
}
- Node* effective_size;
- if (jsgraph()->machine()->Is32()) {
- effective_size =
- graph()->NewNode(jsgraph()->machine()->Int32Sub(), *mem_size_,
- jsgraph()->Int32Constant(end_offset - 1));
- } else {
- effective_size = graph()->NewNode(
- jsgraph()->machine()->Int64Sub(), *mem_size_,
- jsgraph()->Int64Constant(static_cast<int64_t>(end_offset - 1)));
- }
-
- const Operator* less = jsgraph()->machine()->Is32()
- ? jsgraph()->machine()->Uint32LessThan()
- : jsgraph()->machine()->Uint64LessThan();
+ // Compute the effective size of the memory, which is the size of the memory
+ // minus the statically known offset, minus the byte size of the access minus
+ // one.
+ // This produces a positive number since {end_offset <= min_size <= mem_size}.
+ Node* effective_size =
+ graph()->NewNode(jsgraph()->machine()->Int32Sub(), mem_size,
+ jsgraph()->Int32Constant(end_offset - 1));
- Node* cond = graph()->NewNode(less, index, effective_size);
+ // Introduce the actual bounds check.
+ Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+
+ if (untrusted_code_mitigations_) {
+ // In the fallthrough case, condition the index with the memory mask.
+ Node* mem_mask = context_cache_->mem_mask;
+ DCHECK_NOT_NULL(mem_mask);
+ index = graph()->NewNode(m->Word32And(), index, mem_mask);
+ }
+ return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index) : index;
}
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
@@ -3500,14 +3634,9 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
wasm::WasmCodePosition position) {
Node* load;
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
- // Wasm semantics throw on OOB. Introduce explicit bounds check.
- if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
- BoundsCheckMem(memtype, index, offset, position);
- }
+ // Wasm semantics throw on OOB. Introduce explicit bounds check and
+ // conditioning when not using the trap handler.
+ index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
@@ -3521,7 +3650,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
- DCHECK(!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED);
+ DCHECK(!trap_handler::UseTrapHandler());
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -3559,14 +3688,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
wasm::ValueType type) {
Node* store;
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
- // Wasm semantics throw on OOB. Introduce explicit bounds check.
- if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
- BoundsCheckMem(memtype, index, offset, position);
- }
+ index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, memtype, type);
@@ -3574,7 +3696,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedStoreSupported(memtype.representation())) {
- if (FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED) {
+ if (trap_handler::UseTrapHandler()) {
store = graph()->NewNode(
jsgraph()->machine()->ProtectedStore(memtype.representation()),
MemBuffer(offset), index, val, *effect_, *control_);
@@ -3587,7 +3709,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
- DCHECK(!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED);
+ DCHECK(!trap_handler::UseTrapHandler());
UnalignedStoreRepresentation rep(memtype.representation());
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
@@ -3604,37 +3726,106 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
return store;
}
+namespace {
+Node* GetAsmJsOOBValue(MachineRepresentation rep, JSGraph* jsgraph) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return jsgraph->Int32Constant(0);
+ case MachineRepresentation::kWord64:
+ return jsgraph->Int64Constant(0);
+ case MachineRepresentation::kFloat32:
+ return jsgraph->Float32Constant(std::numeric_limits<float>::quiet_NaN());
+ case MachineRepresentation::kFloat64:
+ return jsgraph->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ default:
+ UNREACHABLE();
+ }
+}
+} // namespace
+
Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
- // TODO(turbofan): fold bounds checks for constant asm.js loads.
- // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
- DCHECK_NOT_NULL(*mem_size_);
- DCHECK_NOT_NULL(*mem_start_);
+ DCHECK_NOT_NULL(context_cache_);
+ Node* mem_start = context_cache_->mem_start;
+ Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(mem_start);
+ DCHECK_NOT_NULL(mem_size);
+
+ // Asm.js semantics are defined along the lines of typed arrays, hence OOB
+ // reads return {undefined} coerced to the result type (0 for integers, NaN
+ // for float and double).
+ // Note that we check against the memory size ignoring the size of the
+ // stored value, which is conservative if misaligned. Technically, asm.js
+ // should never have misaligned accesses.
+ Diamond bounds_check(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index, mem_size),
+ BranchHint::kTrue);
+ bounds_check.Chain(*control_);
+
+ if (untrusted_code_mitigations_) {
+ // Condition the index with the memory mask.
+ Node* mem_mask = context_cache_->mem_mask;
+ DCHECK_NOT_NULL(mem_mask);
+ index =
+ graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
+ }
+
if (jsgraph()->machine()->Is64()) {
index =
graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
}
- const Operator* op = jsgraph()->machine()->CheckedLoad(type);
- Node* load =
- graph()->NewNode(op, *mem_start_, index, *mem_size_, *effect_, *control_);
- *effect_ = load;
- return load;
+ Node* load = graph()->NewNode(jsgraph()->machine()->Load(type), mem_start,
+ index, *effect_, bounds_check.if_true);
+ Node* value_phi =
+ bounds_check.Phi(type.representation(), load,
+ GetAsmJsOOBValue(type.representation(), jsgraph()));
+ Node* effect_phi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), load,
+ *effect_, bounds_check.merge);
+ *effect_ = effect_phi;
+ *control_ = bounds_check.merge;
+ return value_phi;
}
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
- // TODO(turbofan): fold bounds checks for constant asm.js stores.
- // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
- DCHECK_NOT_NULL(*mem_size_);
- DCHECK_NOT_NULL(*mem_start_);
+ DCHECK_NOT_NULL(context_cache_);
+ Node* mem_start = context_cache_->mem_start;
+ Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(mem_start);
+ DCHECK_NOT_NULL(mem_size);
+
+ // Asm.js semantics are to ignore OOB writes.
+ // Note that we check against the memory size ignoring the size of the
+ // stored value, which is conservative if misaligned. Technically, asm.js
+ // should never have misaligned accesses.
+ Diamond bounds_check(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index, mem_size),
+ BranchHint::kTrue);
+ bounds_check.Chain(*control_);
+
+ if (untrusted_code_mitigations_) {
+ // Condition the index with the memory mask.
+ Node* mem_mask = context_cache_->mem_mask;
+ DCHECK_NOT_NULL(mem_mask);
+ index =
+ graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
+ }
+
if (jsgraph()->machine()->Is64()) {
index =
graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
}
- const Operator* op =
- jsgraph()->machine()->CheckedStore(type.representation());
- Node* store = graph()->NewNode(op, *mem_start_, index, *mem_size_, val,
- *effect_, *control_);
- *effect_ = store;
+ const Operator* store_op = jsgraph()->machine()->Store(StoreRepresentation(
+ type.representation(), WriteBarrierKind::kNoWriteBarrier));
+ Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
+ bounds_check.if_true);
+ Node* effect_phi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), store,
+ *effect_, bounds_check.merge);
+ *effect_ = effect_phi;
+ *control_ = bounds_check.merge;
return val;
}
@@ -4111,47 +4302,50 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
+ position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
-#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), inputs[0], inputs[1], inputs[2], *effect_, \
- *control_); \
- break; \
+#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
+ position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
+ break; \
}
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
#undef BUILD_ATOMIC_TERNARY_OP
-#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
- case wasm::kExpr##Name: { \
- BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), inputs[0], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
+ position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, *effect_, *control_); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
+ position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
@@ -4196,7 +4390,7 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
} // namespace
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
- Handle<Code> wasm_code, uint32_t index,
+ WasmCodeWrapper wasm_code, uint32_t index,
Address wasm_context_address) {
const wasm::WasmFunction* func = &module->functions[index];
@@ -4206,20 +4400,25 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
// TODO(titzer): compile JS to WASM wrappers without a {ModuleEnv}.
- ModuleEnv env = {module,
- std::vector<Address>(), // function_tables
- std::vector<Address>(), // signature_tables
- std::vector<wasm::SignatureMap*>(), // signature_maps
- std::vector<Handle<Code>>(), // function_code
- BUILTIN_CODE(isolate, Illegal), // default_function_code
- 0};
+ ModuleEnv env = {
+ module,
+ std::vector<Address>(), // function_tables
+ std::vector<Address>(), // signature_tables
+ // TODO(mtrofin): remove these 2 lines when we don't need
+ // FLAG_wasm_jit_to_native
+ std::vector<Handle<Code>>(), // function_code
+ BUILTIN_CODE(isolate, Illegal) // default_function_code
+ };
WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), func->sig);
@@ -4250,8 +4449,9 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Vector<const char> func_name = CStrVector("js-to-wasm");
#endif
- CompilationInfo info(func_name, isolate, &zone, Code::JS_TO_WASM_FUNCTION);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
+ CompilationInfo info(func_name, &zone, Code::JS_TO_WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -4267,6 +4467,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
return code;
}
+namespace {
+
void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
#if !DEBUG
return;
@@ -4283,7 +4485,7 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
case RelocInfo::CODE_TARGET:
// this would be either one of the stubs or builtins, because
// we didn't link yet.
- target = reinterpret_cast<Object*>(it.rinfo()->target_address());
+ target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
break;
case RelocInfo::EMBEDDED_OBJECT:
target = it.rinfo()->target_object();
@@ -4294,10 +4496,19 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
CHECK_NOT_NULL(target);
bool is_immovable =
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
- CHECK(is_immovable);
+ bool is_allowed_stub = false;
+ if (target->IsCode()) {
+ Code* code = Code::cast(target);
+ is_allowed_stub =
+ code->kind() == Code::STUB &&
+ CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
+ }
+ CHECK(is_immovable || is_allowed_stub);
}
}
+} // namespace
+
Handle<Code> CompileWasmToJSWrapper(
Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
uint32_t index, wasm::ModuleOrigin origin,
@@ -4308,7 +4519,10 @@ Handle<Code> CompileWasmToJSWrapper(
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -4357,9 +4571,9 @@ Handle<Code> CompileWasmToJSWrapper(
Vector<const char> func_name = CStrVector("wasm-to-js");
#endif
- CompilationInfo info(func_name, isolate, &zone, Code::WASM_TO_JS_FUNCTION);
+ CompilationInfo info(func_name, &zone, Code::WASM_TO_JS_FUNCTION);
Handle<Code> code = Pipeline::GenerateCodeForTesting(
- &info, incoming, &graph, nullptr, source_position_table);
+ &info, isolate, incoming, &graph, nullptr, source_position_table);
ValidateImportWrapperReferencesImmovables(code);
Handle<FixedArray> deopt_data =
isolate->factory()->NewFixedArray(2, TENURED);
@@ -4386,8 +4600,8 @@ Handle<Code> CompileWasmToJSWrapper(
return code;
}
-Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
- wasm::FunctionSig* sig, uint32_t index,
+Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
+ wasm::FunctionSig* sig,
Address new_wasm_context_address) {
//----------------------------------------------------------------------------
// Create the Graph
@@ -4395,7 +4609,10 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -4433,8 +4650,9 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
}
- CompilationInfo info(func_name, isolate, &zone, Code::WASM_FUNCTION);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
+ CompilationInfo info(func_name, &zone, Code::WASM_TO_WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -4446,7 +4664,7 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
- "wasm-to-wasm#%d", index);
+ "wasm-to-wasm");
}
return code;
@@ -4474,7 +4692,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
CEntryStub(isolate, 1).GetCode(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildWasmInterpreterEntry(func_index, instance);
+ builder.BuildWasmInterpreterEntry(func_index);
Handle<Code> code = Handle<Code>::null();
{
@@ -4497,9 +4715,9 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Vector<const char> func_name = CStrVector("wasm-interpreter-entry");
#endif
- CompilationInfo info(func_name, isolate, &zone,
- Code::WASM_INTERPRETER_ENTRY);
- code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ CompilationInfo info(func_name, &zone, Code::WASM_INTERPRETER_ENTRY);
+ code = Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph,
+ nullptr);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -4513,11 +4731,13 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
}
}
- Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(1, TENURED);
- Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
- deopt_data->set(0, *weak_instance);
- code->set_deoptimization_data(*deopt_data);
-
+ if (!FLAG_wasm_jit_to_native) {
+ Handle<FixedArray> deopt_data =
+ isolate->factory()->NewFixedArray(1, TENURED);
+ Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+ deopt_data->set(0, *weak_instance);
+ code->set_deoptimization_data(*deopt_data);
+ }
return code;
}
@@ -4526,7 +4746,10 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -4566,8 +4789,9 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
debug_name[name_len] = '\0';
Vector<const char> debug_name_vec(debug_name, name_len);
- CompilationInfo info(debug_name_vec, isolate, &zone, Code::C_WASM_ENTRY);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
+ CompilationInfo info(debug_name_vec, &zone, Code::C_WASM_ENTRY);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -4585,7 +4809,6 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
size_t tables_size = env_->module->function_tables.size();
DCHECK_EQ(tables_size, env_->function_tables.size());
DCHECK_EQ(tables_size, env_->signature_tables.size());
- DCHECK_EQ(tables_size, env_->signature_maps.size());
}
#endif
@@ -4596,17 +4819,17 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
// Create a TF graph during decoding.
SourcePositionTable* source_position_table =
- new (jsgraph_->zone()) SourcePositionTable(jsgraph_->graph());
- WasmGraphBuilder builder(env_, jsgraph_->zone(), jsgraph_, centry_stub_,
- func_body_.sig, source_position_table,
+ new (tf_.jsgraph_->zone()) SourcePositionTable(tf_.jsgraph_->graph());
+ WasmGraphBuilder builder(env_, tf_.jsgraph_->zone(), tf_.jsgraph_,
+ centry_stub_, func_body_.sig, source_position_table,
runtime_exception_support_);
- graph_construction_result_ =
+ tf_.graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
- if (graph_construction_result_.failed()) {
+ if (tf_.graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
- os << "Compilation failed: " << graph_construction_result_.error_msg()
+ os << "Compilation failed: " << tf_.graph_construction_result_.error_msg()
<< std::endl;
}
return nullptr;
@@ -4616,7 +4839,7 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
if (builder.has_simd() &&
(!CpuFeatures::SupportsWasmSimd128() || lower_simd_)) {
- SimdScalarLowering(jsgraph_, func_body_.sig).LowerGraph();
+ SimdScalarLowering(tf_.jsgraph_, func_body_.sig).LowerGraph();
}
if (func_index_ >= FLAG_trace_wasm_ast_start &&
@@ -4647,13 +4870,21 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
memcpy(index_name, name_vector.start(), name_len);
return Vector<const char>(index_name, name_len);
}
+
} // namespace
+// static
+WasmCompilationUnit::CompilationMode
+WasmCompilationUnit::GetDefaultCompilationMode() {
+ return FLAG_liftoff ? WasmCompilationUnit::CompilationMode::kLiftoff
+ : WasmCompilationUnit::CompilationMode::kTurbofan;
+}
+
WasmCompilationUnit::WasmCompilationUnit(
- Isolate* isolate, ModuleEnv* env, wasm::FunctionBody body,
- wasm::WasmName name, int index, Handle<Code> centry_stub,
- Counters* counters, RuntimeExceptionSupport exception_support,
- bool lower_simd)
+ Isolate* isolate, ModuleEnv* env, wasm::NativeModule* native_module,
+ wasm::FunctionBody body, wasm::WasmName name, int index,
+ Handle<Code> centry_stub, CompilationMode mode, Counters* counters,
+ RuntimeExceptionSupport exception_support, bool lower_simd)
: isolate_(isolate),
env_(env),
func_body_(body),
@@ -4662,9 +4893,38 @@ WasmCompilationUnit::WasmCompilationUnit(
centry_stub_(centry_stub),
func_index_(index),
runtime_exception_support_(exception_support),
- lower_simd_(lower_simd) {}
+ native_module_(native_module),
+ lower_simd_(lower_simd),
+ protected_instructions_(
+ new std::vector<trap_handler::ProtectedInstructionData>()),
+ mode_(mode) {
+ switch (mode_) {
+ case WasmCompilationUnit::CompilationMode::kLiftoff:
+ new (&liftoff_) LiftoffData(isolate);
+ break;
+ case WasmCompilationUnit::CompilationMode::kTurbofan:
+ new (&tf_) TurbofanData();
+ break;
+ }
+}
+
+WasmCompilationUnit::~WasmCompilationUnit() {
+ switch (mode_) {
+ case WasmCompilationUnit::CompilationMode::kLiftoff:
+ liftoff_.~LiftoffData();
+ break;
+ case WasmCompilationUnit::CompilationMode::kTurbofan:
+ tf_.~TurbofanData();
+ break;
+ }
+}
void WasmCompilationUnit::ExecuteCompilation() {
+ auto size_histogram = env_->module->is_wasm()
+ ? counters()->wasm_wasm_function_size_bytes()
+ : counters()->wasm_asm_function_size_bytes();
+ size_histogram->AddSample(
+ static_cast<int>(func_body_.end - func_body_.start));
auto timed_histogram = env_->module->is_wasm()
? counters()->wasm_compile_wasm_function_time()
: counters()->wasm_compile_asm_function_time();
@@ -4679,13 +4939,28 @@ void WasmCompilationUnit::ExecuteCompilation() {
}
}
+ switch (mode_) {
+ case WasmCompilationUnit::CompilationMode::kLiftoff:
+ if (ExecuteLiftoffCompilation()) break;
+ // Otherwise, fall back to turbofan.
+ liftoff_.~LiftoffData();
+ mode_ = WasmCompilationUnit::CompilationMode::kTurbofan;
+ new (&tf_) TurbofanData();
+ // fall-through
+ case WasmCompilationUnit::CompilationMode::kTurbofan:
+ ExecuteTurbofanCompilation();
+ break;
+ }
+}
+
+void WasmCompilationUnit::ExecuteTurbofanCompilation() {
double decode_ms = 0;
size_t node_count = 0;
// Scope for the {graph_zone}.
{
Zone graph_zone(isolate_->allocator(), ZONE_NAME);
- jsgraph_ = new (&graph_zone) JSGraph(
+ tf_.jsgraph_ = new (&graph_zone) JSGraph(
isolate_, new (&graph_zone) Graph(&graph_zone),
new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
new (&graph_zone) MachineOperatorBuilder(
@@ -4695,39 +4970,37 @@ void WasmCompilationUnit::ExecuteCompilation() {
SourcePositionTable* source_positions =
BuildGraphForWasmFunction(&decode_ms);
- if (graph_construction_result_.failed()) {
+ if (tf_.graph_construction_result_.failed()) {
ok_ = false;
return;
}
base::ElapsedTimer pipeline_timer;
if (FLAG_trace_wasm_decode_time) {
- node_count = jsgraph_->graph()->NodeCount();
+ node_count = tf_.jsgraph_->graph()->NodeCount();
pipeline_timer.Start();
}
- compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
+ tf_.compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
// Run the compiler pipeline to generate machine code.
CallDescriptor* descriptor =
- GetWasmCallDescriptor(compilation_zone_.get(), func_body_.sig);
- if (jsgraph_->machine()->Is32()) {
+ GetWasmCallDescriptor(tf_.compilation_zone_.get(), func_body_.sig);
+ if (tf_.jsgraph_->machine()->Is32()) {
descriptor =
- GetI32WasmCallDescriptor(compilation_zone_.get(), descriptor);
+ GetI32WasmCallDescriptor(tf_.compilation_zone_.get(), descriptor);
}
- info_.reset(new CompilationInfo(
- GetDebugName(compilation_zone_.get(), func_name_, func_index_),
- isolate_, compilation_zone_.get(), Code::WASM_FUNCTION));
- ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
- compilation_zone_.get());
-
- job_.reset(Pipeline::NewWasmCompilationJob(
- info_.get(), jsgraph_, descriptor, source_positions,
- &protected_instructions, env_->module->origin()));
- ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
+ tf_.info_.reset(new CompilationInfo(
+ GetDebugName(tf_.compilation_zone_.get(), func_name_, func_index_),
+ tf_.compilation_zone_.get(), Code::WASM_FUNCTION));
+
+ tf_.job_.reset(Pipeline::NewWasmCompilationJob(
+ tf_.info_.get(), isolate_, tf_.jsgraph_, descriptor, source_positions,
+ protected_instructions_.get(), env_->module->origin()));
+ ok_ = tf_.job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
- static_cast<int>(jsgraph_->graph()->zone()->allocation_size()));
+ static_cast<int>(tf_.jsgraph_->graph()->zone()->allocation_size()));
if (FLAG_trace_wasm_decode_time) {
double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
@@ -4738,19 +5011,40 @@ void WasmCompilationUnit::ExecuteCompilation() {
node_count, pipeline_ms);
}
// The graph zone is about to get out of scope. Avoid invalid references.
- jsgraph_ = nullptr;
+ tf_.jsgraph_ = nullptr;
}
// Record the memory cost this unit places on the system until
// it is finalized.
- size_t cost = job_->AllocatedMemory();
- set_memory_cost(cost);
+ memory_cost_ = tf_.job_->AllocatedMemory();
+}
+
+// WasmCompilationUnit::ExecuteLiftoffCompilation() is defined in
+// liftoff-compiler.cc.
+
+WasmCodeWrapper WasmCompilationUnit::FinishCompilation(
+ wasm::ErrorThrower* thrower) {
+ WasmCodeWrapper ret;
+ switch (mode_) {
+ case WasmCompilationUnit::CompilationMode::kLiftoff:
+ ret = FinishLiftoffCompilation(thrower);
+ break;
+ case WasmCompilationUnit::CompilationMode::kTurbofan:
+ ret = FinishTurbofanCompilation(thrower);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (!ret.IsCodeObject() && ret.is_null()) {
+ thrower->RuntimeError("Error finalizing code.");
+ }
+ return ret;
}
-MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
+WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
wasm::ErrorThrower* thrower) {
if (!ok_) {
- if (graph_construction_result_.failed()) {
+ if (tf_.graph_construction_result_.failed()) {
// Add the function as another context for the exception.
EmbeddedVector<char, 128> message;
if (func_name_.start() == nullptr) {
@@ -4760,7 +5054,7 @@ MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
SNPrintF(message, "Compiling wasm function #%d:%.*s failed",
func_index_, trunc_name.length(), trunc_name.start());
}
- thrower->CompileFailed(message.start(), graph_construction_result_);
+ thrower->CompileFailed(message.start(), tf_.graph_construction_result_);
}
return {};
@@ -4769,41 +5063,137 @@ MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
if (FLAG_trace_wasm_decode_time) {
codegen_timer.Start();
}
- if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
- return Handle<Code>::null();
- }
- Handle<Code> code = info_->code();
- DCHECK(!code.is_null());
- if (must_record_function_compilation(isolate_)) {
- wasm::TruncatedUserString<> trunc_name(func_name_);
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
- "wasm_function#%d:%.*s", func_index_,
- trunc_name.length(), trunc_name.start());
+ if (tf_.job_->FinalizeJob(isolate_) != CompilationJob::SUCCEEDED) {
+ return {};
}
+ if (!FLAG_wasm_jit_to_native) {
+ Handle<Code> code = tf_.info_->code();
+ DCHECK(!code.is_null());
- if (FLAG_trace_wasm_decode_time) {
- double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
- static_cast<unsigned>(func_body_.end - func_body_.start),
- codegen_ms);
- }
+ if (FLAG_trace_wasm_decode_time) {
+ double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
+ static_cast<unsigned>(func_body_.end - func_body_.start),
+ codegen_ms);
+ }
- return code;
+ PackProtectedInstructions(code);
+ return WasmCodeWrapper(code);
+ } else {
+ // TODO(mtrofin): when we crystalize a design in lieu of WasmCodeDesc, that
+ // works for both wasm and non-wasm, we can simplify AddCode to just take
+ // that as a parameter.
+ const CodeDesc& desc =
+ tf_.job_->compilation_info()->wasm_code_desc()->code_desc;
+ wasm::WasmCode* code = native_module_->AddCode(
+ desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
+ func_index_,
+ tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
+ protected_instructions_);
+ if (!code) {
+ return WasmCodeWrapper(code);
+ }
+ // TODO(mtrofin): add CodeEventListener call - see the non-native case.
+ if (FLAG_trace_wasm_decode_time) {
+ double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
+ static_cast<unsigned>(func_body_.end - func_body_.start),
+ codegen_ms);
+ }
+
+ Handle<ByteArray> source_positions =
+ tf_.job_->compilation_info()->wasm_code_desc()->source_positions_table;
+ MaybeHandle<HandlerTable> handler_table =
+ tf_.job_->compilation_info()->wasm_code_desc()->handler_table;
+
+ int function_index_as_int = static_cast<int>(func_index_);
+ native_module_->compiled_module()->source_positions()->set(
+ function_index_as_int, *source_positions);
+ if (!handler_table.is_null()) {
+ native_module_->compiled_module()->handler_table()->set(
+ function_index_as_int, *handler_table.ToHandleChecked());
+ }
+ // TODO(mtrofin): this should probably move up in the common caller,
+ // once liftoff has source positions. Until then, we'd need to handle
+ // undefined values, which is complicating the code.
+ LOG_CODE_EVENT(isolate_,
+ CodeLinePosInfoRecordEvent(code->instructions().start(),
+ *source_positions));
+ return WasmCodeWrapper(code);
+ }
+}
+
+// TODO(mtrofin): remove when FLAG_wasm_jit_to_native is not needed
+void WasmCompilationUnit::PackProtectedInstructions(Handle<Code> code) const {
+ if (protected_instructions_->empty()) return;
+ DCHECK_LT(protected_instructions_->size(), std::numeric_limits<int>::max());
+ const int num_instructions =
+ static_cast<int>(protected_instructions_->size());
+ Handle<FixedArray> fn_protected = isolate_->factory()->NewFixedArray(
+ num_instructions * Code::kTrapDataSize, TENURED);
+ for (int i = 0; i < num_instructions; ++i) {
+ const trap_handler::ProtectedInstructionData& instruction =
+ protected_instructions_->at(i);
+ fn_protected->set(Code::kTrapDataSize * i + Code::kTrapCodeOffset,
+ Smi::FromInt(instruction.instr_offset));
+ fn_protected->set(Code::kTrapDataSize * i + Code::kTrapLandingOffset,
+ Smi::FromInt(instruction.landing_offset));
+ }
+ code->set_protected_instructions(*fn_protected);
+}
+
+WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
+ wasm::ErrorThrower* thrower) {
+ CodeDesc desc;
+ liftoff_.asm_.GetCode(isolate_, &desc);
+ WasmCodeWrapper ret;
+ if (!FLAG_wasm_jit_to_native) {
+ Handle<Code> code;
+ code = isolate_->factory()->NewCode(desc, Code::WASM_FUNCTION, code);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ OFStream os(stdout);
+ os << "--- Wasm liftoff code ---\n";
+ EmbeddedVector<char, 32> func_name;
+ func_name.Truncate(SNPrintF(func_name, "wasm#%d-liftoff", func_index_));
+ code->Disassemble(func_name.start(), os);
+ os << "--- End code ---\n";
+ }
+#endif
+ if (isolate_->logger()->is_logging_code_events() ||
+ isolate_->is_profiling()) {
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
+ "wasm#%d-liftoff", func_index_);
+ }
+
+ PackProtectedInstructions(code);
+ return WasmCodeWrapper(code);
+ } else {
+ // TODO(mtrofin): figure a way to raise events; also, disassembly.
+ // Consider lifting them both to FinishCompilation.
+ return WasmCodeWrapper(native_module_->AddCode(
+ desc, liftoff_.asm_.GetTotalFrameSlotCount(), func_index_,
+ liftoff_.asm_.GetSafepointTableOffset(), protected_instructions_,
+ true));
+ }
}
// static
-MaybeHandle<Code> WasmCompilationUnit::CompileWasmFunction(
- wasm::ErrorThrower* thrower, Isolate* isolate,
- const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
- const wasm::WasmFunction* function) {
+WasmCodeWrapper WasmCompilationUnit::CompileWasmFunction(
+ wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
+ Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
+ const wasm::WasmFunction* function, CompilationMode mode) {
wasm::FunctionBody function_body{
function->sig, function->code.offset(),
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
- WasmCompilationUnit unit(
- isolate, env, function_body, wire_bytes.GetNameOrNull(function),
- function->func_index, CEntryStub(isolate, 1).GetCode());
+
+ WasmCompilationUnit unit(isolate, env, native_module, function_body,
+ wire_bytes.GetNameOrNull(function),
+ function->func_index,
+ CEntryStub(isolate, 1).GetCode(), mode);
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 24c4a6a9f8..146f3044ca 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -12,6 +12,7 @@
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -36,6 +37,8 @@ class SignatureMap;
// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
typedef compiler::Node TFNode;
typedef compiler::JSGraph TFGraph;
+class NativeModule;
+class WasmCode;
} // namespace wasm
namespace compiler {
@@ -45,8 +48,7 @@ namespace compiler {
// which the compiled code should be specialized, including which code to call
// for direct calls {function_code}, which tables to use for indirect calls
// {function_tables}, memory start address and size {mem_start, mem_size},
-// globals start address {globals_start}, as well as signature maps
-// {signature_maps} and the module itself {module}.
+// as well as signature maps {signature_maps} and the module itself {module}.
// ModuleEnvs are shareable across multiple compilations.
struct ModuleEnv {
// A pointer to the decoded module's static representation.
@@ -60,18 +62,13 @@ struct ModuleEnv {
// (the same length as module.function_tables)
// We use the address to a global handle to the FixedArray.
const std::vector<Address> signature_tables;
- // Signature maps canonicalize {FunctionSig*} to indexes. New entries can be
- // added to a signature map during graph building.
- // Normally, these signature maps correspond to the signature maps in the
- // function tables stored in the {module}.
- const std::vector<wasm::SignatureMap*> signature_maps;
- // Contains the code objects to call for each indirect call.
+
+ // TODO(mtrofin): remove these 2 once we don't need FLAG_wasm_jit_to_native
+ // Contains the code objects to call for each direct call.
// (the same length as module.functions)
const std::vector<Handle<Code>> function_code;
// If the default code is not a null handle, always use it for direct calls.
const Handle<Code> default_function_code;
- // Address of the start of the globals region.
- const uintptr_t globals_start;
};
enum RuntimeExceptionSupport : bool {
@@ -81,55 +78,89 @@ enum RuntimeExceptionSupport : bool {
class WasmCompilationUnit final {
public:
+ enum class CompilationMode : uint8_t { kLiftoff, kTurbofan };
+ static CompilationMode GetDefaultCompilationMode();
+
// If constructing from a background thread, pass in a Counters*, and ensure
// that the Counters live at least as long as this compilation unit (which
// typically means to hold a std::shared_ptr<Counters>).
// If no such pointer is passed, Isolate::counters() will be called. This is
// only allowed to happen on the foreground thread.
- WasmCompilationUnit(Isolate*, ModuleEnv*, wasm::FunctionBody, wasm::WasmName,
- int index, Handle<Code> centry_stub, Counters* = nullptr,
+ WasmCompilationUnit(Isolate*, ModuleEnv*, wasm::NativeModule*,
+ wasm::FunctionBody, wasm::WasmName, int index,
+ Handle<Code> centry_stub,
+ CompilationMode = GetDefaultCompilationMode(),
+ Counters* = nullptr,
RuntimeExceptionSupport = kRuntimeExceptionSupport,
bool lower_simd = false);
+ ~WasmCompilationUnit();
+
int func_index() const { return func_index_; }
void ExecuteCompilation();
- MaybeHandle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
+ WasmCodeWrapper FinishCompilation(wasm::ErrorThrower* thrower);
- static MaybeHandle<Code> CompileWasmFunction(
- wasm::ErrorThrower* thrower, Isolate* isolate,
- const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
- const wasm::WasmFunction* function);
+ static WasmCodeWrapper CompileWasmFunction(
+ wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
+ Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
+ const wasm::WasmFunction* function,
+ CompilationMode = GetDefaultCompilationMode());
- void set_memory_cost(size_t memory_cost) { memory_cost_ = memory_cost; }
size_t memory_cost() const { return memory_cost_; }
private:
+ void PackProtectedInstructions(Handle<Code> code) const;
+
+ struct LiftoffData {
+ wasm::LiftoffAssembler asm_;
+ explicit LiftoffData(Isolate* isolate) : asm_(isolate) {}
+ };
+ struct TurbofanData {
+ // The graph zone is deallocated at the end of ExecuteCompilation by virtue
+ // of it being zone allocated.
+ JSGraph* jsgraph_ = nullptr;
+ // The compilation_zone_, info_, and job_ fields need to survive past
+ // ExecuteCompilation, onto FinishCompilation (which happens on the main
+ // thread).
+ std::unique_ptr<Zone> compilation_zone_;
+ std::unique_ptr<CompilationInfo> info_;
+ std::unique_ptr<CompilationJob> job_;
+ wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
+ };
+
+ // Turbofan.
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
- Counters* counters() { return counters_; }
+ void ExecuteTurbofanCompilation();
+ WasmCodeWrapper FinishTurbofanCompilation(wasm::ErrorThrower*);
+
+ // Liftoff.
+ bool ExecuteLiftoffCompilation();
+ WasmCodeWrapper FinishLiftoffCompilation(wasm::ErrorThrower*);
Isolate* isolate_;
ModuleEnv* env_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
Counters* counters_;
- // The graph zone is deallocated at the end of ExecuteCompilation by virtue of
- // it being zone allocated.
- JSGraph* jsgraph_ = nullptr;
- // the compilation_zone_, info_, and job_ fields need to survive past
- // ExecuteCompilation, onto FinishCompilation (which happens on the main
- // thread).
- std::unique_ptr<Zone> compilation_zone_;
- std::unique_ptr<CompilationInfo> info_;
- std::unique_ptr<CompilationJob> job_;
Handle<Code> centry_stub_;
int func_index_;
- wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
// See WasmGraphBuilder::runtime_exception_support_.
RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
size_t memory_cost_ = 0;
+ wasm::NativeModule* native_module_;
bool lower_simd_;
+ std::shared_ptr<std::vector<trap_handler::ProtectedInstructionData>>
+ protected_instructions_;
+ CompilationMode mode_;
+ // {liftoff_} is valid if mode_ == kLiftoff, tf_ if mode_ == kTurbofan.
+ union {
+ LiftoffData liftoff_;
+ TurbofanData tf_;
+ };
+
+ Counters* counters() { return counters_; }
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
@@ -144,14 +175,14 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
Handle<FixedArray> global_js_imports_table);
// Wraps a given wasm code object, producing a code object.
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
- Handle<Code> wasm_code, uint32_t index,
- Address wasm_context_address);
+V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::WasmModule* module, WasmCodeWrapper wasm_code,
+ uint32_t index, Address wasm_context_address);
// Wraps a wasm function, producing a code object that can be called from other
// wasm instances (the WasmContext address must be changed).
-Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
- wasm::FunctionSig* sig, uint32_t index,
+Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
+ wasm::FunctionSig* sig,
Address new_wasm_context_address);
// Compiles a stub that redirects a call to a wasm function to the wasm
@@ -173,14 +204,28 @@ enum CWasmEntryParameters {
Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
Address wasm_context_address);
+// Values from the {WasmContext} are cached between WASM-level function calls.
+// This struct allows the SSA environment handling this cache to be defined
+// and manipulated in wasm-compiler.{h,cc} instead of inside the WASM decoder.
+// (Note that currently, the globals base is immutable in a context, so not
+// cached here.)
+struct WasmContextCacheNodes {
+ Node* mem_start;
+ Node* mem_size;
+ Node* mem_mask;
+};
+
// Abstracts details of building TurboFan graph nodes for wasm to separate
// the wasm decoder from the internal details of TurboFan.
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
- WasmGraphBuilder(ModuleEnv*, Zone*, JSGraph*, Handle<Code> centry_stub_,
- wasm::FunctionSig*, compiler::SourcePositionTable* = nullptr,
- RuntimeExceptionSupport = kRuntimeExceptionSupport);
+ enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
+
+ WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
+ Handle<Code> centry_stub, wasm::FunctionSig* sig,
+ compiler::SourcePositionTable* spt = nullptr,
+ RuntimeExceptionSupport res = kRuntimeExceptionSupport);
Node** Buffer(size_t count) {
if (count > cur_bufsize_) {
@@ -202,6 +247,9 @@ class WasmGraphBuilder {
Node* Terminate(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
+ Node* CreateOrMergeIntoPhi(wasm::ValueType type, Node* merge, Node* tnode,
+ Node* fnode);
+ Node* CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, Node* fnode);
Node* EffectPhi(unsigned count, Node** effects, Node* control);
Node* NumberConstant(int32_t value);
Node* Uint32Constant(uint32_t value);
@@ -222,7 +270,6 @@ class WasmGraphBuilder {
Node* ConvertExceptionTagToRuntimeId(uint32_t tag);
Node* GetExceptionRuntimeId();
Node** GetExceptionValues(const wasm::WasmException* except_decl);
- unsigned InputCount(Node* node);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
void AppendToMerge(Node* merge, Node* from);
@@ -270,7 +317,7 @@ class WasmGraphBuilder {
Node* CallIndirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position);
- void BuildJSToWasmWrapper(Handle<Code> wasm_code,
+ void BuildJSToWasmWrapper(WasmCodeWrapper wasm_code_start,
Address wasm_context_address);
enum ImportDataType {
kFunction = 1,
@@ -283,10 +330,9 @@ class WasmGraphBuilder {
bool BuildWasmToJSWrapper(Handle<JSReceiver> target,
Handle<FixedArray> global_js_imports_table,
int index);
- void BuildWasmToWasmWrapper(Handle<Code> target,
+ void BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code_start,
Address new_wasm_context_address);
- void BuildWasmInterpreterEntry(uint32_t func_index,
- Handle<WasmInstanceObject> instance);
+ void BuildWasmInterpreterEntry(uint32_t func_index);
void BuildCWasmEntry(Address wasm_context_address);
Node* ToJS(Node* node, wasm::ValueType type);
@@ -321,12 +367,21 @@ class WasmGraphBuilder {
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
- Node* LoadMemSize();
- Node* LoadMemStart();
+ void GetGlobalBaseAndOffset(MachineType mem_type, uint32_t offset,
+ Node** base_node, Node** offset_node);
- void set_mem_size(Node** mem_size) { this->mem_size_ = mem_size; }
+ // Utilities to manipulate sets of context cache nodes.
+ void InitContextCache(WasmContextCacheNodes* context_cache);
+ void PrepareContextCacheForLoop(WasmContextCacheNodes* context_cache,
+ Node* control);
+ void NewContextCacheMerge(WasmContextCacheNodes* to,
+ WasmContextCacheNodes* from, Node* merge);
+ void MergeContextCacheInto(WasmContextCacheNodes* to,
+ WasmContextCacheNodes* from, Node* merge);
- void set_mem_start(Node** mem_start) { this->mem_start_ = mem_start; }
+ void set_context_cache(WasmContextCacheNodes* context_cache) {
+ this->context_cache_ = context_cache;
+ }
wasm::FunctionSig* GetFunctionSignature() { return sig_; }
@@ -371,13 +426,14 @@ class WasmGraphBuilder {
NodeVector function_table_sizes_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
- Node** mem_size_ = nullptr;
- Node** mem_start_ = nullptr;
+ WasmContextCacheNodes* context_cache_ = nullptr;
+ Node* globals_start_ = nullptr;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
bool needs_stack_check_ = false;
+ bool untrusted_code_mitigations_ = true;
// If the runtime doesn't support exception propagation,
// we won't generate stack checks, and trap handling will also
// be generated differently.
@@ -394,8 +450,9 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
- void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
- wasm::WasmCodePosition position);
+ // BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
+ Node* BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+ wasm::WasmCodePosition, EnforceBoundsCheck);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
Node* BuildChangeEndiannessStore(Node* node, MachineType type,
@@ -538,8 +595,8 @@ class WasmGraphBuilder {
// call descriptors. This is used by the Int64Lowering::LowerNode method.
constexpr int kWasmContextParameterIndex = 0;
-V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(Zone* zone,
- wasm::FunctionSig* sig);
+V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
+ Zone* zone, wasm::FunctionSig* signature);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, CallDescriptor* descriptor);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index ccebecb7d4..e231d15f10 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -47,7 +47,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ia32 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx
-#define GP_RETURN_REGISTERS eax, edx
+#define GP_RETURN_REGISTERS eax, edx, ecx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -56,7 +56,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == x64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi
-#define GP_RETURN_REGISTERS rax, rdx
+#define GP_RETURN_REGISTERS rax, rdx, rcx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -65,7 +65,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r3, r0, r1, r2
-#define GP_RETURN_REGISTERS r0, r1
+#define GP_RETURN_REGISTERS r0, r1, r3
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -74,7 +74,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6
-#define GP_RETURN_REGISTERS x0, x1
+#define GP_RETURN_REGISTERS x0, x1, x2
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -83,7 +83,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3
-#define GP_RETURN_REGISTERS v0, v1
+#define GP_RETURN_REGISTERS v0, v1, t7
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -92,7 +92,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips64 =================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
-#define GP_RETURN_REGISTERS v0, v1
+#define GP_RETURN_REGISTERS v0, v1, t3
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -101,7 +101,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ppc & ppc64 ============================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9
-#define GP_RETURN_REGISTERS r3, r4
+#define GP_RETURN_REGISTERS r3, r4, r5
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
@@ -110,7 +110,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390x ==================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3
+#define GP_RETURN_REGISTERS r2, r3, r4
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
@@ -119,7 +119,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3
+#define GP_RETURN_REGISTERS r2, r3, r4
#define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2
@@ -251,11 +251,17 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
const RegList kCalleeSaveFPRegisters = 0;
// The target for wasm calls is always a code object.
- MachineType target_type = MachineType::AnyTagged();
+ MachineType target_type = FLAG_wasm_jit_to_native ? MachineType::Pointer()
+ : MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
+ CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+ CallDescriptor::Kind kind = FLAG_wasm_jit_to_native
+ ? CallDescriptor::kCallWasmFunction
+ : CallDescriptor::kCallCodeObject;
+
return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
+ kind, // kind
target_type, // target MachineType
target_loc, // target location
locations.Build(), // location_sig
@@ -263,7 +269,7 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
compiler::Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
- CallDescriptor::kUseNativeStack, // flags
+ flags, // flags
"wasm-call");
}
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index f6cf21fa1c..ea417533f2 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -212,7 +212,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
- __ SlowTruncateToIDelayed(zone_, result_, rsp, 0);
+ __ SlowTruncateToIDelayed(zone_, result_);
__ addp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
@@ -255,14 +255,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-#ifdef V8_CSA_WRITE_BARRIER
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
-#else
- __ CallStubDelayed(
- new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode));
-#endif
}
private:
@@ -288,7 +282,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
// TODO(eholk): Refactor this method to take the code generator as a
// parameter.
void Generate() final {
- __ RecordProtectedInstructionLanding(pc_);
+ gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
if (frame_elided_) {
__ EnterFrame(StackFrame::WASM_COMPILED);
@@ -757,7 +751,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack);
+ scratch3);
__ bind(&done);
}
@@ -827,7 +821,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// jumps to CompileLazyDeoptimizedCode builtin. In order to do this we need to:
// 1. load the address of the current instruction;
// 2. read from memory the word that contains that bit, which can be found in
-// the first set of flags ({kKindSpecificFlags1Offset});
+// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -837,8 +831,9 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ leaq(rcx, Operand(&current));
__ bind(&current);
int pc = __ pc_offset();
- int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
- __ testl(Operand(rcx, offset),
+ int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
+ __ movp(rcx, Operand(rcx, offset));
+ __ testl(FieldOperand(rcx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
@@ -865,6 +860,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallWasmFunction: {
+ if (HasImmediateInput(instr, 0)) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+ if (info()->IsWasm()) {
+ __ near_call(wasm_code, RelocInfo::WASM_CALL);
+ } else {
+ __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
+ } else {
+ Register reg = i.InputRegister(0);
+ __ call(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
@@ -885,6 +897,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
+ case kArchTailCallWasm: {
+ if (HasImmediateInput(instr, 0)) {
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+ if (info()->IsWasm()) {
+ __ near_jmp(wasm_code, RelocInfo::WASM_CALL);
+ } else {
+ __ Move(kScratchRegister, wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ __ jmp(kScratchRegister);
+ }
+ } else {
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ }
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
@@ -3010,7 +3041,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
if (saves_fp != 0) {
frame->AlignSavedCalleeRegisterSlots();
if (saves_fp != 0) { // Save callee-saved XMM registers.
- const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
frame->AllocateSavedCalleeRegisterSlots(saves_fp_count *
(kQuadWordSize / kPointerSize));
}
@@ -3044,9 +3075,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
}
- if (!descriptor->IsJSFunctionCall()) {
- unwinding_info_writer_.MarkFrameConstructed(pc_base);
- }
+ unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
int shrink_slots =
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
@@ -3064,7 +3093,9 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
}
+ const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+
if (shrink_slots > 0) {
if (info()->IsWasm() && shrink_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
@@ -3096,11 +3127,17 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- __ subq(rsp, Immediate(shrink_slots * kPointerSize));
+
+ // Skip callee-saved slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= base::bits::CountPopulation(saves_fp);
+ if (shrink_slots > 0) {
+ __ subq(rsp, Immediate(shrink_slots * kPointerSize));
+ }
}
if (saves_fp != 0) { // Save callee-saved XMM registers.
- const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer.
__ subp(rsp, Immediate(stack_size));
@@ -3114,7 +3151,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
@@ -3136,7 +3172,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
- const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
// Load the registers from the stack.
int slot_idx = 0;
@@ -3229,7 +3265,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
- // TODO(dcarney): don't need scratch in this case.
int32_t value = src.ToInt32();
if (RelocInfo::IsWasmSizeReference(src.rmode())) {
__ movl(dst, Immediate(value, src.rmode()));
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index f826b22b09..04fec146de 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -1242,13 +1242,13 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
}
}
case IrOpcode::kLoad: {
- // The movzxbl/movsxbl/movzxwl/movsxwl operations implicitly zero-extend
- // to 64-bit on x64,
- // so the zero-extension is a no-op.
+ // The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
+ // zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
return true;
default:
return false;
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 05b46e0e0b..4a1deb00e2 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -152,7 +152,8 @@ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
CHECK_FOLLOWS2(v3, v4)
int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
- bool has_shared_name, bool needs_home_object) {
+ bool has_prototype_slot, bool has_shared_name,
+ bool needs_home_object) {
if (IsClassConstructor(kind)) {
// Like the strict function map, but with no 'name' accessor. 'name'
// needs to be the last property and it is added during instantiation,
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 21721828b2..bf55b391e7 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -62,7 +62,8 @@ bool Context::is_declaration_context() {
IsModuleContext()) {
return true;
}
- if (IsEvalContext()) return closure()->shared()->language_mode() == STRICT;
+ if (IsEvalContext())
+ return closure()->shared()->language_mode() == LanguageMode::kStrict;
if (!IsBlockContext()) return false;
Object* ext = extension();
// If we have the special extension, we immediately know it must be a
@@ -173,7 +174,7 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it) {
Isolate* isolate = it->isolate();
Maybe<bool> found = JSReceiver::HasProperty(it);
- if (!found.IsJust() || !found.FromJust()) return found;
+ if (found.IsNothing() || !found.FromJust()) return found;
Handle<Object> unscopables;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -294,7 +295,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
maybe = JSReceiver::GetPropertyAttributes(object, name);
}
- if (!maybe.IsJust()) return Handle<Object>();
+ if (maybe.IsNothing()) return Handle<Object>();
DCHECK(!isolate->has_pending_exception());
*attributes = maybe.FromJust();
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index f8278820d5..5f8eecb201 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -197,6 +197,7 @@ enum ContextLookupFlags {
V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
accessor_property_descriptor_map) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+ V(ALLOW_WASM_EVAL_INDEX, Object, allow_wasm_eval) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(ARRAY_BUFFER_NOINIT_FUN_INDEX, JSFunction, array_buffer_noinit_fun) \
@@ -266,6 +267,7 @@ enum ContextLookupFlags {
initial_async_generator_prototype) \
V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_STRING_PROTOTYPE_INDEX, JSObject, initial_string_prototype) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
@@ -351,7 +353,7 @@ enum ContextLookupFlags {
slow_object_with_null_prototype_map) \
V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \
slow_object_with_object_prototype_map) \
- V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, NumberDictionary, \
slow_template_instantiations_cache) \
/* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
/* must remain together. */ \
@@ -680,7 +682,8 @@ class Context: public FixedArray {
}
static inline int FunctionMapIndex(LanguageMode language_mode,
- FunctionKind kind, bool has_shared_name,
+ FunctionKind kind, bool has_prototype_slot,
+ bool has_shared_name,
bool needs_home_object);
static int ArrayMapIndex(ElementsKind elements_kind) {
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 096661c64a..21f90a50ae 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -178,6 +178,21 @@ int64_t NumberToInt64(Object* number) {
return static_cast<int64_t>(d);
}
+uint64_t PositiveNumberToUint64(Object* number) {
+ if (number->IsSmi()) {
+ int value = Smi::ToInt(number);
+ if (value <= 0) return 0;
+ return value;
+ }
+ DCHECK(number->IsHeapNumber());
+ double value = number->Number();
+ // Catch all values smaller than 1 and use the double-negation trick for NANs.
+ if (!(value >= 1)) return 0;
+ uint64_t max = std::numeric_limits<uint64_t>::max();
+ if (value < max) return static_cast<uint64_t>(value);
+ return max;
+}
+
bool TryNumberToSize(Object* number, size_t* result) {
// Do not create handles in this function! Don't use SealHandleScope because
// the function can be used concurrently.
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 848c4f4c2b..b9be0e097c 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -11,7 +11,6 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
-#include "src/codegen.h"
#include "src/dtoa.h"
#include "src/factory.h"
#include "src/handles.h"
@@ -171,7 +170,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
return static_cast<double>(number);
}
- DCHECK(number != 0);
+ DCHECK_NE(number, 0);
return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
}
@@ -184,6 +183,17 @@ class StringToIntHelper {
: isolate_(isolate), subject_(subject), radix_(radix) {
DCHECK(subject->IsFlat());
}
+
+ // Used for the StringToBigInt operation.
+ StringToIntHelper(Isolate* isolate, Handle<String> subject)
+ : isolate_(isolate), subject_(subject) {
+ DCHECK(subject->IsFlat());
+ }
+
+ // Used for parsing BigInt literals, where the input is a Zone-allocated
+ // buffer of one-byte digits, along with an optional radix prefix.
+ StringToIntHelper(Isolate* isolate, const uint8_t* subject, int length)
+ : isolate_(isolate), raw_one_byte_subject_(subject), length_(length) {}
virtual ~StringToIntHelper() {}
protected:
@@ -197,15 +207,40 @@ class StringToIntHelper {
// Subclasses may override this.
virtual void HandleSpecialCases() {}
+ // Subclass constructors should call these for configuration before calling
+ // ParseInt().
+ void set_allow_binary_and_octal_prefixes() {
+ allow_binary_and_octal_prefixes_ = true;
+ }
+ void set_disallow_trailing_junk() { allow_trailing_junk_ = false; }
+
+ bool IsOneByte() const {
+ return raw_one_byte_subject_ != nullptr ||
+ subject_->IsOneByteRepresentationUnderneath();
+ }
+
+ Vector<const uint8_t> GetOneByteVector() {
+ if (raw_one_byte_subject_ != nullptr) {
+ return Vector<const uint8_t>(raw_one_byte_subject_, length_);
+ }
+ return subject_->GetFlatContent().ToOneByteVector();
+ }
+
+ Vector<const uc16> GetTwoByteVector() {
+ return subject_->GetFlatContent().ToUC16Vector();
+ }
+
// Subclasses get access to internal state:
- enum State { kRunning, kError, kJunk, kZero, kDone };
+ enum State { kRunning, kError, kJunk, kEmpty, kZero, kDone };
+
+ enum class Sign { kNegative, kPositive, kNone };
Isolate* isolate() { return isolate_; }
- Handle<String> subject() { return subject_; }
int radix() { return radix_; }
int cursor() { return cursor_; }
int length() { return length_; }
- bool negative() { return negative_; }
+ bool negative() { return sign_ == Sign::kNegative; }
+ Sign sign() { return sign_; }
State state() { return state_; }
void set_state(State state) { state_ = state; }
@@ -217,23 +252,25 @@ class StringToIntHelper {
Isolate* isolate_;
Handle<String> subject_;
- int radix_;
+ const uint8_t* raw_one_byte_subject_ = nullptr;
+ int radix_ = 0;
int cursor_ = 0;
int length_ = 0;
- bool negative_ = false;
+ Sign sign_ = Sign::kNone;
bool leading_zero_ = false;
+ bool allow_binary_and_octal_prefixes_ = false;
+ bool allow_trailing_junk_ = true;
State state_ = kRunning;
};
void StringToIntHelper::ParseInt() {
{
DisallowHeapAllocation no_gc;
- String::FlatContent flat = subject_->GetFlatContent();
- if (flat.IsOneByte()) {
- Vector<const uint8_t> vector = flat.ToOneByteVector();
+ if (IsOneByte()) {
+ Vector<const uint8_t> vector = GetOneByteVector();
DetectRadixInternal(vector.start(), vector.length());
} else {
- Vector<const uc16> vector = flat.ToUC16Vector();
+ Vector<const uc16> vector = GetTwoByteVector();
DetectRadixInternal(vector.start(), vector.length());
}
}
@@ -243,18 +280,17 @@ void StringToIntHelper::ParseInt() {
if (state_ != kRunning) return;
{
DisallowHeapAllocation no_gc;
- String::FlatContent flat = subject_->GetFlatContent();
- if (flat.IsOneByte()) {
- Vector<const uint8_t> vector = flat.ToOneByteVector();
+ if (IsOneByte()) {
+ Vector<const uint8_t> vector = GetOneByteVector();
DCHECK_EQ(length_, vector.length());
ParseInternal(vector.start());
} else {
- Vector<const uc16> vector = flat.ToUC16Vector();
+ Vector<const uc16> vector = GetTwoByteVector();
DCHECK_EQ(length_, vector.length());
ParseInternal(vector.start());
}
}
- DCHECK(state_ != kRunning);
+ DCHECK_NE(state_, kRunning);
}
template <class Char>
@@ -265,7 +301,7 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
UnicodeCache* unicode_cache = isolate_->unicode_cache();
if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return set_state(kJunk);
+ return set_state(kEmpty);
}
if (*current == '+') {
@@ -274,12 +310,13 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
if (current == end) {
return set_state(kJunk);
}
+ sign_ = Sign::kPositive;
} else if (*current == '-') {
++current;
if (current == end) {
return set_state(kJunk);
}
- negative_ = true;
+ sign_ = Sign::kNegative;
}
if (radix_ == 0) {
@@ -292,6 +329,16 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
radix_ = 16;
++current;
if (current == end) return set_state(kJunk);
+ } else if (allow_binary_and_octal_prefixes_ &&
+ (*current == 'o' || *current == 'O')) {
+ radix_ = 8;
+ ++current;
+ DCHECK(current != end);
+ } else if (allow_binary_and_octal_prefixes_ &&
+ (*current == 'b' || *current == 'B')) {
+ radix_ = 2;
+ ++current;
+ DCHECK(current != end);
} else {
leading_zero_ = true;
}
@@ -384,6 +431,11 @@ void StringToIntHelper::ParseInternal(Char start) {
ResultMultiplyAdd(multiplier, part);
} while (!done);
+ if (!allow_trailing_junk_ &&
+ AdvanceToNonspace(isolate_->unicode_cache(), &current, end)) {
+ return set_state(kJunk);
+ }
+
return set_state(kDone);
}
@@ -396,6 +448,7 @@ class NumberParseIntHelper : public StringToIntHelper {
ParseInt();
switch (state()) {
case kJunk:
+ case kEmpty:
return JunkStringValue();
case kZero:
return SignedZero(negative());
@@ -419,14 +472,13 @@ class NumberParseIntHelper : public StringToIntHelper {
bool is_power_of_two = base::bits::IsPowerOfTwo(radix());
if (!is_power_of_two && radix() != 10) return;
DisallowHeapAllocation no_gc;
- String::FlatContent flat = subject()->GetFlatContent();
- if (flat.IsOneByte()) {
- Vector<const uint8_t> vector = flat.ToOneByteVector();
+ if (IsOneByte()) {
+ Vector<const uint8_t> vector = GetOneByteVector();
DCHECK_EQ(length(), vector.length());
result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.start())
: HandleBaseTenCase(vector.start());
} else {
- Vector<const uc16> vector = flat.ToUC16Vector();
+ Vector<const uc16> vector = GetTwoByteVector();
DCHECK_EQ(length(), vector.length());
result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.start())
: HandleBaseTenCase(vector.start());
@@ -480,7 +532,7 @@ class NumberParseIntHelper : public StringToIntHelper {
if (buffer_pos <= kMaxSignificantDigits) {
// If the number has more than kMaxSignificantDigits it will be parsed
// as infinity.
- DCHECK(buffer_pos < kBufferSize);
+ DCHECK_LT(buffer_pos, kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
}
++current;
@@ -565,7 +617,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
return JunkStringValue();
}
- DCHECK(buffer_pos == 0);
+ DCHECK_EQ(buffer_pos, 0);
return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
}
@@ -619,7 +671,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
// Copy significant digits of the integer part (if any) to the buffer.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
- DCHECK(buffer_pos < kBufferSize);
+ DCHECK_LT(buffer_pos, kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
// Will later check if it's an octal in the buffer.
@@ -664,7 +716,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
// instead.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
- DCHECK(buffer_pos < kBufferSize);
+ DCHECK_LT(buffer_pos, kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
exponent--;
@@ -802,24 +854,63 @@ double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
class BigIntParseIntHelper : public StringToIntHelper {
public:
+ enum class Behavior { kParseInt, kStringToBigInt, kLiteral };
+
+ // Used for BigInt.parseInt API, where the input is a Heap-allocated String.
BigIntParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
- : StringToIntHelper(isolate, string, radix) {}
+ : StringToIntHelper(isolate, string, radix),
+ behavior_(Behavior::kParseInt) {}
+
+ // Used for StringToBigInt operation (BigInt constructor and == operator).
+ BigIntParseIntHelper(Isolate* isolate, Handle<String> string)
+ : StringToIntHelper(isolate, string),
+ behavior_(Behavior::kStringToBigInt) {
+ set_allow_binary_and_octal_prefixes();
+ set_disallow_trailing_junk();
+ }
+
+ // Used for parsing BigInt literals, where the input is a buffer of
+ // one-byte ASCII digits, along with an optional radix prefix.
+ BigIntParseIntHelper(Isolate* isolate, const uint8_t* string, int length)
+ : StringToIntHelper(isolate, string, length),
+ behavior_(Behavior::kLiteral) {
+ set_allow_binary_and_octal_prefixes();
+ }
MaybeHandle<BigInt> GetResult() {
ParseInt();
+ if (behavior_ == Behavior::kStringToBigInt && sign() != Sign::kNone &&
+ radix() != 10) {
+ return MaybeHandle<BigInt>();
+ }
+ if (state() == kEmpty) {
+ if (behavior_ == Behavior::kParseInt) {
+ set_state(kJunk);
+ } else if (behavior_ == Behavior::kStringToBigInt) {
+ set_state(kZero);
+ } else {
+ UNREACHABLE();
+ }
+ }
switch (state()) {
case kJunk:
- THROW_NEW_ERROR(isolate(),
- NewSyntaxError(MessageTemplate::kBigIntInvalidString),
- BigInt);
+ if (should_throw() == kThrowOnError) {
+ THROW_NEW_ERROR(isolate(),
+ NewSyntaxError(MessageTemplate::kBigIntInvalidString),
+ BigInt);
+ } else {
+ DCHECK_EQ(should_throw(), kDontThrow);
+ return MaybeHandle<BigInt>();
+ }
case kZero:
- return isolate()->factory()->NewBigIntFromInt(0);
+ return BigInt::Zero(isolate());
case kError:
+ DCHECK_EQ(should_throw() == kThrowOnError,
+ isolate()->has_pending_exception());
return MaybeHandle<BigInt>();
case kDone:
- result_->set_sign(negative());
- result_->RightTrim();
- return result_;
+ return BigInt::Finalize(result_, negative());
+ case kEmpty:
case kRunning:
break;
}
@@ -833,28 +924,45 @@ class BigIntParseIntHelper : public StringToIntHelper {
// Optimization opportunity: Would it makes sense to scan for trailing
// junk before allocating the result?
int charcount = length() - cursor();
- MaybeHandle<BigInt> maybe =
- BigInt::AllocateFor(isolate(), radix(), charcount);
+ // TODO(adamk): Pretenure if this is for a literal.
+ MaybeHandle<FreshlyAllocatedBigInt> maybe =
+ BigInt::AllocateFor(isolate(), radix(), charcount, should_throw());
if (!maybe.ToHandle(&result_)) {
set_state(kError);
}
}
virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) {
- result_->InplaceMultiplyAdd(static_cast<uintptr_t>(multiplier),
- static_cast<uintptr_t>(part));
+ BigInt::InplaceMultiplyAdd(result_, static_cast<uintptr_t>(multiplier),
+ static_cast<uintptr_t>(part));
}
private:
- Handle<BigInt> result_;
+ ShouldThrow should_throw() const {
+ return behavior_ == Behavior::kParseInt ? kThrowOnError : kDontThrow;
+ }
+
+ Handle<FreshlyAllocatedBigInt> result_;
+ Behavior behavior_;
};
-MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string,
+MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
int radix) {
BigIntParseIntHelper helper(isolate, string, radix);
return helper.GetResult();
}
+MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
+ BigIntParseIntHelper helper(isolate, string);
+ return helper.GetResult();
+}
+
+MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate, const char* string) {
+ BigIntParseIntHelper helper(isolate, reinterpret_cast<const uint8_t*>(string),
+ static_cast<int>(strlen(string)));
+ return helper.GetResult();
+}
+
const char* DoubleToCString(double v, Vector<char> buffer) {
switch (FPCLASSIFY_NAMESPACE::fpclassify(v)) {
case FP_NAN: return "NaN";
@@ -933,8 +1041,8 @@ const char* IntToCString(int n, Vector<char> buffer) {
char* DoubleToFixedCString(double value, int f) {
const int kMaxDigitsBeforePoint = 21;
const double kFirstNonFixed = 1e21;
- DCHECK(f >= 0);
- DCHECK(f <= kMaxFractionDigits);
+ DCHECK_GE(f, 0);
+ DCHECK_LE(f, kMaxFractionDigits);
bool negative = false;
double abs_value = value;
@@ -1051,7 +1159,7 @@ char* DoubleToExponentialCString(double value, int f) {
const int kV8DtoaBufferCapacity = kMaxFractionDigits + 1 + 1;
// Make sure that the buffer is big enough, even if we fall back to the
// shortest representation (which happens when f equals -1).
- DCHECK(kBase10MaximalLength <= kMaxFractionDigits + 1);
+ DCHECK_LE(kBase10MaximalLength, kMaxFractionDigits + 1);
char decimal_rep[kV8DtoaBufferCapacity];
int decimal_rep_length;
@@ -1065,7 +1173,7 @@ char* DoubleToExponentialCString(double value, int f) {
Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
&sign, &decimal_rep_length, &decimal_point);
}
- DCHECK(decimal_rep_length > 0);
+ DCHECK_GT(decimal_rep_length, 0);
DCHECK(decimal_rep_length <= f + 1);
int exponent = decimal_point - 1;
@@ -1102,7 +1210,7 @@ char* DoubleToPrecisionCString(double value, int p) {
int exponent = decimal_point - 1;
- char* result = NULL;
+ char* result = nullptr;
if (exponent < -6 || exponent >= p) {
result =
@@ -1215,7 +1323,7 @@ char* DoubleToRadixCString(double value, int radix) {
buffer[--integer_cursor] = '0';
}
do {
- double remainder = modulo(integer, radix);
+ double remainder = Modulo(integer, radix);
buffer[--integer_cursor] = chars[static_cast<int>(remainder)];
integer = (integer - remainder) / radix;
} while (integer > 0);
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index ac689c8b51..915a286e8f 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -105,8 +105,20 @@ double StringToDouble(UnicodeCache* unicode_cache,
double StringToInt(Isolate* isolate, Handle<String> string, int radix);
-MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string,
+// This follows BigInt.parseInt semantics: "" => SyntaxError.
+MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
int radix);
+// This follows https://tc39.github.io/proposal-bigint/#sec-string-to-bigint
+// semantics: "" => 0n.
+MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string);
+
+// This version expects a zero-terminated character array. Radix will
+// be inferred from string prefix (case-insensitive):
+// 0x -> hex
+// 0o -> octal
+// 0b -> binary
+V8_EXPORT_PRIVATE MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate,
+ const char* string);
const int kDoubleToCStringMinBufferSize = 100;
@@ -158,6 +170,7 @@ inline uint32_t PositiveNumberToUint32(Object* number);
inline int32_t NumberToInt32(Object* number);
inline uint32_t NumberToUint32(Object* number);
inline int64_t NumberToInt64(Object* number);
+inline uint64_t PositiveNumberToUint64(Object* number);
double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
int flags, double empty_string_val = 0.0);
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index deb794b09f..f085478bf3 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -19,7 +19,7 @@ void RuntimeCallTimer::Start(RuntimeCallCounter* counter,
v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
return;
}
- base::TimeTicks now = Now();
+ base::TimeTicks now = RuntimeCallTimer::Now();
if (parent) parent->Pause(now);
Resume(now);
DCHECK(IsStarted());
@@ -38,7 +38,7 @@ void RuntimeCallTimer::Resume(base::TimeTicks now) {
RuntimeCallTimer* RuntimeCallTimer::Stop() {
if (!IsStarted()) return parent();
- base::TimeTicks now = Now();
+ base::TimeTicks now = RuntimeCallTimer::Now();
Pause(now);
counter_->Increment();
CommitTimeToCounter();
@@ -57,10 +57,6 @@ void RuntimeCallTimer::CommitTimeToCounter() {
bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
-base::TimeTicks RuntimeCallTimer::Now() {
- return base::TimeTicks::HighResolutionNow();
-}
-
RuntimeCallTimerScope::RuntimeCallTimerScope(
HeapObject* heap_object, RuntimeCallStats::CounterId counter_id)
: RuntimeCallTimerScope(heap_object->GetIsolate(), counter_id) {}
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index a5709dbec9..c754e6fdef 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -16,9 +16,9 @@ namespace v8 {
namespace internal {
StatsTable::StatsTable(Counters* counters)
- : lookup_function_(NULL),
- create_histogram_function_(NULL),
- add_histogram_sample_function_(NULL) {}
+ : lookup_function_(nullptr),
+ create_histogram_function_(nullptr),
+ add_histogram_sample_function_(nullptr) {}
void StatsTable::SetCounterFunction(CounterLookupCallback f) {
lookup_function_ = f;
@@ -313,6 +313,9 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
#undef HM
}
+base::TimeTicks (*RuntimeCallTimer::Now)() =
+ &base::TimeTicks::HighResolutionNow;
+
class RuntimeCallStatEntries {
public:
void Print(std::ostream& os) {
@@ -478,8 +481,8 @@ void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
CounterId counter_id) {
DCHECK(stats->IsCalledOnTheSameThread());
RuntimeCallCounter* counter = &(stats->*counter_id);
- DCHECK(counter->name() != nullptr);
- timer->Start(counter, stats->current_timer_.Value());
+ DCHECK_NOT_NULL(counter->name());
+ timer->Start(counter, stats->current_timer());
stats->current_timer_.SetValue(timer);
stats->current_counter_.SetValue(counter);
}
@@ -487,17 +490,11 @@ void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
// static
void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
DCHECK(stats->IsCalledOnTheSameThread());
- if (stats->current_timer_.Value() != timer) {
- // The branch is added to catch a crash crbug.com/760649
- RuntimeCallTimer* stack_top = stats->current_timer_.Value();
- EmbeddedVector<char, 200> text;
- SNPrintF(text, "ERROR: Leaving counter '%s', stack top '%s'.\n",
- timer->name(), stack_top ? stack_top->name() : "(null)");
- USE(text);
- CHECK(false);
- }
+ RuntimeCallTimer* stack_top = stats->current_timer();
+ if (stack_top == nullptr) return; // Missing timer is a result of Reset().
+ CHECK(stack_top == timer);
stats->current_timer_.SetValue(timer->Stop());
- RuntimeCallTimer* cur_timer = stats->current_timer_.Value();
+ RuntimeCallTimer* cur_timer = stats->current_timer();
stats->current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
}
@@ -513,8 +510,10 @@ void RuntimeCallStats::Add(RuntimeCallStats* other) {
// static
void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
CounterId counter_id) {
+ DCHECK(stats->IsCalledOnTheSameThread());
+ // When RCS are enabled dynamically there might be no stats or timer set up.
+ if (stats == nullptr) return;
RuntimeCallTimer* timer = stats->current_timer_.Value();
- // When RCS are enabled dynamically there might be no current timer set up.
if (timer == nullptr) return;
RuntimeCallCounter* counter = &(stats->*counter_id);
timer->set_counter(counter);
@@ -528,6 +527,11 @@ bool RuntimeCallStats::IsCalledOnTheSameThread() {
return true;
}
+void RuntimeCallStats::Print() {
+ OFStream os(stdout);
+ Print(os);
+}
+
void RuntimeCallStats::Print(std::ostream& os) {
RuntimeCallStatEntries entries;
if (current_timer_.Value() != nullptr) {
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 3fa3ae0306..56873db092 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -46,23 +46,21 @@ class StatsTable {
add_histogram_sample_function_ = f;
}
- bool HasCounterFunction() const {
- return lookup_function_ != NULL;
- }
+ bool HasCounterFunction() const { return lookup_function_ != nullptr; }
// Lookup the location of a counter by name. If the lookup
- // is successful, returns a non-NULL pointer for writing the
+ // is successful, returns a non-nullptr pointer for writing the
// value of the counter. Each thread calling this function
// may receive a different location to store it's counter.
// The return value must not be cached and re-used across
// threads, although a single thread is free to cache it.
int* FindLocation(const char* name) {
- if (!lookup_function_) return NULL;
+ if (!lookup_function_) return nullptr;
return lookup_function_(name);
}
// Create a histogram by name. If the create is successful,
- // returns a non-NULL pointer for use with AddHistogramSample
+ // returns a non-nullptr pointer for use with AddHistogramSample
// function. min and max define the expected minimum and maximum
// sample values. buckets is the maximum number of buckets
// that the samples will be grouped into.
@@ -70,7 +68,7 @@ class StatsTable {
int min,
int max,
size_t buckets) {
- if (!create_histogram_function_) return NULL;
+ if (!create_histogram_function_) return nullptr;
return create_histogram_function_(name, min, max, buckets);
}
@@ -149,16 +147,14 @@ class StatsCounter : public StatsCounterBase {
// Is this counter enabled?
// Returns false if table is full.
- bool Enabled() {
- return GetPtr() != NULL;
- }
+ bool Enabled() { return GetPtr() != nullptr; }
// Get the internal pointer to the counter. This is used
// by the code generator to emit code that manipulates a
// given counter without calling the runtime system.
int* GetInternalPointer() {
int* loc = GetPtr();
- DCHECK(loc != NULL);
+ DCHECK_NOT_NULL(loc);
return loc;
}
@@ -191,9 +187,9 @@ class StatsCounterThreadSafe : public StatsCounterBase {
void Increment(int value);
void Decrement();
void Decrement(int value);
- bool Enabled() { return ptr_ != NULL; }
+ bool Enabled() { return ptr_ != nullptr; }
int* GetInternalPointer() {
- DCHECK(ptr_ != NULL);
+ DCHECK_NOT_NULL(ptr_);
return ptr_;
}
@@ -220,6 +216,10 @@ class Histogram {
const char* name() { return name_; }
+ int min() const { return min_; }
+ int max() const { return max_; }
+ int num_buckets() const { return num_buckets_; }
+
protected:
Histogram() {}
Histogram(const char* name, int min, int max, int num_buckets,
@@ -292,6 +292,29 @@ class TimedHistogramScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
};
+// Helper class for scoping a TimedHistogram, where the histogram is selected at
+// stop time rather than start time.
+// TODO(leszeks): This is heavily reliant on TimedHistogram::Start() doing
+// nothing but starting the timer, and TimedHistogram::Stop() logging the sample
+// correctly even if Start() was not called. This happens to be true iff Stop()
+// is passed a null isolate, but that's an implementation detail of
+// TimedHistogram, and we shouldn't rely on it.
+class LazyTimedHistogramScope {
+ public:
+ LazyTimedHistogramScope() : histogram_(nullptr) { timer_.Start(); }
+ ~LazyTimedHistogramScope() {
+ // We should set the histogram before this scope exits.
+ DCHECK_NOT_NULL(histogram_);
+ histogram_->Stop(&timer_, nullptr);
+ }
+
+ void set_histogram(TimedHistogram* histogram) { histogram_ = histogram; }
+
+ private:
+ base::ElapsedTimer timer_;
+ TimedHistogram* histogram_;
+};
+
// A HistogramTimer allows distributions of non-nested timed results
// to be created. WARNING: This class is not thread safe and can only
// be run on the foreground thread.
@@ -381,7 +404,13 @@ class AggregatableHistogramTimer : public Histogram {
public:
// Start/stop the "outer" scope.
void Start() { time_ = base::TimeDelta(); }
- void Stop() { AddSample(static_cast<int>(time_.InMicroseconds())); }
+ void Stop() {
+ if (time_ != base::TimeDelta()) {
+ // Only add non-zero samples, since zero samples represent situations
+ // where there were no aggregated samples added.
+ AddSample(static_cast<int>(time_.InMicroseconds()));
+ }
+ }
// Add a time value ("inner" scope).
void Add(base::TimeDelta other) { time_ += other; }
@@ -466,7 +495,7 @@ class AggregatedMemoryHistogram {
last_ms_(0.0),
aggregate_value_(0.0),
last_value_(0.0),
- backing_histogram_(NULL) {}
+ backing_histogram_(nullptr) {}
double Aggregate(double current_ms, double current_value);
bool is_initialized_;
@@ -591,11 +620,13 @@ class RuntimeCallTimer final {
void Snapshot();
inline RuntimeCallTimer* Stop();
+ // Make the time source configurable for testing purposes.
+ V8_EXPORT_PRIVATE static base::TimeTicks (*Now)();
+
private:
inline void Pause(base::TimeTicks now);
inline void Resume(base::TimeTicks now);
inline void CommitTimeToCounter();
- inline base::TimeTicks Now();
RuntimeCallCounter* counter_ = nullptr;
base::AtomicValue<RuntimeCallTimer*> parent_;
@@ -760,17 +791,20 @@ class RuntimeCallTimer final {
V(ArrayLengthSetter) \
V(BoundFunctionNameGetter) \
V(BoundFunctionLengthGetter) \
- V(CompileCodeLazy) \
+ V(CompileBackgroundAnalyse) \
+ V(CompileBackgroundEval) \
+ V(CompileBackgroundIgnition) \
+ V(CompileBackgroundScript) \
+ V(CompileBackgroundRenumber) \
+ V(CompileBackgroundRewriteReturnResult) \
+ V(CompileBackgroundScopeAnalysis) \
V(CompileDeserialize) \
V(CompileEval) \
- V(CompileFullCode) \
V(CompileAnalyse) \
- V(CompileBackgroundIgnition) \
V(CompileFunction) \
V(CompileGetFromOptimizedCodeMap) \
V(CompileIgnition) \
V(CompileIgnitionFinalization) \
- V(CompileInnerFunction) \
V(CompileRenumber) \
V(CompileRewriteReturnResult) \
V(CompileScopeAnalysis) \
@@ -781,6 +815,7 @@ class RuntimeCallTimer final {
V(FunctionCallback) \
V(FunctionPrototypeGetter) \
V(FunctionPrototypeSetter) \
+ V(FunctionLengthGetter) \
V(GC_Custom_AllAvailableGarbage) \
V(GC_Custom_IncrementalMarkingObserver) \
V(GC_Custom_SlowAllocateRaw) \
@@ -809,6 +844,7 @@ class RuntimeCallTimer final {
V(ParseArrowFunctionLiteral) \
V(ParseBackgroundArrowFunctionLiteral) \
V(ParseBackgroundFunctionLiteral) \
+ V(ParseBackgroundProgram) \
V(ParseEval) \
V(ParseFunction) \
V(ParseFunctionLiteral) \
@@ -831,56 +867,60 @@ class RuntimeCallTimer final {
V(TestCounter2) \
V(TestCounter3)
-#define FOR_EACH_HANDLER_COUNTER(V) \
- V(KeyedLoadIC_LoadIndexedStringStub) \
- V(KeyedLoadIC_LoadIndexedInterceptorStub) \
- V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
- V(KeyedLoadIC_LoadElementDH) \
- V(KeyedLoadIC_SlowStub) \
- V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
- V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
- V(KeyedStoreIC_SlowStub) \
- V(KeyedStoreIC_StoreFastElementStub) \
- V(KeyedStoreIC_StoreElementStub) \
- V(LoadIC_FunctionPrototypeStub) \
- V(LoadIC_HandlerCacheHit_Accessor) \
- V(LoadIC_LoadAccessorDH) \
- V(LoadIC_LoadAccessorFromPrototypeDH) \
- V(LoadIC_LoadApiGetterDH) \
- V(LoadIC_LoadApiGetterFromPrototypeDH) \
- V(LoadIC_LoadCallback) \
- V(LoadIC_LoadConstantDH) \
- V(LoadIC_LoadConstantFromPrototypeDH) \
- V(LoadIC_LoadFieldDH) \
- V(LoadIC_LoadFieldFromPrototypeDH) \
- V(LoadIC_LoadGlobalDH) \
- V(LoadIC_LoadGlobalFromPrototypeDH) \
- V(LoadIC_LoadIntegerIndexedExoticDH) \
- V(LoadIC_LoadInterceptorDH) \
- V(LoadIC_LoadNonMaskingInterceptorDH) \
- V(LoadIC_LoadInterceptorFromPrototypeDH) \
- V(LoadIC_LoadNonexistentDH) \
- V(LoadIC_LoadNormalDH) \
- V(LoadIC_LoadNormalFromPrototypeDH) \
- V(LoadIC_LoadScriptContextFieldStub) \
- V(LoadIC_LoadViaGetter) \
- V(LoadIC_NonReceiver) \
- V(LoadIC_Premonomorphic) \
- V(LoadIC_SlowStub) \
- V(LoadIC_StringLength) \
- V(StoreIC_HandlerCacheHit_Accessor) \
- V(StoreIC_NonReceiver) \
- V(StoreIC_Premonomorphic) \
- V(StoreIC_SlowStub) \
- V(StoreIC_StoreCallback) \
- V(StoreIC_StoreFieldDH) \
- V(StoreIC_StoreGlobalDH) \
- V(StoreIC_StoreGlobalTransitionDH) \
- V(StoreIC_StoreInterceptorStub) \
- V(StoreIC_StoreNormalDH) \
- V(StoreIC_StoreScriptContextFieldStub) \
- V(StoreIC_StoreTransitionDH) \
- V(StoreIC_StoreViaSetter)
+#define FOR_EACH_HANDLER_COUNTER(V) \
+ V(KeyedLoadIC_LoadIndexedInterceptorStub) \
+ V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
+ V(KeyedLoadIC_LoadElementDH) \
+ V(KeyedLoadIC_LoadIndexedStringDH) \
+ V(KeyedLoadIC_SlowStub) \
+ V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
+ V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
+ V(KeyedStoreIC_SlowStub) \
+ V(KeyedStoreIC_StoreFastElementStub) \
+ V(KeyedStoreIC_StoreElementStub) \
+ V(LoadIC_FunctionPrototypeStub) \
+ V(LoadIC_HandlerCacheHit_Accessor) \
+ V(LoadIC_LoadAccessorDH) \
+ V(LoadIC_LoadAccessorFromPrototypeDH) \
+ V(LoadIC_LoadApiGetterFromPrototypeDH) \
+ V(LoadIC_LoadCallback) \
+ V(LoadIC_LoadConstantDH) \
+ V(LoadIC_LoadConstantFromPrototypeDH) \
+ V(LoadIC_LoadFieldDH) \
+ V(LoadIC_LoadFieldFromPrototypeDH) \
+ V(LoadIC_LoadGlobalDH) \
+ V(LoadIC_LoadGlobalFromPrototypeDH) \
+ V(LoadIC_LoadIntegerIndexedExoticDH) \
+ V(LoadIC_LoadInterceptorDH) \
+ V(LoadIC_LoadNonMaskingInterceptorDH) \
+ V(LoadIC_LoadInterceptorFromPrototypeDH) \
+ V(LoadIC_LoadNativeDataPropertyDH) \
+ V(LoadIC_LoadNativeDataPropertyFromPrototypeDH) \
+ V(LoadIC_LoadNonexistentDH) \
+ V(LoadIC_LoadNormalDH) \
+ V(LoadIC_LoadNormalFromPrototypeDH) \
+ V(LoadIC_LoadScriptContextFieldStub) \
+ V(LoadIC_NonReceiver) \
+ V(LoadIC_Premonomorphic) \
+ V(LoadIC_SlowStub) \
+ V(LoadIC_StringLength) \
+ V(LoadIC_StringWrapperLength) \
+ V(StoreIC_HandlerCacheHit_Accessor) \
+ V(StoreIC_NonReceiver) \
+ V(StoreIC_Premonomorphic) \
+ V(StoreIC_SlowStub) \
+ V(StoreIC_StoreAccessorDH) \
+ V(StoreIC_StoreAccessorOnPrototypeDH) \
+ V(StoreIC_StoreApiSetterOnPrototypeDH) \
+ V(StoreIC_StoreFieldDH) \
+ V(StoreIC_StoreGlobalDH) \
+ V(StoreIC_StoreGlobalTransitionDH) \
+ V(StoreIC_StoreInterceptorStub) \
+ V(StoreIC_StoreNativeDataPropertyDH) \
+ V(StoreIC_StoreNativeDataPropertyOnPrototypeDH) \
+ V(StoreIC_StoreNormalDH) \
+ V(StoreIC_StoreScriptContextFieldStub) \
+ V(StoreIC_StoreTransitionDH)
class RuntimeCallStats final : public ZoneObject {
public:
@@ -931,6 +971,7 @@ class RuntimeCallStats final : public ZoneObject {
// Add all entries from another stats object.
void Add(RuntimeCallStats* other);
V8_EXPORT_PRIVATE void Print(std::ostream& os);
+ V8_EXPORT_PRIVATE void Print();
V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
ThreadId thread_id() const { return thread_id_; }
@@ -1032,7 +1073,10 @@ class RuntimeCallTimerScope {
V8.WasmCompileFunctionPeakMemoryBytes, 1, GB, 51) \
HR(asm_module_size_bytes, V8.AsmModuleSizeBytes, 1, GB, 51) \
HR(asm_wasm_translation_throughput, V8.AsmWasmTranslationThroughput, 1, 100, \
- 20)
+ 20) \
+ HR(wasm_lazy_compilation_throughput, V8.WasmLazyCompilationThroughput, 1, \
+ 10000, 50) \
+ HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 19, 20)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
@@ -1090,7 +1134,27 @@ class RuntimeCallTimerScope {
HT(wasm_instantiate_wasm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
- V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND)
+ V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND) \
+ /* Total compilation time incl. caching/parsing for various cache states. */ \
+ HT(compile_script_with_produce_cache, \
+ V8.CompileScriptMicroSeconds.ProduceCache, 1000000, MICROSECOND) \
+ HT(compile_script_with_isolate_cache_hit, \
+ V8.CompileScriptMicroSeconds.IsolateCacheHit, 1000000, MICROSECOND) \
+ HT(compile_script_with_consume_cache, \
+ V8.CompileScriptMicroSeconds.ConsumeCache, 1000000, MICROSECOND) \
+ HT(compile_script_consume_failed, \
+ V8.CompileScriptMicroSeconds.ConsumeCache.Failed, 1000000, MICROSECOND) \
+ HT(compile_script_no_cache_other, \
+ V8.CompileScriptMicroSeconds.NoCache.Other, 1000000, MICROSECOND) \
+ HT(compile_script_no_cache_because_inline_script, \
+ V8.CompileScriptMicroSeconds.NoCache.InlineScript, 1000000, MICROSECOND) \
+ HT(compile_script_no_cache_because_script_too_small, \
+ V8.CompileScriptMicroSeconds.NoCache.ScriptTooSmall, 1000000, \
+ MICROSECOND) \
+ HT(compile_script_no_cache_because_cache_too_cold, \
+ V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \
+ HT(compile_script_on_background, \
+ V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
@@ -1193,8 +1257,6 @@ class RuntimeCallTimerScope {
SC(cow_arrays_converted, V8.COWArraysConverted) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
@@ -1206,8 +1268,6 @@ class RuntimeCallTimerScope {
SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte) \
SC(sub_string_runtime, V8.SubStringRuntime) \
SC(sub_string_native, V8.SubStringNative) \
- SC(string_compare_native, V8.StringCompareNative) \
- SC(string_compare_runtime, V8.StringCompareRuntime) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
SC(regexp_entry_native, V8.RegExpEntryNative) \
SC(number_to_string_native, V8.NumberToStringNative) \
@@ -1246,10 +1306,12 @@ class RuntimeCallTimerScope {
/* Total count of functions compiled using the baseline compiler. */ \
SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
-#define STATS_COUNTER_TS_LIST(SC) \
- SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
- SC(wasm_reloc_size, V8.WasmRelocBytes) \
- SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions)
+#define STATS_COUNTER_TS_LIST(SC) \
+ SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
+ SC(wasm_reloc_size, V8.WasmRelocBytes) \
+ SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions) \
+ SC(liftoff_compiled_functions, V8.LiftoffCompiledFunctions) \
+ SC(liftoff_unsupported_functions, V8.LiftoffUnsupportedFunctions)
// This file contains all the v8 counters that are in use.
class Counters : public std::enable_shared_from_this<Counters> {
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8-console.cc
index 5848883b8c..7f057e2867 100644
--- a/deps/v8/src/d8-console.cc
+++ b/deps/v8/src/d8-console.cc
@@ -4,15 +4,17 @@
#include "src/d8-console.h"
#include "src/d8.h"
+#include "src/isolate.h"
namespace v8 {
namespace {
-void WriteToFile(FILE* file, Isolate* isolate,
+void WriteToFile(const char* prefix, FILE* file, Isolate* isolate,
const debug::ConsoleCallArguments& args) {
+ if (prefix) fprintf(file, "%s: ", prefix);
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(isolate);
- if (i != 0) fprintf(file, " ");
+ if (i > 0) fprintf(file, " ");
Local<Value> arg = args[i];
Local<String> str_obj;
@@ -35,29 +37,48 @@ D8Console::D8Console(Isolate* isolate) : isolate_(isolate) {
default_timer_ = base::TimeTicks::HighResolutionNow();
}
+void D8Console::Assert(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
+ Local<Boolean> arg;
+ if (args.Length() > 0) {
+ if (!args[0]->ToBoolean(isolate_->GetCurrentContext()).ToLocal(&arg)) {
+ return;
+ }
+ } else {
+ // No arguments given, the "first" argument is undefined which is false-ish.
+ arg = v8::False(isolate_);
+ }
+ if (arg->IsTrue()) return;
+ WriteToFile("console.assert", stdout, isolate_, args);
+ isolate_->ThrowException(v8::Exception::Error(
+ v8::String::NewFromUtf8(isolate_, "console.assert failed",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked()));
+}
+
void D8Console::Log(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
- WriteToFile(stdout, isolate_, args);
+ WriteToFile(nullptr, stdout, isolate_, args);
}
void D8Console::Error(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
- WriteToFile(stderr, isolate_, args);
+ WriteToFile("console.error", stderr, isolate_, args);
}
void D8Console::Warn(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
- WriteToFile(stdout, isolate_, args);
+ WriteToFile("console.warn", stdout, isolate_, args);
}
void D8Console::Info(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
- WriteToFile(stdout, isolate_, args);
+ WriteToFile("console.info", stdout, isolate_, args);
}
void D8Console::Debug(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
- WriteToFile(stdout, isolate_, args);
+ WriteToFile("console.debug", stdout, isolate_, args);
}
void D8Console::Time(const debug::ConsoleCallArguments& args,
@@ -84,11 +105,11 @@ void D8Console::Time(const debug::ConsoleCallArguments& args,
void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
base::TimeDelta delta;
- base::TimeTicks now = base::TimeTicks::HighResolutionNow();
if (args.Length() == 0) {
delta = base::TimeTicks::HighResolutionNow() - default_timer_;
- printf("default: ");
+ printf("console.timeEnd: default, %f\n", delta.InMillisecondsF());
} else {
+ base::TimeTicks now = base::TimeTicks::HighResolutionNow();
Local<Value> arg = args[0];
Local<String> label;
v8::TryCatch try_catch(isolate_);
@@ -99,9 +120,30 @@ void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
if (find != timers_.end()) {
delta = now - find->second;
}
- printf("%s: ", *utf8);
+ printf("console.timeEnd: %s, %f\n", *utf8, delta.InMillisecondsF());
}
- printf("%f\n", delta.InMillisecondsF());
+}
+
+void D8Console::TimeStamp(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
+ base::TimeDelta delta = base::TimeTicks::HighResolutionNow() - default_timer_;
+ if (args.Length() == 0) {
+ printf("console.timeStamp: default, %f\n", delta.InMillisecondsF());
+ } else {
+ Local<Value> arg = args[0];
+ Local<String> label;
+ v8::TryCatch try_catch(isolate_);
+ if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) return;
+ v8::String::Utf8Value utf8(isolate_, label);
+ std::string string(*utf8);
+ printf("console.timeStamp: %s, %f\n", *utf8, delta.InMillisecondsF());
+ }
+}
+
+void D8Console::Trace(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ i_isolate->PrintStack(stderr, i::Isolate::kPrintStackConcise);
}
} // namespace v8
diff --git a/deps/v8/src/d8-console.h b/deps/v8/src/d8-console.h
index a98525b9f4..5c7569b3ce 100644
--- a/deps/v8/src/d8-console.h
+++ b/deps/v8/src/d8-console.h
@@ -16,6 +16,8 @@ class D8Console : public debug::ConsoleDelegate {
explicit D8Console(Isolate* isolate);
private:
+ void Assert(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
void Log(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) override;
void Error(const debug::ConsoleCallArguments& args,
@@ -30,6 +32,10 @@ class D8Console : public debug::ConsoleDelegate {
const v8::debug::ConsoleContext&) override;
void TimeEnd(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) override;
+ void TimeStamp(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void Trace(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
Isolate* isolate_;
std::map<std::string, base::TimeTicks> timers_;
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 18040c81f1..8836fdb0e5 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -87,7 +87,7 @@ static bool WaitOnFD(int fd,
int gone = 0;
if (total_timeout != -1) {
struct timeval time_now;
- gettimeofday(&time_now, NULL);
+ gettimeofday(&time_now, nullptr);
time_t seconds = time_now.tv_sec - start_time.tv_sec;
gone = static_cast<int>(seconds * 1000 +
(time_now.tv_usec - start_time.tv_usec) / 1000);
@@ -104,11 +104,8 @@ static bool WaitOnFD(int fd,
}
timeout.tv_usec = (read_timeout % 1000) * 1000;
timeout.tv_sec = read_timeout / 1000;
- int number_of_fds_ready = select(fd + 1,
- &readfds,
- &writefds,
- &exceptfds,
- read_timeout != -1 ? &timeout : NULL);
+ int number_of_fds_ready = select(fd + 1, &readfds, &writefds, &exceptfds,
+ read_timeout != -1 ? &timeout : nullptr);
return number_of_fds_ready == 1;
}
@@ -118,7 +115,7 @@ static bool WaitOnFD(int fd,
static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
if (total_time == -1) return false;
struct timeval time_now;
- gettimeofday(&time_now, NULL);
+ gettimeofday(&time_now, nullptr);
// Careful about overflow.
int seconds = static_cast<int>(time_now.tv_sec - start_time.tv_sec);
if (seconds > 100) {
@@ -139,7 +136,9 @@ static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
class ZombieProtector {
public:
explicit ZombieProtector(int pid): pid_(pid) { }
- ~ZombieProtector() { if (pid_ != 0) waitpid(pid_, NULL, 0); }
+ ~ZombieProtector() {
+ if (pid_ != 0) waitpid(pid_, nullptr, 0);
+ }
void ChildIsDeadNow() { pid_ = 0; }
private:
int pid_;
@@ -161,12 +160,10 @@ class OpenFDCloser {
// scope.
class ExecArgs {
public:
- ExecArgs() {
- exec_args_[0] = NULL;
- }
+ ExecArgs() { exec_args_[0] = nullptr; }
bool Init(Isolate* isolate, Local<Value> arg0, Local<Array> command_args) {
String::Utf8Value prog(isolate, arg0);
- if (*prog == NULL) {
+ if (*prog == nullptr) {
const char* message =
"os.system(): String conversion of program name failed";
isolate->ThrowException(
@@ -184,8 +181,8 @@ class ExecArgs {
command_args->Get(isolate->GetCurrentContext(),
Integer::New(isolate, j)).ToLocalChecked());
String::Utf8Value utf8_arg(isolate, arg);
- if (*utf8_arg == NULL) {
- exec_args_[i] = NULL; // Consistent state for destructor.
+ if (*utf8_arg == nullptr) {
+ exec_args_[i] = nullptr; // Consistent state for destructor.
const char* message =
"os.system(): String conversion of argument failed.";
isolate->ThrowException(
@@ -198,12 +195,12 @@ class ExecArgs {
snprintf(c_arg, len, "%s", *utf8_arg);
exec_args_[i] = c_arg;
}
- exec_args_[i] = NULL;
+ exec_args_[i] = nullptr;
return true;
}
~ExecArgs() {
for (unsigned i = 0; i < kMaxArgs; i++) {
- if (exec_args_[i] == NULL) {
+ if (exec_args_[i] == nullptr) {
return;
}
delete [] exec_args_[i];
@@ -490,7 +487,7 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
struct timeval start_time;
- gettimeofday(&start_time, NULL);
+ gettimeofday(&start_time, nullptr);
ExecArgs exec_args;
if (!exec_args.Init(args.GetIsolate(), args[0], command_args)) {
@@ -554,7 +551,7 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
- if (*directory == NULL) {
+ if (*directory == nullptr) {
const char* message = "os.chdir(): String conversion of argument failed.";
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
@@ -619,7 +616,7 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
return CheckItsADirectory(isolate, directory);
} else if (errno == ENOENT) { // Intermediate path element is missing.
char* last_slash = strrchr(directory, '/');
- if (last_slash == NULL) {
+ if (last_slash == nullptr) {
isolate->ThrowException(
String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
.ToLocalChecked());
@@ -668,7 +665,7 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
- if (*directory == NULL) {
+ if (*directory == nullptr) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
@@ -688,7 +685,7 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
- if (*directory == NULL) {
+ if (*directory == nullptr) {
const char* message = "os.rmdir(): String conversion of argument failed.";
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
@@ -709,7 +706,7 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
String::Utf8Value var(args.GetIsolate(), args[0]);
String::Utf8Value value(args.GetIsolate(), args[1]);
- if (*var == NULL) {
+ if (*var == nullptr) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
args.GetIsolate()->ThrowException(
@@ -717,7 +714,7 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
.ToLocalChecked());
return;
}
- if (*value == NULL) {
+ if (*value == nullptr) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
args.GetIsolate()->ThrowException(
@@ -738,7 +735,7 @@ void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
String::Utf8Value var(args.GetIsolate(), args[0]);
- if (*var == NULL) {
+ if (*var == nullptr) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
args.GetIsolate()->ThrowException(
@@ -825,7 +822,7 @@ char* Shell::ReadCharsFromTcpPort(const char* name, int* size_out) {
fprintf(stderr, "Received length %d for %s from localhost:%d\n",
file_length, name, Shell::options.read_from_tcp_port);
close(sockfd);
- return NULL;
+ return nullptr;
}
// Allocate the output array.
@@ -841,7 +838,7 @@ char* Shell::ReadCharsFromTcpPort(const char* name, int* size_out) {
Shell::options.read_from_tcp_port);
close(sockfd);
delete[] chars;
- return NULL;
+ return nullptr;
}
total_received += received;
}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 8a7b922ebb..54a41fc00e 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -124,7 +124,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
#endif
void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
+ return data == nullptr ? data : memset(data, 0, length);
}
void* AllocateUninitialized(size_t length) override {
#if USE_VM
@@ -142,34 +142,35 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
- base::OS::ReleaseRegion(data, length);
+ CHECK(base::OS::Free(data, length));
return;
}
#endif
free(data);
}
- // If {length} is at least {VM_THRESHOLD}, round up to next page size
- // and return {true}. Otherwise return {false}.
+ // If {length} is at least {VM_THRESHOLD}, round up to next page size and
+ // return {true}. Otherwise return {false}.
bool RoundToPageSize(size_t* length) {
- const size_t kPageSize = base::OS::CommitPageSize();
+ size_t page_size = base::OS::AllocatePageSize();
if (*length >= VM_THRESHOLD && *length < TWO_GB) {
- *length = ((*length + kPageSize - 1) / kPageSize) * kPageSize;
+ *length = RoundUp(*length, page_size);
return true;
}
return false;
}
#if USE_VM
void* VirtualMemoryAllocate(size_t length) {
- void* data = base::OS::ReserveRegion(length, nullptr);
- if (data && !base::OS::CommitRegion(data, length, false)) {
- base::OS::ReleaseRegion(data, length);
- return nullptr;
- }
+ size_t page_size = base::OS::AllocatePageSize();
+ size_t alloc_size = RoundUp(length, page_size);
+ void* address = base::OS::Allocate(nullptr, alloc_size, page_size,
+ base::OS::MemoryPermission::kReadWrite);
+ if (address != nullptr) {
#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(data, length);
+ __lsan_register_root_region(address, alloc_size);
#endif
- MSAN_MEMORY_IS_INITIALIZED(data, length);
- return data;
+ MSAN_MEMORY_IS_INITIALIZED(address, alloc_size);
+ }
+ return address;
}
#endif
};
@@ -177,14 +178,14 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
const size_t kAllocationLimit = 10 * MB;
size_t get_actual_length(size_t length) const {
- return length > kAllocationLimit ? base::OS::CommitPageSize() : length;
+ return length > kAllocationLimit ? base::OS::AllocatePageSize() : length;
}
public:
void* Allocate(size_t length) override {
const size_t actual_length = get_actual_length(length);
void* data = AllocateUninitialized(actual_length);
- return data == NULL ? data : memset(data, 0, actual_length);
+ return data == nullptr ? data : memset(data, 0, actual_length);
}
void* AllocateUninitialized(size_t length) override {
return malloc(get_actual_length(length));
@@ -208,6 +209,18 @@ class PredictablePlatform : public Platform {
DCHECK_NOT_NULL(platform_);
}
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return platform_->GetForegroundTaskRunner(isolate);
+ }
+
+ std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
+ v8::Isolate* isolate) override {
+ // Return the foreground task runner here, so that all tasks get executed
+ // sequentially in a predictable order.
+ return platform_->GetForegroundTaskRunner(isolate);
+ }
+
void CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) override {
// It's not defined when background tasks are being executed, so we can just
@@ -252,13 +265,12 @@ class PredictablePlatform : public Platform {
DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
};
-
-v8::Platform* g_platform = NULL;
+std::unique_ptr<v8::Platform> g_platform;
v8::Platform* GetDefaultPlatform() {
return i::FLAG_verify_predictable
- ? static_cast<PredictablePlatform*>(g_platform)->platform()
- : g_platform;
+ ? static_cast<PredictablePlatform*>(g_platform.get())->platform()
+ : g_platform.get();
}
static Local<Value> Throw(Isolate* isolate, const char* message) {
@@ -270,19 +282,26 @@ static Local<Value> Throw(Isolate* isolate, const char* message) {
Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
if (object->InternalFieldCount() != 1) {
Throw(isolate, "this is not a Worker");
- return NULL;
+ return nullptr;
}
Worker* worker =
static_cast<Worker*>(object->GetAlignedPointerFromInternalField(0));
- if (worker == NULL) {
+ if (worker == nullptr) {
Throw(isolate, "Worker is defunct because main thread is terminating");
- return NULL;
+ return nullptr;
}
return worker;
}
+base::Thread::Options GetThreadOptions(const char* name) {
+ // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
+ // which is not enough to parse the big literal expressions used in tests.
+ // The stack size should be at least StackGuard::kLimitSize + some
+ // OS-specific padding for thread startup code. 2Mbytes seems to be enough.
+ return base::Thread::Options(name, 2 * MB);
+}
} // namespace
@@ -401,12 +420,13 @@ static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
class PerIsolateData {
public:
- explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
+ explicit PerIsolateData(Isolate* isolate)
+ : isolate_(isolate), realms_(nullptr) {
isolate->SetData(0, this);
}
~PerIsolateData() {
- isolate_->SetData(0, NULL); // Not really needed, just to be sure...
+ isolate_->SetData(0, nullptr); // Not really needed, just to be sure...
}
inline static PerIsolateData* Get(Isolate* isolate) {
@@ -473,7 +493,7 @@ class ExternalOwningOneByteStringResource
};
CounterMap* Shell::counter_map_;
-base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
+base::OS::MemoryMappedFile* Shell::counters_file_ = nullptr;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
base::LazyMutex Shell::context_mutex_;
@@ -498,6 +518,53 @@ bool CounterMap::Match(void* key1, void* key2) {
return strcmp(name1, name2) == 0;
}
+// Dummy external source stream which returns the whole source in one go.
+class DummySourceStream : public v8::ScriptCompiler::ExternalSourceStream {
+ public:
+ explicit DummySourceStream(Local<String> source) : done_(false) {
+ source_length_ = source->Utf8Length();
+ source_buffer_.reset(new uint8_t[source_length_]);
+ source->WriteUtf8(reinterpret_cast<char*>(source_buffer_.get()),
+ source_length_);
+ }
+
+ virtual size_t GetMoreData(const uint8_t** src) {
+ if (done_) {
+ return 0;
+ }
+ *src = source_buffer_.release();
+ done_ = true;
+
+ return source_length_;
+ }
+
+ private:
+ int source_length_;
+ std::unique_ptr<uint8_t[]> source_buffer_;
+ bool done_;
+};
+
+class BackgroundCompileThread : public base::Thread {
+ public:
+ BackgroundCompileThread(Isolate* isolate, Local<String> source)
+ : base::Thread(GetThreadOptions("BackgroundCompileThread")),
+ source_(source),
+ streamed_source_(new DummySourceStream(source),
+ v8::ScriptCompiler::StreamedSource::UTF8),
+ task_(v8::ScriptCompiler::StartStreamingScript(isolate,
+ &streamed_source_)) {}
+
+ void Run() override { task_->Run(); }
+
+ v8::ScriptCompiler::StreamedSource* streamed_source() {
+ return &streamed_source_;
+ }
+
+ private:
+ Local<String> source_;
+ v8::ScriptCompiler::StreamedSource streamed_source_;
+ std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task_;
+};
ScriptCompiler::CachedData* CompileForCachedData(
Local<String> source, Local<Value> name,
@@ -506,7 +573,7 @@ ScriptCompiler::CachedData* CompileForCachedData(
uint16_t* source_buffer = new uint16_t[source_length];
source->Write(source_buffer, 0, source_length);
int name_length = 0;
- uint16_t* name_buffer = NULL;
+ uint16_t* name_buffer = nullptr;
if (name->IsString()) {
Local<String> name_string = Local<String>::Cast(name);
name_length = name_string->Length();
@@ -515,10 +582,14 @@ ScriptCompiler::CachedData* CompileForCachedData(
}
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
Isolate* temp_isolate = Isolate::New(create_params);
+ i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
temp_isolate->SetHostImportModuleDynamicallyCallback(
Shell::HostImportModuleDynamically);
- ScriptCompiler::CachedData* result = NULL;
+ temp_isolate->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
+ ScriptCompiler::CachedData* result = nullptr;
{
Isolate::Scope isolate_scope(temp_isolate);
HandleScope handle_scope(temp_isolate);
@@ -576,10 +647,10 @@ MaybeLocal<Script> Shell::CompileString(
} else {
DCHECK(false); // A new compile option?
}
- if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
+ if (data == nullptr) compile_options = ScriptCompiler::kNoCompileOptions;
MaybeLocal<Script> result =
ScriptCompiler::Compile(context, &cached_source, compile_options);
- CHECK(data == NULL || !data->rejected);
+ CHECK(data == nullptr || !data->rejected);
return result;
}
@@ -593,20 +664,44 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
try_catch.SetVerbose(true);
MaybeLocal<Value> maybe_result;
+ bool success = true;
{
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
Context::Scope context_scope(realm);
+ MaybeLocal<Script> maybe_script;
+ if (options.stress_background_compile) {
+ // Start a background thread compiling the script.
+ BackgroundCompileThread background_compile_thread(isolate, source);
+ background_compile_thread.Start();
+
+ // In parallel, compile on the main thread to flush out any data races.
+ {
+ TryCatch ignore_try_catch(isolate);
+ Shell::CompileString(isolate, source, name, options.compile_options);
+ }
+
+ // Join with background thread and finalize compilation.
+ background_compile_thread.Join();
+ ScriptOrigin origin(name);
+ maybe_script = v8::ScriptCompiler::Compile(
+ isolate->GetCurrentContext(),
+ background_compile_thread.streamed_source(), source, origin);
+ } else {
+ maybe_script =
+ Shell::CompileString(isolate, source, name, options.compile_options);
+ }
+
Local<Script> script;
- if (!Shell::CompileString(isolate, source, name, options.compile_options)
- .ToLocal(&script)) {
+ if (!maybe_script.ToLocal(&script)) {
// Print errors that happened during compilation.
if (report_exceptions) ReportException(isolate, &try_catch);
return false;
}
+
maybe_result = script->Run(realm);
- EmptyMessageQueues(isolate);
+ if (!EmptyMessageQueues(isolate)) success = false;
data->realm_current_ = data->realm_switch_;
}
Local<Value> result;
@@ -632,7 +727,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
printf("\n");
}
}
- return true;
+ return success;
}
namespace {
@@ -659,7 +754,7 @@ std::string GetWorkingDirectory() {
char system_buffer[MAX_PATH];
// TODO(adamk): Support Unicode paths.
DWORD len = GetCurrentDirectoryA(MAX_PATH, system_buffer);
- CHECK(len > 0);
+ CHECK_GT(len, 0);
return system_buffer;
#else
char curdir[PATH_MAX];
@@ -712,13 +807,13 @@ class ModuleEmbedderData {
public:
explicit ModuleEmbedderData(Isolate* isolate)
- : module_to_directory_map(10, ModuleGlobalHash(isolate)) {}
+ : module_to_specifier_map(10, ModuleGlobalHash(isolate)) {}
// Map from normalized module specifier to Module.
std::unordered_map<std::string, Global<Module>> specifier_to_module_map;
- // Map from Module to the directory that Module was loaded from.
+ // Map from Module to its URL as defined in the ScriptOrigin
std::unordered_map<Global<Module>, std::string, ModuleGlobalHash>
- module_to_directory_map;
+ module_to_specifier_map;
};
enum {
@@ -746,11 +841,11 @@ MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
Local<Module> referrer) {
Isolate* isolate = context->GetIsolate();
ModuleEmbedderData* d = GetModuleDataFromContext(context);
- auto dir_name_it =
- d->module_to_directory_map.find(Global<Module>(isolate, referrer));
- CHECK(dir_name_it != d->module_to_directory_map.end());
- std::string absolute_path =
- NormalizePath(ToSTLString(isolate, specifier), dir_name_it->second);
+ auto specifier_it =
+ d->module_to_specifier_map.find(Global<Module>(isolate, referrer));
+ CHECK(specifier_it != d->module_to_specifier_map.end());
+ std::string absolute_path = NormalizePath(ToSTLString(isolate, specifier),
+ DirName(specifier_it->second));
auto module_it = d->specifier_to_module_map.find(absolute_path);
CHECK(module_it != d->specifier_to_module_map.end());
return module_it->second.Get(isolate);
@@ -783,11 +878,11 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
CHECK(d->specifier_to_module_map
.insert(std::make_pair(file_name, Global<Module>(isolate, module)))
.second);
+ CHECK(d->module_to_specifier_map
+ .insert(std::make_pair(Global<Module>(isolate, module), file_name))
+ .second);
std::string dir_name = DirName(file_name);
- CHECK(d->module_to_directory_map
- .insert(std::make_pair(Global<Module>(isolate, module), dir_name))
- .second);
for (int i = 0, length = module->GetModuleRequestsLength(); i < length; ++i) {
Local<String> name = module->GetModuleRequest(i);
@@ -842,6 +937,26 @@ MaybeLocal<Promise> Shell::HostImportModuleDynamically(
return MaybeLocal<Promise>();
}
+void Shell::HostInitializeImportMetaObject(Local<Context> context,
+ Local<Module> module,
+ Local<Object> meta) {
+ Isolate* isolate = context->GetIsolate();
+ HandleScope handle_scope(isolate);
+
+ ModuleEmbedderData* d = GetModuleDataFromContext(context);
+ auto specifier_it =
+ d->module_to_specifier_map.find(Global<Module>(isolate, module));
+ CHECK(specifier_it != d->module_to_specifier_map.end());
+
+ Local<String> url_key =
+ String::NewFromUtf8(isolate, "url", NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<String> url = String::NewFromUtf8(isolate, specifier_it->second.c_str(),
+ NewStringType::kNormal)
+ .ToLocalChecked();
+ meta->CreateDataProperty(context, url_key, url).ToChecked();
+}
+
void Shell::DoHostImportModuleDynamically(void* import_data) {
std::unique_ptr<DynamicImportData> import_data_(
static_cast<DynamicImportData*>(import_data));
@@ -1059,7 +1174,7 @@ MaybeLocal<Context> Shell::CreateRealm(
}
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Local<Context> context =
- Context::New(isolate, NULL, global_template, global_object);
+ Context::New(isolate, nullptr, global_template, global_object);
DCHECK(!try_catch.HasCaught());
if (context.IsEmpty()) return MaybeLocal<Context>();
InitializeModuleEmbedderData(context);
@@ -1240,7 +1355,7 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args.GetIsolate(), args[0]);
- if (*file == NULL) {
+ if (*file == nullptr) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
@@ -1270,9 +1385,9 @@ Local<String> Shell::ReadFromStdin(Isolate* isolate) {
// Continue reading if the line ends with an escape '\\' or the line has
// not been fully read into the buffer yet (does not end with '\n').
// If fgets gets an error, just give up.
- char* input = NULL;
+ char* input = nullptr;
input = fgets(buffer, kBufferSize, stdin);
- if (input == NULL) return Local<String>();
+ if (input == nullptr) return Local<String>();
length = static_cast<int>(strlen(buffer));
if (length == 0) {
return accumulator;
@@ -1301,7 +1416,7 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
String::Utf8Value file(args.GetIsolate(), args[i]);
- if (*file == NULL) {
+ if (*file == nullptr) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
@@ -1310,11 +1425,11 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
- if (!ExecuteString(
- args.GetIsolate(), source,
- String::NewFromUtf8(args.GetIsolate(), *file,
- NewStringType::kNormal).ToLocalChecked(),
- false, true)) {
+ if (!ExecuteString(args.GetIsolate(), source,
+ String::NewFromUtf8(args.GetIsolate(), *file,
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ false, !options.quiet_load)) {
Throw(args.GetIsolate(), "Error executing file");
return;
}
@@ -1350,10 +1465,10 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- // Initialize the embedder field to NULL; if we return early without
+ // Initialize the embedder field to nullptr; if we return early without
// creating a new Worker (because the main thread is terminating) we can
// early-out from the instance calls.
- args.Holder()->SetAlignedPointerInInternalField(0, NULL);
+ args.Holder()->SetAlignedPointerInInternalField(0, nullptr);
if (!allow_new_workers_) return;
@@ -1545,17 +1660,17 @@ CounterCollection::CounterCollection() {
Counter* CounterCollection::GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
+ if (counters_in_use_ == kMaxCounters) return nullptr;
return &counters_[counters_in_use_++];
}
void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
counters_file_ = base::OS::MemoryMappedFile::create(
- name, nullptr, sizeof(CounterCollection), &local_counters_);
- void* memory = (counters_file_ == NULL) ?
- NULL : counters_file_->memory();
- if (memory == NULL) {
+ name, sizeof(CounterCollection), &local_counters_);
+ void* memory =
+ (counters_file_ == nullptr) ? nullptr : counters_file_->memory();
+ if (memory == nullptr) {
printf("Could not map counters file %s\n", name);
Exit(1);
}
@@ -1580,9 +1695,9 @@ int CounterMap::Hash(const char* name) {
Counter* Shell::GetCounter(const char* name, bool is_histogram) {
Counter* counter = counter_map_->Lookup(name);
- if (counter == NULL) {
+ if (counter == nullptr) {
counter = counters_->GetNextCounter();
- if (counter != NULL) {
+ if (counter != nullptr) {
counter_map_->Set(name, counter);
counter->Bind(name, is_histogram);
}
@@ -1596,10 +1711,10 @@ Counter* Shell::GetCounter(const char* name, bool is_histogram) {
int* Shell::LookupCounter(const char* name) {
Counter* counter = GetCounter(name, false);
- if (counter != NULL) {
+ if (counter != nullptr) {
return counter->ptr();
} else {
- return NULL;
+ return nullptr;
}
}
@@ -1871,7 +1986,7 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
// Initialize the global objects
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
- Local<Context> context = Context::New(isolate, NULL, global_template);
+ Local<Context> context = Context::New(isolate, nullptr, global_template);
DCHECK(!context.IsEmpty());
InitializeModuleEmbedderData(context);
Context::Scope scope(context);
@@ -2082,17 +2197,17 @@ static FILE* FOpen(const char* path, const char* mode) {
if (fopen_s(&result, path, mode) == 0) {
return result;
} else {
- return NULL;
+ return nullptr;
}
#else
FILE* file = fopen(path, mode);
- if (file == NULL) return NULL;
+ if (file == nullptr) return nullptr;
struct stat file_stat;
- if (fstat(fileno(file), &file_stat) != 0) return NULL;
+ if (fstat(fileno(file), &file_stat) != 0) return nullptr;
bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
if (is_regular_file) return file;
fclose(file);
- return NULL;
+ return nullptr;
#endif
}
@@ -2102,7 +2217,7 @@ static char* ReadChars(const char* name, int* size_out) {
}
FILE* file = FOpen(name, "rb");
- if (file == NULL) return NULL;
+ if (file == nullptr) return nullptr;
fseek(file, 0, SEEK_END);
size_t size = ftell(file);
@@ -2144,18 +2259,19 @@ static void ReadBufferWeakCallback(
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
- DCHECK(sizeof(char) == sizeof(uint8_t)); // NOLINT
+ static_assert(sizeof(char) == sizeof(uint8_t),
+ "char and uint8_t should both have 1 byte");
Isolate* isolate = args.GetIsolate();
String::Utf8Value filename(isolate, args[0]);
int length;
- if (*filename == NULL) {
+ if (*filename == nullptr) {
Throw(isolate, "Error loading file");
return;
}
DataAndPersistent* data = new DataAndPersistent;
data->data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
- if (data->data == NULL) {
+ if (data->data == nullptr) {
delete data;
Throw(isolate, "Error reading file");
return;
@@ -2165,7 +2281,6 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
data->handle.Reset(isolate, buffer);
data->handle.SetWeak(data, ReadBufferWeakCallback,
v8::WeakCallbackType::kParameter);
- data->handle.MarkIndependent();
isolate->AdjustAmountOfExternalAllocatedMemory(length);
args.GetReturnValue().Set(buffer);
@@ -2175,7 +2290,7 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
char* chars = ReadChars(name, &size);
- if (chars == NULL) return Local<String>();
+ if (chars == nullptr) return Local<String>();
Local<String> result;
if (i::FLAG_use_external_strings && internal::String::IsAscii(chars, size)) {
String::ExternalOneByteStringResource* resource =
@@ -2237,7 +2352,7 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
void Send(const v8_inspector::StringView& string) {
v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate_);
int length = static_cast<int>(string.length());
- DCHECK(length < v8::String::kMaxLength);
+ DCHECK_LT(length, v8::String::kMaxLength);
Local<String> message =
(string.is8Bit()
? v8::String::NewFromOneByte(
@@ -2350,7 +2465,7 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
SourceGroup::~SourceGroup() {
delete thread_;
- thread_ = NULL;
+ thread_ = nullptr;
}
@@ -2413,14 +2528,8 @@ Local<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
return Shell::ReadFile(isolate, name);
}
-
-base::Thread::Options SourceGroup::GetThreadOptions() {
- // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
- // which is not enough to parse the big literal expressions used in tests.
- // The stack size should be at least StackGuard::kLimitSize + some
- // OS-specific padding for thread startup code. 2Mbytes seems to be enough.
- return base::Thread::Options("IsolateThread", 2 * MB);
-}
+SourceGroup::IsolateThread::IsolateThread(SourceGroup* group)
+ : base::Thread(GetThreadOptions("IsolateThread")), group_(group) {}
void SourceGroup::ExecuteInThread() {
Isolate::CreateParams create_params;
@@ -2428,6 +2537,8 @@ void SourceGroup::ExecuteInThread() {
Isolate* isolate = Isolate::New(create_params);
isolate->SetHostImportModuleDynamicallyCallback(
Shell::HostImportModuleDynamically);
+ isolate->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
Shell::EnsureEventLoopInitialized(isolate);
D8Console console(isolate);
@@ -2460,7 +2571,7 @@ void SourceGroup::ExecuteInThread() {
void SourceGroup::StartExecuteInThread() {
- if (thread_ == NULL) {
+ if (thread_ == nullptr) {
thread_ = new IsolateThread(this);
thread_->Start();
}
@@ -2469,13 +2580,13 @@ void SourceGroup::StartExecuteInThread() {
void SourceGroup::WaitForThread() {
- if (thread_ == NULL) return;
+ if (thread_ == nullptr) return;
done_semaphore_.Wait();
}
void SourceGroup::JoinThread() {
- if (thread_ == NULL) return;
+ if (thread_ == nullptr) return;
thread_->Join();
}
@@ -2510,20 +2621,18 @@ void SerializationDataQueue::Clear() {
data_.clear();
}
-
Worker::Worker()
: in_semaphore_(0),
out_semaphore_(0),
- thread_(NULL),
- script_(NULL),
+ thread_(nullptr),
+ script_(nullptr),
running_(false) {}
-
Worker::~Worker() {
delete thread_;
- thread_ = NULL;
+ thread_ = nullptr;
delete[] script_;
- script_ = NULL;
+ script_ = nullptr;
in_queue_.Clear();
out_queue_.Clear();
}
@@ -2555,9 +2664,9 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
void Worker::Terminate() {
base::Relaxed_Store(&running_, false);
- // Post NULL to wake the Worker thread message loop, and tell it to stop
+ // Post nullptr to wake the Worker thread message loop, and tell it to stop
// running.
- PostMessage(NULL);
+ PostMessage(nullptr);
}
@@ -2573,6 +2682,8 @@ void Worker::ExecuteInThread() {
Isolate* isolate = Isolate::New(create_params);
isolate->SetHostImportModuleDynamicallyCallback(
Shell::HostImportModuleDynamically);
+ isolate->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
D8Console console(isolate);
debug::SetConsoleDelegate(isolate, &console);
{
@@ -2642,8 +2753,8 @@ void Worker::ExecuteInThread() {
}
isolate->Dispose();
- // Post NULL to wake the thread waiting on GetMessage() if there is one.
- out_queue_.Enqueue(NULL);
+ // Post nullptr to wake the thread waiting on GetMessage() if there is one.
+ out_queue_.Enqueue(nullptr);
out_semaphore_.Signal();
}
@@ -2681,17 +2792,24 @@ bool Shell::SetOptions(int argc, char* argv[]) {
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i], "--stress-opt") == 0) {
options.stress_opt = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--nostress-opt") == 0 ||
strcmp(argv[i], "--no-stress-opt") == 0) {
options.stress_opt = false;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
options.stress_deopt = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--stress-background-compile") == 0) {
+ options.stress_background_compile = true;
+ argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--nostress-background-compile") == 0 ||
+ strcmp(argv[i], "--no-stress-background-compile") == 0) {
+ options.stress_background_compile = false;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
options.mock_arraybuffer_allocator = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--noalways-opt") == 0 ||
strcmp(argv[i], "--no-always-opt") == 0) {
// No support for stressing if we can't use --always-opt.
@@ -2699,28 +2817,28 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.stress_deopt = false;
} else if (strcmp(argv[i], "--logfile-per-isolate") == 0) {
logfile_per_isolate = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--shell") == 0) {
options.interactive_shell = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--test") == 0) {
options.test_shell = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--notest") == 0 ||
strcmp(argv[i], "--no-test") == 0) {
options.test_shell = false;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--send-idle-notification") == 0) {
options.send_idle_notification = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--invoke-weak-callbacks") == 0) {
options.invoke_weak_callbacks = true;
// TODO(jochen) See issue 3351
options.send_idle_notification = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--omit-quit") == 0) {
options.omit_quit = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@@ -2729,17 +2847,17 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.num_isolates++;
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
- argv[i] = NULL;
+ argv[i] = nullptr;
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
} else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
options.natives_blob = argv[i] + 15;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--snapshot_blob=", 16) == 0) {
options.snapshot_blob = argv[i] + 16;
- argv[i] = NULL;
+ argv[i] = nullptr;
#endif // V8_USE_EXTERNAL_STARTUP_DATA
} else if (strcmp(argv[i], "--cache") == 0 ||
strncmp(argv[i], "--cache=", 8) == 0) {
@@ -2754,39 +2872,36 @@ bool Shell::SetOptions(int argc, char* argv[]) {
printf("Unknown option to --cache.\n");
return false;
}
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--enable-tracing") == 0) {
options.trace_enabled = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--trace-config=", 15) == 0) {
options.trace_config = argv[i] + 15;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--enable-inspector") == 0) {
options.enable_inspector = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--lcov=", 7) == 0) {
options.lcov_file = argv[i] + 7;
- argv[i] = NULL;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--disable-in-process-stack-traces") == 0) {
options.disable_in_process_stack_traces = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
#ifdef V8_OS_POSIX
} else if (strncmp(argv[i], "--read-from-tcp-port=", 21) == 0) {
options.read_from_tcp_port = atoi(argv[i] + 21);
- argv[i] = NULL;
+ argv[i] = nullptr;
#endif // V8_OS_POSIX
} else if (strcmp(argv[i], "--enable-os-system") == 0) {
options.enable_os_system = true;
- argv[i] = NULL;
+ argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--quiet-load") == 0) {
+ options.quiet_load = true;
+ argv[i] = nullptr;
}
}
-// On x64 Linux we want to enable the Wasm trap handler by default. Setting
-// the flag here allows the command line argument to still override it.
-#if V8_OS_LINUX && V8_TARGET_ARCH_X64
- SetFlagsFromString("--wasm-trap-handler");
-#endif
-
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
// Set up isolated source groups.
@@ -2891,7 +3006,7 @@ void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
}
namespace {
-void ProcessMessages(Isolate* isolate,
+bool ProcessMessages(Isolate* isolate,
std::function<platform::MessageLoopBehavior()> behavior) {
Platform* platform = GetDefaultPlatform();
while (true) {
@@ -2913,9 +3028,10 @@ void ProcessMessages(Isolate* isolate,
Context::Scope context_scope(context);
if (callback->Call(context, Undefined(isolate), 0, nullptr).IsEmpty()) {
Shell::ReportException(isolate, &try_catch);
- return;
+ return false;
}
}
+ return true;
}
} // anonymous namespace
@@ -2929,9 +3045,9 @@ void Shell::CompleteMessageLoop(Isolate* isolate) {
});
}
-void Shell::EmptyMessageQueues(Isolate* isolate) {
- ProcessMessages(isolate,
- []() { return platform::MessageLoopBehavior::kDoNotWait; });
+bool Shell::EmptyMessageQueues(Isolate* isolate) {
+ return ProcessMessages(
+ isolate, []() { return platform::MessageLoopBehavior::kDoNotWait; });
}
class Serializer : public ValueSerializer::Delegate {
@@ -2976,7 +3092,7 @@ class Serializer : public ValueSerializer::Delegate {
Maybe<uint32_t> GetSharedArrayBufferId(
Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer) override {
- DCHECK(data_ != nullptr);
+ DCHECK_NOT_NULL(data_);
for (size_t index = 0; index < shared_array_buffers_.size(); ++index) {
if (shared_array_buffers_[index] == shared_array_buffer) {
return Just<uint32_t>(static_cast<uint32_t>(index));
@@ -3187,25 +3303,26 @@ int Shell::Main(int argc, char* argv[]) {
? v8::platform::InProcessStackDumping::kDisabled
: v8::platform::InProcessStackDumping::kEnabled;
- platform::tracing::TracingController* tracing_controller = nullptr;
+ std::unique_ptr<platform::tracing::TracingController> tracing;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
+ tracing = base::make_unique<platform::tracing::TracingController>();
trace_file.open("v8_trace.json");
- tracing_controller = new platform::tracing::TracingController();
platform::tracing::TraceBuffer* trace_buffer =
platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
platform::tracing::TraceBuffer::kRingBufferChunks,
platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
- tracing_controller->Initialize(trace_buffer);
+ tracing->Initialize(trace_buffer);
}
- g_platform = v8::platform::CreateDefaultPlatform(
+ platform::tracing::TracingController* tracing_controller = tracing.get();
+ g_platform = v8::platform::NewDefaultPlatform(
0, v8::platform::IdleTaskSupport::kEnabled, in_process_stack_dumping,
- tracing_controller);
+ std::move(tracing));
if (i::FLAG_verify_predictable) {
- g_platform = new PredictablePlatform(std::unique_ptr<Platform>(g_platform));
+ g_platform.reset(new PredictablePlatform(std::move(g_platform)));
}
- v8::V8::InitializePlatform(g_platform);
+ v8::V8::InitializePlatform(g_platform.get());
v8::V8::Initialize();
if (options.natives_blob || options.snapshot_blob) {
v8::V8::InitializeExternalStartupData(options.natives_blob,
@@ -3249,6 +3366,8 @@ int Shell::Main(int argc, char* argv[]) {
Isolate* isolate = Isolate::New(create_params);
isolate->SetHostImportModuleDynamicallyCallback(
Shell::HostImportModuleDynamically);
+ isolate->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
D8Console console(isolate);
{
@@ -3318,7 +3437,6 @@ int Shell::Main(int argc, char* argv[]) {
OnExit(isolate);
V8::Dispose();
V8::ShutdownPlatform();
- delete g_platform;
return result;
}
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 53c498b57d..c699d91d68 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -63,13 +63,13 @@ class CounterMap {
Counter* Lookup(const char* name) {
base::HashMap::Entry* answer =
hash_map_.Lookup(const_cast<char*>(name), Hash(name));
- if (!answer) return NULL;
+ if (!answer) return nullptr;
return reinterpret_cast<Counter*>(answer->value);
}
void Set(const char* name, Counter* value) {
base::HashMap::Entry* answer =
hash_map_.LookupOrInsert(const_cast<char*>(name), Hash(name));
- DCHECK(answer != NULL);
+ DCHECK_NOT_NULL(answer);
answer->value = value;
}
class Iterator {
@@ -77,7 +77,7 @@ class CounterMap {
explicit Iterator(CounterMap* map)
: map_(&map->hash_map_), entry_(map_->Start()) { }
void Next() { entry_ = map_->Next(entry_); }
- bool More() { return entry_ != NULL; }
+ bool More() { return entry_ != nullptr; }
const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
private:
@@ -94,13 +94,13 @@ class CounterMap {
class SourceGroup {
public:
- SourceGroup() :
- next_semaphore_(0),
- done_semaphore_(0),
- thread_(NULL),
- argv_(NULL),
- begin_offset_(0),
- end_offset_(0) {}
+ SourceGroup()
+ : next_semaphore_(0),
+ done_semaphore_(0),
+ thread_(nullptr),
+ argv_(nullptr),
+ begin_offset_(0),
+ end_offset_(0) {}
~SourceGroup();
@@ -120,8 +120,7 @@ class SourceGroup {
private:
class IsolateThread : public base::Thread {
public:
- explicit IsolateThread(SourceGroup* group)
- : base::Thread(GetThreadOptions()), group_(group) {}
+ explicit IsolateThread(SourceGroup* group);
virtual void Run() {
group_->ExecuteInThread();
@@ -131,7 +130,6 @@ class SourceGroup {
SourceGroup* group_;
};
- static base::Thread::Options GetThreadOptions();
void ExecuteInThread();
base::Semaphore next_semaphore_;
@@ -297,13 +295,14 @@ class ShellOptions {
enable_inspector(false),
num_isolates(1),
compile_options(v8::ScriptCompiler::kNoCompileOptions),
- isolate_sources(NULL),
- icu_data_file(NULL),
- natives_blob(NULL),
- snapshot_blob(NULL),
+ stress_background_compile(false),
+ isolate_sources(nullptr),
+ icu_data_file(nullptr),
+ natives_blob(nullptr),
+ snapshot_blob(nullptr),
trace_enabled(false),
- trace_config(NULL),
- lcov_file(NULL),
+ trace_config(nullptr),
+ lcov_file(nullptr),
disable_in_process_stack_traces(false),
read_from_tcp_port(-1) {}
@@ -329,6 +328,7 @@ class ShellOptions {
bool enable_inspector;
int num_isolates;
v8::ScriptCompiler::CompileOptions compile_options;
+ bool stress_background_compile;
SourceGroup* isolate_sources;
const char* icu_data_file;
const char* natives_blob;
@@ -339,6 +339,7 @@ class ShellOptions {
bool disable_in_process_stack_traces;
int read_from_tcp_port;
bool enable_os_system = false;
+ bool quiet_load = false;
};
class Shell : public i::AllStatic {
@@ -358,7 +359,7 @@ class Shell : public i::AllStatic {
static void Exit(int exit_code);
static void OnExit(Isolate* isolate);
static void CollectGarbage(Isolate* isolate);
- static void EmptyMessageQueues(Isolate* isolate);
+ static bool EmptyMessageQueues(Isolate* isolate);
static void EnsureEventLoopInitialized(Isolate* isolate);
static void CompleteMessageLoop(Isolate* isolate);
@@ -450,6 +451,9 @@ class Shell : public i::AllStatic {
static MaybeLocal<Promise> HostImportModuleDynamically(
Local<Context> context, Local<ScriptOrModule> referrer,
Local<String> specifier);
+ static void HostInitializeImportMetaObject(Local<Context> context,
+ Local<Module> module,
+ Local<Object> meta);
// Data is of type DynamicImportData*. We use void* here to be able
// to conform with MicrotaskCallback interface and enqueue this
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 40a96e0190..a402706ec5 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -109,7 +109,7 @@ void DateCache::YearMonthDayFromDays(
bool is_leap = (!yd1 || yd2) && !yd3;
- DCHECK(days >= -1);
+ DCHECK_GE(days, -1);
DCHECK(is_leap || (days >= 0));
DCHECK((days < 365) || (is_leap && (days < 366)));
DCHECK(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
@@ -162,8 +162,8 @@ int DateCache::DaysFromYearMonth(int year, int month) {
month += 12;
}
- DCHECK(month >= 0);
- DCHECK(month < 12);
+ DCHECK_GE(month, 0);
+ DCHECK_LT(month, 12);
// year_delta is an arbitrary number such that:
// a) year_delta = -1 (mod 400)
@@ -337,17 +337,17 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
void DateCache::ProbeDST(int time_sec) {
- DST* before = NULL;
- DST* after = NULL;
+ DST* before = nullptr;
+ DST* after = nullptr;
DCHECK(before_ != after_);
for (int i = 0; i < kDSTSize; ++i) {
if (dst_[i].start_sec <= time_sec) {
- if (before == NULL || before->start_sec < dst_[i].start_sec) {
+ if (before == nullptr || before->start_sec < dst_[i].start_sec) {
before = &dst_[i];
}
} else if (time_sec < dst_[i].end_sec) {
- if (after == NULL || after->end_sec > dst_[i].end_sec) {
+ if (after == nullptr || after->end_sec > dst_[i].end_sec) {
after = &dst_[i];
}
}
@@ -355,16 +355,16 @@ void DateCache::ProbeDST(int time_sec) {
// If before or after segments were not found,
// then set them to any invalid segment.
- if (before == NULL) {
+ if (before == nullptr) {
before = InvalidSegment(before_) ? before_ : LeastRecentlyUsedDST(after);
}
- if (after == NULL) {
+ if (after == nullptr) {
after = InvalidSegment(after_) && before != after_
? after_ : LeastRecentlyUsedDST(before);
}
- DCHECK(before != NULL);
- DCHECK(after != NULL);
+ DCHECK_NOT_NULL(before);
+ DCHECK_NOT_NULL(after);
DCHECK(before != after);
DCHECK(InvalidSegment(before) || before->start_sec <= time_sec);
DCHECK(InvalidSegment(after) || time_sec < after->start_sec);
@@ -377,10 +377,10 @@ void DateCache::ProbeDST(int time_sec) {
DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
- DST* result = NULL;
+ DST* result = nullptr;
for (int i = 0; i < kDSTSize; ++i) {
if (&dst_[i] == skip) continue;
- if (result == NULL || result->last_used > dst_[i].last_used) {
+ if (result == nullptr || result->last_used > dst_[i].last_used) {
result = &dst_[i];
}
}
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index d4e12f8bf2..b8a9263d32 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -43,7 +43,7 @@ class DateCache {
virtual ~DateCache() {
delete tz_cache_;
- tz_cache_ = NULL;
+ tz_cache_ = nullptr;
}
@@ -212,7 +212,7 @@ class DateCache {
virtual int GetLocalOffsetFromOS() {
double offset = tz_cache_->LocalTimeOffset();
- DCHECK(offset < kInvalidLocalOffsetInMs);
+ DCHECK_LT(offset, kInvalidLocalOffsetInMs);
return static_cast<int>(offset);
}
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index fd4bed2df6..a20e8393ce 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -198,7 +198,7 @@ DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
if (in_->Skip('.')) return DateToken::Symbol('.');
if (in_->Skip(')')) return DateToken::Symbol(')');
if (in_->IsAsciiAlphaOrAbove()) {
- DCHECK(KeywordTable::kPrefixLength == 3);
+ DCHECK_EQ(KeywordTable::kPrefixLength, 3);
uint32_t buffer[3] = {0, 0, 0};
int length = in_->ReadWord(buffer, 3);
int index = KeywordTable::Lookup(buffer, length);
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index d096a7ec9f..b1807f27b6 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -190,7 +190,7 @@ int DateParser::ReadMilliseconds(DateToken token) {
// most significant digits.
int factor = 1;
do {
- DCHECK(factor <= 100000000); // factor won't overflow.
+ DCHECK_LE(factor, 100000000); // factor won't overflow.
factor *= 10;
length--;
} while (length > 3);
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 81f1e952bc..26899f9114 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,9 +1,9 @@
set noparent
bmeurer@chromium.org
+franzih@chromium.org
jgruber@chromium.org
mvstanton@chromium.org
-ulan@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 0afcde6252..ca00606247 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -7,9 +7,9 @@
#include "src/debug/debug.h"
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index edf037e3ad..47280bfbc9 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -7,7 +7,6 @@
#include "src/debug/debug.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
@@ -58,4 +57,6 @@ const bool LiveEdit::kFrameDropperSupported = true;
} // namespace internal
} // namespace v8
+#undef __
+
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 8b87286d29..d53a6fdc4e 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -382,11 +382,12 @@ std::unique_ptr<Coverage> Coverage::CollectPrecise(Isolate* isolate) {
DCHECK(!isolate->is_best_effort_code_coverage());
std::unique_ptr<Coverage> result =
Collect(isolate, isolate->code_coverage_mode());
- if (isolate->is_precise_binary_code_coverage() ||
- isolate->is_block_binary_code_coverage()) {
+ if (!isolate->is_collecting_type_profile() &&
+ (isolate->is_precise_binary_code_coverage() ||
+ isolate->is_block_binary_code_coverage())) {
// We do not have to hold onto feedback vectors for invocations we already
// reported. So we can reset the list.
- isolate->SetCodeCoverageList(*ArrayList::New(isolate, 0));
+ isolate->SetFeedbackVectorsForProfilingTools(*ArrayList::New(isolate, 0));
}
return result;
}
@@ -407,9 +408,11 @@ std::unique_ptr<Coverage> Coverage::Collect(
case v8::debug::Coverage::kPreciseBinary:
case v8::debug::Coverage::kPreciseCount: {
// Feedback vectors are already listed to prevent losing them to GC.
- DCHECK(isolate->factory()->code_coverage_list()->IsArrayList());
- Handle<ArrayList> list =
- Handle<ArrayList>::cast(isolate->factory()->code_coverage_list());
+ DCHECK(isolate->factory()
+ ->feedback_vectors_for_profiling_tools()
+ ->IsArrayList());
+ Handle<ArrayList> list = Handle<ArrayList>::cast(
+ isolate->factory()->feedback_vectors_for_profiling_tools());
for (int i = 0; i < list->Length(); i++) {
FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
SharedFunctionInfo* shared = vector->shared_function_info();
@@ -421,7 +424,9 @@ std::unique_ptr<Coverage> Coverage::Collect(
break;
}
case v8::debug::Coverage::kBestEffort: {
- DCHECK(!isolate->factory()->code_coverage_list()->IsArrayList());
+ DCHECK(!isolate->factory()
+ ->feedback_vectors_for_profiling_tools()
+ ->IsArrayList());
DCHECK_EQ(v8::debug::Coverage::kBestEffort, collectionMode);
HeapIterator heap_iterator(isolate->heap());
while (HeapObject* current_obj = heap_iterator.next()) {
@@ -520,7 +525,10 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
// following coverage recording (without reloads) will be at function
// granularity.
isolate->debug()->RemoveAllCoverageInfos();
- isolate->SetCodeCoverageList(isolate->heap()->undefined_value());
+ if (!isolate->is_collecting_type_profile()) {
+ isolate->SetFeedbackVectorsForProfilingTools(
+ isolate->heap()->undefined_value());
+ }
break;
case debug::Coverage::kBlockBinary:
case debug::Coverage::kBlockCount:
@@ -530,29 +538,11 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
// Remove all optimized function. Optimized and inlined functions do not
// increment invocation count.
Deoptimizer::DeoptimizeAll(isolate);
- // Collect existing feedback vectors.
- std::vector<Handle<FeedbackVector>> vectors;
- {
- HeapIterator heap_iterator(isolate->heap());
- while (HeapObject* current_obj = heap_iterator.next()) {
- if (current_obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(current_obj);
- shared->set_has_reported_binary_coverage(false);
- } else if (current_obj->IsFeedbackVector()) {
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* shared = vector->shared_function_info();
- if (!shared->IsSubjectToDebugging()) continue;
- vector->clear_invocation_count();
- vectors.emplace_back(vector, isolate);
- }
- }
+ if (isolate->factory()
+ ->feedback_vectors_for_profiling_tools()
+ ->IsUndefined(isolate)) {
+ isolate->InitializeVectorListFromHeap();
}
- // Add collected feedback vectors to the root list lest we lose them to
- // GC.
- Handle<ArrayList> list =
- ArrayList::New(isolate, static_cast<int>(vectors.size()));
- for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
- isolate->SetCodeCoverageList(*list);
break;
}
}
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 3c89809356..b6e3f14ed1 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -32,10 +32,10 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
SaveContext* top = &save;
- while (top != NULL && IsDebugContext(isolate, *top->context())) {
+ while (top != nullptr && IsDebugContext(isolate, *top->context())) {
top = top->prev();
}
- if (top != NULL) isolate->set_context(*top->context());
+ if (top != nullptr) isolate->set_context(*top->context());
// Get the native context now set to the top context from before the
// debugger was invoked.
@@ -92,17 +92,18 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
Handle<JSFunction> eval_fun;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, eval_fun,
- Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
- NO_PARSE_RESTRICTION, kNoSourcePosition,
- kNoSourcePosition, kNoSourcePosition),
+ Compiler::GetFunctionFromEval(source, outer_info, context,
+ LanguageMode::kSloppy, NO_PARSE_RESTRICTION,
+ kNoSourcePosition, kNoSourcePosition,
+ kNoSourcePosition),
Object);
Handle<Object> result;
{
NoSideEffectScope no_side_effect(isolate, throw_on_side_effect);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
- Object);
+ isolate, result,
+ Execution::Call(isolate, eval_fun, receiver, 0, nullptr), Object);
}
// Skip the global proxy as it has no properties and always delegates to the
@@ -158,8 +159,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
Handle<StringSet> non_locals = it.GetNonLocals();
MaterializeReceiver(materialized, local_context, local_function,
non_locals);
- frame_inspector.MaterializeStackLocals(materialized, local_function,
- true);
+ MaterializeStackLocals(materialized, local_function, &frame_inspector);
ContextChainElement context_chain_element;
context_chain_element.scope_info = it.CurrentScopeInfo();
context_chain_element.materialized_object = materialized;
@@ -241,97 +241,132 @@ void DebugEvaluate::ContextBuilder::MaterializeReceiver(
JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
}
+void DebugEvaluate::ContextBuilder::MaterializeStackLocals(
+ Handle<JSObject> target, Handle<JSFunction> function,
+ FrameInspector* frame_inspector) {
+ bool materialize_arguments_object = true;
+
+ // Do not materialize the arguments object for eval or top-level code.
+ if (function->shared()->is_toplevel()) materialize_arguments_object = false;
+
+ // First materialize stack locals (modulo arguments object).
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+ frame_inspector->MaterializeStackLocals(target, scope_info,
+ materialize_arguments_object);
+
+ // Then materialize the arguments object.
+ if (materialize_arguments_object) {
+ // Skip if "arguments" is already taken and wasn't optimized out (which
+ // causes {MaterializeStackLocals} above to skip the local variable).
+ Handle<String> arguments_str = isolate_->factory()->arguments_string();
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(target, arguments_str);
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) return;
+
+ // FunctionGetArguments can't throw an exception.
+ Handle<JSObject> arguments =
+ Accessors::FunctionGetArguments(frame_, inlined_jsframe_index_);
+ JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
+ NONE)
+ .Check();
+ }
+}
+
namespace {
bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Use macro to include both inlined and non-inlined version of an intrinsic.
-#define INTRINSIC_WHITELIST(V) \
- /* Conversions */ \
- V(ToInteger) \
- V(ToObject) \
- V(ToString) \
- V(ToLength) \
- V(ToNumber) \
- V(NumberToString) \
- /* Type checks */ \
- V(IsJSReceiver) \
- V(IsSmi) \
- V(IsArray) \
- V(IsFunction) \
- V(IsDate) \
- V(IsJSProxy) \
- V(IsJSMap) \
- V(IsJSSet) \
- V(IsJSWeakMap) \
- V(IsJSWeakSet) \
- V(IsRegExp) \
- V(IsTypedArray) \
- V(ClassOf) \
- /* Loads */ \
- V(LoadLookupSlotForCall) \
- /* Arrays */ \
- V(ArraySpeciesConstructor) \
- V(NormalizeElements) \
- V(GetArrayKeys) \
- V(HasComplexElements) \
- V(EstimateNumberOfElements) \
- /* Errors */ \
- V(ReThrow) \
- V(ThrowReferenceError) \
- V(ThrowSymbolIteratorInvalid) \
- V(ThrowIteratorResultNotAnObject) \
- V(NewTypeError) \
- V(ThrowInvalidStringLength) \
- /* Strings */ \
- V(StringIndexOf) \
- V(StringIncludes) \
- V(StringReplaceOneCharWithString) \
- V(StringToNumber) \
- V(StringTrim) \
- V(SubString) \
- V(RegExpInternalReplace) \
- /* BigInts */ \
- V(BigIntEqual) \
- V(BigIntToBoolean) \
- /* Literals */ \
- V(CreateArrayLiteral) \
- V(CreateObjectLiteral) \
- V(CreateRegExpLiteral) \
- /* Collections */ \
- V(GenericHash) \
- /* Called from builtins */ \
- V(StringAdd) \
- V(StringParseFloat) \
- V(StringParseInt) \
- V(StringCharCodeAt) \
- V(StringIndexOfUnchecked) \
- V(StringEqual) \
- V(RegExpInitializeAndCompile) \
- V(SymbolDescriptiveString) \
- V(GenerateRandomNumbers) \
- V(GlobalPrint) \
- V(AllocateInNewSpace) \
- V(AllocateSeqOneByteString) \
- V(AllocateSeqTwoByteString) \
- V(ObjectCreate) \
- V(ObjectHasOwnProperty) \
- V(ArrayIndexOf) \
- V(ArrayIncludes_Slow) \
- V(ArrayIsArray) \
- V(ThrowTypeError) \
- V(ThrowCalledOnNullOrUndefined) \
- V(ThrowIncompatibleMethodReceiver) \
- V(ThrowInvalidHint) \
- V(ThrowNotDateError) \
- V(ThrowRangeError) \
- V(ToName) \
- V(GetOwnPropertyDescriptor) \
- /* Misc. */ \
- V(Call) \
- V(MaxSmi) \
- V(NewObject) \
- V(FinalizeInstanceSize) \
- V(HasInPrototypeChain) \
+#define INTRINSIC_WHITELIST(V) \
+ /* Conversions */ \
+ V(ToInteger) \
+ V(ToObject) \
+ V(ToString) \
+ V(ToLength) \
+ V(ToNumber) \
+ V(NumberToString) \
+ /* Type checks */ \
+ V(IsJSReceiver) \
+ V(IsSmi) \
+ V(IsArray) \
+ V(IsFunction) \
+ V(IsDate) \
+ V(IsJSProxy) \
+ V(IsJSMap) \
+ V(IsJSSet) \
+ V(IsJSWeakMap) \
+ V(IsJSWeakSet) \
+ V(IsRegExp) \
+ V(IsTypedArray) \
+ V(ClassOf) \
+ /* Loads */ \
+ V(LoadLookupSlotForCall) \
+ /* Arrays */ \
+ V(ArraySpeciesConstructor) \
+ V(NormalizeElements) \
+ V(GetArrayKeys) \
+ V(TrySliceSimpleNonFastElements) \
+ V(HasComplexElements) \
+ V(EstimateNumberOfElements) \
+ /* Errors */ \
+ V(ReThrow) \
+ V(ThrowReferenceError) \
+ V(ThrowSymbolIteratorInvalid) \
+ V(ThrowIteratorResultNotAnObject) \
+ V(NewTypeError) \
+ V(ThrowInvalidStringLength) \
+ /* Strings */ \
+ V(StringIndexOf) \
+ V(StringIncludes) \
+ V(StringReplaceOneCharWithString) \
+ V(StringToNumber) \
+ V(StringTrim) \
+ V(SubString) \
+ V(RegExpInternalReplace) \
+ /* BigInts */ \
+ V(BigIntEqualToBigInt) \
+ V(BigIntToBoolean) \
+ V(BigIntToNumber) \
+ /* Literals */ \
+ V(CreateArrayLiteral) \
+ V(CreateObjectLiteral) \
+ V(CreateRegExpLiteral) \
+ /* Collections */ \
+ V(GenericHash) \
+ /* Called from builtins */ \
+ V(StringAdd) \
+ V(StringParseFloat) \
+ V(StringParseInt) \
+ V(StringCharCodeAt) \
+ V(StringIndexOfUnchecked) \
+ V(StringEqual) \
+ V(RegExpInitializeAndCompile) \
+ V(SymbolDescriptiveString) \
+ V(GenerateRandomNumbers) \
+ V(GlobalPrint) \
+ V(AllocateInNewSpace) \
+ V(AllocateInTargetSpace) \
+ V(AllocateSeqOneByteString) \
+ V(AllocateSeqTwoByteString) \
+ V(ObjectCreate) \
+ V(ObjectHasOwnProperty) \
+ V(ArrayIndexOf) \
+ V(ArrayIncludes_Slow) \
+ V(ArrayIsArray) \
+ V(ThrowTypeError) \
+ V(ThrowCalledOnNullOrUndefined) \
+ V(ThrowIncompatibleMethodReceiver) \
+ V(ThrowInvalidHint) \
+ V(ThrowNotDateError) \
+ V(ThrowRangeError) \
+ V(ToName) \
+ V(GetOwnPropertyDescriptor) \
+ /* Misc. */ \
+ V(Call) \
+ V(MaxSmi) \
+ V(NewObject) \
+ V(CompleteInobjectSlackTrackingForMap) \
+ V(HasInPrototypeChain) \
V(StringMaxLength)
#define CASE(Name) \
@@ -378,6 +413,8 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kDivSmi:
case Bytecode::kMod:
case Bytecode::kModSmi:
+ case Bytecode::kExp:
+ case Bytecode::kExpSmi:
case Bytecode::kNegate:
case Bytecode::kBitwiseAnd:
case Bytecode::kBitwiseAndSmi:
@@ -608,6 +645,8 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeItalics:
case Builtins::kStringPrototypeLastIndexOf:
case Builtins::kStringPrototypeLink:
+ case Builtins::kStringPrototypePadEnd:
+ case Builtins::kStringPrototypePadStart:
case Builtins::kStringPrototypeRepeat:
case Builtins::kStringPrototypeSlice:
case Builtins::kStringPrototypeSmall:
@@ -627,6 +666,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeTrimRight:
case Builtins::kStringPrototypeValueOf:
case Builtins::kStringToNumber:
+ case Builtins::kSubString:
// Symbol builtins.
case Builtins::kSymbolConstructor:
case Builtins::kSymbolKeyFor:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 6327895d57..fbe747d024 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+class FrameInspector;
+
class DebugEvaluate : public AllStatic {
public:
static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source);
@@ -73,6 +75,10 @@ class DebugEvaluate : public AllStatic {
Handle<JSFunction> local_function,
Handle<StringSet> non_locals);
+ void MaterializeStackLocals(Handle<JSObject> target,
+ Handle<JSFunction> function,
+ FrameInspector* frame_inspector);
+
Handle<SharedFunctionInfo> outer_info_;
Handle<Context> evaluation_context_;
std::vector<ContextChainElement> context_chain_;
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index b04f8fc1bc..70f3670ee4 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -39,13 +39,6 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
// Calculate the deoptimized frame.
if (is_optimized_) {
DCHECK_NOT_NULL(js_frame);
- // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
- if (js_frame->LookupCode()->is_turbofanned() &&
- !js_frame->function()->shared()->HasBytecodeArray()) {
- is_optimized_ = false;
- return;
- }
-
deoptimized_frame_.reset(Deoptimizer::DebuggerInspectableFrame(
js_frame, inlined_frame_index, isolate));
} else if (frame_->is_wasm_interpreter_entry()) {
@@ -75,12 +68,6 @@ Handle<Object> FrameInspector::GetParameter(int index) {
}
Handle<Object> FrameInspector::GetExpression(int index) {
- // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
- if (frame_->is_java_script() &&
- javascript_frame()->LookupCode()->is_turbofanned() &&
- !javascript_frame()->function()->shared()->HasBytecodeArray()) {
- return isolate_->factory()->undefined_value();
- }
return is_optimized_ ? deoptimized_frame_->GetExpression(index)
: handle(frame_->GetExpression(index), isolate_);
}
@@ -151,33 +138,6 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
}
}
-void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
- Handle<JSFunction> function,
- bool materialize_arguments_object) {
- // Do not materialize the arguments object for eval or top-level code.
- if (function->shared()->is_toplevel()) materialize_arguments_object = false;
-
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- MaterializeStackLocals(target, scope_info, materialize_arguments_object);
-
- // Third materialize the arguments object.
- if (materialize_arguments_object) {
- // Skip if "arguments" is already taken and wasn't optimized out (which
- // causes {MaterializeStackLocals} above to skip the local variable).
- Handle<String> arguments_str = isolate_->factory()->arguments_string();
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(target, arguments_str);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) return;
-
- // FunctionGetArguments can't throw an exception.
- Handle<JSObject> arguments = Accessors::FunctionGetArguments(function);
- JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
- NONE)
- .Check();
- }
-}
-
void FrameInspector::UpdateStackLocalsFromMaterializedObject(
Handle<JSObject> target, Handle<ScopeInfo> scope_info) {
@@ -224,10 +184,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
SaveContext* DebugFrameHelper::FindSavedContextForFrame(Isolate* isolate,
StandardFrame* frame) {
SaveContext* save = isolate->save_context();
- while (save != NULL && !save->IsBelowFrame(frame)) {
+ while (save != nullptr && !save->IsBelowFrame(frame)) {
save = save->prev();
}
- DCHECK(save != NULL);
+ DCHECK(save != nullptr);
return save;
}
@@ -236,7 +196,6 @@ int DebugFrameHelper::FindIndexedNonNativeFrame(StackTraceFrameIterator* it,
int count = -1;
for (; !it->done(); it->Advance()) {
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
it->frame()->Summarize(&frames);
for (size_t i = frames.size(); i != 0; i--) {
// Omit functions from native and extension scripts.
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 96593b858d..9b669ea096 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -52,10 +52,6 @@ class FrameInspector {
Handle<ScopeInfo> scope_info,
bool materialize_arguments_object = false);
- void MaterializeStackLocals(Handle<JSObject> target,
- Handle<JSFunction> function,
- bool materialize_arguments_object = false);
-
void UpdateStackLocalsFromMaterializedObject(Handle<JSObject> object,
Handle<ScopeInfo> scope_info);
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 7063f1efe7..c8c1e76ef2 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -178,7 +178,7 @@ class DebugDelegate {
public:
virtual ~DebugDelegate() {}
virtual void PromiseEventOccurred(debug::PromiseDebugActionType type, int id,
- int parent_id, bool created_by_user) {}
+ bool is_blackboxed) {}
virtual void ScriptCompiled(v8::Local<Script> script, bool is_live_edited,
bool has_compile_error) {}
// |break_points_hit| contains installed by JS debug API breakpoint objects.
@@ -456,7 +456,7 @@ class StackTraceIterator {
virtual void Advance() = 0;
virtual int GetContextId() const = 0;
- virtual v8::Local<v8::Value> GetReceiver() const = 0;
+ virtual v8::MaybeLocal<v8::Value> GetReceiver() const = 0;
virtual v8::Local<v8::Value> GetReturnValue() const = 0;
virtual v8::Local<v8::String> GetFunctionName() const = 0;
virtual v8::Local<v8::debug::Script> GetScript() const = 0;
@@ -485,6 +485,21 @@ void QueryObjects(v8::Local<v8::Context> context,
void GlobalLexicalScopeNames(v8::Local<v8::Context> context,
v8::PersistentValueVector<v8::String>* names);
+void SetReturnValue(v8::Isolate* isolate, v8::Local<v8::Value> value);
+
+enum class NativeAccessorType {
+ None = 0,
+ HasGetter = 1 << 0,
+ HasSetter = 1 << 1,
+ IsBuiltin = 1 << 2
+};
+
+int GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ v8::Local<v8::Name> name);
+
+int64_t GetNextRandomInt64(v8::Isolate* isolate);
+
} // namespace debug
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 0fcb20a645..77654de635 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -105,7 +105,7 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
// Retrieve it from shared function info.
info->set_language_mode(shared_info->language_mode());
} else if (scope_info->scope_type() == MODULE_SCOPE) {
- info->set_module();
+ DCHECK(info->is_module());
} else {
DCHECK(scope_info->scope_type() == SCRIPT_SCOPE);
}
@@ -253,7 +253,7 @@ void ScopeIterator::Next() {
} else {
do {
if (LastNestedScopeChain().scope_info->HasContext()) {
- DCHECK(context_->previous() != NULL);
+ DCHECK(context_->previous() != nullptr);
context_ = Handle<Context>(context_->previous(), isolate_);
}
nested_scope_chain_.pop_back();
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 867436d1de..7b0c1690c7 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -29,7 +29,6 @@ DebugStackTraceIterator::DebugStackTraceIterator(Isolate* isolate, int index)
is_top_frame_(true) {
if (iterator_.done()) return;
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
iterator_.frame()->Summarize(&frames);
inlined_frame_index_ = static_cast<int>(frames.size());
Advance();
@@ -61,7 +60,6 @@ void DebugStackTraceIterator::Advance() {
iterator_.Advance();
if (iterator_.done()) break;
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
iterator_.frame()->Summarize(&frames);
inlined_frame_index_ = static_cast<int>(frames.size());
}
@@ -78,13 +76,42 @@ int DebugStackTraceIterator::GetContextId() const {
return 0;
}
-v8::Local<v8::Value> DebugStackTraceIterator::GetReceiver() const {
+v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
DCHECK(!Done());
+ if (frame_inspector_->IsJavaScript() &&
+ frame_inspector_->GetFunction()->shared()->kind() == kArrowFunction) {
+ // FrameInspector is not able to get receiver for arrow function.
+ // So let's try to fetch it using same logic as is used to retrieve 'this'
+ // during DebugEvaluate::Local.
+ Handle<JSFunction> function = frame_inspector_->GetFunction();
+ Handle<Context> context(function->context());
+ // Arrow function defined in top level function without references to
+ // variables may have NativeContext as context.
+ if (!context->IsFunctionContext()) return v8::MaybeLocal<v8::Value>();
+ ScopeIterator scope_iterator(isolate_, frame_inspector_.get(),
+ ScopeIterator::COLLECT_NON_LOCALS);
+ // We lookup this variable in function context only when it is used in arrow
+ // function otherwise V8 can optimize it out.
+ if (!scope_iterator.GetNonLocals()->Has(isolate_->factory()->this_string()))
+ return v8::MaybeLocal<v8::Value>();
+
+ Handle<ScopeInfo> scope_info(context->scope_info());
+ VariableMode mode;
+ InitializationFlag flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int slot_index = ScopeInfo::ContextSlotIndex(
+ scope_info, isolate_->factory()->this_string(), &mode, &flag,
+ &maybe_assigned_flag);
+ if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
+ Handle<Object> value = handle(context->get(slot_index), isolate_);
+ if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
+ return Utils::ToLocal(value);
+ }
Handle<Object> value = frame_inspector_->GetReceiver();
if (value.is_null() || (value->IsSmi() || !value->IsTheHole(isolate_))) {
return Utils::ToLocal(value);
}
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_));
+ return v8::MaybeLocal<v8::Value>();
}
v8::Local<v8::Value> DebugStackTraceIterator::GetReturnValue() const {
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.h b/deps/v8/src/debug/debug-stack-trace-iterator.h
index 174e400e1f..4da552ffcb 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.h
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.h
@@ -21,7 +21,7 @@ class DebugStackTraceIterator final : public debug::StackTraceIterator {
void Advance() override;
int GetContextId() const override;
- v8::Local<v8::Value> GetReceiver() const override;
+ v8::MaybeLocal<v8::Value> GetReceiver() const override;
v8::Local<v8::Value> GetReturnValue() const override;
v8::Local<v8::String> GetFunctionName() const override;
v8::Local<v8::debug::Script> GetScript() const override;
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index ef4f5ba3d7..c89849e350 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -15,19 +15,12 @@ namespace internal {
std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
std::unique_ptr<TypeProfile> result(new TypeProfile());
- // Collect existing feedback vectors.
- std::vector<Handle<FeedbackVector>> feedback_vectors;
- {
- HeapIterator heap_iterator(isolate->heap());
- while (HeapObject* current_obj = heap_iterator.next()) {
- if (current_obj->IsFeedbackVector()) {
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* shared = vector->shared_function_info();
- if (!shared->IsSubjectToDebugging()) continue;
- feedback_vectors.emplace_back(vector, isolate);
- }
- }
- }
+ // Feedback vectors are already listed to prevent losing them to GC.
+ DCHECK(isolate->factory()
+ ->feedback_vectors_for_profiling_tools()
+ ->IsArrayList());
+ Handle<ArrayList> list = Handle<ArrayList>::cast(
+ isolate->factory()->feedback_vectors_for_profiling_tools());
Script::Iterator scripts(isolate);
@@ -41,7 +34,10 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
TypeProfileScript type_profile_script(script_handle);
std::vector<TypeProfileEntry>* entries = &type_profile_script.entries;
- for (const auto& vector : feedback_vectors) {
+ // TODO(franzih): Sort the vectors by script first instead of iterating
+ // the list multiple times.
+ for (int i = 0; i < list->Length(); i++) {
+ FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
SharedFunctionInfo* info = vector->shared_function_info();
DCHECK(info->IsSubjectToDebugging());
@@ -74,28 +70,47 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
}
void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
- isolate->set_type_profile_mode(mode);
HandleScope handle_scope(isolate);
if (mode == debug::TypeProfile::Mode::kNone) {
- // Release type profile data collected so far.
- {
- HeapIterator heap_iterator(isolate->heap());
- while (HeapObject* current_obj = heap_iterator.next()) {
- if (current_obj->IsFeedbackVector()) {
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* info = vector->shared_function_info();
- if (!info->IsSubjectToDebugging() ||
- info->feedback_metadata()->is_empty() ||
- !info->feedback_metadata()->HasTypeProfileSlot())
- continue;
+ if (!isolate->factory()
+ ->feedback_vectors_for_profiling_tools()
+ ->IsUndefined(isolate)) {
+ // Release type profile data collected so far.
+
+ // Feedback vectors are already listed to prevent losing them to GC.
+ DCHECK(isolate->factory()
+ ->feedback_vectors_for_profiling_tools()
+ ->IsArrayList());
+ Handle<ArrayList> list = Handle<ArrayList>::cast(
+ isolate->factory()->feedback_vectors_for_profiling_tools());
+
+ for (int i = 0; i < list->Length(); i++) {
+ FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
+ SharedFunctionInfo* info = vector->shared_function_info();
+ DCHECK(info->IsSubjectToDebugging());
+ if (info->feedback_metadata()->HasTypeProfileSlot()) {
FeedbackSlot slot = vector->GetTypeProfileSlot();
CollectTypeProfileNexus nexus(vector, slot);
nexus.Clear();
}
}
+
+ // Delete the feedback vectors from the list if they're not used by code
+ // coverage.
+ if (isolate->is_best_effort_code_coverage()) {
+ isolate->SetFeedbackVectorsForProfilingTools(
+ isolate->heap()->undefined_value());
+ }
+ }
+ } else {
+ DCHECK_EQ(debug::TypeProfile::Mode::kCollect, mode);
+ if (isolate->factory()->feedback_vectors_for_profiling_tools()->IsUndefined(
+ isolate)) {
+ isolate->InitializeVectorListFromHeap();
}
}
+ isolate->set_type_profile_mode(mode);
}
} // namespace internal
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 1d50226e72..78cb102fa8 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -11,7 +11,6 @@
#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
@@ -46,7 +45,7 @@ Debug::Debug(Isolate* isolate)
break_on_exception_(false),
break_on_uncaught_exception_(false),
side_effect_check_failed_(false),
- debug_info_list_(NULL),
+ debug_info_list_(nullptr),
feature_tracker_(isolate),
isolate_(isolate) {
ThreadInit();
@@ -282,7 +281,7 @@ void Debug::Iterate(RootVisitor* v) {
v->VisitRootPointer(Root::kDebug, &thread_local_.ignore_step_into_function_);
}
-DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
+DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info) : next_(nullptr) {
// Globalize the request debug info object and make it weak.
GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
debug_info_ = global_handles->Create(debug_info).location();
@@ -603,8 +602,13 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- // Find the break point and change it.
- *source_position = FindBreakablePosition(debug_info, *source_position);
+ // Find breakable position returns first breakable position after
+ // *source_position, it can return 0 if no break location is found after
+ // *source_position.
+ int breakable_position = FindBreakablePosition(debug_info, *source_position);
+ if (breakable_position < *source_position) return false;
+ *source_position = breakable_position;
+
DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
// At least one active break point now.
DCHECK_LT(0, debug_info->GetBreakPointCount());
@@ -653,7 +657,7 @@ void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
HandleScope scope(isolate_);
- for (DebugInfoListNode* node = debug_info_list_; node != NULL;
+ for (DebugInfoListNode* node = debug_info_list_; node != nullptr;
node = node->next()) {
Handle<Object> result =
DebugInfo::FindBreakPointInfo(node->debug_info(), break_point_object);
@@ -1020,7 +1024,7 @@ void Debug::ClearOneShot() {
// The current implementation just runs through all the breakpoints. When the
// last break point for a function is removed that function is automatically
// removed from the list.
- for (DebugInfoListNode* node = debug_info_list_; node != NULL;
+ for (DebugInfoListNode* node = debug_info_list_; node != nullptr;
node = node->next()) {
Handle<DebugInfo> debug_info = node->debug_info();
ClearBreakPoints(debug_info);
@@ -1196,12 +1200,12 @@ void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
class SharedFunctionInfoFinder {
public:
explicit SharedFunctionInfoFinder(int target_position)
- : current_candidate_(NULL),
- current_candidate_closure_(NULL),
+ : current_candidate_(nullptr),
+ current_candidate_closure_(nullptr),
current_start_position_(kNoSourcePosition),
target_position_(target_position) {}
- void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = NULL) {
+ void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = nullptr) {
if (!shared->IsSubjectToDebugging()) return;
int start_position = shared->function_token_position();
if (start_position == kNoSourcePosition) {
@@ -1211,11 +1215,11 @@ class SharedFunctionInfoFinder {
if (start_position > target_position_) return;
if (target_position_ > shared->end_position()) return;
- if (current_candidate_ != NULL) {
+ if (current_candidate_ != nullptr) {
if (current_start_position_ == start_position &&
shared->end_position() == current_candidate_->end_position()) {
// If we already have a matching closure, do not throw it away.
- if (current_candidate_closure_ != NULL && closure == NULL) return;
+ if (current_candidate_closure_ != nullptr && closure == nullptr) return;
// If a top-level function contains only one function
// declaration the source for the top-level and the function
// is the same. In that case prefer the non top-level function.
@@ -1267,7 +1271,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
finder.NewCandidate(info);
}
shared = finder.Result();
- if (shared == NULL) break;
+ if (shared == nullptr) break;
// We found it if it's already compiled.
if (shared->is_compiled()) {
Handle<SharedFunctionInfo> shared_handle(shared);
@@ -1403,7 +1407,7 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
DebugInfoListNode* node) {
DCHECK(node->debug_info()->IsEmpty());
- // Unlink from list. If prev is NULL we are looking at the first element.
+ // Unlink from list. If prev is nullptr we are looking at the first element.
if (prev == nullptr) {
debug_info_list_ = node->next();
} else {
@@ -1560,7 +1564,7 @@ void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
namespace {
v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
- // Isolate::context() may have been NULL when "script collected" event
+ // Isolate::context() may have been nullptr when "script collected" event
// occurred.
if (context.is_null()) return v8::Local<v8::Context>();
Handle<Context> native_context(context->native_context());
@@ -1605,7 +1609,7 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
// Mark the promise as already having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- JSObject::SetProperty(jspromise, key, key, STRICT).Assert();
+ JSObject::SetProperty(jspromise, key, key, LanguageMode::kStrict).Assert();
// Check whether the promise reject is considered an uncaught exception.
uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
}
@@ -1742,28 +1746,55 @@ int GetReferenceAsyncTaskId(Isolate* isolate, Handle<JSPromise> promise) {
}
} // namespace
-void Debug::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+void Debug::RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
Handle<Object> parent) {
+ if (hook_type == PromiseHookType::kResolve) return;
+ if (in_debug_scope() || ignore_events()) return;
if (!debug_delegate_) return;
+ PostponeInterruptsScope no_interrupts(isolate_);
+
int id = GetReferenceAsyncTaskId(isolate_, promise);
- switch (type) {
- case PromiseHookType::kInit:
- OnAsyncTaskEvent(debug::kDebugPromiseCreated, id,
- parent->IsJSPromise()
- ? GetReferenceAsyncTaskId(
- isolate_, Handle<JSPromise>::cast(parent))
- : 0);
- return;
- case PromiseHookType::kResolve:
- // We can't use this hook because it's called before promise object will
- // get resolved status.
- return;
- case PromiseHookType::kBefore:
- OnAsyncTaskEvent(debug::kDebugWillHandle, id, 0);
- return;
- case PromiseHookType::kAfter:
- OnAsyncTaskEvent(debug::kDebugDidHandle, id, 0);
- return;
+ if (hook_type == PromiseHookType::kBefore) {
+ debug_delegate_->PromiseEventOccurred(debug::kDebugWillHandle, id, false);
+ } else if (hook_type == PromiseHookType::kAfter) {
+ debug_delegate_->PromiseEventOccurred(debug::kDebugDidHandle, id, false);
+ } else {
+ DCHECK(hook_type == PromiseHookType::kInit);
+ debug::PromiseDebugActionType type = debug::kDebugPromiseThen;
+ bool last_frame_was_promise_builtin = false;
+ JavaScriptFrameIterator it(isolate_);
+ while (!it.done()) {
+ std::vector<Handle<SharedFunctionInfo>> infos;
+ it.frame()->GetFunctions(&infos);
+ for (size_t i = 1; i <= infos.size(); ++i) {
+ Handle<SharedFunctionInfo> info = infos[infos.size() - i];
+ if (info->IsUserJavaScript()) {
+ // We should not report PromiseThen and PromiseCatch which is called
+ // indirectly, e.g. Promise.all calls Promise.then internally.
+ if (type == debug::kDebugAsyncFunctionPromiseCreated ||
+ last_frame_was_promise_builtin) {
+ debug_delegate_->PromiseEventOccurred(type, id, IsBlackboxed(info));
+ }
+ return;
+ }
+ last_frame_was_promise_builtin = false;
+ Handle<Code> code(info->code());
+ if (*code == *BUILTIN_CODE(isolate_, AsyncFunctionPromiseCreate)) {
+ type = debug::kDebugAsyncFunctionPromiseCreated;
+ last_frame_was_promise_builtin = true;
+ } else if (*code == *BUILTIN_CODE(isolate_, PromiseThen)) {
+ type = debug::kDebugPromiseThen;
+ last_frame_was_promise_builtin = true;
+ } else if (*code == *BUILTIN_CODE(isolate_, PromiseCatch)) {
+ type = debug::kDebugPromiseCatch;
+ last_frame_was_promise_builtin = true;
+ } else if (*code == *BUILTIN_CODE(isolate_, PromiseFinally)) {
+ type = debug::kDebugPromiseFinally;
+ last_frame_was_promise_builtin = true;
+ }
+ }
+ it.Advance();
+ }
}
}
@@ -1776,7 +1807,8 @@ int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
}
Handle<Smi> async_id =
handle(Smi::FromInt(++thread_local_.async_task_count_), isolate_);
- Object::SetProperty(&it, async_id, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED)
+ Object::SetProperty(&it, async_id, LanguageMode::kSloppy,
+ Object::MAY_BE_STORE_FROM_KEYED)
.ToChecked();
return async_id->value();
}
@@ -1852,25 +1884,6 @@ bool Debug::SetScriptSource(Handle<Script> script, Handle<String> source,
return true;
}
-void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
- int parent_id) {
- if (in_debug_scope() || ignore_events()) return;
- if (!debug_delegate_) return;
- SuppressDebug while_processing(this);
- PostponeInterruptsScope no_interrupts(isolate_);
- DisableBreak no_recursive_break(this);
- bool created_by_user = false;
- if (type == debug::kDebugPromiseCreated) {
- JavaScriptFrameIterator it(isolate_);
- // We need to skip top frame which contains instrumentation.
- it.Advance();
- created_by_user =
- !it.done() &&
- !IsFrameBlackboxed(it.frame());
- }
- debug_delegate_->PromiseEventOccurred(type, id, parent_id, created_by_user);
-}
-
void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
// Attach the correct debug id to the script. The debug id is used by the
// inspector to filter scripts by native context.
@@ -2158,8 +2171,7 @@ bool Debug::PerformSideEffectCheckForCallback(Address function) {
}
void LegacyDebugDelegate::PromiseEventOccurred(
- v8::debug::PromiseDebugActionType type, int id, int parent_id,
- bool created_by_user) {
+ v8::debug::PromiseDebugActionType type, int id, bool is_blackboxed) {
DebugScope debug_scope(isolate_->debug());
if (debug_scope.failed()) return;
HandleScope scope(isolate_);
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 154c381729..aec66f2f35 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -207,8 +207,6 @@ class Debug {
void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
void OnCompileError(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
- int parent_id);
MUST_USE_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
Handle<Object> data);
@@ -260,7 +258,7 @@ class Debug {
void RecordGenerator(Handle<JSGeneratorObject> generator_object);
- void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ void RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
Handle<Object> parent);
int NextAsyncTaskId(Handle<JSObject> promise);
@@ -574,7 +572,7 @@ class LegacyDebugDelegate : public v8::debug::DebugDelegate {
public:
explicit LegacyDebugDelegate(Isolate* isolate) : isolate_(isolate) {}
void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
- int parent_id, bool created_by_user) override;
+ bool is_blackboxed) override;
void ScriptCompiled(v8::Local<v8::debug::Script> script, bool is_live_edited,
bool has_compile_error) override;
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index deb30320ba..d8dcb0e5d5 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 2173174a73..4dd8352695 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -70,10 +70,10 @@ struct WasmDisassembly {
};
enum PromiseDebugActionType {
- kDebugPromiseCreated,
- kDebugEnqueueAsyncFunction,
- kDebugEnqueuePromiseResolve,
- kDebugEnqueuePromiseReject,
+ kDebugAsyncFunctionPromiseCreated,
+ kDebugPromiseThen,
+ kDebugPromiseCatch,
+ kDebugPromiseFinally,
kDebugWillHandle,
kDebugDidHandle,
};
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index e20e56cd75..9180608b21 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -29,7 +29,8 @@ void SetElementSloppy(Handle<JSObject> object,
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
- Object::SetElement(object->GetIsolate(), object, index, value, SLOPPY)
+ Object::SetElement(object->GetIsolate(), object, index, value,
+ LanguageMode::kSloppy)
.Assert();
}
@@ -703,11 +704,14 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
Handle<JSObject> script_obj =
Script::GetWrapper(message_location.script());
- Object::SetProperty(rethrow_exception, start_pos_key, start_pos, SLOPPY)
+ Object::SetProperty(rethrow_exception, start_pos_key, start_pos,
+ LanguageMode::kSloppy)
.Assert();
- Object::SetProperty(rethrow_exception, end_pos_key, end_pos, SLOPPY)
+ Object::SetProperty(rethrow_exception, end_pos_key, end_pos,
+ LanguageMode::kSloppy)
.Assert();
- Object::SetProperty(rethrow_exception, script_obj_key, script_obj, SLOPPY)
+ Object::SetProperty(rethrow_exception, script_obj_key, script_obj,
+ LanguageMode::kSloppy)
.Assert();
}
}
@@ -754,8 +758,8 @@ class FeedbackVectorFixer {
static void IterateJSFunctions(Handle<SharedFunctionInfo> shared_info,
Visitor* visitor) {
HeapIterator iterator(shared_info->GetHeap());
- for (HeapObject* obj = iterator.next(); obj != NULL;
- obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
if (function->shared() == *shared_info) {
@@ -941,13 +945,12 @@ static int TranslatePosition(int original_position,
return original_position + position_diff;
}
-void TranslateSourcePositionTable(Handle<AbstractCode> code,
+void TranslateSourcePositionTable(Handle<BytecodeArray> code,
Handle<JSArray> position_change_array) {
Isolate* isolate = code->GetIsolate();
- Zone zone(isolate->allocator(), ZONE_NAME);
- SourcePositionTableBuilder builder(&zone);
+ SourcePositionTableBuilder builder;
- Handle<ByteArray> source_position_table(code->source_position_table());
+ Handle<ByteArray> source_position_table(code->SourcePositionTable());
for (SourcePositionTableIterator iterator(*source_position_table);
!iterator.done(); iterator.Advance()) {
SourcePosition position = iterator.source_position();
@@ -958,8 +961,11 @@ void TranslateSourcePositionTable(Handle<AbstractCode> code,
}
Handle<ByteArray> new_source_position_table(
- builder.ToSourcePositionTable(isolate, code));
+ builder.ToSourcePositionTable(isolate));
code->set_source_position_table(*new_source_position_table);
+ LOG_CODE_EVENT(isolate,
+ CodeLinePosInfoRecordEvent(code->GetFirstBytecodeAddress(),
+ *new_source_position_table));
}
} // namespace
@@ -981,9 +987,8 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
info->set_function_token_position(new_function_token_pos);
if (info->HasBytecodeArray()) {
- TranslateSourcePositionTable(
- Handle<AbstractCode>(AbstractCode::cast(info->bytecode_array())),
- position_change_array);
+ TranslateSourcePositionTable(handle(info->bytecode_array()),
+ position_change_array);
}
if (info->HasBreakInfo()) {
// Existing break points will be re-applied. Reset the debug info here.
@@ -1108,9 +1113,7 @@ class MultipleFunctionTarget {
LiveEdit::FunctionPatchabilityStatus status) {
return CheckActivation(old_shared_array_, result_, frame, status);
}
- const char* GetNotFoundMessage() const {
- return NULL;
- }
+ const char* GetNotFoundMessage() const { return nullptr; }
bool FrameUsesNewTarget(StackFrame* frame) {
if (!frame->is_java_script()) return false;
JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
@@ -1238,25 +1241,25 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
if (frame->is_java_script()) {
if (target.MatchActivation(frame, non_droppable_reason)) {
// Fail.
- return NULL;
+ return nullptr;
}
if (non_droppable_reason ==
LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR &&
!target_frame_found) {
// Fail.
target.set_status(non_droppable_reason);
- return NULL;
+ return nullptr;
}
}
}
}
// We cannot restart a frame that uses new.target.
- if (target.FrameUsesNewTarget(frames[bottom_js_frame_index])) return NULL;
+ if (target.FrameUsesNewTarget(frames[bottom_js_frame_index])) return nullptr;
if (!do_drop) {
// We are in check-only mode.
- return NULL;
+ return nullptr;
}
if (!target_frame_found) {
@@ -1269,7 +1272,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
}
debug->ScheduleFrameRestart(frames[bottom_js_frame_index]);
- return NULL;
+ return nullptr;
}
@@ -1299,7 +1302,7 @@ static const char* DropActivationsInActiveThread(
SetElementSloppy(result, i, replaced);
}
}
- return NULL;
+ return nullptr;
}
@@ -1315,8 +1318,8 @@ bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
Heap* heap = isolate->heap();
HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
- HeapObject* obj = NULL;
- while ((obj = iterator.next()) != NULL) {
+ HeapObject* obj = nullptr;
+ while ((obj = iterator.next()) != nullptr) {
if (!obj->IsJSGeneratorObject()) continue;
JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
@@ -1408,7 +1411,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// Try to drop activations from the current stack.
const char* error_message = DropActivationsInActiveThread(
old_shared_array, new_shared_array, result, do_drop);
- if (error_message != NULL) {
+ if (error_message != nullptr) {
// Add error message as an array extra element.
Handle<String> str =
isolate->factory()->NewStringFromAsciiChecked(error_message);
@@ -1456,15 +1459,14 @@ class SingleFrameTarget {
LiveEdit::FunctionPatchabilityStatus m_saved_status;
};
-
// Finds a drops required frame and all frames above.
-// Returns error message or NULL.
+// Returns error message or nullptr.
const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
SingleFrameTarget target(frame);
const char* result =
DropActivationsInActiveThreadImpl(frame->isolate(), target, true);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}
if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) {
@@ -1473,7 +1475,7 @@ const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR) {
return "Function is blocked under a generator activation";
}
- return NULL;
+ return nullptr;
}
Handle<JSArray> LiveEditFunctionTracker::Collect(FunctionLiteral* node,
@@ -1541,7 +1543,7 @@ Handle<Object> LiveEditFunctionTracker::SerializeFunctionScope(Scope* scope) {
// variables in the whole scope chain. Null-named slots delimit
// scopes of this chain.
Scope* current_scope = scope;
- while (current_scope != NULL) {
+ while (current_scope != nullptr) {
HandleScope handle_scope(isolate_);
for (Variable* var : *current_scope->locals()) {
if (!var->IsContextSlot()) continue;
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 873faa4c82..2335b94f10 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -123,7 +123,7 @@ class LiveEdit : AllStatic {
bool do_drop);
// Restarts the call frame and completely drops all frames above it.
- // Return error message or NULL.
+ // Return error message or nullptr.
static const char* RestartFrame(JavaScriptFrame* frame);
// A copy of this is in liveedit.js.
@@ -213,7 +213,8 @@ class JSArrayBasedStruct {
protected:
void SetField(int field_position, Handle<Object> value) {
- Object::SetElement(isolate(), array_, field_position, value, SLOPPY)
+ Object::SetElement(isolate(), array_, field_position, value,
+ LanguageMode::kSloppy)
.Assert();
}
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index f7e99625a0..9767ab0fd8 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 22f188b33b..8bfb31e3a2 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 534e354988..8e9a5bf3da 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -45,6 +45,45 @@ var SetValues = global.Set.prototype.values;
// - ScriptMirror
// - ScopeMirror
+macro IS_BOOLEAN(arg)
+(typeof(arg) === 'boolean')
+endmacro
+
+macro IS_DATE(arg)
+(%IsDate(arg))
+endmacro
+
+macro IS_ERROR(arg)
+(%_ClassOf(arg) === 'Error')
+endmacro
+
+macro IS_GENERATOR(arg)
+(%_ClassOf(arg) === 'Generator')
+endmacro
+
+macro IS_MAP(arg)
+(%_IsJSMap(arg))
+endmacro
+
+macro IS_MAP_ITERATOR(arg)
+(%_ClassOf(arg) === 'Map Iterator')
+endmacro
+
+macro IS_SCRIPT(arg)
+(%_ClassOf(arg) === 'Script')
+endmacro
+
+macro IS_SET(arg)
+(%_IsJSSet(arg))
+endmacro
+
+macro IS_SET_ITERATOR(arg)
+(%_ClassOf(arg) === 'Set Iterator')
+endmacro
+
+// Must match PropertyFilter in property-details.h
+define PROPERTY_FILTER_NONE = 0;
+
// Type names of the different mirrors.
var MirrorType = {
UNDEFINED_TYPE : 'undefined',
diff --git a/deps/v8/src/debug/ppc/OWNERS b/deps/v8/src/debug/ppc/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/debug/ppc/OWNERS
+++ b/deps/v8/src/debug/ppc/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index 7c29c582ad..047f76a1de 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/s390/OWNERS b/deps/v8/src/debug/s390/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/debug/s390/OWNERS
+++ b/deps/v8/src/debug/s390/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index 0806910b5e..2fdbcc8fd0 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -8,9 +8,9 @@
#include "src/debug/debug.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 8432f43c19..337fec3515 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -7,9 +7,9 @@
#include "src/debug/debug.h"
#include "src/assembler.h"
-#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index 62bd4f68cf..ddfe637293 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -13,8 +13,10 @@ namespace internal {
#define DEOPTIMIZE_REASON_LIST(V) \
V(AccessCheck, "Access check needed") \
V(NoReason, "no reason") \
+ V(ArrayBufferWasNeutered, "array buffer was neutered") \
V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
V(ConversionOverflow, "conversion overflow") \
+ V(CowArrayElementsChanged, "copy-on-write array's elements changed") \
V(DivisionByZero, "division by zero") \
V(ExpectedHeapNumber, "Expected heap number") \
V(ExpectedSmi, "Expected smi") \
@@ -36,6 +38,8 @@ namespace internal {
"Insufficient type feedback for generic named access") \
V(InsufficientTypeFeedbackForGenericKeyedAccess, \
"Insufficient type feedback for generic keyed access") \
+ V(InsufficientTypeFeedbackForUnaryOperation, \
+ "Insufficient type feedback for unary operation") \
V(KeyIsNegative, "key is negative") \
V(LostPrecision, "lost precision") \
V(LostPrecisionOrNaN, "lost precision or NaN") \
@@ -55,6 +59,7 @@ namespace internal {
V(OutsideOfRange, "Outside of range") \
V(Overflow, "overflow") \
V(Proxy, "proxy") \
+ V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \
V(ReceiverWasAGlobalObject, "receiver was a global object") \
V(Smi, "Smi") \
V(TooManyArguments, "too many arguments") \
@@ -74,6 +79,7 @@ namespace internal {
V(ValueMismatch, "value mismatch") \
V(WrongInstanceType, "wrong instance type") \
V(WrongMap, "wrong map") \
+ V(WrongName, "wrong name") \
V(UndefinedOrNullInForIn, "null or undefined in for-in") \
V(UndefinedOrNullInToObject, "null or undefined in ToObject")
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 125ca932f7..ac6818ed0d 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -10,7 +10,6 @@
#include "src/assembler-inl.h"
#include "src/ast/prettyprinter.h"
#include "src/callable.h"
-#include "src/codegen.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
@@ -24,28 +23,23 @@
namespace v8 {
namespace internal {
-static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
- return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
- MemoryAllocator::GetCommitPageSize(),
- EXECUTABLE, NULL);
-}
-
-
-DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
- : allocator_(allocator),
- current_(NULL) {
+DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
- deopt_entry_code_entries_[i] = -1;
- deopt_entry_code_[i] = AllocateCodeChunk(allocator);
+ deopt_entry_code_[i] = nullptr;
}
+ Code** start = &deopt_entry_code_[0];
+ Code** end = &deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
+ heap_->RegisterStrongRoots(reinterpret_cast<Object**>(start),
+ reinterpret_cast<Object**>(end));
}
DeoptimizerData::~DeoptimizerData() {
for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
- allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
- deopt_entry_code_[i] = NULL;
+ deopt_entry_code_[i] = nullptr;
}
+ Code** start = &deopt_entry_code_[0];
+ heap_->UnregisterStrongRoots(reinterpret_cast<Object**>(start));
}
@@ -62,7 +56,7 @@ Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
element = code->next_code_link();
}
}
- return NULL;
+ return nullptr;
}
@@ -76,31 +70,17 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
Isolate* isolate) {
Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type,
bailout_id, from, fp_to_sp_delta);
- CHECK(isolate->deoptimizer_data()->current_ == NULL);
+ CHECK_NULL(isolate->deoptimizer_data()->current_);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
}
-// No larger than 2K on all platforms
-static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
-
-
-size_t Deoptimizer::GetMaxDeoptTableSize() {
- int entries_size =
- Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
- int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
- commit_page_size) + 1;
- return static_cast<size_t>(commit_page_size * page_count);
-}
-
-
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
Deoptimizer* result = isolate->deoptimizer_data()->current_;
CHECK_NOT_NULL(result);
result->DeleteFrameDescriptions();
- isolate->deoptimizer_data()->current_ = NULL;
+ isolate->deoptimizer_data()->current_ = nullptr;
return result;
}
@@ -194,7 +174,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
DisallowHeapAllocation no_allocation;
Isolate* isolate = context->GetHeap()->isolate();
- Code* topmost_optimized_code = NULL;
+ Code* topmost_optimized_code = nullptr;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
@@ -218,15 +198,12 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
- bool is_non_deoptimizing_asm_code =
- code->is_turbofanned() && !function->shared()->HasBytecodeArray();
bool safe_if_deopt_triggered =
- deopt_index != Safepoint::kNoDeoptimizationIndex ||
- is_non_deoptimizing_asm_code;
+ deopt_index != Safepoint::kNoDeoptimizationIndex;
bool is_builtin_code = code->kind() == Code::BUILTIN;
- DCHECK(topmost_optimized_code == NULL || safe_if_deopt_triggered ||
- is_non_deoptimizing_asm_code || is_builtin_code);
- if (topmost_optimized_code == NULL) {
+ DCHECK(topmost_optimized_code == nullptr || safe_if_deopt_triggered ||
+ is_builtin_code);
+ if (topmost_optimized_code == nullptr) {
topmost_optimized_code = code;
safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
}
@@ -238,10 +215,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// deoptimization and have not been found in stack frames.
std::set<Code*> codes;
- // Move marked code from the optimized code list to the deoptimized
- // code list.
+ // Move marked code from the optimized code list to the deoptimized code list.
// Walk over all optimized code objects in this native context.
- Code* prev = NULL;
+ Code* prev = nullptr;
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
@@ -250,10 +226,10 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
if (code->marked_for_deoptimization()) {
// Make sure that this object does not point to any garbage.
- code->InvalidateEmbeddedObjects();
+ isolate->heap()->InvalidateCodeEmbeddedObjects(code);
codes.insert(code);
- if (prev != NULL) {
+ if (prev != nullptr) {
// Skip this code in the optimized code list.
prev->set_next_code_link(next);
} else {
@@ -281,12 +257,10 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
isolate->thread_manager()->IterateArchivedThreads(&visitor);
// If there's no activation of a code in any stack then we can remove its
- // deoptimization data. We do this to ensure that Code objects that will be
- // unlinked won't be kept alive.
- std::set<Code*>::iterator it;
- for (it = codes.begin(); it != codes.end(); ++it) {
- Code* code = *it;
- code->set_deoptimization_data(isolate->heap()->empty_fixed_array());
+ // deoptimization data. We do this to ensure that code objects that are
+ // unlinked don't transitively keep objects alive unnecessarily.
+ for (Code* code : codes) {
+ isolate->heap()->InvalidateCodeDeoptimizationData(code);
}
}
@@ -380,7 +354,7 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
case LAZY: return "lazy";
}
FATAL("Unsupported deopt type");
- return NULL;
+ return nullptr;
}
namespace {
@@ -407,6 +381,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
+ preserve_optimized_(false),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
deoptimizing_throw_(false),
@@ -428,13 +403,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
deoptimizing_throw_ = true;
}
- DCHECK(from != nullptr);
+ DCHECK_NOT_NULL(from);
compiled_code_ = FindOptimizedCode();
- DCHECK(compiled_code_ != NULL);
+ DCHECK_NOT_NULL(compiled_code_);
DCHECK(function->IsJSFunction());
- trace_scope_ =
- FLAG_trace_deopt ? new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
+ trace_scope_ = FLAG_trace_deopt
+ ? new CodeTracer::Scope(isolate->GetCodeTracer())
+ : nullptr;
#ifdef DEBUG
DCHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
@@ -467,7 +443,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
Code* Deoptimizer::FindOptimizedCode() {
Code* compiled_code = FindDeoptimizingCode(from_);
- return (compiled_code == NULL)
+ return (compiled_code == nullptr)
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
@@ -490,8 +466,8 @@ Handle<Code> Deoptimizer::compiled_code() const {
}
Deoptimizer::~Deoptimizer() {
- DCHECK(input_ == NULL && output_ == NULL);
- DCHECK(disallow_heap_allocation_ == NULL);
+ DCHECK(input_ == nullptr && output_ == nullptr);
+ DCHECK_NULL(disallow_heap_allocation_);
delete trace_scope_;
}
@@ -502,32 +478,25 @@ void Deoptimizer::DeleteFrameDescriptions() {
if (output_[i] != input_) delete output_[i];
}
delete[] output_;
- input_ = NULL;
- output_ = NULL;
+ input_ = nullptr;
+ output_ = nullptr;
#ifdef DEBUG
DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK(disallow_heap_allocation_ != NULL);
+ DCHECK_NOT_NULL(disallow_heap_allocation_);
delete disallow_heap_allocation_;
- disallow_heap_allocation_ = NULL;
+ disallow_heap_allocation_ = nullptr;
#endif // DEBUG
}
-
-Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
- int id,
- BailoutType type,
- GetEntryMode mode) {
+Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, int id,
+ BailoutType type) {
CHECK_GE(id, 0);
- if (id >= kMaxNumberOfEntries) return NULL;
- if (mode == ENSURE_ENTRY_CODE) {
- EnsureCodeForDeoptimizationEntry(isolate, type, id);
- } else {
- CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
- }
+ if (id >= kMaxNumberOfEntries) return nullptr;
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(type, kLastBailoutType);
- MemoryChunk* base = data->deopt_entry_code_[type];
- return base->area_start() + (id * table_entry_size_);
+ CHECK_NOT_NULL(data->deopt_entry_code_[type]);
+ Code* code = data->deopt_entry_code_[type];
+ return code->instruction_start() + (id * table_entry_size_);
}
@@ -535,8 +504,10 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
Address addr,
BailoutType type) {
DeoptimizerData* data = isolate->deoptimizer_data();
- MemoryChunk* base = data->deopt_entry_code_[type];
- Address start = base->area_start();
+ CHECK_LE(type, kLastBailoutType);
+ Code* code = data->deopt_entry_code_[type];
+ if (code == nullptr) return kNotDeoptimizationEntry;
+ Address start = code->instruction_start();
if (addr < start ||
addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
@@ -593,8 +564,8 @@ void Deoptimizer::DoComputeOutputFrames() {
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+ DeoptimizationData* input_data =
+ DeoptimizationData::cast(compiled_code_->deoptimization_data());
{
// Read caller's PC, caller's FP and caller's constant pool values
@@ -618,7 +589,7 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- if (trace_scope_ != NULL) {
+ if (trace_scope_ != nullptr) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
MessageFor(bailout_type_));
@@ -665,10 +636,10 @@ void Deoptimizer::DoComputeOutputFrames() {
count = catch_handler_frame_index + 1;
}
- DCHECK(output_ == NULL);
+ DCHECK_NULL(output_);
output_ = new FrameDescription*[count];
for (size_t i = 0; i < count; ++i) {
- output_[i] = NULL;
+ output_[i] = nullptr;
}
output_count_ = static_cast<int>(count);
@@ -689,12 +660,6 @@ void Deoptimizer::DoComputeOutputFrames() {
case TranslatedFrame::kConstructStub:
DoComputeConstructStubFrame(translated_frame, frame_index);
break;
- case TranslatedFrame::kGetter:
- DoComputeAccessorStubFrame(translated_frame, frame_index, false);
- break;
- case TranslatedFrame::kSetter:
- DoComputeAccessorStubFrame(translated_frame, frame_index, true);
- break;
case TranslatedFrame::kBuiltinContinuation:
DoComputeBuiltinContinuation(translated_frame, frame_index, false);
break;
@@ -708,7 +673,7 @@ void Deoptimizer::DoComputeOutputFrames() {
}
// Print some helpful diagnostic information.
- if (trace_scope_ != NULL) {
+ if (trace_scope_ != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
@@ -749,7 +714,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
Object* function = value_iterator->GetRawValue();
value_iterator++;
input_index++;
- if (trace_scope_ != NULL) {
+ if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " translating interpreted frame ");
std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
@@ -1016,7 +981,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
Object* function = value_iterator->GetRawValue();
value_iterator++;
input_index++;
- if (trace_scope_ != NULL) {
+ if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
" translating arguments adaptor => height=%d\n", height_in_bytes);
}
@@ -1031,7 +996,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
// Arguments adaptor can not be topmost.
CHECK(frame_index < output_count_ - 1);
- CHECK(output_[frame_index] == NULL);
+ CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
@@ -1115,7 +1080,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
- DCHECK(0 == output_offset);
+ DCHECK_EQ(0, output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
@@ -1163,7 +1128,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
- if (trace_scope_ != NULL) {
+ if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
" translating construct stub => bailout_id=%d (%s), height=%d\n",
bailout_id.ToInt(),
@@ -1180,7 +1145,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Construct stub can not be topmost.
DCHECK(frame_index > 0 && frame_index < output_count_);
- DCHECK(output_[frame_index] == NULL);
+ DCHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
@@ -1328,191 +1293,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
}
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
- int frame_index,
- bool is_setter_stub_frame) {
- TranslatedFrame::iterator value_iterator = translated_frame->begin();
- bool is_topmost = (output_count_ - 1 == frame_index);
- // The accessor frame could become topmost only if we inlined an accessor
- // call which does a tail call (otherwise the tail callee's frame would be
- // the topmost one). So it could only be the LAZY case.
- CHECK(!is_topmost || bailout_type_ == LAZY);
- int input_index = 0;
-
- // Skip accessor.
- value_iterator++;
- input_index++;
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
-
- // If the accessor frame appears to be topmost we should ensure that the
- // value of result register is preserved during continuation execution.
- // We do this here by "pushing" the result of the accessor function to the
- // top of the reconstructed stack and then popping it in
- // {Builtins::kNotifyDeoptimized}.
- // For setter calls, since the result register is going to be overwritten
- // anyway in the stub, we store a dummy value to pop into the result register
- // to keep the code simpler.
- if (is_topmost) {
- height_in_bytes += kPointerSize;
- if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
- }
-
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address and enough entries for the
- // StackFrame::INTERNAL (FP, frame type, context, code object and constant
- // pool (if enabled)- see MacroAssembler::EnterFrame).
- // For a setter stub frame we need one additional entry for the implicit
- // return value, see StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries =
- (StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
- (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new (output_frame_size) FrameDescription(output_frame_size);
-
- // A frame for an accessor stub can not be bottommost.
- CHECK(frame_index > 0 && frame_index < output_count_);
- CHECK_NULL(output_[frame_index]);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPCOnStackSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetCallerPc(output_offset, callers_pc);
- DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kFPOnStackSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetCallerFp(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- Register fp_reg = JavaScriptFrame::fp_register();
- output_frame->SetRegister(fp_reg.code(), fp_value);
- }
- DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
-
- if (FLAG_enable_embedded_constant_pool) {
- // Read the caller's constant pool from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetCallerConstantPool(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset,
- "caller's constant_pool\n");
- }
-
- // Set the frame type.
- output_offset -= kPointerSize;
- value = StackFrame::TypeToMarker(StackFrame::INTERNAL);
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
- if (trace_scope_ != nullptr) {
- PrintF(trace_scope_->file(), "(%s sentinel)\n", kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
-
- // Skip receiver.
- value_iterator++;
- input_index++;
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
- output_offset);
- }
-
- if (is_topmost) {
- if (PadTopOfStackRegister()) {
- output_offset -= kPointerSize;
- WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
- output_offset, "padding ");
- }
- // Ensure the result is restored back when we return to the stub.
- output_offset -= kPointerSize;
- Register result_reg = kReturnRegister0;
- value = input_->GetRegister(result_reg.code());
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset,
- "accessor result\n");
- }
-
- CHECK_EQ(0u, output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-
- // Update constant pool.
- if (FLAG_enable_embedded_constant_pool) {
- intptr_t constant_pool_value =
- reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
- output_frame->SetConstantPool(constant_pool_value);
- if (is_topmost) {
- Register constant_pool_reg =
- JavaScriptFrame::constant_pool_pointer_register();
- output_frame->SetRegister(constant_pool_reg.code(), fp_value);
- }
- }
-
- // Clear the context register. The context might be a de-materialized object
- // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
- // safety we use Smi(0) instead of the potential {arguments_marker} here.
- if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
- Register context_reg = JavaScriptFrame::context_register();
- output_frame->SetRegister(context_reg.code(), context_value);
- }
-
- // Set the continuation for the topmost frame.
- if (is_topmost) {
- Builtins* builtins = isolate_->builtins();
- DCHECK_EQ(LAZY, bailout_type_);
- Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
-}
-
// BuiltinContinuationFrames capture the machine state that is expected as input
// to a builtin, including both input register values and stack parameters. When
// the frame is reactivated (i.e. the frame below it returns), a
@@ -1569,6 +1349,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
BailoutId bailout_id = translated_frame->node_id();
Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
+ DCHECK(!Builtins::IsLazy(builtin_name));
Code* builtin = isolate()->builtins()->builtin(builtin_name);
Callable continuation_callable =
Builtins::CallableFor(isolate(), builtin_name);
@@ -1581,6 +1362,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
+ int padding_slot_count = BuiltinContinuationFrameConstants::PaddingSlotCount(
+ allocatable_register_count);
+
int register_parameter_count =
continuation_descriptor.GetRegisterParameterCount();
// Make sure to account for the context by removing it from the register
@@ -1588,8 +1372,19 @@ void Deoptimizer::DoComputeBuiltinContinuation(
int stack_param_count = height_in_words - register_parameter_count - 1;
if (must_handle_result) stack_param_count++;
int output_frame_size =
- kPointerSize * (stack_param_count + allocatable_register_count) +
- TYPED_FRAME_SIZE(2); // For destination builtin code and registers
+ kPointerSize * (stack_param_count + allocatable_register_count +
+ padding_slot_count) +
+ BuiltinContinuationFrameConstants::kFixedFrameSize;
+
+ // If the builtins frame appears to be topmost we should ensure that the
+ // value of result register is preserved during continuation execution.
+ // We do this here by "pushing" the result of callback function to the
+ // top of the reconstructed stack and popping it in
+ // {Builtins::kNotifyDeoptimized}.
+ if (is_topmost) {
+ output_frame_size += kPointerSize;
+ if (PadTopOfStackRegister()) output_frame_size += kPointerSize;
+ }
// Validate types of parameters. They must all be tagged except for argc for
// JS builtins.
@@ -1609,7 +1404,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
CHECK_EQ(java_script_builtin, has_argc);
- if (trace_scope_ != NULL) {
+ if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
" translating BuiltinContinuation to %s,"
" register param count %d,"
@@ -1633,7 +1428,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
output_frame->SetTop(top_address);
- // Get the possible JSFunction for the case that
+ // Get the possible JSFunction for the case that this is a
+ // JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer
+ // like a normal JavaScriptFrame.
intptr_t maybe_function =
reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
++input_index;
@@ -1649,14 +1446,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
intptr_t value;
- Register result_reg = kReturnRegister0;
- if (must_handle_result) {
- value = input_->GetRegister(result_reg.code());
- } else {
- value = reinterpret_cast<intptr_t>(isolate()->heap()->undefined_value());
- }
- output_frame->SetRegister(result_reg.code(), value);
-
int translated_stack_parameters =
must_handle_result ? stack_param_count - 1 : stack_param_count;
@@ -1785,8 +1574,37 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
}
+ // Some architectures must pad the stack frame with extra stack slots
+ // to ensure the stack frame is aligned.
+ for (int i = 0; i < padding_slot_count; ++i) {
+ output_frame_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_frame_offset, "padding ");
+ }
+
+ if (is_topmost) {
+ if (PadTopOfStackRegister()) {
+ output_frame_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_frame_offset, "padding ");
+ }
+ // Ensure the result is restored back when we return to the stub.
+ output_frame_offset -= kPointerSize;
+ Register result_reg = kReturnRegister0;
+ if (must_handle_result) {
+ value = input_->GetRegister(result_reg.code());
+ } else {
+ value = reinterpret_cast<intptr_t>(isolate()->heap()->undefined_value());
+ }
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "callback result\n");
+ }
+
+ CHECK_EQ(0u, output_frame_offset);
+
// Clear the context register. The context might be a de-materialized object
- // and will be materialized by {Runtime_NotifyStubFailure}. For additional
+ // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
@@ -1794,6 +1612,13 @@ void Deoptimizer::DoComputeBuiltinContinuation(
output_frame->SetRegister(context_reg.code(), context_value);
}
+ // TODO(6898): For eager deopts within builtin stub frames we currently skip
+ // marking the underlying function as deoptimized. This is to avoid deopt
+ // loops where we would just generate the same optimized code all over again.
+ if (is_topmost && bailout_type_ != LAZY) {
+ preserve_optimized_ = true;
+ }
+
// Ensure the frame pointer register points to the callee's frame. The builtin
// will build its own frame once we continue to it.
Register fp_reg = JavaScriptFrame::fp_register();
@@ -1815,7 +1640,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
reinterpret_cast<intptr_t>(continue_to_builtin->instruction_start()));
Code* continuation =
- isolate()->builtins()->builtin(Builtins::kNotifyBuiltinContinuation);
+ isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
@@ -1943,45 +1768,34 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- BailoutType type,
- int max_entry_id) {
- // We cannot run this if the serializer is enabled because this will
- // cause us to emit relocation information for the external
- // references. This is fine because the deoptimizer's code section
- // isn't meant to be serialized at all.
+ BailoutType type) {
CHECK(type == EAGER || type == SOFT || type == LAZY);
DeoptimizerData* data = isolate->deoptimizer_data();
- int entry_count = data->deopt_entry_code_entries_[type];
- if (max_entry_id < entry_count) return;
- entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
- while (max_entry_id >= entry_count) entry_count *= 2;
- CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);
+ if (data->deopt_entry_code_[type] != nullptr) return;
- MacroAssembler masm(isolate, NULL, 16 * KB, CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, nullptr, 16 * KB, CodeObjectRequired::kYes);
masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, entry_count, type);
+ GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, type);
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- MemoryChunk* chunk = data->deopt_entry_code_[type];
- CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
- desc.instr_size);
- if (!chunk->CommitArea(desc.instr_size)) {
- V8::FatalProcessOutOfMemory(
- "Deoptimizer::EnsureCodeForDeoptimizationEntry");
- }
- CopyBytes(chunk->area_start(), desc.buffer,
- static_cast<size_t>(desc.instr_size));
- Assembler::FlushICache(isolate, chunk->area_start(), desc.instr_size);
+ // Allocate the code as immovable since the entry addresses will be used
+ // directly and there is no support for relocating them.
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::STUB, Handle<Object>(), Builtins::kNoBuiltinId,
+ MaybeHandle<HandlerTable>(), MaybeHandle<ByteArray>(),
+ MaybeHandle<DeoptimizationData>(), kImmovable);
+ CHECK(Heap::IsImmovable(*code));
- data->deopt_entry_code_entries_[type] = entry_count;
+ CHECK_NULL(data->deopt_entry_code_[type]);
+ data->deopt_entry_code_[type] = *code;
}
void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
- EnsureCodeForDeoptimizationEntry(isolate, EAGER, kMaxNumberOfEntries - 1);
- EnsureCodeForDeoptimizationEntry(isolate, LAZY, kMaxNumberOfEntries - 1);
- EnsureCodeForDeoptimizationEntry(isolate, SOFT, kMaxNumberOfEntries - 1);
+ EnsureCodeForDeoptimizationEntry(isolate, EAGER);
+ EnsureCodeForDeoptimizationEntry(isolate, LAZY);
+ EnsureCodeForDeoptimizationEntry(isolate, SOFT);
}
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
@@ -2008,7 +1822,7 @@ FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
void TranslationBuffer::Add(int32_t value) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
- DCHECK(value != kMinInt);
+ DCHECK_NE(value, kMinInt);
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
uint32_t bits = ((is_negative ? -value : value) << 1) |
@@ -2078,18 +1892,6 @@ void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
}
-void Translation::BeginGetterStubFrame(int literal_id) {
- buffer_->Add(GETTER_STUB_FRAME);
- buffer_->Add(literal_id);
-}
-
-
-void Translation::BeginSetterStubFrame(int literal_id) {
- buffer_->Add(SETTER_STUB_FRAME);
- buffer_->Add(literal_id);
-}
-
-
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
buffer_->Add(literal_id);
@@ -2208,8 +2010,6 @@ void Translation::StoreJSFrameFunction() {
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
- case GETTER_STUB_FRAME:
- case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
case ARGUMENTS_ELEMENTS:
case ARGUMENTS_LENGTH:
@@ -2572,7 +2372,7 @@ Float64 TranslatedValue::double_value() const {
int TranslatedValue::object_length() const {
- DCHECK(kind() == kCapturedObject);
+ DCHECK_EQ(kind(), kCapturedObject);
return materialization_info_.length_;
}
@@ -2765,13 +2565,6 @@ TranslatedFrame TranslatedFrame::InterpretedFrame(
}
-TranslatedFrame TranslatedFrame::AccessorFrame(
- Kind kind, SharedFunctionInfo* shared_info) {
- DCHECK(kind == kSetter || kind == kGetter);
- return TranslatedFrame(kind, shared_info);
-}
-
-
TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
SharedFunctionInfo* shared_info, int height) {
return TranslatedFrame(kArgumentsAdaptor, shared_info, height);
@@ -2807,12 +2600,6 @@ int TranslatedFrame::GetValueCount() {
return height_ + parameter_count + 2;
}
- case kGetter:
- return 2; // Function and receiver.
-
- case kSetter:
- return 3; // Function, receiver and the value to set.
-
case kArgumentsAdaptor:
case kConstructStub:
case kBuiltinContinuation:
@@ -2926,28 +2713,6 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
bailout_id, shared_info, height_with_context);
}
- case Translation::GETTER_STUB_FRAME: {
- SharedFunctionInfo* shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
- PrintF(trace_file, " reading getter frame %s; inputs:\n", name.get());
- }
- return TranslatedFrame::AccessorFrame(TranslatedFrame::kGetter,
- shared_info);
- }
-
- case Translation::SETTER_STUB_FRAME: {
- SharedFunctionInfo* shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
- PrintF(trace_file, " reading setter frame %s; inputs:\n", name.get());
- }
- return TranslatedFrame::AccessorFrame(TranslatedFrame::kSetter,
- shared_info);
- }
-
case Translation::BEGIN:
case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_ELEMENTS:
@@ -3088,8 +2853,6 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::INTERPRETED_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
case Translation::BUILTIN_CONTINUATION_FRAME:
// Peeled off before getting here.
@@ -3357,7 +3120,7 @@ int TranslatedState::CreateNextTranslatedValue(
TranslatedState::TranslatedState(const JavaScriptFrame* frame)
: isolate_(nullptr), stack_frame_pointer_(nullptr) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data =
+ DeoptimizationData* data =
static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
&deopt_index);
DCHECK(data != nullptr && deopt_index != Safepoint::kNoDeoptimizationIndex);
@@ -3475,7 +3238,7 @@ class TranslatedState::CapturedObjectMaterializer {
}
Handle<Object> FieldAt(int* value_index) {
- CHECK(field_count_ > 0);
+ CHECK_GT(field_count_, 0);
--field_count_;
Handle<Object> object = state_->MaterializeAt(frame_index_, value_index);
// This is a big hammer to make sure that the materialized objects do not
@@ -3550,11 +3313,12 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_SET_VALUE_ITERATOR_TYPE: {
Handle<JSSetIterator> object = Handle<JSSetIterator>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> table = materializer.FieldAt(value_index);
Handle<Object> index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
object->set_table(*table);
object->set_index(*index);
@@ -3565,11 +3329,12 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_MAP_VALUE_ITERATOR_TYPE: {
Handle<JSMapIterator> object = Handle<JSMapIterator>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> table = materializer.FieldAt(value_index);
Handle<Object> index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
object->set_table(*table);
object->set_index(*index);
@@ -3672,19 +3437,25 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
// The correct function and context will be set below once available.
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> prototype = materializer.FieldAt(value_index);
Handle<Object> shared = materializer.FieldAt(value_index);
Handle<Object> context = materializer.FieldAt(value_index);
Handle<Object> vector_cell = materializer.FieldAt(value_index);
Handle<Object> code = materializer.FieldAt(value_index);
+ bool has_prototype_slot = map->has_prototype_slot();
+ Handle<Object> prototype;
+ if (has_prototype_slot) {
+ prototype = materializer.FieldAt(value_index);
+ }
object->set_map(*map);
object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
- object->set_prototype_or_initial_map(*prototype);
object->set_shared(SharedFunctionInfo::cast(*shared));
object->set_context(Context::cast(*context));
object->set_feedback_vector_cell(Cell::cast(*vector_cell));
object->set_code(Code::cast(*code));
+ if (has_prototype_slot) {
+ object->set_prototype_or_initial_map(*prototype);
+ }
int in_object_properties = map->GetInObjectProperties();
for (int i = 0; i < in_object_properties; ++i) {
Handle<Object> value = materializer.FieldAt(value_index);
@@ -3875,7 +3646,6 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- case PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_PROXY_TYPE:
case MAP_TYPE:
@@ -3886,6 +3656,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case ACCESSOR_PAIR_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
+ case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_VECTOR_TYPE:
case FOREIGN_TYPE:
@@ -3914,6 +3685,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case WEAK_CELL_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
+ case CODE_DATA_CONTAINER_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
@@ -3957,7 +3729,7 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
case TranslatedValue::kCapturedObject: {
// The map must be a tagged object.
- CHECK(frame->values_[*value_index].kind() == TranslatedValue::kTagged);
+ CHECK_EQ(frame->values_[*value_index].kind(), TranslatedValue::kTagged);
CHECK(frame->values_[*value_index].GetValue()->IsMap());
return MaterializeCapturedObjectAt(slot, frame_index, value_index);
}
@@ -4077,7 +3849,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
if (new_store && value_changed) {
materialized_store->Set(stack_frame_pointer_,
previously_materialized_objects);
- CHECK(frames_[0].kind() == TranslatedFrame::kInterpretedFunction);
+ CHECK_EQ(frames_[0].kind(), TranslatedFrame::kInterpretedFunction);
CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index dcc5619812..b8ab648b9c 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -121,8 +121,6 @@ class TranslatedFrame {
public:
enum Kind {
kInterpretedFunction,
- kGetter,
- kSetter,
kArgumentsAdaptor,
kConstructStub,
kBuiltinContinuation,
@@ -368,6 +366,7 @@ class Deoptimizer : public Malloced {
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
BailoutType bailout_type() const { return bailout_type_; }
+ bool preserve_optimized() const { return preserve_optimized_; }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -406,18 +405,8 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
-
- enum GetEntryMode {
- CALCULATE_ENTRY_ADDRESS,
- ENSURE_ENTRY_CODE
- };
-
-
- static Address GetDeoptimizationEntry(
- Isolate* isolate,
- int id,
- BailoutType type,
- GetEntryMode mode = ENSURE_ENTRY_CODE);
+ static Address GetDeoptimizationEntry(Isolate* isolate, int id,
+ BailoutType type);
static int GetDeoptimizationId(Isolate* isolate,
Address addr,
BailoutType type);
@@ -460,11 +449,8 @@ class Deoptimizer : public Malloced {
int count_;
};
- static size_t GetMaxDeoptTableSize();
-
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- BailoutType type,
- int max_entry_id);
+ BailoutType type);
static void EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
@@ -486,8 +472,6 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index);
- void DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
- int frame_index, bool is_setter_stub_frame);
void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
int frame_index, bool java_script_frame);
@@ -533,6 +517,7 @@ class Deoptimizer : public Malloced {
Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
+ bool preserve_optimized_;
Address from_;
int fp_to_sp_delta_;
bool deoptimizing_throw_;
@@ -772,13 +757,12 @@ class FrameDescription {
class DeoptimizerData {
public:
- explicit DeoptimizerData(MemoryAllocator* allocator);
+ explicit DeoptimizerData(Heap* heap);
~DeoptimizerData();
private:
- MemoryAllocator* allocator_;
- int deopt_entry_code_entries_[Deoptimizer::kLastBailoutType + 1];
- MemoryChunk* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
+ Heap* heap_;
+ Code* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
Deoptimizer* current_;
@@ -825,8 +809,6 @@ class TranslationIterator BASE_EMBEDDED {
V(BUILTIN_CONTINUATION_FRAME) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) \
V(CONSTRUCT_STUB_FRAME) \
- V(GETTER_STUB_FRAME) \
- V(SETTER_STUB_FRAME) \
V(ARGUMENTS_ADAPTOR_FRAME) \
V(DUPLICATED_OBJECT) \
V(ARGUMENTS_ELEMENTS) \
@@ -877,8 +859,6 @@ class Translation BASE_EMBEDDED {
unsigned height);
void BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
int literal_id, unsigned height);
- void BeginGetterStubFrame(int literal_id);
- void BeginSetterStubFrame(int literal_id);
void ArgumentsElements(CreateArgumentsType type);
void ArgumentsLength(CreateArgumentsType type);
void BeginCapturedObject(int length);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 10b8c1637e..603f0bbe03 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -9,7 +9,6 @@
#include "src/assembler-inl.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
@@ -39,14 +38,14 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
const char* name =
- code_ == NULL ? NULL : code_->GetIsolate()->builtins()->Lookup(pc);
+ code_ == nullptr ? nullptr : code_->GetIsolate()->builtins()->Lookup(pc);
- if (name != NULL) {
+ if (name != nullptr) {
SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
return v8_buffer_.start();
}
- if (code_ != NULL) {
+ if (code_ != nullptr) {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
@@ -62,7 +61,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
const char* V8NameConverter::NameInCode(byte* addr) const {
// The V8NameConverter is used for well known code, so we can "safely"
// dereference pointers in generated code.
- return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
+ return (code_ != nullptr) ? reinterpret_cast<const char*>(addr) : "";
}
@@ -166,8 +165,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
StringBuilder out(out_buffer.start(), out_buffer.length());
byte* pc = begin;
disasm::Disassembler d(converter);
- RelocIterator* it = NULL;
- if (converter.code() != NULL) {
+ RelocIterator* it = nullptr;
+ if (converter.code() != nullptr) {
it = new RelocIterator(converter.code());
} else {
// No relocation information when printing code stubs.
@@ -191,8 +190,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
*reinterpret_cast<int32_t*>(pc), num_const);
constants = num_const;
pc += 4;
- } else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
- it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
+ } else if (it != nullptr && !it->done() && it->rinfo()->pc() == pc &&
+ it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
// raw pointer embedded in code stream, e.g., jump table
byte* ptr = *reinterpret_cast<byte**>(pc);
SNPrintF(
@@ -210,7 +209,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
std::vector<byte*> pcs;
std::vector<RelocInfo::Mode> rmodes;
std::vector<intptr_t> datas;
- if (it != NULL) {
+ if (it != nullptr) {
while (!it->done() && it->rinfo()->pc() < pc) {
if (RelocInfo::IsComment(it->rinfo()->rmode())) {
// For comments just collect the text.
@@ -274,7 +273,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
// Emit comments following the last instruction (if any).
- if (it != NULL) {
+ if (it != nullptr) {
for ( ; !it->done(); it->next()) {
if (RelocInfo::IsComment(it->rinfo()->rmode())) {
out.AddFormatted(" %s",
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index ac53f775b1..c0df0e6586 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -17,7 +17,7 @@ class Disassembler : public AllStatic {
// instruction could be decoded.
// the code object is used for name resolution and may be null.
static int Decode(Isolate* isolate, std::ostream* os, byte* begin, byte* end,
- Code* code = NULL);
+ Code* code = nullptr);
};
} // namespace internal
diff --git a/deps/v8/src/diy-fp.h b/deps/v8/src/diy-fp.h
index 1325c94519..7cb89dd905 100644
--- a/deps/v8/src/diy-fp.h
+++ b/deps/v8/src/diy-fp.h
@@ -55,7 +55,7 @@ class DiyFp {
}
void Normalize() {
- DCHECK(f_ != 0);
+ DCHECK_NE(f_, 0);
uint64_t f = f_;
int e = e_;
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
index 8a59a72484..b0a2ecf05e 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/double.h
@@ -34,14 +34,14 @@ class Double {
// The value encoded by this Double must be greater or equal to +0.0.
// It must not be special (infinity, or NaN).
DiyFp AsDiyFp() const {
- DCHECK(Sign() > 0);
+ DCHECK_GT(Sign(), 0);
DCHECK(!IsSpecial());
return DiyFp(Significand(), Exponent());
}
// The value encoded by this Double must be strictly greater than 0.
DiyFp AsNormalizedDiyFp() const {
- DCHECK(value() > 0.0);
+ DCHECK_GT(value(), 0.0);
uint64_t f = Significand();
int e = Exponent();
@@ -121,7 +121,7 @@ class Double {
// Precondition: the value encoded by this Double must be greater or equal
// than +0.0.
DiyFp UpperBoundary() const {
- DCHECK(Sign() > 0);
+ DCHECK_GT(Sign(), 0);
return DiyFp(Significand() * 2 + 1, Exponent() - 1);
}
@@ -130,7 +130,7 @@ class Double {
// exponent as m_plus.
// Precondition: the value encoded by this Double must be greater than 0.
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
- DCHECK(value() > 0.0);
+ DCHECK_GT(value(), 0.0);
DiyFp v = this->AsDiyFp();
bool significand_is_zero = (v.f() == kHiddenBit);
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
index ce5552fcce..3dbfa46507 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/eh-frame.cc
@@ -78,7 +78,7 @@ EhFrameWriter::EhFrameWriter(Zone* zone)
eh_frame_buffer_(zone) {}
void EhFrameWriter::Initialize() {
- DCHECK(writer_state_ == InternalState::kUndefined);
+ DCHECK_EQ(writer_state_, InternalState::kUndefined);
eh_frame_buffer_.reserve(128);
writer_state_ = InternalState::kInitialized;
WriteCie();
@@ -154,7 +154,7 @@ void EhFrameWriter::WriteFdeHeader() {
}
void EhFrameWriter::WriteEhFrameHdr(int code_size) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
//
// In order to calculate offsets in the .eh_frame_hdr, we must know the layout
@@ -236,7 +236,7 @@ void EhFrameWriter::WriteEhFrameHdr(int code_size) {
}
void EhFrameWriter::WritePaddingToAlignedSize(int unpadded_size) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_GE(unpadded_size, 0);
int padding_size = RoundUp(unpadded_size, kPointerSize) - unpadded_size;
@@ -248,7 +248,7 @@ void EhFrameWriter::WritePaddingToAlignedSize(int unpadded_size) {
}
void EhFrameWriter::AdvanceLocation(int pc_offset) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_GE(pc_offset, last_pc_offset_);
uint32_t delta = pc_offset - last_pc_offset_;
@@ -274,7 +274,7 @@ void EhFrameWriter::AdvanceLocation(int pc_offset) {
}
void EhFrameWriter::SetBaseAddressOffset(int base_offset) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_GE(base_offset, 0);
WriteOpcode(EhFrameConstants::DwarfOpcodes::kDefCfaOffset);
WriteULeb128(base_offset);
@@ -282,7 +282,7 @@ void EhFrameWriter::SetBaseAddressOffset(int base_offset) {
}
void EhFrameWriter::SetBaseAddressRegister(Register base_register) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
int code = RegisterToDwarfCode(base_register);
WriteOpcode(EhFrameConstants::DwarfOpcodes::kDefCfaRegister);
WriteULeb128(code);
@@ -291,7 +291,7 @@ void EhFrameWriter::SetBaseAddressRegister(Register base_register) {
void EhFrameWriter::SetBaseAddressRegisterAndOffset(Register base_register,
int base_offset) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_GE(base_offset, 0);
int code = RegisterToDwarfCode(base_register);
WriteOpcode(EhFrameConstants::DwarfOpcodes::kDefCfa);
@@ -302,7 +302,7 @@ void EhFrameWriter::SetBaseAddressRegisterAndOffset(Register base_register,
}
void EhFrameWriter::RecordRegisterSavedToStack(int register_code, int offset) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_EQ(offset % EhFrameConstants::kDataAlignmentFactor, 0);
int factored_offset = offset / EhFrameConstants::kDataAlignmentFactor;
if (factored_offset >= 0) {
@@ -319,13 +319,13 @@ void EhFrameWriter::RecordRegisterSavedToStack(int register_code, int offset) {
}
void EhFrameWriter::RecordRegisterNotModified(Register name) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
WriteOpcode(EhFrameConstants::DwarfOpcodes::kSameValue);
WriteULeb128(RegisterToDwarfCode(name));
}
void EhFrameWriter::RecordRegisterFollowsInitialRule(Register name) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
int code = RegisterToDwarfCode(name);
DCHECK_LE(code, EhFrameConstants::kFollowInitialRuleMask);
WriteByte((EhFrameConstants::kFollowInitialRuleTag
@@ -334,7 +334,7 @@ void EhFrameWriter::RecordRegisterFollowsInitialRule(Register name) {
}
void EhFrameWriter::Finish(int code_size) {
- DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_GE(eh_frame_offset(), cie_size_);
DCHECK_GE(eh_frame_offset(), fde_offset() + kInt32Size);
@@ -360,7 +360,7 @@ void EhFrameWriter::Finish(int code_size) {
}
void EhFrameWriter::GetEhFrame(CodeDesc* desc) {
- DCHECK(writer_state_ == InternalState::kFinalized);
+ DCHECK_EQ(writer_state_, InternalState::kFinalized);
desc->unwinding_info_size = static_cast<int>(eh_frame_buffer_.size());
desc->unwinding_info = eh_frame_buffer_.data();
}
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 8c692ecab8..22bf8012dd 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -71,7 +71,7 @@ enum Where { AT_START, AT_END };
V(FastPackedDoubleElementsAccessor, PACKED_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \
- V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, SeededNumberDictionary) \
+ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, NumberDictionary) \
V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \
@@ -161,7 +161,7 @@ static void CopyDictionaryToObjectElements(
FixedArrayBase* from_base, uint32_t from_start, FixedArrayBase* to_base,
ElementsKind to_kind, uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ NumberDictionary* from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -190,7 +190,7 @@ static void CopyDictionaryToObjectElements(
Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(isolate, i + from_start);
- if (entry != SeededNumberDictionary::kNotFound) {
+ if (entry != NumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
DCHECK(!value->IsTheHole(isolate));
to->set(i + to_start, value, write_barrier_mode);
@@ -401,7 +401,7 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
uint32_t to_start,
int raw_copy_size) {
DisallowHeapAllocation no_allocation;
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ NumberDictionary* from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
@@ -422,7 +422,7 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(isolate, i + from_start);
- if (entry != SeededNumberDictionary::kNotFound) {
+ if (entry != NumberDictionary::kNotFound) {
to->set(i + to_start, from->ValueAt(entry)->Number());
} else {
to->set_the_hole(i + to_start);
@@ -911,7 +911,7 @@ class ElementsAccessorBase : public ElementsAccessor {
// Array optimizations rely on the prototype lookups of Array objects
// always returning undefined. If there is a store to the initial
// prototype object, make sure all of these optimizations are invalidated.
- object->GetIsolate()->UpdateArrayProtectorOnSetLength(object);
+ object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object);
}
Handle<FixedArrayBase> old_elements(object->elements());
// This method should only be called if there's a reason to update the
@@ -1031,11 +1031,11 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
+ Handle<NumberDictionary> Normalize(Handle<JSObject> object) final {
return Subclass::NormalizeImpl(object, handle(object->elements()));
}
- static Handle<SeededNumberDictionary> NormalizeImpl(
+ static Handle<NumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
UNREACHABLE();
}
@@ -1365,15 +1365,15 @@ class DictionaryElementsAccessor
static uint32_t NumberOfElementsImpl(JSObject* receiver,
FixedArrayBase* backing_store) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
+ NumberDictionary* dict = NumberDictionary::cast(backing_store);
return dict->NumberOfElements();
}
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
uint32_t length,
Handle<FixedArrayBase> backing_store) {
- Handle<SeededNumberDictionary> dict =
- Handle<SeededNumberDictionary>::cast(backing_store);
+ Handle<NumberDictionary> dict =
+ Handle<NumberDictionary>::cast(backing_store);
int capacity = dict->Capacity();
uint32_t old_length = 0;
CHECK(array->length()->ToArrayLength(&old_length));
@@ -1429,18 +1429,47 @@ class DictionaryElementsAccessor
UNREACHABLE();
}
+ static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
+ uint32_t end) {
+ Isolate* isolate = receiver->GetIsolate();
+ uint32_t result_length = end < start ? 0u : end - start;
+
+ // Result must also be a dictionary.
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(0, HOLEY_ELEMENTS);
+ JSObject::NormalizeElements(result_array);
+ result_array->set_length(Smi::FromInt(result_length));
+ Handle<NumberDictionary> source_dict(
+ NumberDictionary::cast(receiver->elements()));
+ int entry_count = source_dict->Capacity();
+ for (int i = 0; i < entry_count; i++) {
+ Object* key = source_dict->KeyAt(i);
+ if (!source_dict->ToKey(isolate, i, &key)) continue;
+ uint64_t key_value = NumberToInt64(key);
+ if (key_value >= start && key_value < end) {
+ Handle<NumberDictionary> dest_dict(
+ NumberDictionary::cast(result_array->elements()));
+ Handle<Object> value(source_dict->ValueAt(i), isolate);
+ PropertyDetails details = source_dict->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ AddImpl(result_array, static_cast<uint32_t>(key_value) - start, value,
+ attr, 0);
+ }
+ }
+
+ return result_array;
+ }
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(obj->elements()));
- dict = SeededNumberDictionary::DeleteEntry(dict, entry);
+ Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()));
+ dict = NumberDictionary::DeleteEntry(dict, entry);
obj->set_elements(*dict);
}
static bool HasAccessorsImpl(JSObject* holder,
FixedArrayBase* backing_store) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
+ NumberDictionary* dict = NumberDictionary::cast(backing_store);
if (!dict->requires_slow_elements()) return false;
int capacity = dict->Capacity();
Isolate* isolate = dict->GetIsolate();
@@ -1454,7 +1483,7 @@ class DictionaryElementsAccessor
}
static Object* GetRaw(FixedArrayBase* store, uint32_t entry) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ NumberDictionary* backing_store = NumberDictionary::cast(store);
return backing_store->ValueAt(entry);
}
@@ -1470,14 +1499,14 @@ class DictionaryElementsAccessor
static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
Object* value) {
- SeededNumberDictionary::cast(backing_store)->ValueAtPut(entry, value);
+ NumberDictionary::cast(backing_store)->ValueAtPut(entry, value);
}
static void ReconfigureImpl(Handle<JSObject> object,
Handle<FixedArrayBase> store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) {
- SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(*store);
+ NumberDictionary* dictionary = NumberDictionary::cast(*store);
if (attributes != NONE) object->RequireSlowElements(dictionary);
dictionary->ValueAtPut(entry, *value);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1491,12 +1520,12 @@ class DictionaryElementsAccessor
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
- Handle<SeededNumberDictionary> dictionary =
+ Handle<NumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
? JSObject::NormalizeElements(object)
- : handle(SeededNumberDictionary::cast(object->elements()));
- Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::Add(dictionary, index, value, details);
+ : handle(NumberDictionary::cast(object->elements()));
+ Handle<NumberDictionary> new_dictionary =
+ NumberDictionary::Add(dictionary, index, value, details);
new_dictionary->UpdateMaxNumberKey(index, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (dictionary.is_identical_to(new_dictionary)) return;
@@ -1506,14 +1535,14 @@ class DictionaryElementsAccessor
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* store,
uint32_t entry) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
+ NumberDictionary* dict = NumberDictionary::cast(store);
Object* index = dict->KeyAt(entry);
return !index->IsTheHole(isolate);
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
+ NumberDictionary* dict = NumberDictionary::cast(store);
uint32_t result = 0;
CHECK(dict->KeyAt(entry)->ToArrayIndex(&result));
return result;
@@ -1523,9 +1552,9 @@ class DictionaryElementsAccessor
FixedArrayBase* store, uint32_t index,
PropertyFilter filter) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
+ NumberDictionary* dictionary = NumberDictionary::cast(store);
int entry = dictionary->FindEntry(isolate, index);
- if (entry == SeededNumberDictionary::kNotFound) return kMaxUInt32;
+ if (entry == NumberDictionary::kNotFound) return kMaxUInt32;
if (filter != ALL_PROPERTIES) {
PropertyDetails details = dictionary->DetailsAt(entry);
PropertyAttributes attr = details.attributes();
@@ -1540,11 +1569,11 @@ class DictionaryElementsAccessor
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
- return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
+ return NumberDictionary::cast(backing_store)->DetailsAt(entry);
}
- static uint32_t FilterKey(Handle<SeededNumberDictionary> dictionary,
- int entry, Object* raw_key, PropertyFilter filter) {
+ static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry,
+ Object* raw_key, PropertyFilter filter) {
DCHECK(raw_key->IsNumber());
DCHECK_LE(raw_key->Number(), kMaxUInt32);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1554,7 +1583,7 @@ class DictionaryElementsAccessor
}
static uint32_t GetKeyForEntryImpl(Isolate* isolate,
- Handle<SeededNumberDictionary> dictionary,
+ Handle<NumberDictionary> dictionary,
int entry, PropertyFilter filter) {
DisallowHeapAllocation no_gc;
Object* raw_key = dictionary->KeyAt(entry);
@@ -1567,8 +1596,8 @@ class DictionaryElementsAccessor
KeyAccumulator* keys) {
if (keys->filter() & SKIP_STRINGS) return;
Isolate* isolate = keys->isolate();
- Handle<SeededNumberDictionary> dictionary =
- Handle<SeededNumberDictionary>::cast(backing_store);
+ Handle<NumberDictionary> dictionary =
+ Handle<NumberDictionary>::cast(backing_store);
int capacity = dictionary->Capacity();
Handle<FixedArray> elements = isolate->factory()->NewFixedArray(
GetMaxNumberOfEntries(*object, *backing_store));
@@ -1599,8 +1628,8 @@ class DictionaryElementsAccessor
if (filter & SKIP_STRINGS) return list;
if (filter & ONLY_ALL_CAN_READ) return list;
- Handle<SeededNumberDictionary> dictionary =
- Handle<SeededNumberDictionary>::cast(backing_store);
+ Handle<NumberDictionary> dictionary =
+ Handle<NumberDictionary>::cast(backing_store);
uint32_t capacity = dictionary->Capacity();
for (uint32_t i = 0; i < capacity; i++) {
uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter);
@@ -1617,8 +1646,8 @@ class DictionaryElementsAccessor
KeyAccumulator* accumulator,
AddKeyConversion convert) {
Isolate* isolate = accumulator->isolate();
- Handle<SeededNumberDictionary> dictionary(
- SeededNumberDictionary::cast(receiver->elements()), isolate);
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(receiver->elements()), isolate);
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
@@ -1635,8 +1664,7 @@ class DictionaryElementsAccessor
Handle<Object> value, uint32_t start_from,
uint32_t length, Maybe<bool>* result) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(receiver->elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(receiver->elements());
int capacity = dictionary->Capacity();
Object* the_hole = isolate->heap()->the_hole_value();
Object* undefined = isolate->heap()->undefined_value();
@@ -1683,13 +1711,13 @@ class DictionaryElementsAccessor
}
}
- Handle<SeededNumberDictionary> dictionary(
- SeededNumberDictionary::cast(receiver->elements()), isolate);
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(receiver->elements()), isolate);
// Iterate through entire range, as accessing elements out of order is
// observable
for (uint32_t k = start_from; k < length; ++k) {
int entry = dictionary->FindEntry(isolate, k);
- if (entry == SeededNumberDictionary::kNotFound) {
+ if (entry == NumberDictionary::kNotFound) {
if (search_for_hole) return Just(true);
continue;
}
@@ -1737,8 +1765,8 @@ class DictionaryElementsAccessor
return accessor->IncludesValue(isolate, receiver, value, k + 1,
length);
}
- dictionary = handle(
- SeededNumberDictionary::cast(receiver->elements()), isolate);
+ dictionary =
+ handle(NumberDictionary::cast(receiver->elements()), isolate);
break;
}
}
@@ -1752,13 +1780,13 @@ class DictionaryElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
- Handle<SeededNumberDictionary> dictionary(
- SeededNumberDictionary::cast(receiver->elements()), isolate);
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(receiver->elements()), isolate);
// Iterate through entire range, as accessing elements out of order is
// observable.
for (uint32_t k = start_from; k < length; ++k) {
int entry = dictionary->FindEntry(isolate, k);
- if (entry == SeededNumberDictionary::kNotFound) {
+ if (entry == NumberDictionary::kNotFound) {
continue;
}
@@ -1799,8 +1827,8 @@ class DictionaryElementsAccessor
return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
length);
}
- dictionary = handle(
- SeededNumberDictionary::cast(receiver->elements()), isolate);
+ dictionary =
+ handle(NumberDictionary::cast(receiver->elements()), isolate);
break;
}
}
@@ -1814,8 +1842,7 @@ class DictionaryElementsAccessor
DCHECK_EQ(holder->map()->elements_kind(), DICTIONARY_ELEMENTS);
if (!FLAG_enable_slow_asserts) return;
Isolate* isolate = holder->GetIsolate();
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(holder->elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(holder->elements());
// Validate the requires_slow_elements and max_number_key values.
int capacity = dictionary->Capacity();
bool requires_slow_elements = false;
@@ -1824,7 +1851,7 @@ class DictionaryElementsAccessor
Object* k;
if (!dictionary->ToKey(isolate, i, &k)) continue;
DCHECK_LE(0.0, k->Number());
- if (k->Number() > SeededNumberDictionary::kRequiresSlowElementsLimit) {
+ if (k->Number() > NumberDictionary::kRequiresSlowElementsLimit) {
requires_slow_elements = true;
} else {
max_key = Max(max_key, Smi::ToInt(k));
@@ -1849,20 +1876,21 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
typedef typename KindTraits::BackingStore BackingStore;
- static Handle<SeededNumberDictionary> NormalizeImpl(
- Handle<JSObject> object, Handle<FixedArrayBase> store) {
+ static Handle<NumberDictionary> NormalizeImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store) {
Isolate* isolate = store->GetIsolate();
ElementsKind kind = Subclass::kind();
// Ensure that notifications fire if the array or object prototypes are
// normalizing.
- if (IsSmiOrObjectElementsKind(kind)) {
- isolate->UpdateArrayProtectorOnNormalizeElements(object);
+ if (IsSmiOrObjectElementsKind(kind) ||
+ kind == FAST_STRING_WRAPPER_ELEMENTS) {
+ isolate->UpdateNoElementsProtectorOnNormalizeElements(object);
}
int capacity = object->GetFastElementsUsage();
- Handle<SeededNumberDictionary> dictionary =
- SeededNumberDictionary::New(isolate, capacity);
+ Handle<NumberDictionary> dictionary =
+ NumberDictionary::New(isolate, capacity);
PropertyDetails details = PropertyDetails::Empty();
int j = 0;
@@ -1873,7 +1901,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
max_number_key = i;
Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
- dictionary = SeededNumberDictionary::Add(dictionary, i, value, details);
+ dictionary = NumberDictionary::Add(dictionary, i, value, details);
j++;
}
@@ -1941,8 +1969,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// enough to reliably hit the "window" of remaining elements count where
// normalization would be beneficial.
STATIC_ASSERT(kLengthFraction >=
- SeededNumberDictionary::kEntrySize *
- SeededNumberDictionary::kPreferFastElementsSizeFactor);
+ NumberDictionary::kEntrySize *
+ NumberDictionary::kPreferFastElementsSizeFactor);
size_t current_counter = isolate->elements_deletion_counter();
if (current_counter < length / kLengthFraction) {
isolate->set_elements_deletion_counter(current_counter + 1);
@@ -1966,9 +1994,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (!backing_store->is_the_hole(isolate, i)) {
++num_used;
// Bail out if a number dictionary wouldn't be able to save much space.
- if (SeededNumberDictionary::kPreferFastElementsSizeFactor *
- SeededNumberDictionary::ComputeCapacity(num_used) *
- SeededNumberDictionary::kEntrySize >
+ if (NumberDictionary::kPreferFastElementsSizeFactor *
+ NumberDictionary::ComputeCapacity(num_used) *
+ NumberDictionary::kEntrySize >
static_cast<uint32_t>(backing_store->length())) {
return;
}
@@ -1981,8 +2009,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArrayBase> store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) {
- Handle<SeededNumberDictionary> dictionary =
- JSObject::NormalizeElements(object);
+ Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
entry = dictionary->FindEntry(entry);
DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry,
value, attributes);
@@ -2450,7 +2477,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
uint32_t length = static_cast<uint32_t>(Smi::ToInt(receiver->length()));
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
Handle<Object> result =
@@ -2472,7 +2499,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Arguments* args, uint32_t add_size,
Where add_position) {
uint32_t length = Smi::ToInt(receiver->length());
- DCHECK(0 < add_size);
+ DCHECK_LT(0, add_size);
uint32_t elms_len = backing_store->length();
// Check we do not overflow the new_length.
DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
@@ -3183,8 +3210,7 @@ class TypedElementsAccessor
JSTypedArray* destination,
size_t length, uint32_t offset) {
// The source is a typed array, so we know we don't need to do ToNumber
- // side-effects, as the source elements will always be a number or
- // undefined.
+ // side-effects, as the source elements will always be a number.
DisallowHeapAllocation no_gc;
FixedTypedArrayBase* source_elements =
@@ -3192,10 +3218,10 @@ class TypedElementsAccessor
BackingStore* destination_elements =
BackingStore::cast(destination->elements());
- DCHECK_LE(offset + source->length(), destination->length());
- DCHECK_GE(destination->length(), source->length());
+ DCHECK_LE(offset, destination->length_value());
+ DCHECK_LE(source->length_value(), destination->length_value() - offset);
DCHECK(source->length()->IsSmi());
- DCHECK_EQ(Smi::FromInt(static_cast<int>(length)), source->length());
+ DCHECK_EQ(length, source->length_value());
InstanceType source_type = source_elements->map()->instance_type();
InstanceType destination_type =
@@ -3247,6 +3273,10 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
+#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+ if (isolate->force_slow_path()) return true;
+#endif
+
Object* source_proto = source->map()->prototype();
// Null prototypes are OK - we don't need to do prototype chain lookups on
@@ -3257,7 +3287,7 @@ class TypedElementsAccessor
return true;
}
- return !isolate->IsFastArrayConstructorPrototypeChainIntact(context);
+ return !isolate->IsNoElementsProtectorIntact(context);
}
static bool TryCopyElementsFastNumber(Context* context, JSArray* source,
@@ -3744,6 +3774,29 @@ class SloppyArgumentsElementsAccessor
}
return Just<int64_t>(-1);
}
+
+ static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
+ uint32_t end) {
+ Isolate* isolate = receiver->GetIsolate();
+ uint32_t result_len = end < start ? 0u : end - start;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(HOLEY_ELEMENTS, result_len, result_len);
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(result_array->elements());
+ FixedArray* parameters = FixedArray::cast(receiver->elements());
+ uint32_t insertion_index = 0;
+ for (uint32_t i = start; i < end; i++) {
+ uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
+ ALL_PROPERTIES);
+ if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
+ elements->set(insertion_index, *GetImpl(isolate, parameters, entry));
+ } else {
+ elements->set_the_hole(isolate, insertion_index);
+ }
+ insertion_index++;
+ }
+ return result_array;
+ }
};
@@ -3777,10 +3830,10 @@ class SlowSloppyArgumentsElementsAccessor
// No need to delete a context mapped entry from the arguments elements.
if (entry == kMaxUInt32) return;
Isolate* isolate = obj->GetIsolate();
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(elements->arguments()), isolate);
+ Handle<NumberDictionary> dict(NumberDictionary::cast(elements->arguments()),
+ isolate);
int length = elements->parameter_map_length();
- dict = SeededNumberDictionary::DeleteEntry(dict, entry - length);
+ dict = NumberDictionary::DeleteEntry(dict, entry - length);
elements->set_arguments(*dict);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -3791,13 +3844,13 @@ class SlowSloppyArgumentsElementsAccessor
SloppyArgumentsElements::cast(object->elements()), isolate);
Handle<FixedArrayBase> old_arguments(
FixedArrayBase::cast(elements->arguments()), isolate);
- Handle<SeededNumberDictionary> dictionary =
- old_arguments->IsSeededNumberDictionary()
- ? Handle<SeededNumberDictionary>::cast(old_arguments)
+ Handle<NumberDictionary> dictionary =
+ old_arguments->IsNumberDictionary()
+ ? Handle<NumberDictionary>::cast(old_arguments)
: JSObject::NormalizeElements(object);
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
- Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::Add(dictionary, index, value, details);
+ Handle<NumberDictionary> new_dictionary =
+ NumberDictionary::Add(dictionary, index, value, details);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (*dictionary != *new_dictionary) {
elements->set_arguments(*new_dictionary);
@@ -3828,9 +3881,9 @@ class SlowSloppyArgumentsElementsAccessor
}
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
- Handle<SeededNumberDictionary> arguments(
- SeededNumberDictionary::cast(elements->arguments()), isolate);
- arguments = SeededNumberDictionary::Add(arguments, entry, value, details);
+ Handle<NumberDictionary> arguments(
+ NumberDictionary::cast(elements->arguments()), isolate);
+ arguments = NumberDictionary::Add(arguments, entry, value, details);
// If the attributes were NONE, we would have called set rather than
// reconfigure.
DCHECK_NE(NONE, attributes);
@@ -3869,41 +3922,17 @@ class FastSloppyArgumentsElementsAccessor
return Handle<FixedArray>(elements->arguments(), isolate);
}
- static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) {
- Isolate* isolate = receiver->GetIsolate();
- uint32_t result_len = end < start ? 0u : end - start;
- Handle<JSArray> result_array =
- isolate->factory()->NewJSArray(HOLEY_ELEMENTS, result_len, result_len);
- DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(result_array->elements());
- FixedArray* parameters = FixedArray::cast(receiver->elements());
- uint32_t insertion_index = 0;
- for (uint32_t i = start; i < end; i++) {
- uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
- ALL_PROPERTIES);
- if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
- elements->set(insertion_index, *GetImpl(isolate, parameters, entry));
- } else {
- elements->set_the_hole(isolate, insertion_index);
- }
- insertion_index++;
- }
- return result_array;
- }
-
- static Handle<SeededNumberDictionary> NormalizeImpl(
+ static Handle<NumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
Handle<FixedArray> arguments =
GetArguments(elements->GetIsolate(), *elements);
return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
}
- static Handle<SeededNumberDictionary> NormalizeArgumentsElements(
+ static Handle<NumberDictionary> NormalizeArgumentsElements(
Handle<JSObject> object, Handle<SloppyArgumentsElements> elements,
uint32_t* entry) {
- Handle<SeededNumberDictionary> dictionary =
- JSObject::NormalizeElements(object);
+ Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
elements->set_arguments(*dictionary);
// kMaxUInt32 indicates that a context mapped element got deleted. In this
// case we only normalize the elements (aka. migrate to SLOW_SLOPPY).
@@ -3931,7 +3960,7 @@ class FastSloppyArgumentsElementsAccessor
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
Handle<FixedArray> old_arguments(elements->arguments(), isolate);
- if (old_arguments->IsSeededNumberDictionary() ||
+ if (old_arguments->IsNumberDictionary() ||
static_cast<uint32_t>(old_arguments->length()) < new_capacity) {
GrowCapacityAndConvertImpl(object, new_capacity);
}
@@ -4123,6 +4152,13 @@ class StringWrapperElementsAccessor
uint32_t capacity) {
Handle<FixedArrayBase> old_elements(object->elements());
ElementsKind from_kind = object->GetElementsKind();
+ if (from_kind == FAST_STRING_WRAPPER_ELEMENTS) {
+ // The optimizing compiler relies on the prototype lookups of String
+ // objects always returning undefined. If there's a store to the
+ // initial String.prototype object, make sure all the optimizations
+ // are invalidated.
+ object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object);
+ }
// This method should only be called if there's a reason to update the
// elements.
DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
@@ -4173,7 +4209,7 @@ class FastStringWrapperElementsAccessor
FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>>(name) {}
- static Handle<SeededNumberDictionary> NormalizeImpl(
+ static Handle<NumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
return FastHoleyObjectElementsAccessor::NormalizeImpl(object, elements);
}
@@ -4201,7 +4237,7 @@ class SlowStringWrapperElementsAccessor
void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
bool allow_appending) {
DisallowHeapAllocation no_allocation;
- Object* raw_length = NULL;
+ Object* raw_length = nullptr;
const char* elements_type = "array";
if (obj->IsJSArray()) {
JSArray* array = JSArray::cast(*obj);
@@ -4380,11 +4416,11 @@ void ElementsAccessor::InitializeOncePerProcess() {
void ElementsAccessor::TearDown() {
- if (elements_accessors_ == NULL) return;
+ if (elements_accessors_ == nullptr) return;
#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
ELEMENTS_LIST(ACCESSOR_DELETE)
#undef ACCESSOR_DELETE
- elements_accessors_ = NULL;
+ elements_accessors_ = nullptr;
}
Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
@@ -4439,6 +4475,6 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
return result_array;
}
-ElementsAccessor** ElementsAccessor::elements_accessors_ = NULL;
+ElementsAccessor** ElementsAccessor::elements_accessors_ = nullptr;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 3f81be0c51..348af6d8ea 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -23,7 +23,7 @@ class ElementsAccessor {
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
- DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
+ DCHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
return elements_accessors_[elements_kind];
}
@@ -34,7 +34,7 @@ class ElementsAccessor {
// Returns true if a holder contains an element with the specified index
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
- // the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
+ // the ElementsKind of the ElementsAccessor. If backing_store is nullptr, the
// holder->elements() is used as the backing store. If a |filter| is
// specified the PropertyAttributes of the element at the given index
// are compared to the given |filter|. If they match/overlap the given
@@ -158,7 +158,7 @@ class ElementsAccessor {
virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
- virtual Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) = 0;
+ virtual Handle<NumberDictionary> Normalize(Handle<JSObject> object) = 0;
virtual uint32_t GetCapacity(JSObject* holder,
FixedArrayBase* backing_store) = 0;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index d560512e07..ee4bd55534 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -6,7 +6,6 @@
#include "src/api.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
@@ -17,13 +16,10 @@
namespace v8 {
namespace internal {
-StackGuard::StackGuard()
- : isolate_(NULL) {
-}
-
+StackGuard::StackGuard() : isolate_(nullptr) {}
void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
- DCHECK(isolate_ != NULL);
+ DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(kInterruptLimit);
thread_local_.set_climit(kInterruptLimit);
isolate_->heap()->SetStackLimits();
@@ -31,7 +27,7 @@ void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
void StackGuard::reset_limits(const ExecutionAccess& lock) {
- DCHECK(isolate_ != NULL);
+ DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(thread_local_.real_jslimit_);
thread_local_.set_climit(thread_local_.real_climit_);
isolate_->heap()->SetStackLimits();
@@ -115,7 +111,7 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
}
// Placeholder for return value.
- Object* value = NULL;
+ Object* value = nullptr;
typedef Object* (*JSEntryFunction)(Object* new_target, Object* target,
Object* receiver, int argc,
@@ -218,7 +214,7 @@ MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
MaybeHandle<Object>* exception_out) {
bool is_termination = false;
MaybeHandle<Object> maybe_result;
- if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
+ if (exception_out != nullptr) *exception_out = MaybeHandle<Object>();
DCHECK_IMPLIES(message_handling == MessageHandling::kKeepPending,
exception_out == nullptr);
// Enter a try-block while executing the JavaScript code. To avoid
@@ -318,7 +314,7 @@ void StackGuard::PopPostponeInterruptsScope() {
ExecutionAccess access(isolate_);
PostponeInterruptsScope* top = thread_local_.postpone_interrupts_;
// Make intercepted interrupts active.
- DCHECK((thread_local_.interrupt_flags_ & top->intercept_mask_) == 0);
+ DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
thread_local_.interrupt_flags_ |= top->intercepted_flags_;
if (has_pending_interrupts(access)) set_interrupt_limits(access);
// Remove scope from chain.
@@ -353,8 +349,7 @@ void StackGuard::ClearInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
// Clear the interrupt flag from the chain of PostponeInterruptsScopes.
for (PostponeInterruptsScope* current = thread_local_.postpone_interrupts_;
- current != NULL;
- current = current->prev_) {
+ current != nullptr; current = current->prev_) {
current->intercepted_flags_ &= ~flag;
}
@@ -410,7 +405,7 @@ void StackGuard::ThreadLocal::Clear() {
set_jslimit(kIllegalLimit);
real_climit_ = kIllegalLimit;
set_climit(kIllegalLimit);
- postpone_interrupts_ = NULL;
+ postpone_interrupts_ = nullptr;
interrupt_flags_ = 0;
}
@@ -419,7 +414,7 @@ bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
bool should_set_stack_limits = false;
if (real_climit_ == kIllegalLimit) {
const uintptr_t kLimitSize = FLAG_stack_size * KB;
- DCHECK(GetCurrentStackPosition() > kLimitSize);
+ DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
@@ -427,7 +422,7 @@ bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
set_climit(limit);
should_set_stack_limits = true;
}
- postpone_interrupts_ = NULL;
+ postpone_interrupts_ = nullptr;
interrupt_flags_ = 0;
return should_set_stack_limits;
}
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 8c8b0cf048..54c6259b8b 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -49,8 +49,8 @@ ExternalizeStringExtension::GetNativeFunctionTemplate(
return v8::FunctionTemplate::New(isolate,
ExternalizeStringExtension::Externalize);
} else {
- DCHECK(strcmp(*v8::String::Utf8Value(isolate, str), "isOneByteString") ==
- 0);
+ DCHECK_EQ(strcmp(*v8::String::Utf8Value(isolate, str), "isOneByteString"),
+ 0);
return v8::FunctionTemplate::New(isolate,
ExternalizeStringExtension::IsOneByte);
}
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 9f979d630b..f0b5d72387 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -17,7 +17,7 @@ const char* const StatisticsExtension::kSource =
v8::Local<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> str) {
- DCHECK(strcmp(*v8::String::Utf8Value(isolate, str), "getV8Statistics") == 0);
+ DCHECK_EQ(strcmp(*v8::String::Utf8Value(isolate, str), "getV8Statistics"), 0);
return v8::FunctionTemplate::New(isolate, StatisticsExtension::GetCounters);
}
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index eeb668a25f..123f9c2fd2 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -8,7 +8,6 @@
#include "src/assembler.h"
#include "src/counters.h"
#include "src/ic/stub-cache.h"
-#include "src/trap-handler/trap-handler.h"
#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
#define SYMBOLIZE_FUNCTION
@@ -28,7 +27,7 @@ BUILTIN_LIST_C(FORWARD_DECLARE)
ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
ExternalReferenceTable* external_reference_table =
isolate->external_reference_table();
- if (external_reference_table == NULL) {
+ if (external_reference_table == nullptr) {
external_reference_table = new ExternalReferenceTable(isolate);
isolate->set_external_reference_table(external_reference_table);
}
@@ -241,6 +240,7 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"libc_memmove");
Add(ExternalReference::libc_memset_function(isolate).address(),
"libc_memset");
+ Add(ExternalReference::printf_function(isolate).address(), "printf");
Add(ExternalReference::try_internalize_string_function(isolate).address(),
"try_internalize_string_function");
Add(ExternalReference::check_object_type(isolate).address(),
@@ -268,6 +268,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"orderedhashmap_gethash_raw");
Add(ExternalReference::get_or_create_hash_raw(isolate).address(),
"get_or_create_hash_raw");
+ Add(ExternalReference::jsreceiver_create_identity_hash(isolate).address(),
+ "jsreceiver_create_identity_hash");
Add(ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
isolate)
.address(),
@@ -283,6 +285,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"double_constants.minus_one_half");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
+ Add(ExternalReference::force_slow_path(isolate).address(),
+ "Isolate::force_slow_path_address()");
Add(ExternalReference::runtime_function_table_address(isolate).address(),
"Runtime::runtime_function_table_address()");
Add(ExternalReference::address_of_float_abs_constant().address(),
@@ -309,9 +313,6 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::debug_restart_fp_address(isolate).address(),
"Debug::restart_fp_address()");
- Add(ExternalReference::address_of_regexp_dotall_flag(isolate).address(),
- "FLAG_harmony_regexp_dotall");
-
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
@@ -380,7 +381,7 @@ void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
// Top addresses
static const char* address_names[] = {
#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
- FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
+ FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) nullptr
#undef BUILD_NAME_LITERAL
};
@@ -398,8 +399,9 @@ void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
};
static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(name) \
- {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
+#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \
+ {FUNCTION_ADDR(&Accessors::AccessorName##Getter), \
+ "Accessors::" #AccessorName "Getter"}, /* NOLINT(whitespace/indent) */
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
};
@@ -407,7 +409,7 @@ void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
#define ACCESSOR_SETTER_DECLARATION(name) \
{ FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
+#undef ACCESSOR_SETTER_DECLARATION
};
for (unsigned i = 0; i < arraysize(getters); ++i) {
@@ -455,3 +457,5 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate) {
} // namespace internal
} // namespace v8
+
+#undef SYMBOLIZE_FUNCTION
diff --git a/deps/v8/src/factory-inl.h b/deps/v8/src/factory-inl.h
index a3c7a48e6e..02cdef3a15 100644
--- a/deps/v8/src/factory-inl.h
+++ b/deps/v8/src/factory-inl.h
@@ -54,6 +54,16 @@ PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
+#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
+ Handle<AccessorInfo> Factory::accessor_name##_accessor() { \
+ return Handle<AccessorInfo>(bit_cast<AccessorInfo**>( \
+ &isolate() \
+ ->heap() \
+ ->roots_[Heap::k##AccessorName##AccessorRootIndex])); \
+ }
+ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
Handle<String> Factory::InternalizeString(Handle<String> string) {
if (string->IsInternalizedString()) return string;
return StringTable::LookupString(isolate(), string);
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 7710b0c788..fbb8d55bef 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -14,7 +14,7 @@
#include "src/conversions.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
-#include "src/objects/bigint-inl.h"
+#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/module.h"
@@ -42,7 +42,7 @@ namespace internal {
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
do { \
AllocationResult __allocation__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
+ Object* __object__ = nullptr; \
RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
/* Two GCs before panicking. In newspace will almost always succeed. */ \
for (int __i__ = 0; __i__ < 2; __i__++) { \
@@ -100,7 +100,7 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
result->set_prototype_users(WeakFixedArray::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
- result->set_validity_cell(Smi::kZero);
+ result->set_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
result->set_bit_field(0);
return result;
}
@@ -169,51 +169,68 @@ Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
return oddball;
}
-
-Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
- DCHECK(0 <= size);
+Handle<PropertyArray> Factory::NewPropertyArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_property_array();
CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArray(size, pretenure),
- FixedArray);
+ isolate(), isolate()->heap()->AllocatePropertyArray(length, pretenure),
+ PropertyArray);
}
-Handle<PropertyArray> Factory::NewPropertyArray(int size,
- PretenureFlag pretenure) {
- DCHECK_LE(0, size);
- if (size == 0) return empty_property_array();
+Handle<FixedArray> Factory::NewFixedArrayWithMap(
+ Heap::RootListIndex map_root_index, int length, PretenureFlag pretenure) {
+ // Zero-length case must be handled outside, where the knowledge about
+ // the map is.
+ DCHECK_LT(0, length);
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocatePropertyArray(size, pretenure),
- PropertyArray);
+ isolate()->heap()->AllocateFixedArrayWithMap(
+ map_root_index, length, pretenure),
+ FixedArray);
}
-MaybeHandle<FixedArray> Factory::TryNewFixedArray(int size,
+Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFixedArray(length, pretenure),
+ FixedArray);
+}
+
+MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
PretenureFlag pretenure) {
- DCHECK(0 <= size);
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+
AllocationResult allocation =
- isolate()->heap()->AllocateFixedArray(size, pretenure);
- Object* array = NULL;
+ isolate()->heap()->AllocateFixedArray(length, pretenure);
+ Object* array = nullptr;
if (!allocation.To(&array)) return MaybeHandle<FixedArray>();
return Handle<FixedArray>(FixedArray::cast(array), isolate());
}
-Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
+Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- DCHECK(0 <= size);
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateFixedArrayWithFiller(size,
- pretenure,
- *the_hole_value()),
+ isolate()->heap()->AllocateFixedArrayWithFiller(
+ Heap::kFixedArrayMapRootIndex, length, pretenure, *the_hole_value()),
FixedArray);
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
+Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+
// TODO(ulan): As an experiment this temporarily returns an initialized fixed
// array. After getting canary/performance coverage, either remove the
// function or revert to returning uninitilized array.
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFixedArray(size, NOT_TENURED),
+ isolate()->heap()->AllocateFixedArray(length, NOT_TENURED),
FixedArray);
}
@@ -255,7 +272,7 @@ Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
PretenureFlag pretenure) {
- DCHECK(0 <= size);
+ DCHECK_LE(0, size);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
@@ -266,7 +283,7 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
int size,
PretenureFlag pretenure) {
- DCHECK(0 <= size);
+ DCHECK_LE(0, size);
Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure);
if (size > 0) {
Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, size);
@@ -397,7 +414,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
decoder->Reset(string.start() + non_ascii_start,
length - non_ascii_start);
int utf16_length = static_cast<int>(decoder->Utf16Length());
- DCHECK(utf16_length > 0);
+ DCHECK_GT(utf16_length, 0);
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -432,7 +449,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
isolate()->unicode_cache()->utf8_decoder());
decoder->Reset(start + non_ascii_start, length - non_ascii_start);
int utf16_length = static_cast<int>(decoder->Utf16Length());
- DCHECK(utf16_length > 0);
+ DCHECK_GT(utf16_length, 0);
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -597,7 +614,7 @@ MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
if (length > String::kMaxLength || length < 0) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
}
- DCHECK(length > 0); // Use Factory::empty_string() instead.
+ DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawOneByteString(length, pretenure),
@@ -610,7 +627,7 @@ MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
if (length > String::kMaxLength || length < 0) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
}
- DCHECK(length > 0); // Use Factory::empty_string() instead.
+ DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
@@ -634,7 +651,7 @@ Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
single_character_string_cache()->set(code, *result);
return result;
}
- DCHECK(code <= String::kMaxUtf16CodeUnitU);
+ DCHECK_LE(code, String::kMaxUtf16CodeUnitU);
Handle<SeqTwoByteString> result = NewRawTwoByteString(1).ToHandleChecked();
result->SeqTwoByteStringSet(0, static_cast<uint16_t>(code));
@@ -836,7 +853,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
return MakeOrFindTwoCharacterString(isolate(), c1, c2);
}
- if (length < SlicedString::kMinLength) {
+ if (!FLAG_string_slices || length < SlicedString::kMinLength) {
if (str->IsOneByteRepresentation()) {
Handle<SeqOneByteString> result =
NewRawOneByteString(length).ToHandleChecked();
@@ -1146,8 +1163,10 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info =
Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE, TENURED));
- info->set_flag(0); // Must clear the flag, it was initialized as undefined.
+ info->set_name(*empty_string());
+ info->set_flags(0); // Must clear the flags, it was initialized as undefined.
info->set_is_sloppy(true);
+ info->set_initial_property_attributes(NONE);
return info;
}
@@ -1188,7 +1207,7 @@ Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
- DCHECK(0 <= length);
+ DCHECK_LE(0, length);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateByteArray(length, pretenure),
@@ -1199,7 +1218,7 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
Handle<BytecodeArray> Factory::NewBytecodeArray(
int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
Handle<FixedArray> constant_pool) {
- DCHECK(0 <= length);
+ DCHECK_LE(0, length);
CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBytecodeArray(
length, raw_bytecodes, frame_size,
parameter_count, *constant_pool),
@@ -1286,21 +1305,20 @@ Handle<AllocationSite> Factory::NewAllocationSite() {
return site;
}
-
-Handle<Map> Factory::NewMap(InstanceType type,
- int instance_size,
- ElementsKind elements_kind) {
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
+ isolate()->heap()->AllocateMap(type, instance_size, elements_kind,
+ inobject_properties),
Map);
}
Handle<JSObject> Factory::CopyJSObject(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyJSObject(*object, NULL),
- JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(), isolate()->heap()->CopyJSObject(*object, nullptr), JSObject);
}
@@ -1309,8 +1327,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
Handle<AllocationSite> site) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->CopyJSObject(
- *object,
- site.is_null() ? NULL : *site),
+ *object, site.is_null() ? nullptr : *site),
JSObject);
}
@@ -1413,32 +1430,9 @@ Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
HeapNumber);
}
-Handle<BigInt> Factory::NewBigInt(int length, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateBigInt(length, true, pretenure),
- BigInt);
-}
-
-Handle<BigInt> Factory::NewBigIntRaw(int length, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateBigInt(length, false, pretenure),
- BigInt);
-}
-
-Handle<BigInt> Factory::NewBigIntFromInt(int value, PretenureFlag pretenure) {
- if (value == 0) return NewBigInt(0);
- Handle<BigInt> result = NewBigIntRaw(1);
- if (value > 0) {
- result->set_digit(0, value);
- } else if (value == kMinInt) {
- STATIC_ASSERT(kMinInt == -kMaxInt - 1);
- result->set_digit(0, static_cast<BigInt::digit_t>(kMaxInt) + 1);
- result->set_sign(true);
- } else {
- result->set_digit(0, -value);
- result->set_sign(true);
- }
- return result;
+Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length) {
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBigInt(length),
+ FreshlyAllocatedBigInt);
}
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
@@ -1532,25 +1526,44 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
function->set_shared(*info);
function->set_code(info->code());
function->set_context(*context_or_undefined);
- function->set_prototype_or_initial_map(*the_hole_value());
function->set_feedback_vector_cell(*undefined_cell());
- isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
+ int header_size;
+ if (map->has_prototype_slot()) {
+ header_size = JSFunction::kSizeWithPrototype;
+ function->set_prototype_or_initial_map(*the_hole_value());
+ } else {
+ header_size = JSFunction::kSizeWithoutPrototype;
+ }
+ isolate()->heap()->InitializeJSObjectBody(*function, *map, header_size);
return function;
}
-Handle<JSFunction> Factory::NewFunction(Handle<Map> map, Handle<String> name,
- MaybeHandle<Code> maybe_code) {
- DCHECK(!name.is_null());
+Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
+ NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
+ Handle<JSFunction> result = NewFunction(args);
+ DCHECK(is_sloppy(result->shared()->language_mode()));
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
+ DCHECK(!args.name_.is_null());
+
+ // Create the SharedFunctionInfo.
Handle<Context> context(isolate()->native_context());
+ Handle<Map> map = args.GetMap(isolate());
Handle<SharedFunctionInfo> info =
- NewSharedFunctionInfo(name, maybe_code, map->is_constructor());
- // Proper language mode in shared function info will be set outside.
+ NewSharedFunctionInfo(args.name_, args.maybe_code_, map->is_constructor(),
+ kNormalFunction, args.maybe_builtin_id_);
+
+ // Proper language mode in shared function info will be set later.
DCHECK(is_sloppy(info->language_mode()));
DCHECK(!map->IsUndefined(isolate()));
+
#ifdef DEBUG
if (isolate()->bootstrapper()->IsActive()) {
Handle<Code> code;
- bool has_code = maybe_code.ToHandle(&code);
+ bool has_code = args.maybe_code_.ToHandle(&code);
DCHECK(
// During bootstrapping some of these maps could be not created yet.
(*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
@@ -1573,80 +1586,48 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map, Handle<String> name,
(*map == *isolate()->native_function_map()));
}
#endif
- return NewFunction(map, info, context);
-}
+ Handle<JSFunction> result = NewFunction(map, info, context);
-Handle<JSFunction> Factory::NewFunction(Handle<String> name) {
- Handle<JSFunction> result =
- NewFunction(isolate()->sloppy_function_map(), name, MaybeHandle<Code>());
- DCHECK(is_sloppy(result->shared()->language_mode()));
- return result;
-}
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
- Handle<String> name, Handle<Code> code, LanguageMode language_mode) {
- Handle<Map> map = is_strict(language_mode)
- ? isolate()->strict_function_without_prototype_map()
- : isolate()->sloppy_function_without_prototype_map();
- Handle<JSFunction> result = NewFunction(map, name, code);
- result->shared()->set_language_mode(language_mode);
- return result;
-}
+ if (args.should_set_prototype_) {
+ result->set_prototype_or_initial_map(
+ *args.maybe_prototype_.ToHandleChecked());
+ }
-Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
- Handle<Object> prototype,
- LanguageMode language_mode,
- MutableMode prototype_mutability) {
- Handle<Map> map;
- if (prototype_mutability == MUTABLE) {
- map = is_strict(language_mode) ? isolate()->strict_function_map()
- : isolate()->sloppy_function_map();
- } else {
- map = is_strict(language_mode)
- ? isolate()->strict_function_with_readonly_prototype_map()
- : isolate()->sloppy_function_with_readonly_prototype_map();
+ if (args.should_set_language_mode_) {
+ result->shared()->set_language_mode(args.language_mode_);
}
- Handle<JSFunction> result = NewFunction(map, name, code);
- result->set_prototype_or_initial_map(*prototype);
- result->shared()->set_language_mode(language_mode);
- return result;
-}
-Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
- Handle<Object> prototype,
- InstanceType type, int instance_size,
- LanguageMode language_mode,
- MutableMode prototype_mutability) {
- // Allocate the function
- Handle<JSFunction> function =
- NewFunction(name, code, prototype, language_mode, prototype_mutability);
-
- ElementsKind elements_kind =
- type == JS_ARRAY_TYPE ? PACKED_SMI_ELEMENTS : TERMINAL_FAST_ELEMENTS_KIND;
- Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
- // TODO(littledan): Why do we have this is_generator test when
- // NewFunctionPrototype already handles finding an appropriately
- // shared prototype?
- if (!IsResumableFunction(function->shared()->kind())) {
- if (prototype->IsTheHole(isolate())) {
- prototype = NewFunctionPrototype(function);
+ if (args.should_create_and_set_initial_map_) {
+ ElementsKind elements_kind;
+ switch (args.type_) {
+ case JS_ARRAY_TYPE:
+ elements_kind = PACKED_SMI_ELEMENTS;
+ break;
+ case JS_ARGUMENTS_TYPE:
+ elements_kind = PACKED_ELEMENTS;
+ break;
+ default:
+ elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ break;
}
+ Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
+ elements_kind, args.inobject_properties_);
+ // TODO(littledan): Why do we have this is_generator test when
+ // NewFunctionPrototype already handles finding an appropriately
+ // shared prototype?
+ Handle<Object> prototype = args.maybe_prototype_.ToHandleChecked();
+ if (!IsResumableFunction(result->shared()->kind())) {
+ if (prototype->IsTheHole(isolate())) {
+ prototype = NewFunctionPrototype(result);
+ }
+ }
+ JSFunction::SetInitialMap(result, initial_map, prototype);
}
- JSFunction::SetInitialMap(function, initial_map, prototype);
- return function;
-}
-
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
- Handle<Code> code,
- InstanceType type,
- int instance_size) {
- DCHECK(isolate()->bootstrapper()->IsActive());
- return NewFunction(name, code, the_hole_value(), type, instance_size, STRICT);
+ return result;
}
-
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
@@ -1773,95 +1754,55 @@ Handle<JSObject> Factory::NewExternal(void* value) {
return external;
}
-
-Handle<Code> Factory::NewCodeRaw(int object_size, bool immovable) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateCode(object_size, immovable),
- Code);
+Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
+ Handle<CodeDataContainer> data_container =
+ New<CodeDataContainer>(code_data_container_map(), OLD_SPACE);
+ data_container->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
+ data_container->set_kind_specific_flags(flags);
+ data_container->clear_padding();
+ return data_container;
}
-Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Kind kind,
- Handle<Object> self_ref, bool immovable) {
+Handle<Code> Factory::NewCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, MaybeHandle<HandlerTable> maybe_handler_table,
+ MaybeHandle<ByteArray> maybe_source_position_table,
+ MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
+ uint32_t stub_key, bool is_turbofanned, int stack_slots,
+ int safepoint_table_offset) {
Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
+
+ Handle<HandlerTable> handler_table =
+ maybe_handler_table.is_null() ? HandlerTable::Empty(isolate())
+ : maybe_handler_table.ToHandleChecked();
+ Handle<ByteArray> source_position_table =
+ maybe_source_position_table.is_null()
+ ? empty_byte_array()
+ : maybe_source_position_table.ToHandleChecked();
+ Handle<DeoptimizationData> deopt_data =
+ maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
+ : maybe_deopt_data.ToHandleChecked();
- bool has_unwinding_info = desc.unwinding_info != nullptr;
- DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
- (!has_unwinding_info && desc.unwinding_info_size == 0));
-
- // Compute size.
- int body_size = desc.instr_size;
- int unwinding_info_size_field_size = kInt64Size;
- if (has_unwinding_info) {
- body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
- unwinding_info_size_field_size;
- }
- int obj_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
-
- Handle<Code> code = NewCodeRaw(obj_size, immovable);
- DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
- isolate()->heap()->memory_allocator()->code_range()->contains(
- code->address()) ||
- obj_size <= isolate()->heap()->code_space()->AreaSize());
-
- // The code object has not been fully initialized yet. We rely on the
- // fact that no allocation will happen from this point on.
- DisallowHeapAllocation no_gc;
- code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(*reloc_info);
- code->initialize_flags(kind);
- code->set_has_unwinding_info(has_unwinding_info);
- code->set_raw_kind_specific_flags1(0);
- code->set_raw_kind_specific_flags2(0);
- code->set_has_tagged_params(true);
- code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_raw_type_feedback_info(Smi::kZero);
- code->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
- code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_source_position_table(*empty_byte_array(), SKIP_WRITE_BARRIER);
- code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
- code->set_builtin_index(-1);
- code->set_trap_handler_index(Smi::FromInt(-1));
-
- switch (code->kind()) {
- case Code::OPTIMIZED_FUNCTION:
- code->set_marked_for_deoptimization(false);
- break;
- case Code::JS_TO_WASM_FUNCTION:
- case Code::WASM_FUNCTION:
- code->set_has_tagged_params(false);
- break;
- default:
- break;
- }
-
- // Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
- if (!self_ref.is_null()) *(self_ref.location()) = *code;
-
- // Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFrom(desc);
-
- code->clear_padding();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify();
-#endif
- return code;
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateCode(
+ desc, kind, self_ref, builtin_index, *reloc_info, *data_container,
+ *handler_table, *source_position_table, *deopt_data, movability,
+ stub_key, is_turbofanned, stack_slots, safepoint_table_offset),
+ Code);
}
Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
- const bool kNotImmovable = false;
- return NewCodeRaw(size, kNotImmovable);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateCode(size, kMovable),
+ Code);
}
Handle<Code> Factory::CopyCode(Handle<Code> code) {
+ Handle<CodeDataContainer> data_container =
+ NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code),
- Code);
+ isolate()->heap()->CopyCode(*code, *data_container), Code);
}
@@ -1899,12 +1840,12 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
// require us to change property values to PropertyCells.
- DCHECK(map->NextFreePropertyIndex() == 0);
+ DCHECK_EQ(map->NextFreePropertyIndex(), 0);
// Make sure we don't have a ton of pre-allocated slots in the
// global objects. They will be unused once we normalize the object.
- DCHECK(map->unused_property_fields() == 0);
- DCHECK(map->GetInObjectProperties() == 0);
+ DCHECK_EQ(map->UnusedPropertyFields(), 0);
+ DCHECK_EQ(map->GetInObjectProperties(), 0);
// Initial size of the backing store to avoid resize of the storage during
// bootstrapping. The size differs between the JS global object ad the
@@ -1958,9 +1899,8 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObjectFromMap(
- *map,
- pretenure,
- allocation_site.is_null() ? NULL : *allocation_site),
+ *map, pretenure,
+ allocation_site.is_null() ? nullptr : *allocation_site),
JSObject);
}
@@ -2095,6 +2035,7 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
module->set_script(Script::cast(code->script()));
module->set_status(Module::kUninstantiated);
module->set_exception(isolate()->heap()->the_hole_value());
+ module->set_import_meta(isolate()->heap()->the_hole_value());
module->set_dfs_index(-1);
module->set_dfs_ancestor_index(-1);
return module;
@@ -2317,7 +2258,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
size_t element_size = GetExternalArrayElementSize(type);
ElementsKind elements_kind = GetExternalArrayElementsKind(type);
- CHECK(byte_offset % element_size == 0);
+ CHECK_EQ(byte_offset % element_size, 0);
CHECK(length <= (std::numeric_limits<size_t>::max() / element_size));
CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
@@ -2365,7 +2306,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
Handle<JSArrayBuffer> buffer =
NewJSArrayBuffer(SharedFlag::kNotShared, pretenure);
- JSArrayBuffer::Setup(buffer, isolate(), true, NULL, byte_length,
+ JSArrayBuffer::Setup(buffer, isolate(), true, nullptr, byte_length,
SharedFlag::kNotShared);
obj->set_buffer(*buffer);
Handle<FixedTypedArrayBase> elements = NewFixedTypedArray(
@@ -2401,6 +2342,9 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
isolate(), prototype,
JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
+ SaveContext save(isolate());
+ isolate()->set_context(*target_function->GetCreationContext());
+
// Create the [[BoundArguments]] for the result.
Handle<FixedArray> bound_arguments;
if (bound_args.length() == 0) {
@@ -2450,7 +2394,6 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
result->initialize_properties();
result->set_target(*target);
result->set_handler(*handler);
- result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
return result;
}
@@ -2474,7 +2417,8 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<Map> old_map(object->map(), isolate());
// The proxy's hash should be retained across reinitialization.
- Handle<Object> hash(object->hash(), isolate());
+ Handle<Object> raw_properties_or_hash(object->raw_properties_or_hash(),
+ isolate());
if (old_map->is_prototype_map()) {
map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
@@ -2488,9 +2432,6 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
DCHECK(map->instance_size() == old_map->instance_size());
DCHECK(map->instance_type() == old_map->instance_type());
- // Allocate the backing storage for the properties.
- Handle<FixedArray> properties = empty_fixed_array();
-
// In order to keep heap in consistent state there must be no allocations
// before object re-initialization is finished.
DisallowHeapAllocation no_allocation;
@@ -2500,10 +2441,7 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Heap* heap = isolate()->heap();
// Reinitialize the object from the constructor map.
- heap->InitializeJSObjectFromMap(*object, *properties, *map);
-
- // Restore the saved hash.
- object->set_hash(*hash);
+ heap->InitializeJSObjectFromMap(*object, *raw_properties_or_hash, *map);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
@@ -2553,7 +2491,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
MaybeHandle<String> maybe_name, MaybeHandle<Code> maybe_code,
- bool is_constructor, FunctionKind kind) {
+ bool is_constructor, FunctionKind kind, int maybe_builtin_index) {
// Function names are assumed to be flat elsewhere. Must flatten before
// allocating SharedFunctionInfo to avoid GC seeing the uninitialized SFI.
Handle<String> shared_name;
@@ -2573,14 +2511,15 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
if (!maybe_code.ToHandle(&code)) {
code = BUILTIN_CODE(isolate(), Illegal);
}
- Object* function_data =
- (code->is_builtin() && Builtins::IsLazy(code->builtin_index()))
- ? Smi::FromInt(code->builtin_index())
- : Object::cast(*undefined_value());
+ Object* function_data = (Builtins::IsBuiltinId(maybe_builtin_index) &&
+ Builtins::IsLazy(maybe_builtin_index))
+ ? Smi::FromInt(maybe_builtin_index)
+ : Object::cast(*undefined_value());
share->set_function_data(function_data, SKIP_WRITE_BARRIER);
share->set_code(*code);
share->set_scope_info(ScopeInfo::Empty(isolate()));
share->set_outer_scope_info(*the_hole_value());
+ DCHECK(!Builtins::IsLazy(Builtins::kConstructedNonConstructable));
Handle<Code> construct_stub =
is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
: BUILTIN_CODE(isolate(), ConstructedNonConstructable);
@@ -2609,8 +2548,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_compiler_hints(0);
share->set_kind(kind);
- share->set_preparsed_scope_data(*null_value());
-
share->clear_padding();
// Link into the list.
@@ -2758,7 +2695,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
Handle<SourcePositionTableWithFrameCache>
Factory::NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
- Handle<UnseededNumberDictionary> stack_frame_cache) {
+ Handle<NumberDictionary> stack_frame_cache) {
Handle<SourcePositionTableWithFrameCache>
source_position_table_with_frame_cache =
Handle<SourcePositionTableWithFrameCache>::cast(
@@ -2781,22 +2718,15 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
DCHECK(!isolate()->has_pending_exception());
Handle<JSObject> result = NewJSObjectFromMap(map);
Handle<Smi> value(Smi::FromInt(length), isolate());
- Object::SetProperty(result, length_string(), value, STRICT).Assert();
+ Object::SetProperty(result, length_string(), value, LanguageMode::kStrict)
+ .Assert();
if (!strict_mode_callee) {
- Object::SetProperty(result, callee_string(), callee, STRICT).Assert();
+ Object::SetProperty(result, callee_string(), callee, LanguageMode::kStrict)
+ .Assert();
}
return result;
}
-
-Handle<JSWeakMap> Factory::NewJSWeakMap() {
- // TODO(adamk): Currently the map is only created three times per
- // isolate. If it's created more often, the map should be moved into the
- // strong root list.
- Handle<Map> map = NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
- return Handle<JSWeakMap>::cast(NewJSObjectFromMap(map));
-}
-
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
int number_of_properties) {
DCHECK(native_context->IsNativeContext());
@@ -2923,27 +2853,28 @@ Handle<String> Factory::ToPrimitiveHintString(ToPrimitiveHint hint) {
Handle<Map> Factory::CreateSloppyFunctionMap(
FunctionMode function_mode, MaybeHandle<JSFunction> maybe_empty_function) {
- Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetSloppyFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ bool has_prototype = IsFunctionModeWithPrototype(function_mode);
+ int header_size = has_prototype ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
+ int descriptors_count = has_prototype ? 5 : 4;
+ int inobject_properties_count = 0;
+ if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
+
+ Handle<Map> map = NewMap(
+ JS_FUNCTION_TYPE, header_size + inobject_properties_count * kPointerSize,
+ TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
+ map->set_has_prototype_slot(has_prototype);
+ map->set_is_constructor(has_prototype);
map->set_is_callable();
Handle<JSFunction> empty_function;
if (maybe_empty_function.ToHandle(&empty_function)) {
Map::SetPrototype(map, empty_function);
}
- return map;
-}
-void Factory::SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode) {
- int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
- int inobject_properties_count = 0;
- if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
- map->SetInObjectProperties(inobject_properties_count);
- map->set_instance_size(JSFunction::kSize +
- inobject_properties_count * kPointerSize);
-
- Map::EnsureDescriptorSlack(map, size);
+ //
+ // Setup descriptors array.
+ //
+ Map::EnsureDescriptorSlack(map, descriptors_count);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -2954,11 +2885,9 @@ void Factory::SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
int field_index = 0;
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
{ // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(length->name())), length, roc_attribs);
+ length_string(), function_length_accessor(), roc_attribs);
map->AppendDescriptor(&d);
}
@@ -2972,24 +2901,18 @@ void Factory::SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
} else {
// Add name accessor.
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(name->name())), name, roc_attribs);
+ name_string(), function_name_accessor(), roc_attribs);
map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> args =
- Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
{ // Add arguments accessor.
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(args->name())), args, ro_attribs);
+ arguments_string(), function_arguments_accessor(), ro_attribs);
map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> caller =
- Accessors::FunctionCallerInfo(isolate(), ro_attribs);
{ // Add caller accessor.
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(caller->name())), caller, ro_attribs);
+ caller_string(), function_caller_accessor(), ro_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
@@ -2997,39 +2920,37 @@ void Factory::SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
PropertyAttributes attribs =
IsFunctionModeWithWritablePrototype(function_mode) ? rw_attribs
: ro_attribs;
- Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), attribs);
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(prototype->name())), prototype, attribs);
+ prototype_string(), function_prototype_accessor(), attribs);
map->AppendDescriptor(&d);
}
DCHECK_EQ(inobject_properties_count, field_index);
+ return map;
}
Handle<Map> Factory::CreateStrictFunctionMap(
FunctionMode function_mode, Handle<JSFunction> empty_function) {
- Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetStrictFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
- map->set_is_callable();
- Map::SetPrototype(map, empty_function);
- return map;
-}
-
-void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode) {
- DCHECK_EQ(JS_FUNCTION_TYPE, map->instance_type());
+ bool has_prototype = IsFunctionModeWithPrototype(function_mode);
+ int header_size = has_prototype ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
int inobject_properties_count = 0;
if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
if (IsFunctionModeWithHomeObject(function_mode)) ++inobject_properties_count;
- map->SetInObjectProperties(inobject_properties_count);
- map->set_instance_size(JSFunction::kSize +
- inobject_properties_count * kPointerSize);
-
- int size = (IsFunctionModeWithPrototype(function_mode) ? 3 : 2) +
- inobject_properties_count;
+ int descriptors_count = (IsFunctionModeWithPrototype(function_mode) ? 3 : 2) +
+ inobject_properties_count;
+
+ Handle<Map> map = NewMap(
+ JS_FUNCTION_TYPE, header_size + inobject_properties_count * kPointerSize,
+ TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
+ map->set_has_prototype_slot(has_prototype);
+ map->set_is_constructor(has_prototype);
+ map->set_is_callable();
+ Map::SetPrototype(map, empty_function);
- Map::EnsureDescriptorSlack(map, size);
+ //
+ // Setup descriptors array.
+ //
+ Map::EnsureDescriptorSlack(map, descriptors_count);
PropertyAttributes rw_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
@@ -3041,10 +2962,8 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
int field_index = 0;
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
{ // Add length accessor.
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
- handle(Name::cast(length->name())), length, roc_attribs);
+ length_string(), function_length_accessor(), roc_attribs);
map->AppendDescriptor(&d);
}
@@ -3058,10 +2977,17 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
} else {
// Add name accessor.
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
- handle(Name::cast(name->name())), name, roc_attribs);
+ name_string(), function_name_accessor(), roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ STATIC_ASSERT(JSFunction::kMaybeHomeObjectDescriptorIndex == 2);
+ if (IsFunctionModeWithHomeObject(function_mode)) {
+ // Add home object field.
+ Handle<Name> name = isolate()->factory()->home_object_symbol();
+ Descriptor d = Descriptor::DataField(name, field_index++, DONT_ENUM,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
@@ -3070,57 +2996,177 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
PropertyAttributes attribs =
IsFunctionModeWithWritablePrototype(function_mode) ? rw_attribs
: ro_attribs;
- Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), attribs);
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(prototype->name())), prototype, attribs);
- map->AppendDescriptor(&d);
- }
-
- if (IsFunctionModeWithHomeObject(function_mode)) {
- // Add home object field.
- Handle<Name> name = isolate()->factory()->home_object_symbol();
- Descriptor d = Descriptor::DataField(name, field_index++, DONT_ENUM,
- Representation::Tagged());
+ prototype_string(), function_prototype_accessor(), attribs);
map->AppendDescriptor(&d);
}
DCHECK_EQ(inobject_properties_count, field_index);
+ return map;
}
Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
- Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetClassFunctionInstanceDescriptor(map);
+ Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype);
+ map->set_has_prototype_slot(true);
map->set_is_constructor(true);
+ map->set_is_prototype_map(true);
map->set_is_callable();
Map::SetPrototype(map, empty_function);
- return map;
-}
-void Factory::SetClassFunctionInstanceDescriptor(Handle<Map> map) {
+ //
+ // Setup descriptors array.
+ //
Map::EnsureDescriptorSlack(map, 2);
- PropertyAttributes rw_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
{ // Add length accessor.
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
- handle(Name::cast(length->name())), length, roc_attribs);
+ length_string(), function_length_accessor(), roc_attribs);
map->AppendDescriptor(&d);
}
{
// Add prototype accessor.
- Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), rw_attribs);
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(prototype->name())), prototype, rw_attribs);
+ prototype_string(), function_prototype_accessor(), ro_attribs);
map->AppendDescriptor(&d);
}
+ return map;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForWasm(Handle<String> name, Handle<Code> code,
+ Handle<Map> map) {
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.maybe_code_ = code;
+ args.language_mode_ = LanguageMode::kSloppy;
+ args.prototype_mutability_ = MUTABLE;
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForBuiltin(Handle<String> name,
+ Handle<Code> code, Handle<Map> map,
+ int builtin_id) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.maybe_code_ = code;
+ args.maybe_builtin_id_ = builtin_id;
+ args.language_mode_ = LanguageMode::kStrict;
+ args.prototype_mutability_ = MUTABLE;
+
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForFunctionWithoutCode(
+ Handle<String> name, Handle<Map> map, LanguageMode language_mode) {
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.language_mode_ = language_mode;
+ args.prototype_mutability_ = MUTABLE;
+
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
+ Handle<String> name, Handle<Code> code, Handle<Object> prototype,
+ InstanceType type, int instance_size, int inobject_properties,
+ int builtin_id, MutableMode prototype_mutability) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_code_ = code;
+ args.type_ = type;
+ args.instance_size_ = instance_size;
+ args.inobject_properties_ = inobject_properties;
+ args.maybe_prototype_ = prototype;
+ args.maybe_builtin_id_ = builtin_id;
+ args.language_mode_ = LanguageMode::kStrict;
+ args.prototype_mutability_ = prototype_mutability;
+
+ args.SetShouldCreateAndSetInitialMap();
+ args.SetShouldSetPrototype();
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForBuiltinWithoutPrototype(
+ Handle<String> name, Handle<Code> code, int builtin_id,
+ LanguageMode language_mode) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_code_ = code;
+ args.maybe_builtin_id_ = builtin_id;
+ args.language_mode_ = language_mode;
+ args.prototype_mutability_ = MUTABLE;
+
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+void NewFunctionArgs::SetShouldCreateAndSetInitialMap() {
+ // Needed to create the initial map.
+ maybe_prototype_.Assert();
+ DCHECK_NE(kUninitialized, instance_size_);
+ DCHECK_NE(kUninitialized, inobject_properties_);
+
+ should_create_and_set_initial_map_ = true;
+}
+
+void NewFunctionArgs::SetShouldSetPrototype() {
+ maybe_prototype_.Assert();
+ should_set_prototype_ = true;
+}
+
+void NewFunctionArgs::SetShouldSetLanguageMode() {
+ DCHECK(language_mode_ == LanguageMode::kStrict ||
+ language_mode_ == LanguageMode::kSloppy);
+ should_set_language_mode_ = true;
+}
+
+Handle<Map> NewFunctionArgs::GetMap(Isolate* isolate) const {
+ if (!maybe_map_.is_null()) {
+ return maybe_map_.ToHandleChecked();
+ } else if (maybe_prototype_.is_null()) {
+ return is_strict(language_mode_)
+ ? isolate->strict_function_without_prototype_map()
+ : isolate->sloppy_function_without_prototype_map();
+ } else {
+ DCHECK(!maybe_prototype_.is_null());
+ switch (prototype_mutability_) {
+ case MUTABLE:
+ return is_strict(language_mode_) ? isolate->strict_function_map()
+ : isolate->sloppy_function_map();
+ case IMMUTABLE:
+ return is_strict(language_mode_)
+ ? isolate->strict_function_with_readonly_prototype_map()
+ : isolate->sloppy_function_with_readonly_prototype_map();
+ }
+ }
+ UNREACHABLE();
}
} // namespace internal
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 3fe2a79d86..cb76aab3b7 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -11,6 +11,8 @@
#include "src/messages.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
+#include "src/objects/js-array.h"
+#include "src/objects/js-regexp.h"
#include "src/objects/scope-info.h"
#include "src/objects/string.h"
#include "src/string-hasher.h"
@@ -20,14 +22,15 @@ namespace internal {
// Forward declarations.
class AliasedArgumentsEntry;
-class BigInt;
class BreakPointInfo;
class BreakPoint;
class BoilerplateDescription;
class ConstantElementsPair;
class CoverageInfo;
class DebugInfo;
+class FreshlyAllocatedBigInt;
class JSModuleNamespace;
+class NewFunctionArgs;
struct SourceRange;
class PreParsedScopeData;
class TemplateObjectDescription;
@@ -67,10 +70,15 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Object> to_number, const char* type_of,
byte kind);
+ // Allocates a fixed array-like object with given map and initialized with
+ // undefined values.
+ Handle<FixedArray> NewFixedArrayWithMap(Heap::RootListIndex map_root_index,
+ int length, PretenureFlag pretenure);
+
// Allocates a fixed array initialized with undefined values.
- Handle<FixedArray> NewFixedArray(int size,
+ Handle<FixedArray> NewFixedArray(int length,
PretenureFlag pretenure = NOT_TENURED);
- Handle<PropertyArray> NewPropertyArray(int size,
+ Handle<PropertyArray> NewPropertyArray(int length,
PretenureFlag pretenure = NOT_TENURED);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
@@ -79,15 +87,14 @@ class V8_EXPORT_PRIVATE Factory final {
// NewFixedArray as a fallback.
MUST_USE_RESULT
MaybeHandle<FixedArray> TryNewFixedArray(
- int size, PretenureFlag pretenure = NOT_TENURED);
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed array with non-existing entries (the hole).
Handle<FixedArray> NewFixedArrayWithHoles(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(int size);
+ Handle<FixedArray> NewUninitializedFixedArray(int length);
// Allocates a feedback vector whose slots are initialized with undefined
// values.
@@ -375,7 +382,7 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<SourcePositionTableWithFrameCache>
NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
- Handle<UnseededNumberDictionary> stack_frame_cache);
+ Handle<NumberDictionary> stack_frame_cache);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -416,7 +423,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<AllocationSite> NewAllocationSite();
Handle<Map> NewMap(InstanceType type, int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ int inobject_properties = 0);
Handle<HeapObject> NewFillerObject(int size,
bool double_align,
@@ -481,15 +489,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<HeapNumber> NewHeapNumber(MutableMode mode,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates a new BigInt with {length} digits and zero-initializes them.
- Handle<BigInt> NewBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
- // Initializes length and sign fields, but leaves digits uninitialized.
- Handle<BigInt> NewBigIntRaw(int length,
- PretenureFlag pretenure = NOT_TENURED);
- Handle<BigInt> NewBigIntFromInt(int value,
- PretenureFlag pretenure = NOT_TENURED);
-
- Handle<JSWeakMap> NewJSWeakMap();
+ // Allocates a new BigInt with {length} digits. Only to be used by
+ // MutableBigInt::New*.
+ Handle<FreshlyAllocatedBigInt> NewBigInt(int length);
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -616,18 +618,14 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy(int size);
- Handle<JSFunction> NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Object> context_or_undefined,
- PretenureFlag pretenure = TENURED);
- Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
- Handle<Object> prototype,
- LanguageMode language_mode = SLOPPY,
- MutableMode prototype_mutability = MUTABLE);
- Handle<JSFunction> NewFunction(Handle<String> name);
- Handle<JSFunction> NewFunctionWithoutPrototype(
- Handle<String> name, Handle<Code> code,
- LanguageMode language_mode = SLOPPY);
+ // Creates a new JSFunction according to the given args. This is the function
+ // you'll probably want to use when creating a JSFunction from the runtime.
+ Handle<JSFunction> NewFunction(const NewFunctionArgs& args);
+
+ // For testing only. Creates a sloppy function without code.
+ Handle<JSFunction> NewFunctionForTest(Handle<String> name);
+
+ // Function creation from SharedFunctionInfo.
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
@@ -646,17 +644,12 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
PretenureFlag pretenure = TENURED);
- Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
- Handle<Object> prototype, InstanceType type,
- int instance_size,
- LanguageMode language_mode = SLOPPY,
- MutableMode prototype_mutability = MUTABLE);
- Handle<JSFunction> NewFunction(Handle<String> name,
- Handle<Code> code,
- InstanceType type,
- int instance_size);
- Handle<JSFunction> NewFunction(Handle<Map> map, Handle<String> name,
- MaybeHandle<Code> maybe_code);
+ // The choke-point for JSFunction creation. Handles allocation and
+ // initialization. All other utility methods call into this.
+ Handle<JSFunction> NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined,
+ PretenureFlag pretenure = TENURED);
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
@@ -669,11 +662,24 @@ class V8_EXPORT_PRIVATE Factory final {
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
+ // Creates a new CodeDataContainer for a Code object.
+ Handle<CodeDataContainer> NewCodeDataContainer(int flags);
+
// The reference to the Code object is stored in self_reference.
// This allows generated code to reference its own Code object
// by containing this handle.
Handle<Code> NewCode(const CodeDesc& desc, Code::Kind kind,
- Handle<Object> self_reference, bool immovable = false);
+ Handle<Object> self_reference,
+ int32_t builtin_index = Builtins::kNoBuiltinId,
+ MaybeHandle<HandlerTable> maybe_handler_table =
+ MaybeHandle<HandlerTable>(),
+ MaybeHandle<ByteArray> maybe_source_position_table =
+ MaybeHandle<ByteArray>(),
+ MaybeHandle<DeoptimizationData> maybe_deopt_data =
+ MaybeHandle<DeoptimizationData>(),
+ Movability movability = kMovable, uint32_t stub_key = 0,
+ bool is_turbofanned = false, int stack_slots = 0,
+ int safepoint_table_offset = 0);
// Allocates a new, empty code object for use by builtin deserialization. The
// given {size} argument specifies the size of the entire code object.
@@ -741,13 +747,19 @@ class V8_EXPORT_PRIVATE Factory final {
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
+#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
+ inline Handle<AccessorInfo> accessor_name##_accessor();
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
Handle<ScopeInfo> scope_info);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<Code> code, bool is_constructor,
- FunctionKind kind = kNormalFunction);
+ FunctionKind kind = kNormalFunction,
+ int maybe_builtin_index = Builtins::kNoBuiltinId);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script);
@@ -839,9 +851,6 @@ class V8_EXPORT_PRIVATE Factory final {
MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
PretenureFlag pretenure);
- // Creates a code object that is not yet fully initialized yet.
- Handle<Code> NewCodeRaw(int object_size, bool immovable);
-
// Attempt to find the number in a small cache. If we finds it, return
// the string representation of the number. Otherwise return undefined.
Handle<Object> GetNumberStringCache(Handle<Object> number);
@@ -852,14 +861,59 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
+};
+
+// Utility class to simplify argument handling around JSFunction creation.
+class NewFunctionArgs final {
+ public:
+ static NewFunctionArgs ForWasm(Handle<String> name, Handle<Code> code,
+ Handle<Map> map);
+ static NewFunctionArgs ForBuiltin(Handle<String> name, Handle<Code> code,
+ Handle<Map> map, int builtin_id);
+ static NewFunctionArgs ForFunctionWithoutCode(Handle<String> name,
+ Handle<Map> map,
+ LanguageMode language_mode);
+ static NewFunctionArgs ForBuiltinWithPrototype(
+ Handle<String> name, Handle<Code> code, Handle<Object> prototype,
+ InstanceType type, int instance_size, int inobject_properties,
+ int builtin_id, MutableMode prototype_mutability);
+ static NewFunctionArgs ForBuiltinWithoutPrototype(Handle<String> name,
+ Handle<Code> code,
+ int builtin_id,
+ LanguageMode language_mode);
+
+ Handle<Map> GetMap(Isolate* isolate) const;
+
+ private:
+ NewFunctionArgs() {} // Use the static factory constructors.
+
+ void SetShouldCreateAndSetInitialMap();
+ void SetShouldSetPrototype();
+ void SetShouldSetLanguageMode();
+
+ // Sentinel value.
+ static const int kUninitialized = -1;
+
+ Handle<String> name_;
+ MaybeHandle<Map> maybe_map_;
+ MaybeHandle<Code> maybe_code_;
+
+ bool should_create_and_set_initial_map_ = false;
+ InstanceType type_;
+ int instance_size_ = kUninitialized;
+ int inobject_properties_ = kUninitialized;
+
+ bool should_set_prototype_ = false;
+ MaybeHandle<Object> maybe_prototype_;
+
+ bool should_set_language_mode_ = false;
+ LanguageMode language_mode_;
- void SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode);
+ int maybe_builtin_id_ = kUninitialized;
- void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode);
+ MutableMode prototype_mutability_;
- void SetClassFunctionInstanceDescriptor(Handle<Map> map);
+ friend class Factory;
};
} // namespace internal
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index ed7223ee44..7c8438e62f 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -435,7 +435,7 @@ static bool DigitGen(DiyFp low,
// data (like the interval or 'unit'), too.
// Note that the multiplication by 10 does not overflow, because w.e >= -60
// and thus one.e >= -60.
- DCHECK(one.e() >= -60);
+ DCHECK_GE(one.e(), -60);
DCHECK(fractionals < one.f());
DCHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
while (true) {
@@ -491,8 +491,8 @@ static bool DigitGenCounted(DiyFp w,
int* length,
int* kappa) {
DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
- DCHECK(kMinimalTargetExponent >= -60);
- DCHECK(kMaximalTargetExponent <= -32);
+ DCHECK_GE(kMinimalTargetExponent, -60);
+ DCHECK_LE(kMaximalTargetExponent, -32);
// w is assumed to have an error less than 1 unit. Whenever w is scaled we
// also scale its error.
uint64_t w_error = 1;
@@ -543,7 +543,7 @@ static bool DigitGenCounted(DiyFp w,
// data (the 'unit'), too.
// Note that the multiplication by 10 does not overflow, because w.e >= -60
// and thus one.e >= -60.
- DCHECK(one.e() >= -60);
+ DCHECK_GE(one.e(), -60);
DCHECK(fractionals < one.f());
DCHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
while (requested_digits > 0 && fractionals > w_error) {
@@ -689,7 +689,7 @@ bool FastDtoa(double v,
Vector<char> buffer,
int* length,
int* decimal_point) {
- DCHECK(v > 0);
+ DCHECK_GT(v, 0);
DCHECK(!Double(v).IsSpecial());
bool result = false;
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index 54d287a3b7..e14381f2ab 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -31,7 +31,7 @@ bool FeedbackMetadata::is_empty() const {
int FeedbackMetadata::slot_count() const {
if (length() == 0) return 0;
- DCHECK(length() > kReservedIndexCount);
+ DCHECK_GT(length(), kReservedIndexCount);
return Smi::ToInt(get(kSlotsCountIndex));
}
@@ -44,6 +44,7 @@ FeedbackVector* FeedbackVector::cast(Object* obj) {
int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
switch (kind) {
case FeedbackSlotKind::kForIn:
+ case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kBinaryOp:
case FeedbackSlotKind::kLiteral:
@@ -293,6 +294,16 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
total++;
break;
}
+ case FeedbackSlotKind::kInstanceOf: {
+ if (obj->IsWeakCell()) {
+ with++;
+ } else if (obj == megamorphic_sentinel) {
+ gen++;
+ with++;
+ }
+ total++;
+ break;
+ }
case FeedbackSlotKind::kCreateClosure:
case FeedbackSlotKind::kLiteral:
break;
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index c105effd77..0572b85395 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -193,6 +193,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "TypeProfile";
case FeedbackSlotKind::kForIn:
return "ForIn";
+ case FeedbackSlotKind::kInstanceOf:
+ return "InstanceOf";
case FeedbackSlotKind::kKindsNumber:
break;
}
@@ -282,6 +284,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kStoreKeyedStrict:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile:
+ case FeedbackSlotKind::kInstanceOf:
vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
break;
@@ -297,8 +300,9 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
}
Handle<FeedbackVector> result = Handle<FeedbackVector>::cast(vector);
- if (!isolate->is_best_effort_code_coverage()) {
- AddToCodeCoverageList(isolate, result);
+ if (!isolate->is_best_effort_code_coverage() ||
+ isolate->is_collecting_type_profile()) {
+ AddToVectorsForProfilingTools(isolate, result);
}
return result;
}
@@ -309,21 +313,23 @@ Handle<FeedbackVector> FeedbackVector::Copy(Isolate* isolate,
Handle<FeedbackVector> result;
result = Handle<FeedbackVector>::cast(
isolate->factory()->CopyFixedArray(Handle<FixedArray>::cast(vector)));
- if (!isolate->is_best_effort_code_coverage()) {
- AddToCodeCoverageList(isolate, result);
+ if (!isolate->is_best_effort_code_coverage() ||
+ isolate->is_collecting_type_profile()) {
+ AddToVectorsForProfilingTools(isolate, result);
}
return result;
}
// static
-void FeedbackVector::AddToCodeCoverageList(Isolate* isolate,
- Handle<FeedbackVector> vector) {
- DCHECK(!isolate->is_best_effort_code_coverage());
+void FeedbackVector::AddToVectorsForProfilingTools(
+ Isolate* isolate, Handle<FeedbackVector> vector) {
+ DCHECK(!isolate->is_best_effort_code_coverage() ||
+ isolate->is_collecting_type_profile());
if (!vector->shared_function_info()->IsSubjectToDebugging()) return;
- Handle<ArrayList> list =
- Handle<ArrayList>::cast(isolate->factory()->code_coverage_list());
+ Handle<ArrayList> list = Handle<ArrayList>::cast(
+ isolate->factory()->feedback_vectors_for_profiling_tools());
list = ArrayList::Add(list, vector);
- isolate->SetCodeCoverageList(*list);
+ isolate->SetFeedbackVectorsForProfilingTools(*list);
}
// static
@@ -445,9 +451,17 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
// Set(slot, Smi::kZero);
break;
}
- case FeedbackSlotKind::kCreateClosure: {
- case FeedbackSlotKind::kTypeProfile:
- break;
+ case FeedbackSlotKind::kInstanceOf: {
+ InstanceOfICNexus nexus(this, slot);
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
+ break;
+ }
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kTypeProfile: {
+ break;
}
case FeedbackSlotKind::kLiteral: {
Set(slot, Smi::kZero, SKIP_WRITE_BARRIER);
@@ -510,12 +524,22 @@ void FeedbackNexus::ConfigurePremonomorphic() {
SKIP_WRITE_BARRIER);
}
-void FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
+bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
+ DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
- SetFeedback(*FeedbackVector::MegamorphicSentinel(isolate),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::FromInt(static_cast<int>(property_type)),
- SKIP_WRITE_BARRIER);
+ bool changed = false;
+ Symbol* sentinel = *FeedbackVector::MegamorphicSentinel(isolate);
+ if (GetFeedback() != sentinel) {
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER);
+ changed = true;
+ }
+
+ Smi* extra = Smi::FromInt(static_cast<int>(property_type));
+ if (changed || GetFeedbackExtra() != extra) {
+ SetFeedbackExtra(extra, SKIP_WRITE_BARRIER);
+ changed = true;
+ }
+ return changed;
}
InlineCacheState LoadICNexus::StateFromFeedback() const {
@@ -705,7 +729,7 @@ void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
MapHandles const& maps,
ObjectHandles* handlers) {
int receiver_count = static_cast<int>(maps.size());
- DCHECK(receiver_count > 1);
+ DCHECK_GT(receiver_count, 1);
Handle<FixedArray> array;
if (name.is_null()) {
array = EnsureArrayOfSize(receiver_count * 2);
@@ -833,7 +857,7 @@ Name* KeyedLoadICNexus::FindFirstName() const {
if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback);
}
- return NULL;
+ return nullptr;
}
Name* KeyedStoreICNexus::FindFirstName() const {
@@ -841,7 +865,23 @@ Name* KeyedStoreICNexus::FindFirstName() const {
if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback);
}
- return NULL;
+ return nullptr;
+}
+
+KeyedAccessLoadMode KeyedLoadICNexus::GetKeyedAccessLoadMode() const {
+ MapHandles maps;
+ ObjectHandles handlers;
+
+ if (GetKeyType() == PROPERTY) return STANDARD_LOAD;
+
+ ExtractMaps(&maps);
+ FindHandlers(&handlers, static_cast<int>(maps.size()));
+ for (Handle<Object> const& handler : handlers) {
+ KeyedAccessLoadMode mode = LoadHandler::GetKeyedAccessLoadMode(*handler);
+ if (mode != STANDARD_LOAD) return mode;
+ }
+
+ return STANDARD_LOAD;
}
KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
@@ -952,6 +992,32 @@ ForInHint ForInICNexus::GetForInFeedback() const {
return ForInHintFromFeedback(feedback);
}
+void InstanceOfICNexus::ConfigureUninitialized() {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+}
+
+InlineCacheState InstanceOfICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ }
+ return MONOMORPHIC;
+}
+
+MaybeHandle<JSObject> InstanceOfICNexus::GetConstructorFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+ if (feedback->IsWeakCell() && !WeakCell::cast(feedback)->cleared()) {
+ return handle(JSObject::cast(WeakCell::cast(feedback)->value()), isolate);
+ }
+ return MaybeHandle<JSObject>();
+}
+
InlineCacheState StoreDataPropertyInLiteralICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -1004,26 +1070,26 @@ void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
Object* const feedback = GetFeedback();
// Map source position to collection of types
- Handle<UnseededNumberDictionary> types;
+ Handle<NumberDictionary> types;
if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- types = UnseededNumberDictionary::New(isolate, 1);
+ types = NumberDictionary::New(isolate, 1);
} else {
- types = handle(UnseededNumberDictionary::cast(feedback));
+ types = handle(NumberDictionary::cast(feedback));
}
Handle<ArrayList> position_specific_types;
int entry = types->FindEntry(position);
- if (entry == UnseededNumberDictionary::kNotFound) {
+ if (entry == NumberDictionary::kNotFound) {
position_specific_types = ArrayList::New(isolate, 1);
- types = UnseededNumberDictionary::Set(
+ types = NumberDictionary::Set(
types, position, ArrayList::Add(position_specific_types, type));
} else {
DCHECK(types->ValueAt(entry)->IsArrayList());
position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
if (!InList(position_specific_types, type)) { // Add type
- types = UnseededNumberDictionary::Set(
+ types = NumberDictionary::Set(
types, position, ArrayList::Add(position_specific_types, type));
}
}
@@ -1044,12 +1110,12 @@ std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
return source_positions;
}
- Handle<UnseededNumberDictionary> types = Handle<UnseededNumberDictionary>(
- UnseededNumberDictionary::cast(feedback), isolate);
+ Handle<NumberDictionary> types =
+ Handle<NumberDictionary>(NumberDictionary::cast(feedback), isolate);
- for (int index = UnseededNumberDictionary::kElementsStartIndex;
- index < types->length(); index += UnseededNumberDictionary::kEntrySize) {
- int key_index = index + UnseededNumberDictionary::kEntryKeyIndex;
+ for (int index = NumberDictionary::kElementsStartIndex;
+ index < types->length(); index += NumberDictionary::kEntrySize) {
+ int key_index = index + NumberDictionary::kEntryKeyIndex;
Object* key = types->get(key_index);
if (key->IsSmi()) {
int position = Smi::cast(key)->value();
@@ -1069,11 +1135,11 @@ std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
return types_for_position;
}
- Handle<UnseededNumberDictionary> types = Handle<UnseededNumberDictionary>(
- UnseededNumberDictionary::cast(feedback), isolate);
+ Handle<NumberDictionary> types =
+ Handle<NumberDictionary>(NumberDictionary::cast(feedback), isolate);
int entry = types->FindEntry(position);
- if (entry == UnseededNumberDictionary::kNotFound) {
+ if (entry == NumberDictionary::kNotFound) {
return types_for_position;
}
DCHECK(types->ValueAt(entry)->IsArrayList());
@@ -1090,17 +1156,16 @@ std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
namespace {
Handle<JSObject> ConvertToJSObject(Isolate* isolate,
- Handle<UnseededNumberDictionary> feedback) {
+ Handle<NumberDictionary> feedback) {
Handle<JSObject> type_profile =
isolate->factory()->NewJSObject(isolate->object_function());
- for (int index = UnseededNumberDictionary::kElementsStartIndex;
- index < feedback->length();
- index += UnseededNumberDictionary::kEntrySize) {
- int key_index = index + UnseededNumberDictionary::kEntryKeyIndex;
+ for (int index = NumberDictionary::kElementsStartIndex;
+ index < feedback->length(); index += NumberDictionary::kEntrySize) {
+ int key_index = index + NumberDictionary::kEntryKeyIndex;
Object* key = feedback->get(key_index);
if (key->IsSmi()) {
- int value_index = index + UnseededNumberDictionary::kEntryValueIndex;
+ int value_index = index + NumberDictionary::kEntryValueIndex;
Handle<ArrayList> position_specific_types(
ArrayList::cast(feedback->get(value_index)));
@@ -1127,8 +1192,7 @@ JSObject* CollectTypeProfileNexus::GetTypeProfile() const {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
- return *ConvertToJSObject(isolate,
- handle(UnseededNumberDictionary::cast(feedback)));
+ return *ConvertToJSObject(isolate, handle(NumberDictionary::cast(feedback)));
}
} // namespace internal
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index efa1cf5924..fdcf9ff01a 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -48,6 +48,7 @@ enum class FeedbackSlotKind {
kCreateClosure,
kLiteral,
kForIn,
+ kInstanceOf,
kKindsNumber // Last value indicating number of kinds.
};
@@ -108,7 +109,8 @@ inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
FeedbackSlotKind::kLastSloppyKind);
STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <=
FeedbackSlotKind::kLastSloppyKind);
- return (kind <= FeedbackSlotKind::kLastSloppyKind) ? SLOPPY : STRICT;
+ return (kind <= FeedbackSlotKind::kLastSloppyKind) ? LanguageMode::kSloppy
+ : LanguageMode::kStrict;
}
std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind);
@@ -230,6 +232,8 @@ class FeedbackVector : public HeapObject {
DECL_PRINTER(FeedbackVector)
DECL_VERIFIER(FeedbackVector)
+ void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); // NOLINT
+
// Clears the vector slots. Return true if feedback has changed.
bool ClearSlots(Isolate* isolate);
@@ -277,14 +281,17 @@ class FeedbackVector : public HeapObject {
}
private:
- static void AddToCodeCoverageList(Isolate* isolate,
- Handle<FeedbackVector> vector);
+ static void AddToVectorsForProfilingTools(Isolate* isolate,
+ Handle<FeedbackVector> vector);
+
+ void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
+ FeedbackSlotKind kind); // NOLINT
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackVector);
};
template <typename Derived>
-class FeedbackVectorSpecBase {
+class V8_EXPORT_PRIVATE FeedbackVectorSpecBase {
public:
FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); }
@@ -307,7 +314,7 @@ class FeedbackVectorSpecBase {
}
FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
return AddSlot(is_strict(language_mode)
? FeedbackSlotKind::kStoreNamedStrict
: FeedbackSlotKind::kStoreNamedSloppy);
@@ -318,29 +325,33 @@ class FeedbackVectorSpecBase {
}
FeedbackSlot AddStoreGlobalICSlot(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
return AddSlot(is_strict(language_mode)
? FeedbackSlotKind::kStoreGlobalStrict
: FeedbackSlotKind::kStoreGlobalSloppy);
}
FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
return AddSlot(is_strict(language_mode)
? FeedbackSlotKind::kStoreKeyedStrict
: FeedbackSlotKind::kStoreKeyedSloppy);
}
- FeedbackSlot AddInterpreterBinaryOpICSlot() {
+ FeedbackSlot AddBinaryOpICSlot() {
return AddSlot(FeedbackSlotKind::kBinaryOp);
}
- FeedbackSlot AddInterpreterCompareICSlot() {
+ FeedbackSlot AddCompareICSlot() {
return AddSlot(FeedbackSlotKind::kCompareOp);
}
FeedbackSlot AddForInSlot() { return AddSlot(FeedbackSlotKind::kForIn); }
+ FeedbackSlot AddInstanceOfSlot() {
+ return AddSlot(FeedbackSlotKind::kInstanceOf);
+ }
+
FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); }
FeedbackSlot AddStoreDataPropertyInLiteralICSlot() {
@@ -378,7 +389,7 @@ class StaticFeedbackVectorSpec
friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
void append(FeedbackSlotKind kind) {
- DCHECK(slot_count_ < kMaxLength);
+ DCHECK_LT(slot_count_, kMaxLength);
kinds_[slot_count_++] = kind;
}
@@ -388,7 +399,8 @@ class StaticFeedbackVectorSpec
FeedbackSlotKind kinds_[kMaxLength];
};
-class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
+class V8_EXPORT_PRIVATE FeedbackVectorSpec
+ : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
public:
explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
slot_kinds_.reserve(16);
@@ -534,13 +546,13 @@ class FeedbackMetadataIterator {
class FeedbackNexus {
public:
FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_handle_(vector), vector_(NULL), slot_(slot) {}
+ : vector_handle_(vector), vector_(nullptr), slot_(slot) {}
FeedbackNexus(FeedbackVector* vector, FeedbackSlot slot)
: vector_(vector), slot_(slot) {}
virtual ~FeedbackNexus() {}
Handle<FeedbackVector> vector_handle() const {
- DCHECK(vector_ == NULL);
+ DCHECK_NULL(vector_);
return vector_handle_;
}
FeedbackVector* vector() const {
@@ -557,14 +569,14 @@ class FeedbackNexus {
MapHandles maps;
ExtractMaps(&maps);
if (maps.size() > 0) return *maps.at(0);
- return NULL;
+ return nullptr;
}
virtual InlineCacheState StateFromFeedback() const = 0;
virtual int ExtractMaps(MapHandles* maps) const;
virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
virtual bool FindHandlers(ObjectHandles* code_list, int length = -1) const;
- virtual Name* FindFirstName() const { return NULL; }
+ virtual Name* FindFirstName() const { return nullptr; }
bool IsCleared() {
InlineCacheState state = StateFromFeedback();
@@ -574,7 +586,7 @@ class FeedbackNexus {
virtual void Clear() { ConfigureUninitialized(); }
virtual void ConfigureUninitialized();
void ConfigurePremonomorphic();
- void ConfigureMegamorphic(IcCheckType property_type);
+ bool ConfigureMegamorphic(IcCheckType property_type);
inline Object* GetFeedback() const;
inline Object* GetFeedbackExtra() const;
@@ -697,6 +709,7 @@ class KeyedLoadICNexus : public FeedbackNexus {
void Clear() override { ConfigurePremonomorphic(); }
+ KeyedAccessLoadMode GetKeyedAccessLoadMode() const;
IcCheckType GetKeyType() const;
InlineCacheState StateFromFeedback() const override;
Name* FindFirstName() const override;
@@ -819,6 +832,31 @@ class ForInICNexus final : public FeedbackNexus {
}
};
+class InstanceOfICNexus final : public FeedbackNexus {
+ public:
+ InstanceOfICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackSlotKind::kInstanceOf, vector->GetKind(slot));
+ }
+ InstanceOfICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackSlotKind::kInstanceOf, vector->GetKind(slot));
+ }
+
+ void ConfigureUninitialized() final;
+
+ InlineCacheState StateFromFeedback() const final;
+ MaybeHandle<JSObject> GetConstructorFeedback() const;
+
+ int ExtractMaps(MapHandles* maps) const final { return 0; }
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+ return MaybeHandle<Code>();
+ }
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
+ return length == 0;
+ }
+};
+
class StoreDataPropertyInLiteralICNexus : public FeedbackNexus {
public:
StoreDataPropertyInLiteralICNexus(Handle<FeedbackVector> vector,
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index be990f238e..61540773db 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -12,31 +12,33 @@
namespace v8 {
namespace internal {
-inline FieldIndex FieldIndex::ForInObjectOffset(int offset, const Map* map) {
- DCHECK((offset % kPointerSize) == 0);
- int index = offset / kPointerSize;
- DCHECK(map == NULL ||
- index < (map->GetInObjectPropertyOffset(0) / kPointerSize +
- map->GetInObjectProperties()));
- return FieldIndex(true, index, false, 0, 0, true);
+inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding,
+ const Map* map) {
+ DCHECK(map == nullptr || offset < map->instance_size());
+ DCHECK(encoding == kWord32 ? (offset % kInt32Size) == 0
+ : (offset % kPointerSize) == 0);
+ return FieldIndex(true, offset, encoding, 0, 0);
}
inline FieldIndex FieldIndex::ForPropertyIndex(const Map* map,
int property_index,
- bool is_double) {
+ Representation representation) {
DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
int inobject_properties = map->GetInObjectProperties();
bool is_inobject = property_index < inobject_properties;
int first_inobject_offset;
+ int offset;
if (is_inobject) {
first_inobject_offset = map->GetInObjectPropertyOffset(0);
+ offset = map->GetInObjectPropertyOffset(property_index);
} else {
first_inobject_offset = FixedArray::kHeaderSize;
property_index -= inobject_properties;
+ offset = FixedArray::kHeaderSize + property_index * kPointerSize;
}
- return FieldIndex(is_inobject,
- property_index + first_inobject_offset / kPointerSize,
- is_double, inobject_properties, first_inobject_offset);
+ Encoding encoding = FieldEncoding(representation);
+ return FieldIndex(is_inobject, offset, encoding, inobject_properties,
+ first_inobject_offset);
}
// Takes an index as computed by GetLoadByFieldIndex and reconstructs a
@@ -45,21 +47,22 @@ inline FieldIndex FieldIndex::ForLoadByFieldIndex(const Map* map,
int orig_index) {
int field_index = orig_index;
bool is_inobject = true;
- bool is_double = field_index & 1;
int first_inobject_offset = 0;
+ Encoding encoding = field_index & 1 ? kDouble : kTagged;
field_index >>= 1;
+ int offset;
if (field_index < 0) {
+ first_inobject_offset = FixedArray::kHeaderSize;
field_index = -(field_index + 1);
is_inobject = false;
- first_inobject_offset = FixedArray::kHeaderSize;
- field_index += FixedArray::kHeaderSize / kPointerSize;
+ offset = FixedArray::kHeaderSize + field_index * kPointerSize;
} else {
first_inobject_offset = map->GetInObjectPropertyOffset(0);
- field_index += JSObject::kHeaderSize / kPointerSize;
+ offset = map->GetInObjectPropertyOffset(field_index);
}
- FieldIndex result(is_inobject, field_index, is_double,
- map->GetInObjectProperties(), first_inobject_offset);
- DCHECK(result.GetLoadByFieldIndex() == orig_index);
+ FieldIndex result(is_inobject, offset, encoding, map->GetInObjectProperties(),
+ first_inobject_offset);
+ DCHECK_EQ(result.GetLoadByFieldIndex(), orig_index);
return result;
}
@@ -90,12 +93,7 @@ inline FieldIndex FieldIndex::ForDescriptor(const Map* map,
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
int field_index = details.field_index();
- return ForPropertyIndex(map, field_index,
- details.representation().IsDouble());
-}
-
-inline FieldIndex FieldIndex::FromFieldAccessStubKey(int key) {
- return FieldIndex(key);
+ return ForPropertyIndex(map, field_index, details.representation());
}
} // namespace internal
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 78c1d75110..428ad52cc2 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -19,14 +19,17 @@ class Map;
// index it was originally generated from.
class FieldIndex final {
public:
+ enum Encoding { kTagged, kDouble, kWord32 };
+
FieldIndex() : bit_field_(0) {}
- static FieldIndex ForPropertyIndex(const Map* map, int index,
- bool is_double = false);
- static FieldIndex ForInObjectOffset(int offset, const Map* map = NULL);
+ static FieldIndex ForPropertyIndex(
+ const Map* map, int index,
+ Representation representation = Representation::Tagged());
+ static FieldIndex ForInObjectOffset(int offset, Encoding encoding,
+ const Map* map = nullptr);
static FieldIndex ForDescriptor(const Map* map, int descriptor_index);
static FieldIndex ForLoadByFieldIndex(const Map* map, int index);
- static FieldIndex FromFieldAccessStubKey(int key);
int GetLoadByFieldIndex() const;
@@ -36,17 +39,14 @@ class FieldIndex final {
bool is_hidden_field() const { return IsHiddenField::decode(bit_field_); }
- bool is_double() const {
- return IsDoubleBits::decode(bit_field_);
- }
+ bool is_double() const { return EncodingBits::decode(bit_field_) == kDouble; }
- int offset() const {
- return index() * kPointerSize;
- }
+ int offset() const { return OffsetBits::decode(bit_field_); }
// Zero-indexed from beginning of the object.
int index() const {
- return IndexBits::decode(bit_field_);
+ DCHECK_EQ(0, offset() % kPointerSize);
+ return offset() / kPointerSize;
}
int outobject_array_index() const {
@@ -67,7 +67,7 @@ class FieldIndex final {
int GetFieldAccessStubKey() const {
return bit_field_ &
- (IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);
+ (IsInObjectBits::kMask | EncodingBits::kMask | OffsetBits::kMask);
}
bool operator==(FieldIndex const& other) const {
@@ -76,42 +76,59 @@ class FieldIndex final {
bool operator!=(FieldIndex const& other) const { return !(*this == other); }
private:
- FieldIndex(bool is_inobject, int local_index, bool is_double,
+ FieldIndex(bool is_inobject, int offset, Encoding encoding,
int inobject_properties, int first_inobject_property_offset,
bool is_hidden = false) {
- DCHECK((first_inobject_property_offset & (kPointerSize - 1)) == 0);
+ DCHECK_EQ(first_inobject_property_offset & (kPointerSize - 1), 0);
bit_field_ = IsInObjectBits::encode(is_inobject) |
- IsDoubleBits::encode(is_double) |
- FirstInobjectPropertyOffsetBits::encode(first_inobject_property_offset) |
- IsHiddenField::encode(is_hidden) |
- IndexBits::encode(local_index) |
- InObjectPropertyBits::encode(inobject_properties);
+ EncodingBits::encode(encoding) |
+ FirstInobjectPropertyOffsetBits::encode(
+ first_inobject_property_offset) |
+ IsHiddenField::encode(is_hidden) | OffsetBits::encode(offset) |
+ InObjectPropertyBits::encode(inobject_properties);
}
- explicit FieldIndex(int bit_field) : bit_field_(bit_field) {}
+ static Encoding FieldEncoding(Representation representation) {
+ switch (representation.kind()) {
+ case Representation::kNone:
+ case Representation::kSmi:
+ case Representation::kHeapObject:
+ case Representation::kTagged:
+ return kTagged;
+ case Representation::kDouble:
+ return kDouble;
+ default:
+ break;
+ }
+ PrintF("%s\n", representation.Mnemonic());
+ UNREACHABLE();
+ return kTagged;
+ }
int first_inobject_property_offset() const {
DCHECK(!is_hidden_field());
return FirstInobjectPropertyOffsetBits::decode(bit_field_);
}
- static const int kIndexBitsSize = kDescriptorIndexBitCount + 1;
+ static const int kOffsetBitsSize =
+ (kDescriptorIndexBitCount + 1 + kPointerSizeLog2);
// Index from beginning of object.
- class IndexBits: public BitField<int, 0, kIndexBitsSize> {};
- class IsInObjectBits: public BitField<bool, IndexBits::kNext, 1> {};
- class IsDoubleBits: public BitField<bool, IsInObjectBits::kNext, 1> {};
+ class OffsetBits : public BitField64<int, 0, kOffsetBitsSize> {};
+ class IsInObjectBits : public BitField64<bool, OffsetBits::kNext, 1> {};
+ class EncodingBits : public BitField64<Encoding, IsInObjectBits::kNext, 2> {};
// Number of inobject properties.
class InObjectPropertyBits
- : public BitField<int, IsDoubleBits::kNext, kDescriptorIndexBitCount> {};
+ : public BitField64<int, EncodingBits::kNext, kDescriptorIndexBitCount> {
+ };
// Offset of first inobject property from beginning of object.
class FirstInobjectPropertyOffsetBits
- : public BitField<int, InObjectPropertyBits::kNext, 7> {};
+ : public BitField64<int, InObjectPropertyBits::kNext, 7> {};
class IsHiddenField
- : public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
- STATIC_ASSERT(IsHiddenField::kNext <= 32);
+ : public BitField64<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
+ STATIC_ASSERT(IsHiddenField::kNext <= 64);
- int bit_field_;
+ uint64_t bit_field_;
};
} // namespace internal
diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc
index 15797aae86..95b7cbead7 100644
--- a/deps/v8/src/fixed-dtoa.cc
+++ b/deps/v8/src/fixed-dtoa.cc
@@ -36,7 +36,7 @@ class UInt128 {
accumulator >>= 32;
accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
high_bits_ = (accumulator << 32) + part;
- DCHECK((accumulator >> 32) == 0);
+ DCHECK_EQ(accumulator >> 32, 0);
}
void Shift(int shift_amount) {
@@ -218,7 +218,7 @@ static void FillFractionals(uint64_t fractionals, int exponent,
// is a fixed-point number, with binary point at bit 'point'.
if (-exponent <= 64) {
// One 64 bit number is sufficient.
- DCHECK(fractionals >> 56 == 0);
+ DCHECK_EQ(fractionals >> 56, 0);
int point = -exponent;
for (int i = 0; i < fractional_count; ++i) {
if (fractionals == 0) break;
@@ -362,7 +362,7 @@ bool FastFixedDtoa(double v,
} else if (exponent < -128) {
// This configuration (with at most 20 digits) means that all digits must be
// 0.
- DCHECK(fractional_count <= 20);
+ DCHECK_LE(fractional_count, 20);
buffer[0] = '\0';
*length = 0;
*decimal_point = -fractional_count;
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 50a1e660c0..4ea5ed6da0 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -162,7 +162,8 @@ struct MaybeBoolFlag {
#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_ARGS(nam, cmt) FLAG(ARGS, JSArguments, nam, {0 COMMA NULL}, cmt)
+#define DEFINE_ARGS(nam, cmt) \
+ FLAG(ARGS, JSArguments, nam, {0 COMMA nullptr}, cmt)
#define DEFINE_ALIAS_BOOL(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_INT(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
@@ -189,6 +190,7 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
// Enabling import.meta requires to also enable import()
DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
+DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
@@ -196,47 +198,31 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
V(harmony_function_sent, "harmony function.sent") \
V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_class_fields, "harmony public fields in class literals") \
+ V(harmony_class_fields, "harmony fields in class literals") \
+ V(harmony_public_fields, "harmony public fields in class literals") \
V(harmony_bigint, "harmony arbitrary precision integers")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
+#define HARMONY_STAGED(V) \
V(harmony_function_tostring, "harmony Function.prototype.toString") \
- V(harmony_regexp_named_captures, "harmony regexp named captures") \
- V(harmony_regexp_property, "harmony Unicode regexp property classes") \
V(harmony_restrict_constructor_return, \
"harmony disallow non undefined primitive return value from class " \
"constructor") \
- V(harmony_dynamic_import, "harmony dynamic import") \
-
-#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_number_format_to_parts, \
- "Intl.NumberFormat.prototype." \
- "formatToParts")
-#else
-#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
-#endif
+ V(harmony_dynamic_import, "harmony dynamic import")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_strict_legacy_accessor_builtins, \
- "treat __defineGetter__ and related functions as strict") \
- V(harmony_restrictive_generators, \
- "harmony restrictions on generator declarations") \
- V(harmony_object_rest_spread, "harmony object rest spread properties") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_regexp_dotall, "harmony regexp dotAll flag") \
- V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
- V(harmony_async_iteration, "harmony async iteration") \
- V(harmony_template_escapes, \
- "harmony invalid escapes in tagged template literals") \
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_regexp_named_captures, "harmony regexp named captures") \
+ V(harmony_regexp_property, "harmony Unicode regexp property classes") \
+ V(harmony_async_iteration, "harmony async iteration") \
V(harmony_promise_finally, "harmony Promise.prototype.finally")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_number_format_to_parts, \
+ "Intl.NumberFormat.prototype.formatToParts") \
V(harmony_plural_rules, "Intl.PluralRules")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
@@ -280,7 +266,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"not-too-far future")
DEFINE_IMPLICATION(future, preparser_scope_analysis)
-DEFINE_IMPLICATION(future, lazy_deserialization)
+DEFINE_IMPLICATION(future, write_protect_code_memory)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
@@ -307,7 +293,7 @@ DEFINE_BOOL(trace_block_coverage, false,
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
// TODO(jkummerow): This currently adds too much load on the stub cache.
-DEFINE_BOOL_READONLY(internalize_on_the_fly, false,
+DEFINE_BOOL_READONLY(internalize_on_the_fly, true,
"internalize string keys for generic keyed ICs on the fly")
// Flags for optimization types.
@@ -319,6 +305,7 @@ DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
// Flags for data representation optimizations
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
+DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
@@ -334,6 +321,11 @@ DEFINE_STRING(print_bytecode_filter, "*",
DEFINE_BOOL(trace_ignition, false,
"trace the bytecodes executed by the ignition interpreter")
#endif
+#ifdef V8_TRACE_FEEDBACK_UPDATES
+DEFINE_BOOL(
+ trace_feedback_updates, false,
+ "trace updates to feedback vectors during ignition interpreter execution.")
+#endif
DEFINE_BOOL(trace_ignition_codegen, false,
"trace the codegen of ignition interpreter bytecode handlers")
DEFINE_BOOL(trace_ignition_dispatches, false,
@@ -377,7 +369,7 @@ DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
DEFINE_BOOL(trace_turbo_scheduled, false, "trace TurboFan IR with schedule")
DEFINE_IMPLICATION(trace_turbo_scheduled, trace_turbo_graph)
-DEFINE_STRING(trace_turbo_cfg_file, NULL,
+DEFINE_STRING(trace_turbo_cfg_file, nullptr,
"trace turbo cfg graph (for C1 visualizer) to a given file name")
DEFINE_BOOL(trace_turbo_types, true, "trace TurboFan's types")
DEFINE_BOOL(trace_turbo_scheduler, false, "trace TurboFan's scheduler")
@@ -412,11 +404,12 @@ DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
-DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_INT(max_inlined_bytecode_size, 500,
"maximum size of bytecode for a single inlining")
DEFINE_INT(max_inlined_bytecode_size_cumulative, 1000,
"maximum cumulative size of bytecode considered for inlining")
+DEFINE_INT(max_inlined_bytecode_size_absolute, 5000,
+ "maximum cumulative size of bytecode considered for inlining")
DEFINE_FLOAT(reserve_inline_budget_scale_factor, 1.2,
"maximum cumulative size of bytecode considered for inlining")
DEFINE_INT(max_inlined_bytecode_size_small, 30,
@@ -425,10 +418,11 @@ DEFINE_FLOAT(min_inlining_frequency, 0.15, "minimum frequency for inlining")
DEFINE_BOOL(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_BOOL(stress_inline, false,
"set high thresholds for inlining to inline as much as possible")
-DEFINE_VALUE_IMPLICATION(stress_inline, max_inlining_levels, 999999)
DEFINE_VALUE_IMPLICATION(stress_inline, max_inlined_bytecode_size, 999999)
DEFINE_VALUE_IMPLICATION(stress_inline, max_inlined_bytecode_size_cumulative,
999999)
+DEFINE_VALUE_IMPLICATION(stress_inline, max_inlined_bytecode_size_absolute,
+ 999999)
DEFINE_VALUE_IMPLICATION(stress_inline, min_inlining_frequency, 0)
DEFINE_VALUE_IMPLICATION(stress_inline, polymorphic_inlining, true)
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
@@ -466,6 +460,17 @@ DEFINE_BOOL(turbo_experimental, false,
"enable crashing features, for testing purposes only")
DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
+// TODO(rmcilroy): Remove extra_masking once the finch experiment is removed.
+DEFINE_BOOL(extra_masking, false, "obsolete - has no effect")
+
+#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
+#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
+#else
+#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS true
+#endif
+DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
+ "Enable mitigations for executing untrusted code")
+#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
@@ -482,7 +487,12 @@ DEFINE_BOOL(wasm_disable_structured_cloning, false,
"disable wasm structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
-DEFINE_BOOL(wasm_async_compilation, false,
+DEFINE_BOOL(wasm_trace_native_heap, false, "trace wasm native heap events")
+DEFINE_BOOL(wasm_jit_to_native, false,
+ "JIT wasm code to native (not JS GC) memory")
+DEFINE_BOOL(wasm_trace_serialization, false,
+ "trace serialization/deserialization")
+DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
DEFINE_BOOL(wasm_stream_compilation, false,
"enable streaming compilation for WebAssembly")
@@ -505,6 +515,9 @@ DEFINE_BOOL(trace_wasm_streaming, false,
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for wasm AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
+DEFINE_BOOL(liftoff, false,
+ "enable liftoff, the experimental wasm baseline compiler")
+DEFINE_BOOL(trace_liftoff, false, "trace liftoff, the wasm baseline compiler")
DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
@@ -521,7 +534,8 @@ DEFINE_BOOL(trace_asm_parser, false, "verbose logging of asm.js parse failures")
DEFINE_BOOL(stress_validate_asm, false, "try to validate everything as asm.js")
DEFINE_BOOL(dump_wasm_module, false, "dump wasm module bytes")
-DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
+DEFINE_STRING(dump_wasm_module_path, nullptr,
+ "directory to dump wasm modules to")
DEFINE_BOOL(experimental_wasm_simd, false,
"enable prototype simd opcodes for wasm")
@@ -551,6 +565,8 @@ DEFINE_BOOL(asm_wasm_lazy_compilation, false,
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
+DEFINE_BOOL(trace_wasm_lazy_compilation, false,
+ "trace lazy compilation of wasm functions")
// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
@@ -606,6 +622,7 @@ DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
+DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
#ifdef V8_CONCURRENT_MARKING
#define V8_CONCURRENT_MARKING_BOOL true
#else
@@ -613,16 +630,13 @@ DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
#endif
DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
"use concurrent marking")
+DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
+DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
DEFINE_BOOL(minor_mc_parallel_marking, true,
"use parallel marking for the young generation")
DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
-DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
- "keep finalizing incremental marking as long as we discover at "
- "least this many unmarked objects")
-DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
- "at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
@@ -639,12 +653,15 @@ DEFINE_BOOL(trace_gc_object_stats, false,
"trace object counts and memory usage")
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
+DEFINE_BOOL(concurrent_array_buffer_freeing, true,
+ "free array buffer allocations on a background thread")
DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
DEFINE_NEG_IMPLICATION(track_retaining_path, incremental_marking)
+DEFINE_NEG_IMPLICATION(track_retaining_path, parallel_marking)
DEFINE_NEG_IMPLICATION(track_retaining_path, concurrent_marking)
DEFINE_BOOL(track_detached_contexts, true,
"track native contexts that are expected to be garbage collected")
@@ -674,8 +691,14 @@ DEFINE_BOOL(force_marking_deque_overflows, false,
DEFINE_BOOL(stress_compaction, false,
"stress the GC compactor to flush out bugs (implies "
"--force_marking_deque_overflows)")
+DEFINE_BOOL(stress_compaction_random, false,
+ "Stress GC compaction by selecting random percent of pages as "
+ "evacuation candidates. It overrides stress_compaction.")
DEFINE_BOOL(stress_incremental_marking, false,
"force incremental marking for small heaps and run it more often")
+DEFINE_INT(stress_marking, 0,
+ "force marking at random points between 0 and X (inclusive) percent "
+ "of the regular marking start limit")
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -729,10 +752,10 @@ DEFINE_BOOL(disable_old_api_accessors, false,
"prototype chain")
// bootstrapper.cc
-DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
+DEFINE_STRING(expose_natives_as, nullptr, "expose natives in global object")
DEFINE_BOOL(expose_free_buffer, false, "expose freeBuffer extension")
DEFINE_BOOL(expose_gc, false, "expose gc extension")
-DEFINE_STRING(expose_gc_as, NULL,
+DEFINE_STRING(expose_gc_as, nullptr,
"expose gc extension under the specified name")
DEFINE_IMPLICATION(expose_gc_as, expose_gc)
DEFINE_BOOL(expose_externalize_string, false,
@@ -741,10 +764,13 @@ DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
+DEFINE_BOOL(enable_experimental_builtins, true,
+ "enable new csa-based experimental builtins")
// builtins.cc
DEFINE_BOOL(allow_unsafe_function_constructor, false,
"allow invoking the function constructor without security checks")
+DEFINE_BOOL(force_slow_path, false, "always take the slow path for builtins")
// builtins-ia32.cc
DEFINE_BOOL(inline_new, true, "use fast inline allocation")
@@ -866,9 +892,8 @@ DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
-#if V8_TRACE_MAPS
DEFINE_BOOL(trace_maps, false, "trace map creation")
-#endif
+DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
@@ -882,6 +907,9 @@ DEFINE_BOOL(preparser_scope_analysis, true,
"perform scope analysis for preparsed inner functions")
DEFINE_IMPLICATION(preparser_scope_analysis, aggressive_lazy_inner_functions)
+// compiler.cc
+DEFINE_BOOL(background_compile, false, "enable background compilation")
+
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
@@ -930,6 +958,9 @@ DEFINE_INT(hash_seed, 0,
DEFINE_INT(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
+DEFINE_INT(fuzzer_random_seed, 0,
+ "Default seed for initializing fuzzer random generator "
+ "(0, the default, means to use system random).")
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
@@ -941,8 +972,12 @@ DEFINE_INT(runtime_stats, 0,
DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
// snapshot-common.cc
-DEFINE_BOOL(lazy_deserialization, false,
+DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
+DEFINE_BOOL(lazy_handler_deserialization, true,
+ "Deserialize bytecode handlers lazily from the snapshot.")
+DEFINE_IMPLICATION(lazy_handler_deserialization, lazy_deserialization)
+DEFINE_IMPLICATION(future, lazy_handler_deserialization)
DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
@@ -951,6 +986,7 @@ DEFINE_BOOL(serialization_statistics, false,
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
+DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
@@ -961,9 +997,9 @@ DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
// mksnapshot.cc
-DEFINE_STRING(startup_src, NULL,
+DEFINE_STRING(startup_src, nullptr,
"Write V8 startup as C++ src. (mksnapshot only)")
-DEFINE_STRING(startup_blob, NULL,
+DEFINE_STRING(startup_blob, nullptr,
"Write V8 startup blob file. (mksnapshot only)")
//
@@ -1020,7 +1056,6 @@ DEFINE_BOOL(enable_slow_asserts, false,
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_BOOL(print_ast, false, "print source AST")
-DEFINE_BOOL(print_builtin_ast, false, "print source AST for builtins")
DEFINE_BOOL(trap_on_abort, false, "replace aborts by breakpoints")
// compiler.cc
@@ -1092,6 +1127,9 @@ DEFINE_BOOL(log_gc, false,
DEFINE_BOOL(log_handles, false, "Log global handle events.")
DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
DEFINE_BOOL(log_source_code, false, "Log source code.")
+DEFINE_BOOL(log_function_events, false,
+ "Log function events "
+ "(parse, compile, execute) separately.")
DEFINE_BOOL(prof, false,
"Log statistical profiling information (implies --log-code).")
@@ -1141,7 +1179,7 @@ DEFINE_INT(log_instruction_period, 1 << 22,
DEFINE_BOOL(redirect_code_traces, false,
"output deopt information and disassembly into file "
"code-<pid>-<isolate id>.asm")
-DEFINE_STRING(redirect_code_traces_to, NULL,
+DEFINE_STRING(redirect_code_traces_to, nullptr,
"output deopt information and disassembly into the given file")
DEFINE_BOOL(print_opt_source, false,
@@ -1222,16 +1260,24 @@ DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
//
DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
+DEFINE_IMPLICATION(single_threaded, single_threaded_gc)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(single_threaded, concurrent_marking)
-DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sweeping)
-DEFINE_NEG_IMPLICATION(single_threaded, minor_mc_parallel_marking)
-DEFINE_NEG_IMPLICATION(single_threaded, parallel_compaction)
-DEFINE_NEG_IMPLICATION(single_threaded, parallel_pointer_update)
-DEFINE_NEG_IMPLICATION(single_threaded, parallel_scavenge)
-DEFINE_NEG_IMPLICATION(single_threaded, concurrent_store_buffer)
DEFINE_NEG_IMPLICATION(single_threaded, compiler_dispatcher)
+//
+// Parallel and concurrent GC (Orinoco) related flags.
+//
+DEFINE_BOOL(single_threaded_gc, false, "disable the use of background gc tasks")
+DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_marking)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_compaction)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking)
+DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_array_buffer_freeing)
+
#undef FLAG
#ifdef VERIFY_PREDICTABLE
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 0aeb3f91dc..a1df0e0957 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -89,7 +89,7 @@ struct Flag {
void set_string_value(const char* value, bool owns_ptr) {
DCHECK(type_ == TYPE_STRING);
const char** ptr = reinterpret_cast<const char**>(valptr_);
- if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr);
+ if (owns_ptr_ && *ptr != nullptr) DeleteArray(*ptr);
*ptr = value;
owns_ptr_ = owns_ptr;
}
@@ -145,8 +145,8 @@ struct Flag {
case TYPE_STRING: {
const char* str1 = string_value();
const char* str2 = string_default();
- if (str2 == NULL) return str1 == NULL;
- if (str1 == NULL) return str2 == NULL;
+ if (str2 == nullptr) return str1 == nullptr;
+ if (str1 == nullptr) return str2 == nullptr;
return strcmp(str1, str2) == 0;
}
case TYPE_ARGS:
@@ -229,7 +229,7 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
break;
case Flag::TYPE_STRING: {
const char* str = flag.string_value();
- os << (str ? str : "NULL");
+ os << (str ? str : "nullptr");
break;
}
case Flag::TYPE_ARGS: {
@@ -250,12 +250,12 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
// static
std::vector<const char*>* FlagList::argv() {
std::vector<const char*>* args = new std::vector<const char*>(8);
- Flag* args_flag = NULL;
+ Flag* args_flag = nullptr;
for (size_t i = 0; i < num_flags; ++i) {
Flag* f = &flags[i];
if (!f->IsDefault()) {
if (f->type() == Flag::TYPE_ARGS) {
- DCHECK(args_flag == NULL);
+ DCHECK_NULL(args_flag);
args_flag = f; // Must be last in arguments.
continue;
}
@@ -272,7 +272,7 @@ std::vector<const char*>* FlagList::argv() {
}
}
}
- if (args_flag != NULL) {
+ if (args_flag != nullptr) {
std::ostringstream os;
os << "--" << args_flag->name();
args->push_back(StrDup(os.str().c_str()));
@@ -289,9 +289,8 @@ inline char NormalizeChar(char ch) {
return ch == '_' ? '-' : ch;
}
-
// Helper function to parse flags: Takes an argument arg and splits it into
-// a flag name and flag value (or NULL if they are missing). is_bool is set
+// a flag name and flag value (or nullptr if they are missing). is_bool is set
// if the arg started with "-no" or "--no". The buffer may be used to NUL-
// terminate the name, it must be large enough to hold any possible name.
static void SplitArgument(const char* arg,
@@ -300,11 +299,11 @@ static void SplitArgument(const char* arg,
const char** name,
const char** value,
bool* is_bool) {
- *name = NULL;
- *value = NULL;
+ *name = nullptr;
+ *value = nullptr;
*is_bool = false;
- if (arg != NULL && *arg == '-') {
+ if (arg != nullptr && *arg == '-') {
// find the begin of the flag name
arg++; // remove 1st '-'
if (*arg == '-') {
@@ -356,7 +355,7 @@ static Flag* FindFlag(const char* name) {
if (EqualNames(name, flags[i].name()))
return &flags[i];
}
- return NULL;
+ return nullptr;
}
@@ -377,10 +376,10 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
bool is_bool;
SplitArgument(arg, buffer, sizeof buffer, &name, &value, &is_bool);
- if (name != NULL) {
+ if (name != nullptr) {
// lookup the flag
Flag* flag = FindFlag(name);
- if (flag == NULL) {
+ if (flag == nullptr) {
if (remove_flags) {
// We don't recognize this flag but since we're removing
// the flags we recognize we assume that the remaining flags
@@ -398,8 +397,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// if we still need a flag value, use the next argument if available
if (flag->type() != Flag::TYPE_BOOL &&
flag->type() != Flag::TYPE_MAYBE_BOOL &&
- flag->type() != Flag::TYPE_ARGS &&
- value == NULL) {
+ flag->type() != Flag::TYPE_ARGS && value == nullptr) {
if (i < *argc) {
value = argv[i++];
}
@@ -446,13 +444,13 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
*flag->float_variable() = strtod(value, &endp);
break;
case Flag::TYPE_STRING:
- flag->set_string_value(value ? StrDup(value) : NULL, true);
+ flag->set_string_value(value ? StrDup(value) : nullptr, true);
break;
case Flag::TYPE_ARGS: {
- int start_pos = (value == NULL) ? i : i - 1;
+ int start_pos = (value == nullptr) ? i : i - 1;
int js_argc = *argc - start_pos;
const char** js_argv = NewArray<const char*>(js_argc);
- if (value != NULL) {
+ if (value != nullptr) {
js_argv[0] = StrDup(value);
}
for (int k = i; k < *argc; k++) {
@@ -467,7 +465,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// handle errors
bool is_bool_type = flag->type() == Flag::TYPE_BOOL ||
flag->type() == Flag::TYPE_MAYBE_BOOL;
- if ((is_bool_type && value != NULL) || (!is_bool_type && is_bool) ||
+ if ((is_bool_type && value != nullptr) || (!is_bool_type && is_bool) ||
*endp != '\0') {
PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
@@ -483,7 +481,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// remove the flag & value from the command
if (remove_flags) {
while (j < i) {
- argv[j++] = NULL;
+ argv[j++] = nullptr;
}
}
}
@@ -493,8 +491,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
if (remove_flags) {
int j = 1;
for (int i = 1; i < *argc; i++) {
- if (argv[i] != NULL)
- argv[j++] = argv[i];
+ if (argv[i] != nullptr) argv[j++] = argv[i];
}
*argc = j;
}
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index fa5921aef9..8d2d1f8cc4 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -251,8 +251,16 @@ class BuiltinContinuationFrameConstants : public TypedFrameConstants {
// FP-relative.
static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ // The argument count is in the first allocatable register, stored below the
+ // fixed part of the frame and therefore is not part of the fixed frame size.
static const int kArgCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
DEFINE_TYPED_FRAME_SIZES(2);
+
+ // Returns the number of padding stack slots needed when we have
+ // 'register_count' register slots.
+ // This is needed on some architectures to ensure the stack pointer is
+ // aligned.
+ static int PaddingSlotCount(int register_count);
};
// Behaves like an exit frame but with target and new target args.
@@ -261,6 +269,9 @@ class BuiltinExitFrameConstants : public CommonFrameConstants {
static const int kNewTargetOffset = kCallerPCOffset + 1 * kPointerSize;
static const int kTargetOffset = kNewTargetOffset + 1 * kPointerSize;
static const int kArgcOffset = kTargetOffset + 1 * kPointerSize;
+ static const int kPaddingOffset = kArgcOffset + 1 * kPointerSize;
+ static const int kFirstArgumentOffset = kPaddingOffset + 1 * kPointerSize;
+ static const int kNumExtraArgsWithReceiver = 5;
};
class InterpreterFrameConstants : public AllStatic {
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index adfff5a9dc..3438c1dfb0 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -41,7 +41,7 @@ inline StackHandler* StackFrame::top_handler() const {
inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
- if (return_address_location_resolver_ == NULL) {
+ if (return_address_location_resolver_ == nullptr) {
return pc_address;
} else {
return reinterpret_cast<Address*>(
@@ -202,6 +202,9 @@ inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
+inline WasmToWasmFrame::WasmToWasmFrame(StackFrameIteratorBase* iterator)
+ : StubFrame(iterator) {}
+
inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index d578a64ed3..bd1abda4de 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -16,14 +16,15 @@
#include "src/string-stream.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-ReturnAddressLocationResolver
- StackFrame::return_address_location_resolver_ = NULL;
-
+ReturnAddressLocationResolver StackFrame::return_address_location_resolver_ =
+ nullptr;
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
@@ -37,9 +38,7 @@ class StackHandlerIterator BASE_EMBEDDED {
StackHandler* handler() const { return handler_; }
- bool done() {
- return handler_ == NULL || handler_->address() > limit_;
- }
+ bool done() { return handler_ == nullptr || handler_->address() > limit_; }
void Advance() {
DCHECK(!done());
handler_ = handler_->next();
@@ -58,10 +57,9 @@ class StackHandlerIterator BASE_EMBEDDED {
StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
bool can_access_heap_objects)
: isolate_(isolate),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- can_access_heap_objects_(can_access_heap_objects) {
-}
+ STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON) frame_(nullptr),
+ handler_(nullptr),
+ can_access_heap_objects_(can_access_heap_objects) {}
#undef INITIALIZE_SINGLETON
StackFrameIterator::StackFrameIterator(Isolate* isolate)
@@ -91,7 +89,7 @@ void StackFrameIterator::Advance() {
// When we're done iterating over the stack frames, the handler
// chain must have been completely unwound.
- DCHECK(!done() || handler_ == NULL);
+ DCHECK(!done() || handler_ == nullptr);
}
@@ -119,11 +117,12 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
return &field##_;
switch (type) {
- case StackFrame::NONE: return NULL;
- STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
+ case StackFrame::NONE:
+ return nullptr;
+ STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
default: break;
}
- return NULL;
+ return nullptr;
#undef FRAME_TYPE_CASE
}
@@ -207,7 +206,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(
type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
top_frame_type_ = type;
} else if (IsValidStackAddress(fp)) {
- DCHECK(fp != NULL);
+ DCHECK_NOT_NULL(fp);
state.fp = fp;
state.sp = sp;
state.pc_address = StackFrame::ResolveReturnAddressLocation(
@@ -241,6 +240,11 @@ SafeStackFrameIterator::SafeStackFrameIterator(
}
} else {
// Mark the frame as OPTIMIZED if we cannot determine its type.
+ // We chose OPTIMIZED rather than INTERPRETED because it's closer to
+ // the original value of StackFrame::JAVA_SCRIPT here, in that JAVA_SCRIPT
+ // referred to full-codegen frames (now removed from the tree), and
+ // OPTIMIZED refers to turbofan frames, both of which are generated
+ // code. INTERPRETED frames refer to bytecode.
// The frame anyways will be skipped.
type = StackFrame::OPTIMIZED;
// Top frame is incomplete so we cannot reliably determine its type.
@@ -259,7 +263,7 @@ bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
if (!IsValidExitFrame(c_entry_fp)) return false;
// There should be at least one JS_ENTRY stack handler.
Address handler = Isolate::handler(top);
- if (handler == NULL) return false;
+ if (handler == nullptr) return false;
// Check that there are no js frames on top of the native frames.
return c_entry_fp < handler;
}
@@ -271,7 +275,7 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
// Before advancing to the next stack frame, perform pointer validity tests.
if (!IsValidFrame(last_frame) || !IsValidCaller(last_frame)) {
- frame_ = NULL;
+ frame_ = nullptr;
return;
}
@@ -283,7 +287,7 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
// Check that we have actually moved to the previous frame in the stack.
if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
- frame_ = NULL;
+ frame_ = nullptr;
}
}
@@ -314,7 +318,7 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
}
frame->ComputeCallerState(&state);
return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
- SingletonFor(frame->GetCallerState(&state)) != NULL;
+ SingletonFor(frame->GetCallerState(&state)) != nullptr;
}
@@ -333,8 +337,8 @@ void SafeStackFrameIterator::Advance() {
while (true) {
AdvanceOneFrame();
if (done()) break;
- ExternalCallbackScope* last_callback_scope = NULL;
- while (external_callback_scope_ != NULL &&
+ ExternalCallbackScope* last_callback_scope = nullptr;
+ while (external_callback_scope_ != nullptr &&
external_callback_scope_->scope_address() < frame_->fp()) {
// As long as the setup of a frame is not atomic, we may happen to be
// in an interval where an ExternalCallbackScope is already created,
@@ -376,14 +380,10 @@ Code* StackFrame::LookupCode() const {
return result;
}
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* object, Address addr);
-#endif
-
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code* holder) {
Address pc = *pc_address;
- DCHECK(GcSafeCodeContains(holder, pc));
+ DCHECK(holder->GetHeap()->GcSafeCodeContains(holder, pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder;
v->VisitRootPointer(Root::kTop, &code);
@@ -399,13 +399,13 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
void StackFrame::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver) {
- DCHECK(return_address_location_resolver_ == NULL);
+ DCHECK_NULL(return_address_location_resolver_);
return_address_location_resolver_ = resolver;
}
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
State* state) {
- DCHECK(state->fp != NULL);
+ DCHECK_NOT_NULL(state->fp);
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
@@ -433,45 +433,68 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
}
} else {
- // Look up the code object to figure out the type of the stack frame.
- Code* code_obj =
- GetContainingCode(iterator->isolate(), *(state->pc_address));
- if (code_obj != nullptr) {
- switch (code_obj->kind()) {
- case Code::BUILTIN:
+ Address pc = *(state->pc_address);
+ // If FLAG_wasm_jit_to_native is disabled, we still have an empty
+ // wasm_code_manager, and this test will be false. This is easier to read
+ // than checking the flag, then getting the code, and then, if both are true
+ // (non-null, respectivelly), going down the wasm_code path.
+ wasm::WasmCode* wasm_code =
+ iterator->isolate()->wasm_code_manager()->LookupCode(pc);
+ if (wasm_code != nullptr) {
+ switch (wasm_code->kind()) {
+ case wasm::WasmCode::InterpreterStub:
+ return WASM_INTERPRETER_ENTRY;
+ case wasm::WasmCode::Function:
+ case wasm::WasmCode::CopiedStub:
+ return WASM_COMPILED;
+ case wasm::WasmCode::LazyStub:
if (StackFrame::IsTypeMarker(marker)) break;
- if (code_obj->is_interpreter_trampoline_builtin()) {
- return INTERPRETED;
- }
- if (code_obj->is_turbofanned()) {
- // TODO(bmeurer): We treat frames for BUILTIN Code objects as
- // OptimizedFrame for now (all the builtins with JavaScript
- // linkage are actually generated with TurboFan currently, so
- // this is sound).
- return OPTIMIZED;
- }
return BUILTIN;
- case Code::OPTIMIZED_FUNCTION:
- return OPTIMIZED;
- case Code::WASM_FUNCTION:
- return WASM_COMPILED;
- case Code::WASM_TO_JS_FUNCTION:
+ case wasm::WasmCode::WasmToJsWrapper:
+ case wasm::WasmCode::WasmToWasmWrapper:
return WASM_TO_JS;
- case Code::JS_TO_WASM_FUNCTION:
- return JS_TO_WASM;
- case Code::WASM_INTERPRETER_ENTRY:
- return WASM_INTERPRETER_ENTRY;
- case Code::C_WASM_ENTRY:
- return C_WASM_ENTRY;
default:
- // All other types should have an explicit marker
- break;
+ UNREACHABLE();
}
} else {
- return NONE;
+ // Look up the code object to figure out the type of the stack frame.
+ Code* code_obj = GetContainingCode(iterator->isolate(), pc);
+ if (code_obj != nullptr) {
+ switch (code_obj->kind()) {
+ case Code::BUILTIN:
+ if (StackFrame::IsTypeMarker(marker)) break;
+ if (code_obj->is_interpreter_trampoline_builtin()) {
+ return INTERPRETED;
+ }
+ if (code_obj->is_turbofanned()) {
+ // TODO(bmeurer): We treat frames for BUILTIN Code objects as
+ // OptimizedFrame for now (all the builtins with JavaScript
+ // linkage are actually generated with TurboFan currently, so
+ // this is sound).
+ return OPTIMIZED;
+ }
+ return BUILTIN;
+ case Code::OPTIMIZED_FUNCTION:
+ return OPTIMIZED;
+ case Code::WASM_FUNCTION:
+ return WASM_COMPILED;
+ case Code::WASM_TO_JS_FUNCTION:
+ return WASM_TO_JS;
+ case Code::JS_TO_WASM_FUNCTION:
+ return JS_TO_WASM;
+ case Code::WASM_INTERPRETER_ENTRY:
+ return WASM_INTERPRETER_ENTRY;
+ case Code::C_WASM_ENTRY:
+ return C_WASM_ENTRY;
+ default:
+ // All other types should have an explicit marker
+ break;
+ }
+ } else {
+ return NONE;
+ }
}
}
-
DCHECK(StackFrame::IsTypeMarker(marker));
StackFrame::Type candidate = StackFrame::MarkerToType(marker);
switch (candidate) {
@@ -636,7 +659,8 @@ bool BuiltinExitFrame::IsConstructor() const {
Object* BuiltinExitFrame::GetParameter(int i) const {
DCHECK(i >= 0 && i < ComputeParametersCount());
- int offset = BuiltinExitFrameConstants::kArgcOffset + (i + 1) * kPointerSize;
+ int offset =
+ BuiltinExitFrameConstants::kFirstArgumentOffset + i * kPointerSize;
return Memory::Object_at(fp() + offset);
}
@@ -646,7 +670,7 @@ int BuiltinExitFrame::ComputeParametersCount() const {
// Argc also counts the receiver, target, new target, and argc itself as args,
// therefore the real argument count is argc - 4.
int argc = Smi::ToInt(argc_slot) - 4;
- DCHECK(argc >= 0);
+ DCHECK_GE(argc, 0);
return argc;
}
@@ -666,7 +690,7 @@ void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
accumulator->Add("builtin exit frame: ");
- Code* code = NULL;
+ Code* code = nullptr;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
@@ -751,20 +775,38 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Find the code and compute the safepoint information.
Address inner_pointer = pc();
- InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
- isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
- if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
- DCHECK(entry->safepoint_entry.is_valid());
+ const wasm::WasmCode* wasm_code =
+ FLAG_wasm_jit_to_native
+ ? isolate()->wasm_code_manager()->LookupCode(inner_pointer)
+ : nullptr;
+ SafepointEntry safepoint_entry;
+ uint32_t stack_slots;
+ Code* code = nullptr;
+ bool has_tagged_params = false;
+ if (wasm_code != nullptr) {
+ SafepointTable table(wasm_code->instructions().start(),
+ wasm_code->safepoint_table_offset(),
+ wasm_code->stack_slots());
+ safepoint_entry = table.FindEntry(inner_pointer);
+ stack_slots = wasm_code->stack_slots();
+ has_tagged_params = wasm_code->kind() != wasm::WasmCode::Function;
} else {
- DCHECK(entry->safepoint_entry.Equals(
- entry->code->GetSafepointEntry(inner_pointer)));
- }
+ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
+ isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
+ if (!entry->safepoint_entry.is_valid()) {
+ entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
+ DCHECK(entry->safepoint_entry.is_valid());
+ } else {
+ DCHECK(entry->safepoint_entry.Equals(
+ entry->code->GetSafepointEntry(inner_pointer)));
+ }
- Code* code = entry->code;
- SafepointEntry safepoint_entry = entry->safepoint_entry;
- unsigned stack_slots = code->stack_slots();
- unsigned slot_space = stack_slots * kPointerSize;
+ code = entry->code;
+ safepoint_entry = entry->safepoint_entry;
+ stack_slots = code->stack_slots();
+ has_tagged_params = code->has_tagged_params();
+ }
+ uint32_t slot_space = stack_slots * kPointerSize;
// Determine the fixed header and spill slot area size.
int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
@@ -785,6 +827,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case CONSTRUCT:
case JS_TO_WASM:
case WASM_TO_JS:
+ case WASM_TO_WASM:
case WASM_COMPILED:
case WASM_INTERPRETER_ENTRY:
case C_WASM_ENTRY:
@@ -846,7 +889,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
// Visit the rest of the parameters if they are tagged.
- if (code->has_tagged_params()) {
+ if (has_tagged_params) {
v->VisitRootPointers(Root::kTop, parameters_base, parameters_limit);
}
@@ -859,8 +902,11 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
}
}
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), constant_pool_address(), code);
+ // For wasm-to-js cases, we can skip this.
+ if (code != nullptr) {
+ // Visit the return address in the callee and incoming arguments.
+ IteratePc(v, pc_address(), constant_pool_address(), code);
+ }
if (!is_wasm() && !is_wasm_to_js()) {
// If this frame has JavaScript ABI, visit the context (in stub and JS
@@ -919,16 +965,6 @@ bool JavaScriptFrame::HasInlinedFrames() const {
}
-int JavaScriptFrame::GetArgumentsLength() const {
- // If there is an arguments adaptor frame get the arguments length from it.
- if (has_adapted_arguments()) {
- return ArgumentsAdaptorFrame::GetLength(caller_fp());
- } else {
- return GetNumberOfIncomingArguments();
- }
-}
-
-
Code* JavaScriptFrame::unchecked_code() const {
return function()->code();
}
@@ -1119,19 +1155,15 @@ int JavaScriptFrame::ComputeParametersCount() const {
}
int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
+ // Assert that the first allocatable register is also the argument count
+ // register.
+ DCHECK_EQ(RegisterConfiguration::Default()->GetAllocatableGeneralCode(0),
+ kJavaScriptCallArgCountRegister.code());
Object* argc_object =
Memory::Object_at(fp() + BuiltinContinuationFrameConstants::kArgCOffset);
return Smi::ToInt(argc_object);
}
-namespace {
-
-bool IsNonDeoptimizingAsmCode(Code* code, JSFunction* function) {
- return code->is_turbofanned() && !function->shared()->HasBytecodeArray();
-}
-
-} // namespace
-
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
Isolate* isolate, Object* receiver, JSFunction* function,
AbstractCode* abstract_code, int code_offset, bool is_constructor)
@@ -1142,8 +1174,7 @@ FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
code_offset_(code_offset),
is_constructor_(is_constructor) {
DCHECK(abstract_code->IsBytecodeArray() ||
- Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
- IsNonDeoptimizingAsmCode(Code::cast(abstract_code), function));
+ Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION);
}
bool FrameSummary::JavaScriptFrameSummary::is_subject_to_debugging() const {
@@ -1219,7 +1250,7 @@ Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
}
FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
- Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> code,
+ Isolate* isolate, Handle<WasmInstanceObject> instance, WasmCodeWrapper code,
int code_offset, bool at_to_number_conversion)
: WasmFrameSummary(isolate, WASM_COMPILED, instance,
at_to_number_conversion),
@@ -1227,16 +1258,38 @@ FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
code_offset_(code_offset) {}
uint32_t FrameSummary::WasmCompiledFrameSummary::function_index() const {
- FixedArray* deopt_data = code()->deoptimization_data();
- DCHECK_EQ(2, deopt_data->length());
- DCHECK(deopt_data->get(1)->IsSmi());
- int val = Smi::ToInt(deopt_data->get(1));
- DCHECK_LE(0, val);
- return static_cast<uint32_t>(val);
+ if (code().IsCodeObject()) {
+ FixedArray* deopt_data = code().GetCode()->deoptimization_data();
+ DCHECK_EQ(2, deopt_data->length());
+ DCHECK(deopt_data->get(1)->IsSmi());
+ int val = Smi::ToInt(deopt_data->get(1));
+ DCHECK_LE(0, val);
+ return static_cast<uint32_t>(val);
+ }
+ return code().GetWasmCode()->index();
+}
+
+int FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ const wasm::WasmCode* code, int offset) {
+ int position = 0;
+ // Subtract one because the current PC is one instruction after the call site.
+ offset--;
+ Handle<ByteArray> source_position_table(
+ ByteArray::cast(code->owner()->compiled_module()->source_positions()->get(
+ code->index())));
+ for (SourcePositionTableIterator iterator(source_position_table);
+ !iterator.done() && iterator.code_offset() <= offset;
+ iterator.Advance()) {
+ position = iterator.source_position().ScriptOffset();
+ }
+ return position;
}
int FrameSummary::WasmCompiledFrameSummary::byte_offset() const {
- return AbstractCode::cast(*code())->SourcePosition(code_offset());
+ if (code().IsCodeObject()) {
+ return AbstractCode::cast(*code().GetCode())->SourcePosition(code_offset());
+ }
+ return GetWasmSourcePosition(code_.GetWasmCode(), code_offset());
}
FrameSummary::WasmInterpretedFrameSummary::WasmInterpretedFrameSummary(
@@ -1261,7 +1314,6 @@ FrameSummary::~FrameSummary() {
FrameSummary FrameSummary::GetTop(const StandardFrame* frame) {
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
DCHECK_LT(0, frames.size());
return frames.back();
@@ -1281,7 +1333,6 @@ FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
DCHECK_LE(0, index);
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
DCHECK_GT(frames.size(), index);
return frames[index];
@@ -1321,13 +1372,12 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
Code* code = LookupCode();
- if (code->kind() == Code::BUILTIN ||
- IsNonDeoptimizingAsmCode(code, function())) {
+ if (code->kind() == Code::BUILTIN) {
return JavaScriptFrame::Summarize(frames);
}
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+ DeoptimizationData* const data = GetDeoptimizationData(&deopt_index);
if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
CHECK_NULL(data);
FATAL("Missing deoptimization information for OptimizedFrame::Summarize.");
@@ -1413,8 +1463,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
return table->LookupReturn(pc_offset);
}
-
-DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
+DeoptimizationData* OptimizedFrame::GetDeoptimizationData(
int* deopt_index) const {
DCHECK(is_optimized());
@@ -1425,16 +1474,15 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
- code = isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc());
+ code = isolate()->heap()->GcSafeFindCodeForInnerPointer(pc());
}
- DCHECK(code != NULL);
+ DCHECK_NOT_NULL(code);
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
*deopt_index = safepoint_entry.deoptimization_index();
if (*deopt_index != Safepoint::kNoDeoptimizationIndex) {
- return DeoptimizationInputData::cast(code->deoptimization_data());
+ return DeoptimizationData::cast(code->deoptimization_data());
}
return nullptr;
}
@@ -1461,14 +1509,13 @@ void OptimizedFrame::GetFunctions(
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
Code* code = LookupCode();
- if (code->kind() == Code::BUILTIN ||
- IsNonDeoptimizingAsmCode(code, function())) {
+ if (code->kind() == Code::BUILTIN) {
return JavaScriptFrame::GetFunctions(functions);
}
DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+ DeoptimizationData* const data = GetDeoptimizationData(&deopt_index);
DCHECK_NOT_NULL(data);
DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
FixedArray* const literal_array = data->LiteralArray();
@@ -1602,11 +1649,6 @@ int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::ToInt(GetExpression(0));
}
-int ArgumentsAdaptorFrame::GetLength(Address fp) {
- const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
- return Smi::ToInt(Memory::Object_at(fp + offset));
-}
-
Code* ArgumentsAdaptorFrame::unchecked_code() const {
return isolate()->builtins()->builtin(
Builtins::kArgumentsAdaptorTrampoline);
@@ -1629,7 +1671,7 @@ Address InternalFrame::GetCallerStackPointer() const {
Code* InternalFrame::unchecked_code() const {
const int offset = InternalFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp() + offset);
- DCHECK(code != NULL);
+ DCHECK_NOT_NULL(code);
return reinterpret_cast<Code*>(code);
}
@@ -1668,7 +1710,11 @@ Address WasmCompiledFrame::GetCallerStackPointer() const {
}
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
- WasmInstanceObject* obj = WasmInstanceObject::GetOwningInstance(LookupCode());
+ WasmInstanceObject* obj =
+ FLAG_wasm_jit_to_native
+ ? WasmInstanceObject::GetOwningInstance(
+ isolate()->wasm_code_manager()->LookupCode(pc()))
+ : WasmInstanceObject::GetOwningInstanceGC(LookupCode());
// This is a live stack frame; it must have a live instance.
DCHECK_NOT_NULL(obj);
return obj;
@@ -1688,9 +1734,25 @@ int WasmCompiledFrame::position() const {
void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- Handle<Code> code(LookupCode(), isolate());
- int offset = static_cast<int>(pc() - code->instruction_start());
- Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+ WasmCodeWrapper code;
+ Handle<WasmInstanceObject> instance;
+ int offset = -1;
+ if (FLAG_wasm_jit_to_native) {
+ code = WasmCodeWrapper(isolate()->wasm_code_manager()->LookupCode(pc()));
+ offset =
+ static_cast<int>(pc() - code.GetWasmCode()->instructions().start());
+ instance = Handle<WasmInstanceObject>(
+ WasmInstanceObject::cast(code.GetWasmCode()
+ ->owner()
+ ->compiled_module()
+ ->weak_owning_instance()
+ ->value()),
+ isolate());
+ } else {
+ code = WasmCodeWrapper(Handle<Code>(LookupCode(), isolate()));
+ offset = static_cast<int>(pc() - code.GetCode()->instruction_start());
+ instance = Handle<WasmInstanceObject>(wasm_instance(), isolate());
+ }
FrameSummary::WasmCompiledFrameSummary summary(
isolate(), instance, code, offset, at_to_number_conversion());
functions->push_back(summary);
@@ -1700,10 +1762,21 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
// Check whether our callee is a WASM_TO_JS frame, and this frame is at the
// ToNumber conversion call.
Address callee_pc = reinterpret_cast<Address>(this->callee_pc());
- Code* code = callee_pc ? isolate()->FindCodeObject(callee_pc) : nullptr;
- if (!code || code->kind() != Code::WASM_TO_JS_FUNCTION) return false;
- int offset = static_cast<int>(callee_pc - code->instruction_start());
- int pos = AbstractCode::cast(code)->SourcePosition(offset);
+ int pos = -1;
+ if (FLAG_wasm_jit_to_native) {
+ wasm::WasmCode* code =
+ callee_pc ? isolate()->wasm_code_manager()->LookupCode(callee_pc)
+ : nullptr;
+ if (!code || code->kind() != wasm::WasmCode::WasmToJsWrapper) return false;
+ int offset = static_cast<int>(callee_pc - code->instructions().start());
+ pos = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code,
+ offset);
+ } else {
+ Code* code = callee_pc ? isolate()->FindCodeObject(callee_pc) : nullptr;
+ if (!code || code->kind() != Code::WASM_TO_JS_FUNCTION) return false;
+ int offset = static_cast<int>(callee_pc - code->instruction_start());
+ pos = AbstractCode::cast(code)->SourcePosition(offset);
+ }
DCHECK(pos == 0 || pos == 1);
// The imported call has position 0, ToNumber has position 1.
return !!pos;
@@ -1711,11 +1784,26 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
DCHECK_NOT_NULL(stack_slots);
- Code* code = LookupCode();
- HandlerTable* table = HandlerTable::cast(code->handler_table());
- int pc_offset = static_cast<int>(pc() - code->entry());
- *stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset);
+ if (!FLAG_wasm_jit_to_native) {
+ Code* code = LookupCode();
+ HandlerTable* table = HandlerTable::cast(code->handler_table());
+ int pc_offset = static_cast<int>(pc() - code->entry());
+ *stack_slots = code->stack_slots();
+ return table->LookupReturn(pc_offset);
+ }
+ wasm::WasmCode* code = isolate()->wasm_code_manager()->LookupCode(pc());
+ if (!code->IsAnonymous()) {
+ Object* table_entry =
+ code->owner()->compiled_module()->ptr_to_handler_table()->get(
+ code->index());
+ if (table_entry->IsHandlerTable()) {
+ HandlerTable* table = HandlerTable::cast(table_entry);
+ int pc_offset = static_cast<int>(pc() - code->instructions().start());
+ *stack_slots = static_cast<int>(code->stack_slots());
+ return table->LookupReturn(pc_offset);
+ }
+ }
+ return -1;
}
void WasmInterpreterEntryFrame::Iterate(RootVisitor* v) const {
@@ -1746,11 +1834,19 @@ void WasmInterpreterEntryFrame::Summarize(
}
Code* WasmInterpreterEntryFrame::unchecked_code() const {
- return isolate()->FindCodeObject(pc());
+ if (FLAG_wasm_jit_to_native) {
+ UNIMPLEMENTED();
+ } else {
+ return isolate()->FindCodeObject(pc());
+ }
}
WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
- WasmInstanceObject* ret = WasmInstanceObject::GetOwningInstance(LookupCode());
+ WasmInstanceObject* ret =
+ FLAG_wasm_jit_to_native
+ ? WasmInstanceObject::GetOwningInstance(
+ isolate()->wasm_code_manager()->LookupCode(pc()))
+ : WasmInstanceObject::GetOwningInstanceGC(LookupCode());
// This is a live stack frame, there must be a live wasm instance available.
DCHECK_NOT_NULL(ret);
return ret;
@@ -1777,7 +1873,7 @@ namespace {
void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo* shared,
Code* code) {
- if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
+ if (FLAG_max_stack_trace_source_length != 0 && code != nullptr) {
std::ostringstream os;
os << "--------- s o u r c e c o d e ---------\n"
<< SourceCodeOf(shared, FLAG_max_stack_trace_source_length)
@@ -1800,11 +1896,11 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
PrintFrameKind(accumulator);
- Code* code = NULL;
+ Code* code = nullptr;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
- // Get scope information for nicer output, if possible. If code is NULL, or
+ // Get scope information for nicer output, if possible. If code is nullptr, or
// doesn't contain scope info, scope_info will return 0 for the number of
// parameters, stack local variables, context local variables, stack slots,
// or context slots.
@@ -1883,12 +1979,12 @@ void JavaScriptFrame::Print(StringStream* accumulator,
}
// Try to get hold of the context of this frame.
- Context* context = NULL;
- if (this->context() != NULL && this->context()->IsContext()) {
+ Context* context = nullptr;
+ if (this->context() != nullptr && this->context()->IsContext()) {
context = Context::cast(this->context());
while (context->IsWithContext()) {
context = context->previous();
- DCHECK(context != NULL);
+ DCHECK_NOT_NULL(context);
}
}
@@ -1900,7 +1996,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" var ");
accumulator->PrintName(scope_info->ContextLocalName(i));
accumulator->Add(" = ");
- if (context != NULL) {
+ if (context != nullptr) {
int index = Context::MIN_CONTEXT_SLOTS + i;
if (index < context->length()) {
accumulator->Add("%o", context->get(index));
@@ -1975,91 +2071,25 @@ void JavaScriptFrame::Iterate(RootVisitor* v) const {
}
void InternalFrame::Iterate(RootVisitor* v) const {
- Code* code = LookupCode();
- IteratePc(v, pc_address(), constant_pool_address(), code);
- // Internal frames typically do not receive any arguments, hence their stack
- // only contains tagged pointers.
- // We are misusing the has_tagged_params flag here to tell us whether
- // the full stack frame contains only tagged pointers or only raw values.
- // This is used for the WasmCompileLazy builtin, where we actually pass
- // untagged arguments and also store untagged values on the stack.
- if (code->has_tagged_params()) IterateExpressions(v);
-}
-
-// -------------------------------------------------------------------------
-
-static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
- MapWord map_word = object->map_word();
- return map_word.IsForwardingAddress() ?
- map_word.ToForwardingAddress()->map() : map_word.ToMap();
-}
-
-
-static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
- return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
-}
-
-
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* code, Address addr) {
- Map* map = GcSafeMapOfCodeSpaceObject(code);
- DCHECK(map == code->GetHeap()->code_map());
- Address start = code->address();
- Address end = code->address() + code->SizeFromMap(map);
- return start <= addr && addr < end;
-}
-#endif
-
-
-Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
- Address inner_pointer) {
- Code* code = reinterpret_cast<Code*>(object);
- DCHECK(code != NULL && GcSafeCodeContains(code, inner_pointer));
- return code;
-}
-
-
-Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
- Address inner_pointer) {
- Heap* heap = isolate_->heap();
-
- // Check if the inner pointer points into a large object chunk.
- LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
- if (large_page != NULL) {
- return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
- }
-
- if (!heap->code_space()->Contains(inner_pointer)) {
- return nullptr;
- }
-
- // Iterate through the page until we reach the end or find an object starting
- // after the inner pointer.
- Page* page = Page::FromAddress(inner_pointer);
-
- DCHECK_EQ(page->owner(), heap->code_space());
- heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
- page);
-
- Address addr = page->skip_list()->StartFor(inner_pointer);
-
- Address top = heap->code_space()->top();
- Address limit = heap->code_space()->limit();
-
- while (true) {
- if (addr == top && addr != limit) {
- addr = limit;
- continue;
- }
-
- HeapObject* obj = HeapObject::FromAddress(addr);
- int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
- Address next_addr = addr + obj_size;
- if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
- addr = next_addr;
+ wasm::WasmCode* wasm_code =
+ FLAG_wasm_jit_to_native ? isolate()->wasm_code_manager()->LookupCode(pc())
+ : nullptr;
+ if (wasm_code != nullptr) {
+ DCHECK(wasm_code->kind() == wasm::WasmCode::LazyStub);
+ } else {
+ Code* code = LookupCode();
+ IteratePc(v, pc_address(), constant_pool_address(), code);
+ // Internal frames typically do not receive any arguments, hence their stack
+ // only contains tagged pointers.
+ // We are misusing the has_tagged_params flag here to tell us whether
+ // the full stack frame contains only tagged pointers or only raw values.
+ // This is used for the WasmCompileLazy builtin, where we actually pass
+ // untagged arguments and also store untagged values on the stack.
+ if (code->has_tagged_params()) IterateExpressions(v);
}
}
+// -------------------------------------------------------------------------
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
@@ -2070,13 +2100,15 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
isolate_->counters()->pc_to_code_cached()->Increment();
- DCHECK(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
+ DCHECK(entry->code ==
+ isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer));
} else {
// Because this code may be interrupted by a profiling signal that
// also queries the cache, we cannot update inner_pointer before the code
// has been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
- entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
+ entry->code =
+ isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer);
entry->safepoint_entry.Reset();
entry->inner_pointer = inner_pointer;
}
@@ -2110,17 +2142,17 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
default: UNREACHABLE();
}
#undef FRAME_TYPE_CASE
- return NULL;
+ return nullptr;
}
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
- ZoneList<StackFrame*> list(10, zone);
+ ZoneVector<StackFrame*> frames(zone);
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
- list.Add(frame, zone);
+ frames.push_back(frame);
}
- return list.ToVector();
+ return Vector<StackFrame*>(frames.data(), frames.size());
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index ebb7f0c3fd..a72832af06 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -9,10 +9,14 @@
#include "src/flags.h"
#include "src/handles.h"
#include "src/objects.h"
+#include "src/objects/code.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
+namespace wasm {
+class WasmCode;
+}
class AbstractCode;
class Debug;
@@ -39,9 +43,6 @@ class InnerPointerToCodeCache {
Flush();
}
- Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
- Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
-
void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
@@ -91,6 +92,7 @@ class StackHandler BASE_EMBEDDED {
V(OPTIMIZED, OptimizedFrame) \
V(WASM_COMPILED, WasmCompiledFrame) \
V(WASM_TO_JS, WasmToJsFrame) \
+ V(WASM_TO_WASM, WasmToWasmFrame) \
V(JS_TO_WASM, JsToWasmFrame) \
V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
V(C_WASM_ENTRY, CWasmEntryFrame) \
@@ -186,7 +188,7 @@ class StackFrame BASE_EMBEDDED {
// (as an iterator usually lives on stack).
StackFrame(const StackFrame& original) {
this->state_ = original.state_;
- this->iterator_ = NULL;
+ this->iterator_ = nullptr;
this->isolate_ = original.isolate_;
}
@@ -527,15 +529,17 @@ class FrameSummary BASE_EMBEDDED {
class WasmCompiledFrameSummary : public WasmFrameSummary {
public:
- WasmCompiledFrameSummary(Isolate*, Handle<WasmInstanceObject>, Handle<Code>,
- int code_offset, bool at_to_number_conversion);
+ WasmCompiledFrameSummary(Isolate*, Handle<WasmInstanceObject>,
+ WasmCodeWrapper, int code_offset,
+ bool at_to_number_conversion);
uint32_t function_index() const;
- Handle<Code> code() const { return code_; }
+ WasmCodeWrapper code() const { return code_; }
int code_offset() const { return code_offset_; }
int byte_offset() const;
+ static int GetWasmSourcePosition(const wasm::WasmCode* code, int offset);
private:
- Handle<Code> code_;
+ WasmCodeWrapper const code_;
int code_offset_;
};
@@ -705,7 +709,6 @@ class JavaScriptFrame : public StandardFrame {
// actual passed arguments are available in an arguments adaptor
// frame below it on the stack.
inline bool has_adapted_arguments() const;
- int GetArgumentsLength() const;
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
@@ -816,7 +819,7 @@ class OptimizedFrame : public JavaScriptFrame {
int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction) override;
- DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
+ DeoptimizationData* GetDeoptimizationData(int* deopt_index) const;
Object* receiver() const override;
@@ -895,8 +898,6 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
- static int GetLength(Address fp);
-
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
@@ -1026,6 +1027,17 @@ class JsToWasmFrame : public StubFrame {
friend class StackFrameIteratorBase;
};
+class WasmToWasmFrame : public StubFrame {
+ public:
+ Type type() const override { return WASM_TO_WASM; }
+
+ protected:
+ inline explicit WasmToWasmFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class CWasmEntryFrame : public StubFrame {
public:
Type type() const override { return C_WASM_ENTRY; }
@@ -1119,7 +1131,7 @@ class StackFrameIteratorBase BASE_EMBEDDED {
public:
Isolate* isolate() const { return isolate_; }
- bool done() const { return frame_ == NULL; }
+ bool done() const { return frame_ == nullptr; }
protected:
// An iterator that iterates over a given thread's stack.
@@ -1140,7 +1152,7 @@ class StackFrameIteratorBase BASE_EMBEDDED {
// Get the type-specific frame singleton in a given state.
StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
- // A helper function, can return a NULL pointer.
+ // A helper function, can return a nullptr pointer.
StackFrame* SingletonFor(StackFrame::Type type);
private:
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index b5144e72db..274e09b2ea 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -170,7 +170,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
}
base::TimeDelta time_until_timeout = timeout_time - current_time;
- DCHECK(time_until_timeout.InMicroseconds() >= 0);
+ DCHECK_GE(time_until_timeout.InMicroseconds(), 0);
bool wait_for_result =
node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
USE(wait_for_result);
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 57c8100325..fc35100a30 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -121,7 +121,7 @@ class Writer BASE_EMBEDDED {
if (delta == 0) return;
uintptr_t padding = align - delta;
Ensure(position_ += padding);
- DCHECK((position_ % align) == 0);
+ DCHECK_EQ(position_ % align, 0);
}
void WriteULEB128(uintptr_t value) {
@@ -416,8 +416,10 @@ class FullHeaderELFSection : public ELFSection {
class ELFStringTable : public ELFSection {
public:
explicit ELFStringTable(const char* name)
- : ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
- }
+ : ELFSection(name, TYPE_STRTAB, 1),
+ writer_(nullptr),
+ offset_(0),
+ size_(0) {}
uintptr_t Add(const char* str) {
if (*str == '\0') return 0;
@@ -435,12 +437,10 @@ class ELFStringTable : public ELFSection {
WriteString("");
}
- void DetachWriter() {
- writer_ = NULL;
- }
+ void DetachWriter() { writer_ = nullptr; }
virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
- DCHECK(writer_ == NULL);
+ DCHECK_NULL(writer_);
header->offset = offset_;
header->size = size_;
}
@@ -533,7 +533,7 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
- DCHECK(w->position() == 0);
+ DCHECK_EQ(w->position(), 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
@@ -646,7 +646,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
- DCHECK(w->position() == 0);
+ DCHECK_EQ(w->position(), 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
@@ -973,7 +973,7 @@ class CodeDescription BASE_EMBEDDED {
return kind == Code::OPTIMIZED_FUNCTION;
}
- bool has_scope_info() const { return shared_info_ != NULL; }
+ bool has_scope_info() const { return shared_info_ != nullptr; }
ScopeInfo* scope_info() const {
DCHECK(has_scope_info());
@@ -993,7 +993,7 @@ class CodeDescription BASE_EMBEDDED {
}
bool has_script() {
- return shared_info_ != NULL && shared_info_->script()->IsScript();
+ return shared_info_ != nullptr && shared_info_->script()->IsScript();
}
Script* script() { return Script::cast(shared_info_->script()); }
@@ -1001,7 +1001,7 @@ class CodeDescription BASE_EMBEDDED {
bool IsLineInfoAvailable() {
return has_script() && script()->source()->IsString() &&
script()->HasValidSource() && script()->name()->IsString() &&
- lineinfo_ != NULL;
+ lineinfo_ != nullptr;
}
#if V8_TARGET_ARCH_X64
@@ -1198,11 +1198,11 @@ class DebugInfoSection : public DebugSection {
}
// See contexts.h for more information.
- DCHECK(Context::MIN_CONTEXT_SLOTS == 4);
- DCHECK(Context::CLOSURE_INDEX == 0);
- DCHECK(Context::PREVIOUS_INDEX == 1);
- DCHECK(Context::EXTENSION_INDEX == 2);
- DCHECK(Context::NATIVE_CONTEXT_INDEX == 3);
+ DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, 4);
+ DCHECK_EQ(Context::CLOSURE_INDEX, 0);
+ DCHECK_EQ(Context::PREVIOUS_INDEX, 1);
+ DCHECK_EQ(Context::EXTENSION_INDEX, 2);
+ DCHECK_EQ(Context::NATIVE_CONTEXT_INDEX, 3);
w->WriteULEB128(current_abbreviation++);
w->WriteString(".closure");
w->WriteULEB128(current_abbreviation++);
@@ -1684,7 +1684,7 @@ void UnwindInfoSection::WriteLength(Writer* w,
}
}
- DCHECK((w->position() - initial_position) % kPointerSize == 0);
+ DCHECK_EQ((w->position() - initial_position) % kPointerSize, 0);
length_slot->set(static_cast<uint32_t>(w->position() - initial_position));
}
@@ -1897,7 +1897,7 @@ static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
entry->symfile_size_ = symfile_size;
MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
- entry->prev_ = entry->next_ = NULL;
+ entry->prev_ = entry->next_ = nullptr;
return entry;
}
@@ -1910,7 +1910,7 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry) {
entry->next_ = __jit_debug_descriptor.first_entry_;
- if (entry->next_ != NULL) entry->next_->prev_ = entry;
+ if (entry->next_ != nullptr) entry->next_->prev_ = entry;
__jit_debug_descriptor.first_entry_ =
__jit_debug_descriptor.relevant_entry_ = entry;
@@ -1920,13 +1920,13 @@ static void RegisterCodeEntry(JITCodeEntry* entry) {
static void UnregisterCodeEntry(JITCodeEntry* entry) {
- if (entry->prev_ != NULL) {
+ if (entry->prev_ != nullptr) {
entry->prev_->next_ = entry->next_;
} else {
__jit_debug_descriptor.first_entry_ = entry->next_;
}
- if (entry->next_ != NULL) {
+ if (entry->next_ != nullptr) {
entry->next_->prev_ = entry->prev_;
}
@@ -1984,7 +1984,7 @@ struct SplayTreeConfig {
typedef AddressRange Key;
typedef JITCodeEntry* Value;
static const AddressRange kNoKey;
- static Value NoValue() { return NULL; }
+ static Value NoValue() { return nullptr; }
static int Compare(const AddressRange& a, const AddressRange& b) {
// ptrdiff_t probably doesn't fit in an int.
if (a.start < b.start) return -1;
@@ -1997,8 +1997,8 @@ const AddressRange SplayTreeConfig::kNoKey = {0, 0};
typedef SplayTree<SplayTreeConfig> CodeMap;
static CodeMap* GetCodeMap() {
- static CodeMap* code_map = NULL;
- if (code_map == NULL) code_map = new CodeMap();
+ static CodeMap* code_map = nullptr;
+ if (code_map == nullptr) code_map = new CodeMap();
return code_map;
}
@@ -2010,8 +2010,8 @@ static uint32_t HashCodeAddress(Address addr) {
}
static base::HashMap* GetLineMap() {
- static base::HashMap* line_map = NULL;
- if (line_map == NULL) {
+ static base::HashMap* line_map = nullptr;
+ if (line_map == nullptr) {
line_map = new base::HashMap();
}
return line_map;
@@ -2022,7 +2022,7 @@ static void PutLineInfo(Address addr, LineInfo* info) {
base::HashMap* line_map = GetLineMap();
base::HashMap::Entry* e =
line_map->LookupOrInsert(addr, HashCodeAddress(addr));
- if (e->value != NULL) delete static_cast<LineInfo*>(e->value);
+ if (e->value != nullptr) delete static_cast<LineInfo*>(e->value);
e->value = info;
}
@@ -2115,7 +2115,7 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
char file_name[64];
SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "/tmp/elfdump%s%d.o",
- (name_hint != NULL) ? name_hint : "", file_num++);
+ (name_hint != nullptr) ? name_hint : "", file_num++);
WriteBytes(file_name, entry->symfile_addr_,
static_cast<int>(entry->symfile_size_));
}
@@ -2152,15 +2152,15 @@ static void AddCode(const char* name, Code* code, SharedFunctionInfo* shared,
delete lineinfo;
- const char* name_hint = NULL;
+ const char* name_hint = nullptr;
bool should_dump = false;
if (FLAG_gdbjit_dump) {
if (strlen(FLAG_gdbjit_dump_filter) == 0) {
name_hint = name;
should_dump = true;
- } else if (name != NULL) {
+ } else if (name != nullptr) {
name_hint = strstr(name, FLAG_gdbjit_dump_filter);
- should_dump = (name_hint != NULL);
+ should_dump = (name_hint != nullptr);
}
}
AddJITCodeEntry(code_map, range, entry, should_dump, name_hint);
@@ -2179,8 +2179,9 @@ void EventHandler(const v8::JitCodeEvent* event) {
StringBuilder builder(buffer.start(), buffer.length());
builder.AddSubstring(event->name.str, static_cast<int>(event->name.len));
// It's called UnboundScript in the API but it's a SharedFunctionInfo.
- SharedFunctionInfo* shared =
- event->script.IsEmpty() ? NULL : *Utils::OpenHandle(*event->script);
+ SharedFunctionInfo* shared = event->script.IsEmpty()
+ ? nullptr
+ : *Utils::OpenHandle(*event->script);
AddCode(builder.Finalize(), code, shared, lineinfo);
break;
}
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 9ae13d59f4..c101877a6f 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -29,20 +29,18 @@ class GlobalHandles::Node {
// Maps handle location (slot) to the containing node.
static Node* FromLocation(Object** location) {
- DCHECK(offsetof(Node, object_) == 0);
+ DCHECK_EQ(offsetof(Node, object_), 0);
return reinterpret_cast<Node*>(location);
}
Node() {
- DCHECK(offsetof(Node, class_id_) == Internals::kNodeClassIdOffset);
- DCHECK(offsetof(Node, flags_) == Internals::kNodeFlagsOffset);
+ DCHECK_EQ(offsetof(Node, class_id_), Internals::kNodeClassIdOffset);
+ DCHECK_EQ(offsetof(Node, flags_), Internals::kNodeFlagsOffset);
STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
Internals::kNodeStateMask);
STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
STATIC_ASSERT(PENDING == Internals::kNodeStateIsPendingValue);
STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
- STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
- Internals::kNodeIsIndependentShift);
STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
Internals::kNodeIsActiveShift);
}
@@ -54,11 +52,10 @@ class GlobalHandles::Node {
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
- set_independent(false);
set_active(false);
set_in_new_space_list(false);
- parameter_or_next_free_.next_free = NULL;
- weak_callback_ = NULL;
+ parameter_or_next_free_.next_free = nullptr;
+ weak_callback_ = nullptr;
}
#endif
@@ -76,11 +73,10 @@ class GlobalHandles::Node {
DCHECK(state() == FREE);
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- set_independent(false);
set_active(false);
set_state(NORMAL);
- parameter_or_next_free_.parameter = NULL;
- weak_callback_ = NULL;
+ parameter_or_next_free_.parameter = nullptr;
+ weak_callback_ = nullptr;
IncreaseBlockUses();
}
@@ -96,9 +92,8 @@ class GlobalHandles::Node {
// Zap the values for eager trapping.
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- set_independent(false);
set_active(false);
- weak_callback_ = NULL;
+ weak_callback_ = nullptr;
DecreaseBlockUses();
}
@@ -123,13 +118,6 @@ class GlobalHandles::Node {
flags_ = NodeState::update(flags_, state);
}
- bool is_independent() {
- return IsIndependent::decode(flags_);
- }
- void set_independent(bool v) {
- flags_ = IsIndependent::update(flags_, v);
- }
-
bool is_active() {
return IsActive::decode(flags_);
}
@@ -194,12 +182,6 @@ class GlobalHandles::Node {
set_state(PENDING);
}
- // Independent flag accessors.
- void MarkIndependent() {
- DCHECK(IsInUse());
- set_independent(true);
- }
-
// Callback parameter accessors.
void set_parameter(void* parameter) {
DCHECK(IsInUse());
@@ -223,7 +205,7 @@ class GlobalHandles::Node {
void MakeWeak(void* parameter,
WeakCallbackInfo<void>::Callback phantom_callback,
v8::WeakCallbackType type) {
- DCHECK(phantom_callback != nullptr);
+ DCHECK_NOT_NULL(phantom_callback);
DCHECK(IsInUse());
CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
set_state(WEAK);
@@ -255,7 +237,7 @@ class GlobalHandles::Node {
DCHECK(IsInUse());
void* p = parameter();
set_state(NORMAL);
- set_parameter(NULL);
+ set_parameter(nullptr);
return p;
}
@@ -265,7 +247,7 @@ class GlobalHandles::Node {
DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS);
DCHECK(state() == PENDING);
- DCHECK(weak_callback_ != nullptr);
+ DCHECK_NOT_NULL(weak_callback_);
void* embedder_fields[v8::kEmbedderFieldsInWeakCallback] = {nullptr,
nullptr};
@@ -293,7 +275,7 @@ class GlobalHandles::Node {
void ResetPhantomHandle() {
DCHECK(weakness_type() == PHANTOM_WEAK_RESET_HANDLE);
DCHECK(state() == PENDING);
- DCHECK(weak_callback_ == nullptr);
+ DCHECK_NULL(weak_callback_);
Object*** handle = reinterpret_cast<Object***>(parameter());
*handle = nullptr;
Release();
@@ -302,7 +284,7 @@ class GlobalHandles::Node {
bool PostGarbageCollectionProcessing(Isolate* isolate) {
// Handles only weak handles (not phantom) that are dying.
if (state() != Node::PENDING) return false;
- if (weak_callback_ == NULL) {
+ if (weak_callback_ == nullptr) {
Release();
return false;
}
@@ -311,9 +293,9 @@ class GlobalHandles::Node {
// Check that we are not passing a finalized external string to
// the callback.
DCHECK(!object_->IsExternalOneByteString() ||
- ExternalOneByteString::cast(object_)->resource() != NULL);
+ ExternalOneByteString::cast(object_)->resource() != nullptr);
DCHECK(!object_->IsExternalTwoByteString() ||
- ExternalTwoByteString::cast(object_)->resource() != NULL);
+ ExternalTwoByteString::cast(object_)->resource() != nullptr);
if (weakness_type() != FINALIZER_WEAK) {
return false;
}
@@ -344,7 +326,7 @@ class GlobalHandles::Node {
// Placed first to avoid offset computation.
Object* object_;
- // Next word stores class_id, index, state, and independent.
+ // Next word stores class_id, index, and state.
// Note: the most aligned fields should go first.
// Wrapper class ID.
@@ -353,10 +335,7 @@ class GlobalHandles::Node {
// Index in the containing handle block.
uint8_t index_;
- // This stores three flags (independent, partially_dependent and
- // in_new_space_list) and a State.
class NodeState : public BitField<State, 0, 3> {};
- class IsIndependent : public BitField<bool, 3, 1> {};
// The following two fields are mutually exclusive
class IsActive : public BitField<bool, 4, 1> {};
class IsInNewSpaceList : public BitField<bool, 5, 1> {};
@@ -385,8 +364,8 @@ class GlobalHandles::NodeBlock {
explicit NodeBlock(GlobalHandles* global_handles, NodeBlock* next)
: next_(next),
used_nodes_(0),
- next_used_(NULL),
- prev_used_(NULL),
+ next_used_(nullptr),
+ prev_used_(nullptr),
global_handles_(global_handles) {}
void PutNodesOnFreeList(Node** first_free) {
@@ -401,22 +380,22 @@ class GlobalHandles::NodeBlock {
}
void IncreaseUses() {
- DCHECK(used_nodes_ < kSize);
+ DCHECK_LT(used_nodes_, kSize);
if (used_nodes_++ == 0) {
NodeBlock* old_first = global_handles_->first_used_block_;
global_handles_->first_used_block_ = this;
next_used_ = old_first;
- prev_used_ = NULL;
- if (old_first == NULL) return;
+ prev_used_ = nullptr;
+ if (old_first == nullptr) return;
old_first->prev_used_ = this;
}
}
void DecreaseUses() {
- DCHECK(used_nodes_ > 0);
+ DCHECK_GT(used_nodes_, 0);
if (--used_nodes_ == 0) {
- if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
- if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
+ if (next_used_ != nullptr) next_used_->prev_used_ = prev_used_;
+ if (prev_used_ != nullptr) prev_used_->next_used_ = next_used_;
if (this == global_handles_->first_used_block_) {
global_handles_->first_used_block_ = next_used_;
}
@@ -482,7 +461,7 @@ class GlobalHandles::NodeIterator {
: block_(global_handles->first_used_block_),
index_(0) {}
- bool done() const { return block_ == NULL; }
+ bool done() const { return block_ == nullptr; }
Node* node() const {
DCHECK(!done());
@@ -536,29 +515,29 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
number_of_global_handles_(0),
- first_block_(NULL),
- first_used_block_(NULL),
- first_free_(NULL),
+ first_block_(nullptr),
+ first_used_block_(nullptr),
+ first_free_(nullptr),
post_gc_processing_count_(0),
number_of_phantom_handle_resets_(0) {}
GlobalHandles::~GlobalHandles() {
NodeBlock* block = first_block_;
- while (block != NULL) {
+ while (block != nullptr) {
NodeBlock* tmp = block->next();
delete block;
block = tmp;
}
- first_block_ = NULL;
+ first_block_ = nullptr;
}
Handle<Object> GlobalHandles::Create(Object* value) {
- if (first_free_ == NULL) {
+ if (first_free_ == nullptr) {
first_block_ = new NodeBlock(this, first_block_);
first_block_->PutNodesOnFreeList(&first_free_);
}
- DCHECK(first_free_ != NULL);
+ DCHECK_NOT_NULL(first_free_);
// Take the first node in the free list.
Node* result = first_free_;
first_free_ = result->next_free();
@@ -573,13 +552,13 @@ Handle<Object> GlobalHandles::Create(Object* value) {
Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
- DCHECK(location != NULL);
+ DCHECK_NOT_NULL(location);
return Node::FromLocation(location)->GetGlobalHandles()->Create(*location);
}
void GlobalHandles::Destroy(Object** location) {
- if (location != NULL) Node::FromLocation(location)->Release();
+ if (location != nullptr) Node::FromLocation(location)->Release();
}
@@ -600,16 +579,6 @@ void* GlobalHandles::ClearWeakness(Object** location) {
return Node::FromLocation(location)->ClearWeakness();
}
-
-void GlobalHandles::MarkIndependent(Object** location) {
- Node::FromLocation(location)->MarkIndependent();
-}
-
-bool GlobalHandles::IsIndependent(Object** location) {
- return Node::FromLocation(location)->is_independent();
-}
-
-
bool GlobalHandles::IsNearDeath(Object** location) {
return Node::FromLocation(location)->IsNearDeath();
}
@@ -665,8 +634,7 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback should_reset_handle) {
void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
for (Node* node : new_space_nodes_) {
if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- node->is_active())) {
+ (node->IsWeakRetainer() && node->is_active())) {
v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
@@ -680,8 +648,7 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
node->set_active(true);
}
if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- node->is_active())) {
+ (node->IsWeakRetainer() && node->is_active())) {
v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
@@ -696,31 +663,55 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
}
}
-
void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
- WeakSlotCallbackWithHeap is_unscavenged) {
+ WeakSlotCallbackWithHeap is_dead) {
+ for (Node* node : new_space_nodes_) {
+ DCHECK(node->is_in_new_space_list());
+ if (node->IsWeak() && is_dead(isolate_->heap(), node->location())) {
+ DCHECK(!node->is_active());
+ if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
+ node->MarkPending();
+ }
+ }
+ }
+}
+
+void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
+ RootVisitor* v) {
for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
- if ((node->is_independent() || !node->is_active()) && node->IsWeak() &&
- is_unscavenged(isolate_->heap(), node->location())) {
- node->MarkPending();
+ if (!node->is_active() && node->IsWeakRetainer() &&
+ (node->state() == Node::PENDING)) {
+ DCHECK(!node->IsPhantomCallback());
+ DCHECK(!node->IsPhantomResetHandle());
+ // Finalizers need to survive.
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
}
-void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(RootVisitor* v) {
+void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ RootVisitor* v, WeakSlotCallbackWithHeap should_reset_handle) {
for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
- if ((node->is_independent() || !node->is_active()) &&
- node->IsWeakRetainer()) {
- // Pending weak phantom handles die immediately. Everything else survives.
- if (node->IsPendingPhantomResetHandle()) {
- node->ResetPhantomHandle();
- ++number_of_phantom_handle_resets_;
- } else if (node->IsPendingPhantomCallback()) {
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
+ if (!node->is_active() && node->IsWeakRetainer() &&
+ (node->state() != Node::PENDING)) {
+ DCHECK(node->IsPhantomResetHandle() || node->IsPhantomCallback());
+ if (should_reset_handle(isolate_->heap(), node->location())) {
+ if (node->IsPhantomResetHandle()) {
+ node->MarkPending();
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+
+ } else if (node->IsPhantomCallback()) {
+ node->MarkPending();
+ node->CollectPhantomCallbackData(isolate(),
+ &pending_phantom_callbacks_);
+ } else {
+ UNREACHABLE();
+ }
} else {
+ // Node survived and needs to be visited.
v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
@@ -732,7 +723,7 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks(
while (!callbacks->empty()) {
auto callback = callbacks->back();
callbacks->pop_back();
- DCHECK(callback.node() == nullptr);
+ DCHECK_NULL(callback.node());
// Fire second pass callback
callback.Invoke(isolate);
}
@@ -749,15 +740,12 @@ int GlobalHandles::PostScavengeProcessing(
// the freed_nodes.
continue;
}
- // Skip dependent or unmodified handles. Their weak callbacks might expect
- // to be
- // called between two global garbage collection callbacks which
- // are not called for minor collections.
- if (!node->is_independent() && (node->is_active())) {
- node->set_active(false);
- continue;
- }
+
+ // Active nodes are kept alive, so no further processing is required.
+ if (node->is_active()) {
node->set_active(false);
+ continue;
+ }
if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
@@ -768,6 +756,7 @@ int GlobalHandles::PostScavengeProcessing(
return freed_nodes;
}
}
+
if (!node->IsRetainer()) {
freed_nodes++;
}
@@ -1064,7 +1053,7 @@ EternalHandles::~EternalHandles() {
void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
int limit = size_;
for (Object** block : blocks_) {
- DCHECK(limit > 0);
+ DCHECK_GT(limit, 0);
visitor->VisitRootPointers(Root::kEternalHandles, block,
block + Min(limit, kSize));
limit -= kSize;
@@ -1092,7 +1081,7 @@ void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
DCHECK_EQ(kInvalidIndex, *index);
- if (object == NULL) return;
+ if (object == nullptr) return;
DCHECK_NE(isolate->heap()->the_hole_value(), object);
int block = size_ >> kShift;
int offset = size_ & kMask;
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index b5c3b2191d..59b94e371b 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -97,11 +97,6 @@ class GlobalHandles {
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
- // Mark the reference to this object independent.
- static void MarkIndependent(Object** location);
-
- static bool IsIndependent(Object** location);
-
// Tells whether global handle is near death.
static bool IsNearDeath(Object** location);
@@ -154,14 +149,14 @@ class GlobalHandles {
void IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
RootVisitor* v, size_t start, size_t end);
- // Finds weak independent or unmodified handles satisfying
- // the callback predicate and marks them as pending. See the note above.
+ // Marks weak unmodified handles satisfying |is_dead| as pending.
void MarkNewSpaceWeakUnmodifiedObjectsPending(
- WeakSlotCallbackWithHeap is_unscavenged);
+ WeakSlotCallbackWithHeap is_dead);
- // Iterates over weak independent or unmodified handles.
- // See the note above.
- void IterateNewSpaceWeakUnmodifiedRoots(RootVisitor* v);
+ // Iterates over weak unmodified handles. See the note above.
+ void IterateNewSpaceWeakUnmodifiedRootsForFinalizers(RootVisitor* v);
+ void IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ RootVisitor* v, WeakSlotCallbackWithHeap should_reset_handle);
// Identify unmodified objects that are in weak state and marks them
// unmodified
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index c98f55f02f..8f5253016f 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <limits>
#include <ostream>
#include "src/base/build_config.h"
@@ -41,21 +42,7 @@
#endif // V8_OS_WIN
-// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
-// warning flag and certain versions of GCC due to a bug:
-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
-// For now, we use the more involved template-based version from <limits>, but
-// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
-# include <limits> // NOLINT
-# define V8_INFINITY std::numeric_limits<double>::infinity()
-#elif V8_LIBC_MSVCRT
-# define V8_INFINITY HUGE_VAL
-#elif V8_OS_AIX
-#define V8_INFINITY (__builtin_inff())
-#else
-# define V8_INFINITY INFINITY
-#endif
+#define V8_INFINITY std::numeric_limits<double>::infinity()
namespace v8 {
@@ -161,8 +148,10 @@ const int kMinUInt16 = 0;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
const int kMinUInt32 = 0;
+const int kUInt8Size = sizeof(uint8_t);
const int kCharSize = sizeof(char);
const int kShortSize = sizeof(short); // NOLINT
+const int kUInt16Size = sizeof(uint16_t);
const int kIntSize = sizeof(int);
const int kInt32Size = sizeof(int32_t);
const int kInt64Size = sizeof(int64_t);
@@ -188,6 +177,7 @@ const int kElidedFrameSlots = 0;
#endif
const int kDoubleSizeLog2 = 3;
+const size_t kMaxWasmCodeMemory = 256 * MB;
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
@@ -314,45 +304,53 @@ F FUNCTION_CAST(Address addr) {
// -----------------------------------------------------------------------------
-// Forward declarations for frequently used classes
-// (sorted alphabetically)
-
-class FreeStoreAllocationPolicy;
-template <typename T, class P = FreeStoreAllocationPolicy> class List;
-
-// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum LanguageMode : uint32_t { SLOPPY, STRICT, LANGUAGE_END };
+enum class LanguageMode : bool { kSloppy, kStrict };
+static const size_t LanguageModeSize = 2;
+
+inline size_t hash_value(LanguageMode mode) {
+ return static_cast<size_t>(mode);
+}
inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
switch (mode) {
- case SLOPPY: return os << "sloppy";
- case STRICT: return os << "strict";
- case LANGUAGE_END:
- UNREACHABLE();
+ case LanguageMode::kSloppy:
+ return os << "sloppy";
+ case LanguageMode::kStrict:
+ return os << "strict";
}
UNREACHABLE();
}
inline bool is_sloppy(LanguageMode language_mode) {
- return language_mode == SLOPPY;
+ return language_mode == LanguageMode::kSloppy;
}
inline bool is_strict(LanguageMode language_mode) {
- return language_mode != SLOPPY;
+ return language_mode != LanguageMode::kSloppy;
}
inline bool is_valid_language_mode(int language_mode) {
- return language_mode == SLOPPY || language_mode == STRICT;
+ return language_mode == static_cast<int>(LanguageMode::kSloppy) ||
+ language_mode == static_cast<int>(LanguageMode::kStrict);
}
inline LanguageMode construct_language_mode(bool strict_bit) {
return static_cast<LanguageMode>(strict_bit);
}
+// Return kStrict if either of the language modes is kStrict, or kSloppy
+// otherwise.
+inline LanguageMode stricter_language_mode(LanguageMode mode1,
+ LanguageMode mode2) {
+ STATIC_ASSERT(LanguageModeSize == 2);
+ return static_cast<LanguageMode>(static_cast<int>(mode1) |
+ static_cast<int>(mode2));
+}
+
enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// This constant is used as an undefined value when passing source positions.
@@ -466,11 +464,9 @@ const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
// Forward declarations for frequently used classes
class AccessorInfo;
-class Allocation;
class Arguments;
class Assembler;
class Code;
-class CodeGenerator;
class CodeStub;
class Context;
class Debug;
@@ -480,10 +476,10 @@ class DescriptorArray;
class TransitionArray;
class ExternalReference;
class FixedArray;
+class FreeStoreAllocationPolicy;
class FunctionTemplateInfo;
class MemoryChunk;
-class SeededNumberDictionary;
-class UnseededNumberDictionary;
+class NumberDictionary;
class NameDictionary;
class GlobalDictionary;
template <typename T> class MaybeHandle;
@@ -623,6 +619,8 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+enum Movability { kMovable, kImmovable };
+
enum VisitMode {
VISIT_ALL,
VISIT_ALL_IN_MINOR_MC_MARK,
@@ -631,7 +629,6 @@ enum VisitMode {
VISIT_ALL_IN_SWEEP_NEWSPACE,
VISIT_ONLY_STRONG,
VISIT_ONLY_STRONG_FOR_SERIALIZATION,
- VISIT_ONLY_STRONG_ROOT_LIST,
};
// Flag indicating whether code is built into the VM (one of the natives files).
@@ -713,6 +710,8 @@ enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
enum ResultSentinel { kNotFound = -1, kUnsupported = -2 };
+enum ShouldThrow { kThrowOnError, kDontThrow };
+
// The Store Buffer (GC).
typedef enum {
kStoreBufferFullEvent,
@@ -1299,6 +1298,34 @@ class CompareOperationFeedback {
};
};
+enum class Operation {
+ // Binary operations.
+ kAdd,
+ kSubtract,
+ kMultiply,
+ kDivide,
+ kModulus,
+ kExponentiate,
+ kBitwiseAnd,
+ kBitwiseOr,
+ kBitwiseXor,
+ kShiftLeft,
+ kShiftRight,
+ kShiftRightLogical,
+ // Unary operations.
+ kBitwiseNot,
+ kNegate,
+ kIncrement,
+ kDecrement,
+ // Compare operations.
+ kEqual,
+ kStrictEqual,
+ kLessThan,
+ kLessThanOrEqual,
+ kGreaterThan,
+ kGreaterThanOrEqual,
+};
+
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
@@ -1416,18 +1443,18 @@ inline std::ostream& operator<<(std::ostream& os,
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
-#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
- C(Handler, handler) \
- C(CEntryFP, c_entry_fp) \
- C(CFunction, c_function) \
- C(Context, context) \
- C(PendingException, pending_exception) \
- C(PendingHandlerContext, pending_handler_context) \
- C(PendingHandlerCode, pending_handler_code) \
- C(PendingHandlerOffset, pending_handler_offset) \
- C(PendingHandlerFP, pending_handler_fp) \
- C(PendingHandlerSP, pending_handler_sp) \
- C(ExternalCaughtException, external_caught_exception) \
+#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
+ C(Handler, handler) \
+ C(CEntryFP, c_entry_fp) \
+ C(CFunction, c_function) \
+ C(Context, context) \
+ C(PendingException, pending_exception) \
+ C(PendingHandlerContext, pending_handler_context) \
+ C(PendingHandlerEntrypoint, pending_handler_entrypoint) \
+ C(PendingHandlerConstantPool, pending_handler_constant_pool) \
+ C(PendingHandlerFP, pending_handler_fp) \
+ C(PendingHandlerSP, pending_handler_sp) \
+ C(ExternalCaughtException, external_caught_exception) \
C(JSEntrySP, js_entry_sp)
enum IsolateAddressId {
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 1d3f922489..df99f8652d 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -50,8 +50,8 @@ HandleScope::~HandleScope() {
int before = NumberOfHandles(isolate_);
CloseScope(isolate_, prev_next_, prev_limit_);
int after = NumberOfHandles(isolate_);
- DCHECK(after - before < kCheckHandleThreshold);
- DCHECK(before < kCheckHandleThreshold);
+ DCHECK_LT(after - before, kCheckHandleThreshold);
+ DCHECK_LT(before, kCheckHandleThreshold);
} else {
#endif // DEBUG
CloseScope(isolate_, prev_next_, prev_limit_);
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index d7b35673b6..7f403bcdb5 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -72,7 +72,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
if (!Utils::ApiCheck(current->level != current->sealed_level,
"v8::HandleScope::CreateHandle()",
"Cannot create a handle without a HandleScope")) {
- return NULL;
+ return nullptr;
}
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
// If there's more room in the last block, we use that. This is used
@@ -81,7 +81,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
Object** limit = &impl->blocks()->back()[kHandleBlockSize];
if (current->limit != limit) {
current->limit = limit;
- DCHECK(limit - current->next < kHandleBlockSize);
+ DCHECK_LT(limit - current->next, kHandleBlockSize);
}
}
@@ -108,7 +108,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
#ifdef ENABLE_HANDLE_ZAPPING
void HandleScope::ZapRange(Object** start, Object** end) {
- DCHECK(end - start <= kHandleBlockSize);
+ DCHECK_LE(end - start, kHandleBlockSize);
for (Object** p = start; p != end; p++) {
*reinterpret_cast<Address*>(p) = kHandleZapValue;
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index e31a77be41..1d7a79533b 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -23,7 +23,6 @@ class HandleScopeImplementer;
class Isolate;
class Object;
-
// ----------------------------------------------------------------------------
// Base class for Handle instantiations. Don't use directly.
class HandleBase {
@@ -39,7 +38,7 @@ class HandleBase {
(that.location_ == nullptr ||
that.IsDereferenceAllowed(NO_DEFERRED_CHECK)));
if (this->location_ == that.location_) return true;
- if (this->location_ == NULL || that.location_ == NULL) return false;
+ if (this->location_ == nullptr || that.location_ == nullptr) return false;
return *this->location_ == *that.location_;
}
@@ -94,7 +93,8 @@ class Handle final : public HandleBase {
V8_INLINE explicit Handle(T** location = nullptr)
: HandleBase(reinterpret_cast<Object**>(location)) {
// Type check:
- static_assert(std::is_base_of<Object, T>::value, "static type violation");
+ static_assert(std::is_convertible<T*, Object*>::value,
+ "static type violation");
}
V8_INLINE explicit Handle(T* object);
@@ -105,11 +105,9 @@ class Handle final : public HandleBase {
// Constructor for handling automatic up casting.
// Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
- template <typename S>
- V8_INLINE Handle(Handle<S> handle) : HandleBase(handle) {
- // Type check:
- static_assert(std::is_base_of<T, S>::value, "static type violation");
- }
+ template <typename S, typename = typename std::enable_if<
+ std::is_convertible<S*, T*>::value>::type>
+ V8_INLINE Handle(Handle<S> handle) : HandleBase(handle) {}
V8_INLINE T* operator->() const { return operator*(); }
@@ -172,11 +170,10 @@ V8_INLINE Handle<T> handle(T* object) {
return Handle<T>(object);
}
-
// ----------------------------------------------------------------------------
// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
-// into a Handle requires checking that it does not point to NULL. This
-// ensures NULL checks before use.
+// into a Handle requires checking that it does not point to nullptr. This
+// ensures nullptr checks before use.
//
// Also note that Handles do not provide default equality comparison or hashing
// operators on purpose. Such operators would be misleading, because intended
@@ -188,24 +185,19 @@ class MaybeHandle final {
// Constructor for handling automatic up casting from Handle.
// Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
- template <typename S>
+ template <typename S, typename = typename std::enable_if<
+ std::is_convertible<S*, T*>::value>::type>
V8_INLINE MaybeHandle(Handle<S> handle)
- : location_(reinterpret_cast<T**>(handle.location_)) {
- // Type check:
- static_assert(std::is_base_of<T, S>::value, "static type violation");
- }
+ : location_(reinterpret_cast<T**>(handle.location_)) {}
// Constructor for handling automatic up casting.
// Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
- template <typename S>
+ template <typename S, typename = typename std::enable_if<
+ std::is_convertible<S*, T*>::value>::type>
V8_INLINE MaybeHandle(MaybeHandle<S> maybe_handle)
- : location_(reinterpret_cast<T**>(maybe_handle.location_)) {
- // Type check:
- static_assert(std::is_base_of<T, S>::value, "static type violation");
- }
+ : location_(reinterpret_cast<T**>(maybe_handle.location_)) {}
- template <typename S>
- V8_INLINE MaybeHandle(S* object, Isolate* isolate)
+ V8_INLINE MaybeHandle(T* object, Isolate* isolate)
: MaybeHandle(handle(object, isolate)) {}
V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
@@ -425,9 +417,9 @@ struct HandleScopeData final {
CanonicalHandleScope* canonical_scope;
void Initialize() {
- next = limit = NULL;
+ next = limit = nullptr;
sealed_level = level = 0;
- canonical_scope = NULL;
+ canonical_scope = nullptr;
}
};
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index ac308ebad7..62dc9007ad 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -5,204 +5,203 @@
#ifndef V8_HEAP_SYMBOLS_H_
#define V8_HEAP_SYMBOLS_H_
-#define INTERNALIZED_STRING_LIST(V) \
- V(anonymous_function_string, "(anonymous function)") \
- V(anonymous_string, "anonymous") \
- V(add_string, "add") \
- V(apply_string, "apply") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(arguments_to_string, "[object Arguments]") \
- V(Array_string, "Array") \
- V(ArrayIterator_string, "Array Iterator") \
- V(assign_string, "assign") \
- V(async_string, "async") \
- V(await_string, "await") \
- V(array_to_string, "[object Array]") \
- V(boolean_to_string, "[object Boolean]") \
- V(date_to_string, "[object Date]") \
- V(error_to_string, "[object Error]") \
- V(function_to_string, "[object Function]") \
- V(number_to_string, "[object Number]") \
- V(object_to_string, "[object Object]") \
- V(regexp_to_string, "[object RegExp]") \
- V(string_to_string, "[object String]") \
- V(bigint_string, "bigint") \
- V(bind_string, "bind") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(bound__string, "bound ") \
- V(buffer_string, "buffer") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(call_string, "call") \
- V(callee_string, "callee") \
- V(caller_string, "caller") \
- V(cell_value_string, "%cell_value") \
- V(char_at_string, "CharAt") \
- V(closure_string, "(closure)") \
- V(column_string, "column") \
- V(configurable_string, "configurable") \
- V(constructor_string, "constructor") \
- V(construct_string, "construct") \
- V(create_string, "create") \
- V(currency_string, "currency") \
- V(Date_string, "Date") \
- V(dayperiod_string, "dayperiod") \
- V(day_string, "day") \
- V(decimal_string, "decimal") \
- V(default_string, "default") \
- V(defineProperty_string, "defineProperty") \
- V(deleteProperty_string, "deleteProperty") \
- V(did_handle_string, "didHandle") \
- V(display_name_string, "displayName") \
- V(done_string, "done") \
- V(dotAll_string, "dotAll") \
- V(dot_catch_string, ".catch") \
- V(dot_for_string, ".for") \
- V(dot_generator_object_string, ".generator_object") \
- V(dot_iterator_string, ".iterator") \
- V(dot_result_string, ".result") \
- V(dot_switch_tag_string, ".switch_tag") \
- V(dot_string, ".") \
- V(exec_string, "exec") \
- V(entries_string, "entries") \
- V(enqueue_string, "enqueue") \
- V(enumerable_string, "enumerable") \
- V(era_string, "era") \
- V(Error_string, "Error") \
- V(eval_string, "eval") \
- V(EvalError_string, "EvalError") \
- V(false_string, "false") \
- V(flags_string, "flags") \
- V(fraction_string, "fraction") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(Generator_string, "Generator") \
- V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
- V(getPrototypeOf_string, "getPrototypeOf") \
- V(get_string, "get") \
- V(get_space_string, "get ") \
- V(global_string, "global") \
- V(group_string, "group") \
- V(groups_string, "groups") \
- V(has_string, "has") \
- V(hour_string, "hour") \
- V(ignoreCase_string, "ignoreCase") \
- V(illegal_access_string, "illegal access") \
- V(illegal_argument_string, "illegal argument") \
- V(index_string, "index") \
- V(infinity_string, "infinity") \
- V(Infinity_string, "Infinity") \
- V(integer_string, "integer") \
- V(input_string, "input") \
- V(isExtensible_string, "isExtensible") \
- V(isView_string, "isView") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(keys_string, "keys") \
- V(lastIndex_string, "lastIndex") \
- V(length_string, "length") \
- V(let_string, "let") \
- V(line_string, "line") \
- V(literal_string, "literal") \
- V(Map_string, "Map") \
- V(message_string, "message") \
- V(minus_Infinity_string, "-Infinity") \
- V(minus_zero_string, "-0") \
- V(minusSign_string, "minusSign") \
- V(minute_string, "minute") \
- V(Module_string, "Module") \
- V(month_string, "month") \
- V(multiline_string, "multiline") \
- V(name_string, "name") \
- V(native_string, "native") \
- V(nan_string, "nan") \
- V(NaN_string, "NaN") \
- V(new_target_string, ".new.target") \
- V(next_string, "next") \
- V(NFC_string, "NFC") \
- V(NFD_string, "NFD") \
- V(NFKC_string, "NFKC") \
- V(NFKD_string, "NFKD") \
- V(not_equal, "not-equal") \
- V(null_string, "null") \
- V(null_to_string, "[object Null]") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(object_string, "object") \
- V(Object_string, "Object") \
- V(ok, "ok") \
- V(one_string, "1") \
- V(ownKeys_string, "ownKeys") \
- V(percentSign_string, "percentSign") \
- V(plusSign_string, "plusSign") \
- V(position_string, "position") \
- V(preventExtensions_string, "preventExtensions") \
- V(Promise_string, "Promise") \
- V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
- V(promise_string, "promise") \
- V(proto_string, "__proto__") \
- V(prototype_string, "prototype") \
- V(Proxy_string, "Proxy") \
- V(query_colon_string, "(?:)") \
- V(RangeError_string, "RangeError") \
- V(raw_string, "raw") \
- V(ReferenceError_string, "ReferenceError") \
- V(RegExp_string, "RegExp") \
- V(reject_string, "reject") \
- V(resolve_string, "resolve") \
- V(return_string, "return") \
- V(script_string, "script") \
- V(second_string, "second") \
- V(setPrototypeOf_string, "setPrototypeOf") \
- V(set_space_string, "set ") \
- V(set_string, "set") \
- V(Set_string, "Set") \
- V(source_mapping_url_string, "source_mapping_url") \
- V(source_string, "source") \
- V(sourceText_string, "sourceText") \
- V(source_url_string, "source_url") \
- V(stack_string, "stack") \
- V(stackTraceLimit_string, "stackTraceLimit") \
- V(star_default_star_string, "*default*") \
- V(sticky_string, "sticky") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(symbol_species_string, "[Symbol.species]") \
- V(SyntaxError_string, "SyntaxError") \
- V(then_string, "then") \
- V(this_function_string, ".this_function") \
- V(this_string, "this") \
- V(throw_string, "throw") \
- V(timed_out, "timed-out") \
- V(timeZoneName_string, "timeZoneName") \
- V(toJSON_string, "toJSON") \
- V(toString_string, "toString") \
- V(true_string, "true") \
- V(TypeError_string, "TypeError") \
- V(type_string, "type") \
- V(CompileError_string, "CompileError") \
- V(LinkError_string, "LinkError") \
- V(RuntimeError_string, "RuntimeError") \
- V(undefined_string, "undefined") \
- V(undefined_to_string, "[object Undefined]") \
- V(unicode_string, "unicode") \
- V(use_asm_string, "use asm") \
- V(use_strict_string, "use strict") \
- V(URIError_string, "URIError") \
- V(valueOf_string, "valueOf") \
- V(values_string, "values") \
- V(value_string, "value") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(weekday_string, "weekday") \
- V(will_handle_string, "willHandle") \
- V(writable_string, "writable") \
- V(year_string, "year") \
+#define INTERNALIZED_STRING_LIST(V) \
+ V(anonymous_function_string, "(anonymous function)") \
+ V(anonymous_string, "anonymous") \
+ V(add_string, "add") \
+ V(apply_string, "apply") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(arguments_to_string, "[object Arguments]") \
+ V(Array_string, "Array") \
+ V(ArrayIterator_string, "Array Iterator") \
+ V(assign_string, "assign") \
+ V(async_string, "async") \
+ V(await_string, "await") \
+ V(array_to_string, "[object Array]") \
+ V(boolean_to_string, "[object Boolean]") \
+ V(date_to_string, "[object Date]") \
+ V(error_to_string, "[object Error]") \
+ V(function_to_string, "[object Function]") \
+ V(number_to_string, "[object Number]") \
+ V(object_to_string, "[object Object]") \
+ V(regexp_to_string, "[object RegExp]") \
+ V(string_to_string, "[object String]") \
+ V(bigint_string, "bigint") \
+ V(BigInt_string, "BigInt") \
+ V(bind_string, "bind") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(bound__string, "bound ") \
+ V(buffer_string, "buffer") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(call_string, "call") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(column_string, "column") \
+ V(configurable_string, "configurable") \
+ V(constructor_string, "constructor") \
+ V(construct_string, "construct") \
+ V(create_string, "create") \
+ V(currency_string, "currency") \
+ V(Date_string, "Date") \
+ V(dayperiod_string, "dayperiod") \
+ V(day_string, "day") \
+ V(decimal_string, "decimal") \
+ V(default_string, "default") \
+ V(defineProperty_string, "defineProperty") \
+ V(deleteProperty_string, "deleteProperty") \
+ V(did_handle_string, "didHandle") \
+ V(display_name_string, "displayName") \
+ V(done_string, "done") \
+ V(dotAll_string, "dotAll") \
+ V(dot_catch_string, ".catch") \
+ V(dot_for_string, ".for") \
+ V(dot_generator_object_string, ".generator_object") \
+ V(dot_iterator_string, ".iterator") \
+ V(dot_result_string, ".result") \
+ V(dot_switch_tag_string, ".switch_tag") \
+ V(dot_string, ".") \
+ V(exec_string, "exec") \
+ V(entries_string, "entries") \
+ V(enqueue_string, "enqueue") \
+ V(enumerable_string, "enumerable") \
+ V(era_string, "era") \
+ V(Error_string, "Error") \
+ V(eval_string, "eval") \
+ V(EvalError_string, "EvalError") \
+ V(false_string, "false") \
+ V(flags_string, "flags") \
+ V(fraction_string, "fraction") \
+ V(function_string, "function") \
+ V(Function_string, "Function") \
+ V(Generator_string, "Generator") \
+ V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
+ V(getPrototypeOf_string, "getPrototypeOf") \
+ V(get_string, "get") \
+ V(get_space_string, "get ") \
+ V(global_string, "global") \
+ V(group_string, "group") \
+ V(groups_string, "groups") \
+ V(has_string, "has") \
+ V(hour_string, "hour") \
+ V(ignoreCase_string, "ignoreCase") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(infinity_string, "infinity") \
+ V(Infinity_string, "Infinity") \
+ V(integer_string, "integer") \
+ V(input_string, "input") \
+ V(isExtensible_string, "isExtensible") \
+ V(isView_string, "isView") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(keys_string, "keys") \
+ V(lastIndex_string, "lastIndex") \
+ V(length_string, "length") \
+ V(let_string, "let") \
+ V(line_string, "line") \
+ V(literal_string, "literal") \
+ V(Map_string, "Map") \
+ V(message_string, "message") \
+ V(minus_Infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(minusSign_string, "minusSign") \
+ V(minute_string, "minute") \
+ V(Module_string, "Module") \
+ V(month_string, "month") \
+ V(multiline_string, "multiline") \
+ V(name_string, "name") \
+ V(native_string, "native") \
+ V(nan_string, "nan") \
+ V(NaN_string, "NaN") \
+ V(new_target_string, ".new.target") \
+ V(next_string, "next") \
+ V(NFC_string, "NFC") \
+ V(NFD_string, "NFD") \
+ V(NFKC_string, "NFKC") \
+ V(NFKD_string, "NFKD") \
+ V(not_equal, "not-equal") \
+ V(null_string, "null") \
+ V(null_to_string, "[object Null]") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(object_string, "object") \
+ V(Object_string, "Object") \
+ V(ok, "ok") \
+ V(one_string, "1") \
+ V(ownKeys_string, "ownKeys") \
+ V(percentSign_string, "percentSign") \
+ V(plusSign_string, "plusSign") \
+ V(position_string, "position") \
+ V(preventExtensions_string, "preventExtensions") \
+ V(Promise_string, "Promise") \
+ V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
+ V(promise_string, "promise") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(Proxy_string, "Proxy") \
+ V(query_colon_string, "(?:)") \
+ V(RangeError_string, "RangeError") \
+ V(raw_string, "raw") \
+ V(ReferenceError_string, "ReferenceError") \
+ V(RegExp_string, "RegExp") \
+ V(reject_string, "reject") \
+ V(resolve_string, "resolve") \
+ V(return_string, "return") \
+ V(script_string, "script") \
+ V(second_string, "second") \
+ V(setPrototypeOf_string, "setPrototypeOf") \
+ V(set_space_string, "set ") \
+ V(set_string, "set") \
+ V(Set_string, "Set") \
+ V(source_string, "source") \
+ V(sourceText_string, "sourceText") \
+ V(stack_string, "stack") \
+ V(stackTraceLimit_string, "stackTraceLimit") \
+ V(star_default_star_string, "*default*") \
+ V(sticky_string, "sticky") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(symbol_species_string, "[Symbol.species]") \
+ V(SyntaxError_string, "SyntaxError") \
+ V(then_string, "then") \
+ V(this_function_string, ".this_function") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(timed_out, "timed-out") \
+ V(timeZoneName_string, "timeZoneName") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(TypeError_string, "TypeError") \
+ V(type_string, "type") \
+ V(CompileError_string, "CompileError") \
+ V(LinkError_string, "LinkError") \
+ V(RuntimeError_string, "RuntimeError") \
+ V(undefined_string, "undefined") \
+ V(undefined_to_string, "[object Undefined]") \
+ V(unicode_string, "unicode") \
+ V(use_asm_string, "use asm") \
+ V(use_strict_string, "use strict") \
+ V(URIError_string, "URIError") \
+ V(valueOf_string, "valueOf") \
+ V(values_string, "values") \
+ V(value_string, "value") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(weekday_string, "weekday") \
+ V(will_handle_string, "willHandle") \
+ V(writable_string, "writable") \
+ V(year_string, "year") \
V(zero_string, "0")
#define PRIVATE_SYMBOL_LIST(V) \
@@ -213,8 +212,8 @@
V(call_site_frame_index_symbol) \
V(console_context_id_symbol) \
V(console_context_name_symbol) \
- V(class_end_position_symbol) \
- V(class_start_position_symbol) \
+ V(class_fields_symbol) \
+ V(class_positions_symbol) \
V(detailed_stack_trace_symbol) \
V(elements_transition_symbol) \
V(error_end_pos_symbol) \
@@ -350,6 +349,7 @@
F(MINOR_MC_SWEEPING) \
F(SCAVENGER_FAST_PROMOTE) \
F(SCAVENGER_SCAVENGE) \
+ F(SCAVENGER_PROCESS_ARRAY_BUFFERS) \
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY) \
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS) \
F(SCAVENGER_SCAVENGE_PARALLEL) \
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
new file mode 100644
index 0000000000..1f41ffb2eb
--- /dev/null
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -0,0 +1,58 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/array-buffer-collector.h"
+
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void ArrayBufferCollector::AddGarbageAllocations(
+ std::vector<JSArrayBuffer::Allocation>* allocations) {
+ base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ allocations_.push_back(allocations);
+}
+
+void ArrayBufferCollector::FreeAllocations() {
+ base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ for (std::vector<JSArrayBuffer::Allocation>* allocations : allocations_) {
+ for (auto alloc : *allocations) {
+ JSArrayBuffer::FreeBackingStore(heap_->isolate(), alloc);
+ }
+ delete allocations;
+ }
+ allocations_.clear();
+}
+
+class ArrayBufferCollector::FreeingTask final : public CancelableTask {
+ public:
+ explicit FreeingTask(Heap* heap)
+ : CancelableTask(heap->isolate()), heap_(heap) {}
+
+ virtual ~FreeingTask() {}
+
+ private:
+ void RunInternal() final {
+ heap_->array_buffer_collector()->FreeAllocations();
+ }
+
+ Heap* heap_;
+};
+
+void ArrayBufferCollector::FreeAllocationsOnBackgroundThread() {
+ heap_->account_external_memory_concurrently_freed();
+ if (heap_->use_tasks() && FLAG_concurrent_array_buffer_freeing) {
+ FreeingTask* task = new FreeingTask(heap_);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ } else {
+ // Fallback for when concurrency is disabled/restricted.
+ FreeAllocations();
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
new file mode 100644
index 0000000000..002eba9a43
--- /dev/null
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ARRAY_BUFFER_COLLECTOR_H_
+#define V8_HEAP_ARRAY_BUFFER_COLLECTOR_H_
+
+#include <vector>
+
+#include "src/base/platform/mutex.h"
+#include "src/objects/js-array.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// To support background processing of array buffer backing stores, we process
+// array buffers using the ArrayBufferTracker class. The ArrayBufferCollector
+// keeps track of garbage backing stores so that they can be freed on a
+// background thread.
+class ArrayBufferCollector {
+ public:
+ explicit ArrayBufferCollector(Heap* heap) : heap_(heap) {}
+
+ ~ArrayBufferCollector() { FreeAllocations(); }
+
+ // These allocations will begin to be freed once FreeAllocations() is called,
+ // or on TearDown.
+ void AddGarbageAllocations(
+ std::vector<JSArrayBuffer::Allocation>* allocations);
+
+ // Calls FreeAllocations() on a background thread.
+ void FreeAllocationsOnBackgroundThread();
+
+ private:
+ class FreeingTask;
+
+ // Begin freeing the allocations added through AddGarbageAllocations. Also
+ // called by TearDown.
+ void FreeAllocations();
+
+ Heap* heap_;
+ base::Mutex allocations_mutex_;
+ std::vector<std::vector<JSArrayBuffer::Allocation>*> allocations_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_ARRAY_BUFFER_COLLECTOR_H_
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 8d415fd75e..568f149b04 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -14,13 +14,12 @@ namespace v8 {
namespace internal {
void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
- void* data = buffer->backing_store();
- if (!data) return;
+ if (buffer->backing_store() == nullptr) return;
- size_t length = buffer->allocation_length();
+ const size_t length = NumberToSize(buffer->byte_length());
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::RecursiveMutex> guard(page->mutex());
+ base::LockGuard<base::Mutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) {
page->AllocateLocalTracker();
@@ -36,13 +35,12 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
}
void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
- void* data = buffer->backing_store();
- if (!data) return;
+ if (buffer->backing_store() == nullptr) return;
Page* page = Page::FromAddress(buffer->address());
- size_t length = buffer->allocation_length();
+ const size_t length = NumberToSize(buffer->byte_length());
{
- base::LockGuard<base::RecursiveMutex> guard(page->mutex());
+ base::LockGuard<base::Mutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
tracker->Remove(buffer, length);
@@ -52,26 +50,25 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
- size_t freed_memory = 0;
- size_t retained_size = 0;
+ size_t new_retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
- freed_memory += length;
buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
- retained_size += length;
+ new_retained_size += length;
++it;
}
}
- retained_size_ = retained_size;
+ const size_t freed_memory = retained_size_ - new_retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
+ retained_size_ = new_retained_size;
}
template <typename MarkingState>
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 238c8de57d..5acf9b9bfb 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -3,6 +3,10 @@
// found in the LICENSE file.
#include "src/heap/array-buffer-tracker.h"
+
+#include <vector>
+
+#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
@@ -16,56 +20,70 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
+ std::vector<JSArrayBuffer::Allocation>* backing_stores_to_free =
+ new std::vector<JSArrayBuffer::Allocation>();
+
JSArrayBuffer* new_buffer = nullptr;
JSArrayBuffer* old_buffer = nullptr;
- size_t freed_memory = 0;
- size_t retained_size = 0;
+ size_t new_retained_size = 0;
+ size_t moved_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
old_buffer = reinterpret_cast<JSArrayBuffer*>(*it);
- const size_t length = old_buffer->allocation_length();
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
- retained_size += length;
+ new_retained_size += NumberToSize(old_buffer->byte_length());
++it;
} else if (result == kUpdateEntry) {
DCHECK_NOT_NULL(new_buffer);
Page* target_page = Page::FromAddress(new_buffer->address());
{
- base::LockGuard<base::RecursiveMutex> guard(target_page->mutex());
+ base::LockGuard<base::Mutex> guard(target_page->mutex());
LocalArrayBufferTracker* tracker = target_page->local_tracker();
if (tracker == nullptr) {
target_page->AllocateLocalTracker();
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- DCHECK_EQ(length, new_buffer->allocation_length());
- tracker->Add(new_buffer, length);
+ const size_t size = NumberToSize(new_buffer->byte_length());
+ moved_size += size;
+ tracker->Add(new_buffer, size);
}
it = array_buffers_.erase(it);
} else if (result == kRemoveEntry) {
- freed_memory += length;
- old_buffer->FreeBackingStore();
+ // Size of freed memory is computed to avoid looking at dead objects.
+ void* allocation_base = old_buffer->allocation_base();
+ DCHECK_NOT_NULL(allocation_base);
+
+ backing_stores_to_free->emplace_back(allocation_base,
+ old_buffer->allocation_length(),
+ old_buffer->allocation_mode());
it = array_buffers_.erase(it);
} else {
UNREACHABLE();
}
}
- retained_size_ = retained_size;
+ const size_t freed_memory = retained_size_ - new_retained_size - moved_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
+ retained_size_ = new_retained_size;
+
+ // Pass the backing stores that need to be freed to the main thread for later
+ // distribution.
+ // ArrayBufferCollector takes ownership of this pointer.
+ heap_->array_buffer_collector()->AddGarbageAllocations(
+ backing_stores_to_free);
}
-void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
+void ArrayBufferTracker::PrepareToFreeDeadInNewSpace(Heap* heap) {
DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
for (Page* page : PageRange(heap->new_space()->FromSpaceStart(),
heap->new_space()->FromSpaceEnd())) {
bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
CHECK(empty);
}
- heap->account_external_memory_concurrently_freed();
}
size_t ArrayBufferTracker::RetainedInNewSpace(Heap* heap) {
@@ -110,7 +128,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::RecursiveMutex> guard(page->mutex());
+ base::LockGuard<base::Mutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return false;
return tracker->IsTracked(buffer);
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 3c8def3e3a..7bfc1b83f6 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -34,9 +34,9 @@ class ArrayBufferTracker : public AllStatic {
inline static void RegisterNew(Heap* heap, JSArrayBuffer* buffer);
inline static void Unregister(Heap* heap, JSArrayBuffer* buffer);
- // Frees all backing store pointers for dead JSArrayBuffers in new space.
+ // Identifies all backing store pointers for dead JSArrayBuffers in new space.
// Does not take any locks and can only be called during Scavenge.
- static void FreeDeadInNewSpace(Heap* heap);
+ static void PrepareToFreeDeadInNewSpace(Heap* heap);
// Number of array buffer bytes retained from new space.
static size_t RetainedInNewSpace(Heap* heap);
@@ -101,7 +101,14 @@ class LocalArrayBufferTracker {
size_t retained_size() const { return retained_size_; }
private:
- typedef std::unordered_set<JSArrayBuffer*> TrackingData;
+ class Hasher {
+ public:
+ size_t operator()(JSArrayBuffer* buffer) const {
+ return reinterpret_cast<size_t>(buffer) >> 3;
+ }
+ };
+
+ typedef std::unordered_set<JSArrayBuffer*, Hasher> TrackingData;
Heap* heap_;
// The set contains raw heap pointers which are removed by the GC upon
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index d4ff5fbba7..e404101753 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -49,7 +49,7 @@ void CodeStatistics::ResetCodeAndMetadataStatistics(Isolate* isolate) {
void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
Isolate* isolate) {
HeapObjectIterator obj_it(space);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
}
@@ -61,7 +61,7 @@ void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
void CodeStatistics::CollectCodeStatistics(LargeObjectSpace* space,
Isolate* isolate) {
LargeObjectIterator obj_it(space);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
}
@@ -136,7 +136,7 @@ void CodeStatistics::EnterComment(Isolate* isolate, const char* comment,
// Search for a free or matching entry in 'comments_statistics': 'cs'
// points to result.
for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- if (comments_statistics[i].comment == NULL) {
+ if (comments_statistics[i].comment == nullptr) {
cs = &comments_statistics[i];
cs->comment = comment;
break;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 60bcbe9bab..9634db951a 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -80,6 +80,11 @@ class ConcurrentMarkingVisitor final
marking_state_(live_bytes),
task_id_(task_id) {}
+ template <typename T>
+ static V8_INLINE T* Cast(HeapObject* object) {
+ return T::cast(object);
+ }
+
bool ShouldVisit(HeapObject* object) {
return marking_state_.GreyToBlack(object);
}
@@ -109,7 +114,10 @@ class ConcurrentMarkingVisitor final
int VisitJSObject(Map* map, JSObject* object) {
int size = JSObject::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ int used_size = map->UsedInstanceSize();
+ DCHECK_LE(used_size, size);
+ DCHECK_GE(used_size, JSObject::kHeaderSize);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
if (!ShouldVisit(object)) return 0;
VisitPointersInSnapshot(object, snapshot);
return size;
@@ -216,6 +224,14 @@ class ConcurrentMarkingVisitor final
return size;
}
+ int VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
+ if (!ShouldVisit(object)) return 0;
+ int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
+ }
+
int VisitJSFunction(Map* map, JSFunction* object) {
if (!ShouldVisit(object)) return 0;
int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
@@ -243,15 +259,11 @@ class ConcurrentMarkingVisitor final
}
int VisitNativeContext(Map* map, Context* object) {
- if (marking_state_.IsGrey(object)) {
- int size = Context::BodyDescriptorWeak::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptorWeak::IterateBody(object, size, this);
- // TODO(ulan): implement proper weakness for normalized map cache
- // and remove this bailout.
- bailout_.Push(object);
- }
- return 0;
+ if (!ShouldVisit(object)) return 0;
+ int size = Context::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ Context::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
}
int VisitTransitionArray(Map* map, TransitionArray* array) {
@@ -342,6 +354,33 @@ class ConcurrentMarkingVisitor final
SlotSnapshot slot_snapshot_;
};
+// Strings can change maps due to conversion to thin string or external strings.
+// Use reinterpret cast to avoid data race in slow dchecks.
+template <>
+ConsString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<ConsString*>(object);
+}
+
+template <>
+SlicedString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<SlicedString*>(object);
+}
+
+template <>
+ThinString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<ThinString*>(object);
+}
+
+template <>
+SeqOneByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<SeqOneByteString*>(object);
+}
+
+template <>
+SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<SeqTwoByteString*>(object);
+}
+
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
@@ -374,6 +413,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
bailout_(bailout),
on_hold_(on_hold),
weak_objects_(weak_objects),
+ total_marked_bytes_(0),
pending_task_count_(0),
task_count_(0) {
// The runtime flag should be set only if the compile time flag was set.
@@ -382,6 +422,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
#endif
for (int i = 0; i <= kMaxTasks; i++) {
is_pending_[i] = false;
+ task_state_[i].marked_bytes = 0;
}
}
@@ -540,6 +581,7 @@ void ConcurrentMarking::FlushLiveBytes(
}
}
live_bytes.clear();
+ task_state_[i].marked_bytes = 0;
}
total_marked_bytes_.SetValue(0);
}
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 7bfe0adfa0..b9832d5433 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -16,7 +16,8 @@ namespace internal {
static size_t CountTotalHolesSize(Heap* heap) {
size_t holes_size = 0;
OldSpaces spaces(heap);
- for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (OldSpace* space = spaces.next(); space != nullptr;
+ space = spaces.next()) {
DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
}
@@ -460,6 +461,7 @@ void GCTracer::PrintNVP() const {
"heap.external_weak_global_handles=%.2f "
"fast_promote=%.2f "
"scavenge=%.2f "
+ "scavenge.process_array_buffers=%.2f "
"scavenge.roots=%.2f "
"scavenge.weak=%.2f "
"scavenge.weak_global_handles.identify=%.2f "
@@ -502,6 +504,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::SCAVENGER_FAST_PROMOTE],
current_.scopes[Scope::SCAVENGER_SCAVENGE],
+ current_.scopes[Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_ROOTS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_WEAK],
current_
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index a966fa03d8..bf9eb2874f 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -7,14 +7,16 @@
#include <cmath>
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap other than src/heap/heap.h here!
+#include "src/heap/heap.h"
+
#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/feedback-vector.h"
-#include "src/heap/heap.h"
+// TODO(mstarzinger): There are 3 more includes to remove in order to no longer
+// leak heap internals to users of this interface!
#include "src/heap/incremental-marking-inl.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/object-stats.h"
-#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/isolate.h"
@@ -66,6 +68,13 @@ PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
+#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
+ AccessorInfo* Heap::accessor_name##_accessor() { \
+ return AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]); \
+ }
+ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
#define ROOT_ACCESSOR(type, name, camel_name) \
void Heap::set_##name(type* value) { \
/* The deserializer makes use of the fact that these common roots are */ \
@@ -217,8 +226,16 @@ AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
return CopyFixedDoubleArrayWithMap(src, src->map());
}
+AllocationResult Heap::AllocateFixedArrayWithMap(RootListIndex map_root_index,
+ int length,
+ PretenureFlag pretenure) {
+ return AllocateFixedArrayWithFiller(map_root_index, length, pretenure,
+ undefined_value());
+}
+
AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+ return AllocateFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ pretenure, undefined_value());
}
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
@@ -373,9 +390,9 @@ void Heap::FinalizeExternalString(String* string) {
kHeapObjectTag);
// Dispose of the C++ object if it has not already been disposed.
- if (*resource_addr != NULL) {
+ if (*resource_addr != nullptr) {
(*resource_addr)->Dispose();
- *resource_addr = NULL;
+ *resource_addr = nullptr;
}
}
@@ -590,6 +607,54 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_.Decrement(1);
}
+CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
+ : heap_(heap) {
+ if (heap_->write_protect_code_memory()) {
+ heap_->increment_code_space_memory_modification_scope_depth();
+ heap_->code_space()->SetReadAndWritable();
+ LargePage* page = heap_->lo_space()->first_page();
+ while (page != nullptr) {
+ if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ page->SetReadAndWritable();
+ }
+ page = page->next_page();
+ }
+ }
+}
+
+CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
+ if (heap_->write_protect_code_memory()) {
+ heap_->decrement_code_space_memory_modification_scope_depth();
+ heap_->code_space()->SetReadAndExecutable();
+ LargePage* page = heap_->lo_space()->first_page();
+ while (page != nullptr) {
+ if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ page->SetReadAndExecutable();
+ }
+ page = page->next_page();
+ }
+ }
+}
+
+CodePageMemoryModificationScope::CodePageMemoryModificationScope(
+ MemoryChunk* chunk)
+ : chunk_(chunk),
+ scope_active_(chunk_->heap()->write_protect_code_memory() &&
+ chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ if (scope_active_) {
+ DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
+ (chunk_->owner()->identity() == LO_SPACE &&
+ chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
+ chunk_->SetReadAndWritable();
+ }
+}
+
+CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
+ if (scope_active_) {
+ chunk_->SetReadAndExecutable();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 458c6c7e09..d90f086be4 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -15,7 +15,7 @@
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
@@ -23,6 +23,7 @@
#include "src/deoptimizer.h"
#include "src/feedback-vector.h"
#include "src/global-handles.h"
+#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
#include "src/heap/code-stats.h"
@@ -42,6 +43,7 @@
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
+#include "src/heap/sweeper.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/object-macros.h"
#include "src/objects/shared-function-info.h"
@@ -82,16 +84,6 @@ void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
- set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
- set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
@@ -170,10 +162,12 @@ Heap::Heap()
contexts_disposed_(0),
number_of_disposed_maps_(0),
new_space_(nullptr),
- old_space_(NULL),
- code_space_(NULL),
- map_space_(NULL),
- lo_space_(NULL),
+ old_space_(nullptr),
+ code_space_(nullptr),
+ map_space_(nullptr),
+ lo_space_(nullptr),
+ write_protect_code_memory_(false),
+ code_space_memory_modification_scope_depth_(0),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
allocations_count_(0),
@@ -201,6 +195,7 @@ Heap::Heap()
last_gc_time_(0.0),
mark_compact_collector_(nullptr),
minor_mark_compact_collector_(nullptr),
+ array_buffer_collector_(nullptr),
memory_allocator_(nullptr),
store_buffer_(nullptr),
incremental_marking_(nullptr),
@@ -225,7 +220,7 @@ Heap::Heap()
external_string_table_(this),
gc_callbacks_depth_(0),
deserialization_complete_(false),
- strong_roots_list_(NULL),
+ strong_roots_list_(nullptr),
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
@@ -237,12 +232,12 @@ Heap::Heap()
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- set_native_contexts_list(NULL);
+ set_native_contexts_list(nullptr);
set_allocation_sites_list(Smi::kZero);
set_encountered_weak_collections(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
- RememberUnmappedPage(NULL, false);
+ RememberUnmappedPage(nullptr, false);
}
size_t Heap::Capacity() {
@@ -253,7 +248,6 @@ size_t Heap::Capacity() {
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
-
return old_space_->Capacity() + code_space_->Capacity() +
map_space_->Capacity() + lo_space_->SizeOfObjects();
}
@@ -303,16 +297,24 @@ size_t Heap::Available() {
size_t total = 0;
AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
total += space->Available();
}
return total;
}
+bool Heap::CanExpandOldGeneration(size_t size) {
+ if (force_oom_) return false;
+ if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
+ // The OldGenerationCapacity does not account compaction spaces used
+ // during evacuation. Ensure that expanding the old generation does push
+ // the total allocated memory size over the maximum heap size.
+ return memory_allocator()->Size() + size <= MaxReserved();
+}
bool Heap::HasBeenSetUp() {
- return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
- lo_space_ != NULL;
+ return old_space_ != nullptr && code_space_ != nullptr &&
+ map_space_ != nullptr && lo_space_ != nullptr;
}
@@ -336,16 +338,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
- // Is there enough space left in OLD to guarantee that a scavenge can
- // succeed?
- //
- // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
- // for object promotion. It counts only the bytes that the memory
- // allocator has not yet allocated from the OS and assigned to any space,
- // and does not count available bytes already in the old space or code
- // space. Undercounting is safe---we may get an unrequested full GC when
- // a scavenge would have succeeded.
- if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
+ // Over-estimate the new space size using capacity to allow some slack.
+ if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -354,7 +348,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Default
- *reason = NULL;
+ *reason = nullptr;
return YoungGenerationCollector();
}
@@ -469,21 +463,30 @@ void Heap::ReportStatisticsAfterGC() {
}
}
-void Heap::AddRetainingPathTarget(Handle<HeapObject> object) {
+void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
+ RetainingPathOption option) {
if (!FLAG_track_retaining_path) {
PrintF("Retaining path tracking requires --trace-retaining-path\n");
} else {
+ int index = 0;
Handle<WeakFixedArray> array = WeakFixedArray::Add(
- handle(retaining_path_targets(), isolate()), object);
+ handle(retaining_path_targets(), isolate()), object, &index);
set_retaining_path_targets(*array);
+ retaining_path_target_option_[index] = option;
}
}
-bool Heap::IsRetainingPathTarget(HeapObject* object) {
- WeakFixedArray::Iterator it(retaining_path_targets());
- HeapObject* target;
- while ((target = it.Next<HeapObject>()) != nullptr) {
- if (target == object) return true;
+bool Heap::IsRetainingPathTarget(HeapObject* object,
+ RetainingPathOption* option) {
+ if (!retaining_path_targets()->IsWeakFixedArray()) return false;
+ WeakFixedArray* targets = WeakFixedArray::cast(retaining_path_targets());
+ int length = targets->Length();
+ for (int i = 0; i < length; i++) {
+ if (targets->Get(i) == object) {
+ DCHECK(retaining_path_target_option_.count(i));
+ *option = retaining_path_target_option_[i];
+ return true;
+ }
}
return false;
}
@@ -512,17 +515,23 @@ const char* RootToString(Root root) {
}
} // namespace
-void Heap::PrintRetainingPath(HeapObject* target) {
+void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
PrintF("Retaining path for %p:\n", static_cast<void*>(target));
HeapObject* object = target;
- std::vector<HeapObject*> retaining_path;
+ std::vector<std::pair<HeapObject*, bool>> retaining_path;
Root root = Root::kUnknown;
+ bool ephemeral = false;
while (true) {
- retaining_path.push_back(object);
- if (retainer_.count(object)) {
+ retaining_path.push_back(std::make_pair(object, ephemeral));
+ if (option == RetainingPathOption::kTrackEphemeralPath &&
+ ephemeral_retainer_.count(object)) {
+ object = ephemeral_retainer_[object];
+ ephemeral = true;
+ } else if (retainer_.count(object)) {
object = retainer_[object];
+ ephemeral = false;
} else {
if (retaining_root_.count(object)) {
root = retaining_root_[object];
@@ -531,10 +540,13 @@ void Heap::PrintRetainingPath(HeapObject* target) {
}
}
int distance = static_cast<int>(retaining_path.size());
- for (auto object : retaining_path) {
+ for (auto node : retaining_path) {
+ HeapObject* object = node.first;
+ bool ephemeral = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
- PrintF("Distance from root %d: ", distance);
+ PrintF("Distance from root %d%s: ", distance,
+ ephemeral ? " (ephemeral)" : "");
object->ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
@@ -550,16 +562,38 @@ void Heap::PrintRetainingPath(HeapObject* target) {
}
void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
+ if (retainer_.count(object)) return;
retainer_[object] = retainer;
- if (IsRetainingPathTarget(object)) {
- PrintRetainingPath(object);
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (IsRetainingPathTarget(object, &option)) {
+ // Check if the retaining path was already printed in
+ // AddEphemeralRetainer().
+ if (ephemeral_retainer_.count(object) == 0 ||
+ option == RetainingPathOption::kDefault) {
+ PrintRetainingPath(object, option);
+ }
+ }
+}
+
+void Heap::AddEphemeralRetainer(HeapObject* retainer, HeapObject* object) {
+ if (ephemeral_retainer_.count(object)) return;
+ ephemeral_retainer_[object] = retainer;
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (IsRetainingPathTarget(object, &option) &&
+ option == RetainingPathOption::kTrackEphemeralPath) {
+ // Check if the retaining path was already printed in AddRetainer().
+ if (retainer_.count(object) == 0) {
+ PrintRetainingPath(object, option);
+ }
}
}
void Heap::AddRetainingRoot(Root root, HeapObject* object) {
+ if (retaining_root_.count(object)) return;
retaining_root_[object] = root;
- if (IsRetainingPathTarget(object)) {
- PrintRetainingPath(object);
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (IsRetainingPathTarget(object, &option)) {
+ PrintRetainingPath(object, option);
}
}
@@ -609,6 +643,7 @@ void Heap::GarbageCollectionPrologue() {
UpdateNewSpaceAllocationCounter();
if (FLAG_track_retaining_path) {
retainer_.clear();
+ ephemeral_retainer_.clear();
retaining_root_.clear();
}
}
@@ -616,7 +651,7 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
total += space->SizeOfObjects();
}
return total;
@@ -641,13 +676,13 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(UnseededNumberDictionary* value) {
+void Heap::SetRootCodeStubs(NumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
void Heap::RepairFreeListsAfterDeserialization() {
PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != NULL;
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->RepairFreeListsAfterDeserialization();
}
@@ -836,6 +871,17 @@ void Heap::ProcessPretenuringFeedback() {
}
}
+void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+ CodePageMemoryModificationScope modification_scope(chunk);
+ code->InvalidateEmbeddedObjects();
+}
+
+void Heap::InvalidateCodeDeoptimizationData(Code* code) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+ CodePageMemoryModificationScope modification_scope(chunk);
+ code->set_deoptimization_data(empty_fixed_array());
+}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
@@ -1145,8 +1191,7 @@ void Heap::ReportExternalMemoryPressure() {
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceIncrementalMarking(
- deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
}
@@ -1169,7 +1214,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate());
- const char* collector_reason = NULL;
+ const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
#ifdef DEBUG
@@ -1439,6 +1484,13 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
}
}
if (perform_gc) {
+ // We cannot perfom a GC with an uninitialized isolate. This check
+ // fails for example if the max old space size is chosen unwisely,
+ // so that we cannot allocate space to deserialize the initial heap.
+ if (!deserialization_complete_) {
+ V8::FatalProcessOutOfMemory(
+ "insufficient memory to create an Isolate");
+ }
if (space == NEW_SPACE) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
} else {
@@ -1470,26 +1522,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
}
-void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive() &&
- !incremental_marking()->IsMarking()) {
- return;
- }
-
- Object* context = native_contexts_list();
- while (!context->IsUndefined(isolate())) {
- // GC can happen when the context is not fully initialized,
- // so the cache can be undefined.
- Object* cache =
- Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
- if (!cache->IsUndefined(isolate())) {
- NormalizedMapCache::cast(cache)->Clear();
- }
- context = Context::cast(context)->next_context_link();
- }
-}
-
-
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
@@ -1542,7 +1574,7 @@ bool Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
- int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
+ size_t start_new_space_size = Heap::new_space()->Size();
{
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
@@ -1581,9 +1613,16 @@ bool Heap::PerformGarbageCollection(
ProcessPretenuringFeedback();
}
- UpdateSurvivalStatistics(start_new_space_size);
+ UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
ConfigureInitialOldGenerationSize();
+ if (collector != MARK_COMPACTOR) {
+ // Objects that died in the new space might have been accounted
+ // as bytes marked ahead of schedule by the incremental marker.
+ incremental_marking()->UpdateMarkedBytesAfterScavenge(
+ start_new_space_size - SurvivedNewSpaceObjectSize());
+ }
+
if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
}
@@ -1672,6 +1711,8 @@ void Heap::MarkCompact() {
uint64_t size_of_objects_before_gc = SizeOfObjects();
+ CodeSpaceMemoryModificationScope code_modifcation(this);
+
mark_compact_collector()->Prepare();
ms_count_++;
@@ -1730,7 +1771,6 @@ void Heap::MarkCompactPrologue() {
isolate_->compilation_cache()->MarkCompactPrologue();
FlushNumberStringCache();
- ClearNormalizedMapCaches();
}
@@ -1753,7 +1793,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
- return heap->InNewSpace(*p) &&
+ return heap->InFromSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
@@ -1770,7 +1810,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
- return NULL;
+ return nullptr;
}
private:
@@ -1786,7 +1826,7 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
- mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+ mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1827,10 +1867,15 @@ static bool IsLogging(Isolate* isolate) {
isolate->heap_profiler()->is_tracking_object_moves());
}
-class ScavengingItem : public ItemParallelJob::Item {
+class PageScavengingItem final : public ItemParallelJob::Item {
public:
- virtual ~ScavengingItem() {}
- virtual void Process(Scavenger* scavenger) = 0;
+ explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ virtual ~PageScavengingItem() {}
+
+ void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
+
+ private:
+ MemoryChunk* const chunk_;
};
class ScavengingTask final : public ItemParallelJob::Task {
@@ -1846,8 +1891,8 @@ class ScavengingTask final : public ItemParallelJob::Task {
{
barrier_->Start();
TimedScope scope(&scavenging_time);
- ScavengingItem* item = nullptr;
- while ((item = GetItem<ScavengingItem>()) != nullptr) {
+ PageScavengingItem* item = nullptr;
+ while ((item = GetItem<PageScavengingItem>()) != nullptr) {
item->Process(scavenger_);
item->MarkFinished();
}
@@ -1870,41 +1915,6 @@ class ScavengingTask final : public ItemParallelJob::Task {
OneshotBarrier* const barrier_;
};
-class PageScavengingItem final : public ScavengingItem {
- public:
- explicit PageScavengingItem(Heap* heap, MemoryChunk* chunk)
- : heap_(heap), chunk_(chunk) {}
- virtual ~PageScavengingItem() {}
-
- void Process(Scavenger* scavenger) final {
- base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
- scavenger->AnnounceLockedPage(chunk_);
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, scavenger](Address addr) {
- return scavenger->CheckAndScavengeObject(heap_, addr);
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_,
- [this, scavenger](SlotType type, Address host_addr, Address addr) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_->isolate(), type, addr, [this, scavenger](Object** addr) {
- // We expect that objects referenced by code are long
- // living. If we do not force promotion, then we need to
- // clear old_to_new slots in dead code objects after
- // mark-compact.
- return scavenger->CheckAndScavengeObject(
- heap_, reinterpret_cast<Address>(addr));
- });
- });
- }
-
- private:
- Heap* const heap_;
- MemoryChunk* const chunk_;
-};
-
int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
@@ -1932,13 +1942,14 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
- if (mark_compact_collector()->sweeper().sweeping_in_progress() &&
+ if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
memory_allocator_->unmapper()->NumberOfDelayedChunks() >
static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
mark_compact_collector()->EnsureSweepingCompleted();
}
- mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+ // TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
+ mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
@@ -1965,51 +1976,74 @@ void Heap::Scavenge() {
job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
}
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- this, [this, &job](MemoryChunk* chunk) {
- job.AddItem(new PageScavengingItem(this, chunk));
- });
+ {
+ Sweeper* sweeper = mark_compact_collector()->sweeper();
+ // Pause the concurrent sweeper.
+ Sweeper::PauseOrCompleteScope pause_scope(sweeper);
+ // Filter out pages from the sweeper that need to be processed for old to
+ // new slots by the Scavenger. After processing, the Scavenger adds back
+ // pages that are still unsweeped. This way the Scavenger has exclusive
+ // access to the slots of a page and can completely avoid any locks on
+ // the page itself.
+ Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
+ filter_scope.FilterOldSpaceSweepingPages(
+ [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ this, [&job](MemoryChunk* chunk) {
+ job.AddItem(new PageScavengingItem(chunk));
+ });
- RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
+ RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
- {
- // Identify weak unmodified handles. Requires an unmodified graph.
- TRACE_GC(tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &JSObject::IsUnmodifiedApiObject);
- }
- {
- // Copy roots.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
- IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- }
- {
- // Weak collections are held strongly by the Scavenger.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
- IterateEncounteredWeakCollections(&root_scavenge_visitor);
- }
- {
- // Parallel phase scavenging all copied and promoted objects.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
- DCHECK(copied_list.IsGlobalEmpty());
- DCHECK(promotion_list.IsGlobalEmpty());
- }
- {
- // Scavenge weak global handles.
- TRACE_GC(tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &root_scavenge_visitor);
- scavengers[kMainThreadId]->Process();
- }
+ {
+ // Identify weak unmodified handles. Requires an unmodified graph.
+ TRACE_GC(
+ tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &JSObject::IsUnmodifiedApiObject);
+ }
+ {
+ // Copy roots.
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
+ IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ }
+ {
+ // Weak collections are held strongly by the Scavenger.
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
+ IterateEncounteredWeakCollections(&root_scavenge_visitor);
+ }
+ {
+ // Parallel phase scavenging all copied and promoted objects.
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ job.Run();
+ DCHECK(copied_list.IsGlobalEmpty());
+ DCHECK(promotion_list.IsGlobalEmpty());
+ }
+ {
+ // Scavenge weak global handles.
+ TRACE_GC(tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
+ &root_scavenge_visitor);
+ scavengers[kMainThreadId]->Process();
+
+ DCHECK(copied_list.IsGlobalEmpty());
+ DCHECK(promotion_list.IsGlobalEmpty());
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ &root_scavenge_visitor, &IsUnscavengedHeapObject);
+ }
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i]->Finalize();
- delete scavengers[i];
+ for (int i = 0; i < num_scavenge_tasks; i++) {
+ scavengers[i]->Finalize();
+ delete scavengers[i];
+ }
}
UpdateNewSpaceReferencesInExternalStringTable(
@@ -2032,7 +2066,11 @@ void Heap::Scavenge() {
// Set age mark.
new_space_->set_age_mark(new_space_->top());
- ArrayBufferTracker::FreeDeadInNewSpace(this);
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
+ ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this);
+ }
+ array_buffer_collector()->FreeAllocationsOnBackgroundThread();
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
if (chunk->SweepingDone()) {
@@ -2078,10 +2116,10 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
if (!string->IsExternalString()) {
// Original external string has been internalized.
DCHECK(string->IsThinString());
- return NULL;
+ return nullptr;
}
heap->FinalizeExternalString(string);
- return NULL;
+ return nullptr;
}
// String is still reachable.
@@ -2117,7 +2155,7 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
for (Object** p = start; p < end; ++p) {
String* target = updater_func(heap_, p);
- if (target == NULL) continue;
+ if (target == nullptr) continue;
DCHECK(target->IsExternalString());
@@ -2371,9 +2409,9 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
}
// GetVisitorId requires a properly initialized LayoutDescriptor.
map->set_visitor_id(Map::GetVisitorId(map));
- map->clear_unused();
- map->set_inobject_properties_or_constructor_function_index(0);
- map->set_unused_property_fields(0);
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ DCHECK(!map->IsJSObjectMap());
+ map->SetInObjectUnusedPropertyFields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -2385,10 +2423,10 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
return map;
}
-
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ int inobject_properties) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
!Map::CanHaveFastTransitionableElementsKind(instance_type),
@@ -2405,13 +2443,19 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
- map->clear_unused();
- map->set_inobject_properties_or_constructor_function_index(0);
+ if (map->IsJSObjectMap()) {
+ map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
+ inobject_properties);
+ DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
+ } else {
+ DCHECK_EQ(inobject_properties, 0);
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ }
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->set_weak_cell_cache(Smi::kZero);
map->set_raw_transitions(Smi::kZero);
- map->set_unused_property_fields(0);
+ map->SetInObjectUnusedPropertyFields(inobject_properties);
map->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2469,20 +2513,18 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
-AllocationResult Heap::AllocateBigInt(int length, bool zero_initialize,
- PretenureFlag pretenure) {
+AllocationResult Heap::AllocateBigInt(int length) {
if (length < 0 || length > BigInt::kMaxLength) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
}
int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
+ AllocationSpace space = SelectSpace(NOT_TENURED);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
}
result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
- BigInt::cast(result)->Initialize(length, zero_initialize);
return result;
}
@@ -2611,7 +2653,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
case kRetainingPathTargetsRootIndex:
- case kCodeCoverageListRootIndex:
+ case kFeedbackVectorsForProfilingToolsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
case kSerializedTemplatesRootIndex:
@@ -2620,6 +2662,9 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
case kMessageListenersRootIndex:
+ case kDeserializeLazyHandlerRootIndex:
+ case kDeserializeLazyHandlerWideRootIndex:
+ case kDeserializeLazyHandlerExtraWideRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -2836,8 +2881,8 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
}
// At this point, we may be deserializing the heap from a snapshot, and
- // none of the maps have been created yet and are NULL.
- DCHECK((filler->map() == NULL && !deserialization_complete_) ||
+ // none of the maps have been created yet and are nullptr.
+ DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
filler->map()->IsMap());
return filler;
}
@@ -2925,16 +2970,12 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
DCHECK_GE(elements_to_trim, 0);
int bytes_to_trim;
- if (object->IsFixedTypedArrayBase()) {
- InstanceType type = object->map()->instance_type();
- bytes_to_trim =
- FixedTypedArrayBase::TypedArraySize(type, len) -
- FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
- } else if (object->IsByteArray()) {
+ DCHECK(!object->IsFixedTypedArrayBase());
+ if (object->IsByteArray()) {
int new_size = ByteArray::SizeFor(len - elements_to_trim);
bytes_to_trim = ByteArray::SizeFor(len) - new_size;
DCHECK_GE(bytes_to_trim, 0);
- } else if (object->IsFixedArray() || object->IsTransitionArray()) {
+ } else if (object->IsFixedArray()) {
bytes_to_trim = elements_to_trim * kPointerSize;
} else {
DCHECK(object->IsFixedDoubleArray());
@@ -3060,23 +3101,21 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
return elements;
}
-
-AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
+AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
- if (immovable) {
+ if (movability == kImmovable) {
Address address = result->address();
MemoryChunk* chunk = MemoryChunk::FromAddress(address);
// Code objects which should stay at a fixed address are allocated either
- // in the first page of code space (objects on the first page of each space
- // are never moved), in large object space, or (during snapshot creation)
- // the containing page is marked as immovable.
- if (!Heap::IsImmovable(result) &&
- !code_space_->FirstPage()->Contains(address)) {
- if (isolate()->serializer_enabled()) {
+ // in the first page of code space, in large object space, or (during
+ // snapshot creation) the containing page is marked as immovable.
+ if (!Heap::IsImmovable(result)) {
+ if (isolate()->serializer_enabled() ||
+ code_space_->FirstPage()->Contains(address)) {
chunk->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
@@ -3099,8 +3138,86 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
return code;
}
+AllocationResult Heap::AllocateCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, ByteArray* reloc_info,
+ CodeDataContainer* data_container, HandlerTable* handler_table,
+ ByteArray* source_position_table, DeoptimizationData* deopt_data,
+ Movability movability, uint32_t stub_key, bool is_turbofanned,
+ int stack_slots, int safepoint_table_offset) {
+ bool has_unwinding_info = desc.unwinding_info != nullptr;
+ DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
+ (!has_unwinding_info && desc.unwinding_info_size == 0));
+
+ // Compute size.
+ int body_size = desc.instr_size;
+ int unwinding_info_size_field_size = kInt64Size;
+ if (has_unwinding_info) {
+ body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
+ unwinding_info_size_field_size;
+ }
+ int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
+
+ Code* code = nullptr;
+ CodeSpaceMemoryModificationScope code_allocation(this);
+ AllocationResult allocation = AllocateCode(object_size, movability);
+ if (!allocation.To(&code)) return allocation;
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+ code->set_instruction_size(desc.instr_size);
+ code->set_relocation_info(reloc_info);
+ code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
+ code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_code_data_container(data_container);
+ code->set_has_tagged_params(true);
+ code->set_deoptimization_data(deopt_data);
+ code->set_stub_key(stub_key);
+ code->set_handler_table(handler_table);
+ code->set_source_position_table(source_position_table);
+ code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ code->set_builtin_index(builtin_index);
+ code->set_trap_handler_index(Smi::FromInt(-1));
+
+ switch (code->kind()) {
+ case Code::OPTIMIZED_FUNCTION:
+ code->set_marked_for_deoptimization(false);
+ break;
+ case Code::JS_TO_WASM_FUNCTION:
+ case Code::C_WASM_ENTRY:
+ case Code::WASM_FUNCTION:
+ code->set_has_tagged_params(false);
+ break;
+ default:
+ break;
+ }
+
+ // Allow self references to created code object by patching the handle to
+ // point to the newly allocated Code object.
+ if (!self_ref.is_null()) *(self_ref.location()) = code;
+
+ // Migrate generated code.
+ // The generated code can contain Object** values (typically from handles)
+ // that are dereferenced during the copy to point directly to the actual heap
+ // objects. These pointers can include references to the code object itself,
+ // through the self_reference parameter.
+ code->CopyFrom(desc);
+
+ code->clear_padding();
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
+ DCHECK(!memory_allocator()->code_range()->valid() ||
+ memory_allocator()->code_range()->contains(code->address()) ||
+ object_size <= code_space()->AreaSize());
+ return code;
+}
-AllocationResult Heap::CopyCode(Code* code) {
+AllocationResult Heap::CopyCode(Code* code, CodeDataContainer* data_container) {
AllocationResult allocation;
HeapObject* result = nullptr;
@@ -3115,23 +3232,29 @@ AllocationResult Heap::CopyCode(Code* code) {
CopyBlock(new_addr, old_addr, obj_size);
Code* new_code = Code::cast(result);
- // Relocate the copy.
- DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(code->address()) ||
- obj_size <= code_space()->AreaSize());
+ // Set the {CodeDataContainer}, it cannot be shared.
+ new_code->set_code_data_container(data_container);
// Clear the trap handler index since they can't be shared between code. We
- // have to do this before calling Relocate becauase relocate would adjust the
+ // have to do this before calling Relocate because relocate would adjust the
// base pointer for the old code.
new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+ // Relocate the copy.
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
incremental_marking()->ProcessBlackAllocatedObject(new_code);
// Record all references to embedded objects in the new code object.
RecordWritesIntoCode(new_code);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) new_code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
+ DCHECK(!memory_allocator()->code_range()->valid() ||
+ memory_allocator()->code_range()->contains(new_code->address()) ||
+ obj_size <= code_space()->AreaSize());
return new_code;
}
@@ -3177,7 +3300,7 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
DCHECK(gc_state_ == NOT_IN_GC);
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- if (allocation_site != NULL) {
+ if (allocation_site != nullptr) {
size += AllocationMemento::kSize;
}
HeapObject* result = nullptr;
@@ -3187,7 +3310,7 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
WriteBarrierMode write_barrier_mode =
space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(map, write_barrier_mode);
- if (allocation_site != NULL) {
+ if (allocation_site != nullptr) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
InitializeAllocationMemento(alloc_memento, allocation_site);
@@ -3302,10 +3425,10 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
int object_size = map->instance_size();
HeapObject* clone = nullptr;
- DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
+ DCHECK(site == nullptr || AllocationSite::CanTrack(map->instance_type()));
int adjusted_object_size =
- site != NULL ? object_size + AllocationMemento::kSize : object_size;
+ site != nullptr ? object_size + AllocationMemento::kSize : object_size;
AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
if (!allocation.To(&clone)) return allocation;
@@ -3314,7 +3437,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// the contents without worrying about updating the write barrier.
CopyBlock(clone->address(), source->address(), object_size);
- if (site != NULL) {
+ if (site != nullptr) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(clone) + object_size);
InitializeAllocationMemento(alloc_memento, site);
@@ -3742,22 +3865,21 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
return result;
}
-
-AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
- PretenureFlag pretenure,
- Object* filler) {
- DCHECK_LE(0, length);
- DCHECK(empty_fixed_array()->IsFixedArray());
- if (length == 0) return empty_fixed_array();
-
+AllocationResult Heap::AllocateFixedArrayWithFiller(
+ RootListIndex map_root_index, int length, PretenureFlag pretenure,
+ Object* filler) {
+ // Zero-length case must be handled outside, where the knowledge about
+ // the map is.
+ DCHECK_LT(0, length);
DCHECK(!InNewSpace(filler));
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
if (!allocation.To(&result)) return allocation;
}
-
- result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
+ DCHECK(RootIsImmortalImmovable(map_root_index));
+ Map* map = Map::cast(root(map_root_index));
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4195,7 +4317,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
const double remaining_idle_time_in_ms =
incremental_marking()->AdvanceIncrementalMarking(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
+ StepOrigin::kTask);
if (remaining_idle_time_in_ms > 0.0) {
FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
@@ -4425,7 +4547,7 @@ void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
space->Print();
}
}
@@ -4725,7 +4847,7 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
void Heap::VerifyRememberedSetFor(HeapObject* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- base::LockGuard<base::RecursiveMutex> lock_guard(chunk->mutex());
+ base::LockGuard<base::Mutex> lock_guard(chunk->mutex());
Address start = object->address();
Address end = start + object->Size();
std::set<Address> old_to_new;
@@ -4853,10 +4975,6 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
- // The serializer/deserializer iterates the root list twice, first to pick
- // off immortal immovable roots to make sure they end up on the first page,
- // and then again for the rest.
- if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
@@ -4889,11 +5007,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over global handles.
switch (mode) {
- case VISIT_ONLY_STRONG_ROOT_LIST:
- UNREACHABLE();
- break;
case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
- break;
case VISIT_ONLY_STRONG:
isolate_->global_handles()->IterateStrongRoots(v);
break;
@@ -5086,7 +5200,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
HeapIterator iterator(this);
- for (HeapObject* obj = iterator.next(); obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
@@ -5094,9 +5208,9 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
stats->size_per_type[type] += obj->Size();
}
}
- if (stats->last_few_messages != NULL)
+ if (stats->last_few_messages != nullptr)
GetFromRingBuffer(stats->last_few_messages);
- if (stats->js_stacktrace != NULL) {
+ if (stats->js_stacktrace != nullptr) {
FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
if (gc_state() == Heap::NOT_IN_GC) {
@@ -5346,7 +5460,31 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// start marking immediately.
return IncrementalMarkingLimit::kHardLimit;
}
+
+ if (FLAG_stress_marking > 0) {
+ double gained_since_last_gc =
+ PromotedSinceLastGC() +
+ (external_memory_ - external_memory_at_last_mark_compact_);
+ double size_before_gc = PromotedTotalSize() - gained_since_last_gc;
+ double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
+ if (bytes_to_limit > 0) {
+ double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
+
+ if (FLAG_trace_incremental_marking) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
+ current_percent);
+ }
+
+ if (static_cast<int>(current_percent) >= stress_marking_percentage_) {
+ stress_marking_percentage_ = NextStressMarkingLimit();
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ }
+ }
+
size_t old_generation_space_available = OldGenerationSpaceAvailable();
+
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
@@ -5380,7 +5518,8 @@ void Heap::DisableInlineAllocation() {
// Update inline allocation limit for old spaces.
PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != NULL;
+ CodeSpaceMemoryModificationScope modification_scope(this);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->EmptyAllocationInfo();
}
@@ -5404,7 +5543,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
- reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
@@ -5414,10 +5553,9 @@ bool Heap::SetUp() {
store_buffer_ = new StoreBuffer(this);
mark_compact_collector_ = new MarkCompactCollector(this);
- incremental_marking_ = new IncrementalMarking(this);
+ incremental_marking_ =
+ new IncrementalMarking(this, mark_compact_collector_->marking_worklist());
- incremental_marking_->set_marking_worklist(
- mark_compact_collector_->marking_worklist());
if (FLAG_concurrent_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
@@ -5465,6 +5603,7 @@ bool Heap::SetUp() {
tracer_ = new GCTracer(this);
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
+ array_buffer_collector_ = new ArrayBufferCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
if (V8_UNLIKELY(FLAG_gc_stats)) {
@@ -5491,6 +5630,12 @@ bool Heap::SetUp() {
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
+ if (FLAG_stress_marking > 0) {
+ stress_marking_percentage_ = NextStressMarkingLimit();
+ }
+
+ write_protect_code_memory_ = FLAG_write_protect_code_memory;
+
return true;
}
@@ -5504,7 +5649,7 @@ void Heap::InitializeHashSeed() {
}
void Heap::SetStackLimits() {
- DCHECK(isolate_ != NULL);
+ DCHECK_NOT_NULL(isolate_);
DCHECK(isolate_ == isolate());
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
@@ -5527,10 +5672,13 @@ void Heap::PrintAllocationsHash() {
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
+int Heap::NextStressMarkingLimit() {
+ return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
+}
void Heap::NotifyDeserializationComplete() {
PagedSpaces spaces(this);
- for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+ for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
@@ -5606,6 +5754,11 @@ void Heap::TearDown() {
minor_mark_compact_collector_ = nullptr;
}
+ if (array_buffer_collector_ != nullptr) {
+ delete array_buffer_collector_;
+ array_buffer_collector_ = nullptr;
+ }
+
delete incremental_marking_;
incremental_marking_ = nullptr;
@@ -5648,37 +5801,37 @@ void Heap::TearDown() {
delete new_space_;
new_space_ = nullptr;
- if (old_space_ != NULL) {
+ if (old_space_ != nullptr) {
delete old_space_;
- old_space_ = NULL;
+ old_space_ = nullptr;
}
- if (code_space_ != NULL) {
+ if (code_space_ != nullptr) {
delete code_space_;
- code_space_ = NULL;
+ code_space_ = nullptr;
}
- if (map_space_ != NULL) {
+ if (map_space_ != nullptr) {
delete map_space_;
- map_space_ = NULL;
+ map_space_ = nullptr;
}
- if (lo_space_ != NULL) {
+ if (lo_space_ != nullptr) {
lo_space_->TearDown();
delete lo_space_;
- lo_space_ = NULL;
+ lo_space_ = nullptr;
}
store_buffer()->TearDown();
memory_allocator()->TearDown();
- StrongRootsList* next = NULL;
+ StrongRootsList* next = nullptr;
for (StrongRootsList* list = strong_roots_list_; list; list = next) {
next = list->next;
delete list;
}
- strong_roots_list_ = NULL;
+ strong_roots_list_ = nullptr;
delete store_buffer_;
store_buffer_ = nullptr;
@@ -5776,7 +5929,7 @@ void CompactWeakFixedArray(Object* object) {
void Heap::CompactWeakFixedArrays() {
// Find known WeakFixedArrays and compact them.
HeapIterator iterator(this);
- for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+ for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
if (o->IsPrototypeInfo()) {
Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
if (prototype_users->IsWeakFixedArray()) {
@@ -5946,7 +6099,7 @@ Space* AllSpaces::next() {
case LO_SPACE:
return heap_->lo_space();
default:
- return NULL;
+ return nullptr;
}
}
@@ -5959,7 +6112,7 @@ PagedSpace* PagedSpaces::next() {
case MAP_SPACE:
return heap_->map_space();
default:
- return NULL;
+ return nullptr;
}
}
@@ -5971,7 +6124,7 @@ OldSpace* OldSpaces::next() {
case CODE_SPACE:
return heap_->code_space();
default:
- return NULL;
+ return nullptr;
}
}
@@ -6247,7 +6400,7 @@ void Heap::RegisterStrongRoots(Object** start, Object** end) {
void Heap::UnregisterStrongRoots(Object** start) {
- StrongRootsList* prev = NULL;
+ StrongRootsList* prev = nullptr;
StrongRootsList* list = strong_roots_list_;
while (list != nullptr) {
StrongRootsList* next = list->next;
@@ -6265,6 +6418,23 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
+bool Heap::IsDeserializeLazyHandler(Code* code) {
+ return (code == deserialize_lazy_handler() ||
+ code == deserialize_lazy_handler_wide() ||
+ code == deserialize_lazy_handler_extra_wide());
+}
+
+void Heap::SetDeserializeLazyHandler(Code* code) {
+ set_deserialize_lazy_handler(code);
+}
+
+void Heap::SetDeserializeLazyHandlerWide(Code* code) {
+ set_deserialize_lazy_handler_wide(code);
+}
+
+void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
+ set_deserialize_lazy_handler_extra_wide(code);
+}
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
@@ -6330,7 +6500,7 @@ const char* AllocationSpaceName(AllocationSpace space) {
default:
UNREACHABLE();
}
- return NULL;
+ return nullptr;
}
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
@@ -6404,5 +6574,70 @@ void Heap::CreateObjectStats() {
}
}
+namespace {
+
+Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+ MapWord map_word = object->map_word();
+ return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
+ : map_word.ToMap();
+}
+
+int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+ return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
+}
+
+Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
+ Code* code = reinterpret_cast<Code*>(object);
+ DCHECK_NOT_NULL(code);
+ DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
+ return code;
+}
+
+} // namespace
+
+bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
+ Map* map = GcSafeMapOfCodeSpaceObject(code);
+ DCHECK(map == code->GetHeap()->code_map());
+ Address start = code->address();
+ Address end = code->address() + code->SizeFromMap(map);
+ return start <= addr && addr < end;
+}
+
+Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+ // Check if the inner pointer points into a large object chunk.
+ LargePage* large_page = lo_space()->FindPage(inner_pointer);
+ if (large_page != nullptr) {
+ return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
+ }
+
+ if (!code_space()->Contains(inner_pointer)) {
+ return nullptr;
+ }
+
+ // Iterate through the page until we reach the end or find an object starting
+ // after the inner pointer.
+ Page* page = Page::FromAddress(inner_pointer);
+ DCHECK_EQ(page->owner(), code_space());
+ mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(page);
+
+ Address addr = page->skip_list()->StartFor(inner_pointer);
+ Address top = code_space()->top();
+ Address limit = code_space()->limit();
+
+ while (true) {
+ if (addr == top && addr != limit) {
+ addr = limit;
+ continue;
+ }
+
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
+ Address next_addr = addr + obj_size;
+ if (next_addr > inner_pointer)
+ return GcSafeCastToCode(this, obj, inner_pointer);
+ addr = next_addr;
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 687be8a3db..7048d01e25 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -13,12 +13,14 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
+#include "src/accessors.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
+#include "src/objects/code.h"
#include "src/objects/hash-table.h"
#include "src/objects/string-table.h"
#include "src/visitors.h"
@@ -36,6 +38,12 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
+class BytecodeArray;
+class CodeDataContainer;
+class DeoptimizationData;
+class HandlerTable;
+class JSArrayBuffer;
+
using v8::MemoryPressureLevel;
// Defines all the roots in Heap.
@@ -96,13 +104,20 @@ using v8::MemoryPressureLevel;
V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
V(Map, script_context_table_map, ScriptContextTableMap) \
/* Maps */ \
+ V(Map, descriptor_array_map, DescriptorArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
- V(Map, ordered_hash_table_map, OrderedHashTableMap) \
- V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
+ V(Map, ordered_hash_map_map, OrderedHashMapMap) \
+ V(Map, ordered_hash_set_map, OrderedHashSetMap) \
+ V(Map, name_dictionary_map, NameDictionaryMap) \
+ V(Map, global_dictionary_map, GlobalDictionaryMap) \
+ V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, string_table_map, StringTableMap) \
+ V(Map, weak_hash_table_map, WeakHashTableMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
+ V(Map, code_data_container_map, CodeDataContainerMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
@@ -179,15 +194,16 @@ using v8::MemoryPressureLevel;
V(Script, empty_script, EmptyScript) \
V(Cell, undefined_cell, UndefinedCell) \
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
- V(SeededNumberDictionary, empty_slow_element_dictionary, \
+ V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(FixedArray, empty_ordered_hash_table, EmptyOrderedHashTable) \
+ V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
+ V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
/* Protectors */ \
V(Cell, array_constructor_protector, ArrayConstructorProtector) \
- V(PropertyCell, array_protector, ArrayProtector) \
+ V(PropertyCell, no_elements_protector, NoElementsProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, species_protector, SpeciesProtector) \
V(Cell, string_length_protector, StringLengthProtector) \
@@ -212,7 +228,7 @@ using v8::MemoryPressureLevel;
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
V(Object, script_list, ScriptList) \
- V(UnseededNumberDictionary, code_stubs, CodeStubs) \
+ V(NumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(FixedArray, detached_contexts, DetachedContexts) \
@@ -224,13 +240,19 @@ using v8::MemoryPressureLevel;
/* slots refer to the code with the reference to the weak object. */ \
V(ArrayList, weak_new_space_object_to_code_list, \
WeakNewSpaceObjectToCodeList) \
- /* List to hold onto feedback vectors that we need for code coverage */ \
- V(Object, code_coverage_list, CodeCoverageList) \
+ /* Feedback vectors that we need for code coverage or type profile */ \
+ V(Object, feedback_vectors_for_profiling_tools, \
+ FeedbackVectorsForProfilingTools) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_templates, SerializedTemplates) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
+ /* DeserializeLazy handlers for lazy bytecode deserialization */ \
+ V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
+ V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
+ V(Object, deserialize_lazy_handler_extra_wide, \
+ DeserializeLazyHandlerExtraWide) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode)
@@ -249,8 +271,6 @@ using v8::MemoryPressureLevel;
ConstructStubCreateDeoptPCOffset) \
V(Smi, construct_stub_invoke_deopt_pc_offset, \
ConstructStubInvokeDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
#define ROOT_LIST(V) \
@@ -266,7 +286,7 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(ArrayBufferNeuteringProtector) \
V(ArrayIteratorProtector) \
- V(ArrayProtector) \
+ V(NoElementsProtector) \
V(BigIntMap) \
V(BlockContextMap) \
V(BooleanMap) \
@@ -275,6 +295,7 @@ using v8::MemoryPressureLevel;
V(CatchContextMap) \
V(CellMap) \
V(CodeMap) \
+ V(DescriptorArrayMap) \
V(EmptyByteArray) \
V(EmptyDescriptorArray) \
V(EmptyFixedArray) \
@@ -287,6 +308,8 @@ using v8::MemoryPressureLevel;
V(EmptyFixedUint32Array) \
V(EmptyFixedUint8Array) \
V(EmptyFixedUint8ClampedArray) \
+ V(EmptyOrderedHashMap) \
+ V(EmptyOrderedHashSet) \
V(EmptyPropertyCell) \
V(EmptyScopeInfo) \
V(EmptyScript) \
@@ -304,6 +327,7 @@ using v8::MemoryPressureLevel;
V(ForeignMap) \
V(FreeSpaceMap) \
V(FunctionContextMap) \
+ V(GlobalDictionaryMap) \
V(GlobalPropertyCellMap) \
V(HashTableMap) \
V(HeapNumberMap) \
@@ -320,25 +344,29 @@ using v8::MemoryPressureLevel;
V(ModuleContextMap) \
V(ModuleInfoMap) \
V(MutableHeapNumberMap) \
+ V(NameDictionaryMap) \
V(NanValue) \
V(NativeContextMap) \
V(NoClosuresCellMap) \
V(NullMap) \
V(NullValue) \
+ V(NumberDictionaryMap) \
V(OneClosureCellMap) \
V(OnePointerFillerMap) \
V(OptimizedOut) \
- V(OrderedHashTableMap) \
+ V(OrderedHashMapMap) \
+ V(OrderedHashSetMap) \
V(PropertyArrayMap) \
- V(SmallOrderedHashMapMap) \
- V(SmallOrderedHashSetMap) \
V(ScopeInfoMap) \
V(ScriptContextMap) \
V(SharedFunctionInfoMap) \
V(SloppyArgumentsElementsMap) \
+ V(SmallOrderedHashMapMap) \
+ V(SmallOrderedHashSetMap) \
V(SpeciesProtector) \
V(StaleRegister) \
V(StringLengthProtector) \
+ V(StringTableMap) \
V(SymbolMap) \
V(TerminationException) \
V(TheHoleMap) \
@@ -352,6 +380,7 @@ using v8::MemoryPressureLevel;
V(UninitializedMap) \
V(UninitializedValue) \
V(WeakCellMap) \
+ V(WeakHashTableMap) \
V(WithContextMap) \
PRIVATE_SYMBOL_LIST(V)
@@ -362,6 +391,7 @@ using v8::MemoryPressureLevel;
} while (false)
class AllocationObserver;
+class ArrayBufferCollector;
class ArrayBufferTracker;
class ConcurrentMarking;
class GCIdleTimeAction;
@@ -397,6 +427,12 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class FixedArrayVisitationMode { kRegular, kIncremental };
+
+enum class TraceRetainingPathMode { kEnabled, kDisabled };
+
+enum class RetainingPathOption { kDefault, kTrackEphemeralPath };
+
enum class GarbageCollectionReason {
kUnknown = 0,
kAllocationFailure = 1,
@@ -478,7 +514,7 @@ struct CommentStatistic {
int size;
int count;
void Clear() {
- comment = NULL;
+ comment = nullptr;
size = 0;
count = 0;
}
@@ -541,11 +577,16 @@ class Heap {
WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION
+#define ACCESSOR_INDEX_DECLARATION(accessor_name, AccessorName) \
+ k##AccessorName##AccessorRootIndex,
+ ACCESSOR_INFO_LIST(ACCESSOR_INDEX_DECLARATION)
+#undef ACCESSOR_INDEX_DECLARATION
+
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
+ kStringTableRootIndex,
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -770,13 +811,27 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
+ bool write_protect_code_memory() const { return write_protect_code_memory_; }
+
+ uintptr_t code_space_memory_modification_scope_depth() {
+ return code_space_memory_modification_scope_depth_;
+ }
+
+ void increment_code_space_memory_modification_scope_depth() {
+ code_space_memory_modification_scope_depth_++;
+ }
+
+ void decrement_code_space_memory_modification_scope_depth() {
+ code_space_memory_modification_scope_depth_--;
+ }
+
inline HeapState gc_state() { return gc_state_; }
void SetGCState(HeapState state);
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
// If an object has an AllocationMemento trailing it, return it, otherwise
- // return NULL;
+ // return nullptr;
template <FindMementoMode mode>
inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);
@@ -787,7 +842,7 @@ class Heap {
// Support for the API.
//
- bool CreateApiObjects();
+ void CreateApiObjects();
// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
@@ -813,8 +868,6 @@ class Heap {
// scavenge operation.
inline bool ShouldBePromoted(Address old_address);
- void ClearNormalizedMapCaches();
-
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
inline uint32_t HashSeed();
@@ -822,13 +875,6 @@ class Heap {
inline int NextScriptId();
inline int GetNextTemplateSerialNumber();
- void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
- void SetConstructStubCreateDeoptPCOffset(int pc_offset);
- void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
- void SetGetterStubDeoptPCOffset(int pc_offset);
- void SetSetterStubDeoptPCOffset(int pc_offset);
- void SetInterpreterEntryReturnPCOffset(int pc_offset);
-
void SetSerializedTemplates(FixedArray* templates);
void SetSerializedGlobalProxySizes(FixedArray* sizes);
@@ -849,10 +895,6 @@ class Heap {
external_memory_concurrently_freed_.SetValue(0);
}
- void DeoptMarkedAllocationSites();
-
- bool DeoptMaybeTenuredAllocationSites();
-
void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
Handle<WeakCell> code);
@@ -985,6 +1027,10 @@ class Heap {
return minor_mark_compact_collector_;
}
+ ArrayBufferCollector* array_buffer_collector() {
+ return array_buffer_collector_;
+ }
+
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
@@ -1012,6 +1058,11 @@ class Heap {
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
+#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
+ inline AccessorInfo* accessor_name##_accessor();
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
Object* root(RootListIndex index) { return roots_[index]; }
Handle<Object> root_handle(RootListIndex index) {
return Handle<Object>(&roots_[index]);
@@ -1029,7 +1080,7 @@ class Heap {
Object** roots_array_start() { return roots_; }
// Sets the stub_cache_ (only used when expanding the dictionary).
- void SetRootCodeStubs(UnseededNumberDictionary* value);
+ void SetRootCodeStubs(NumberDictionary* value);
void SetRootMaterializedObjects(FixedArray* objects) {
roots_[kMaterializedObjectsRootIndex] = objects;
@@ -1072,6 +1123,11 @@ class Heap {
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
+ bool IsDeserializeLazyHandler(Code* code);
+ void SetDeserializeLazyHandler(Code* code);
+ void SetDeserializeLazyHandlerWide(Code* code);
+ void SetDeserializeLazyHandlerExtraWide(Code* code);
+
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================
@@ -1207,6 +1263,28 @@ class Heap {
#endif
// ===========================================================================
+ // Deoptimization support API. ===============================================
+ // ===========================================================================
+
+ // Setters for code offsets of well-known deoptimization targets.
+ void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+ void SetConstructStubCreateDeoptPCOffset(int pc_offset);
+ void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
+ void SetInterpreterEntryReturnPCOffset(int pc_offset);
+
+ // Invalidates references in the given {code} object that are directly
+ // embedded within the instruction stream. Mutates write-protected code.
+ void InvalidateCodeEmbeddedObjects(Code* code);
+
+ // Invalidates references in the given {code} object that are referenced
+ // transitively from the deoptimization data. Mutates write-protected code.
+ void InvalidateCodeDeoptimizationData(Code* code);
+
+ void DeoptMarkedAllocationSites();
+
+ bool DeoptMaybeTenuredAllocationSites();
+
+ // ===========================================================================
// Embedder heap tracer support. =============================================
// ===========================================================================
@@ -1499,7 +1577,20 @@ class Heap {
// Adds the given object to the weak table of retaining path targets.
// On each GC if the marker discovers the object, it will print the retaining
// path. This requires --track-retaining-path flag.
- void AddRetainingPathTarget(Handle<HeapObject> object);
+ void AddRetainingPathTarget(Handle<HeapObject> object,
+ RetainingPathOption option);
+
+ // ===========================================================================
+ // Stack frame support. ======================================================
+ // ===========================================================================
+
+ // Returns the Code object for a given interior pointer. Returns nullptr if
+ // {inner_pointer} is not contained within a Code object.
+ Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+
+ // Returns true if {addr} is contained within {code} and false otherwise.
+ // Mostly useful for debugging.
+ bool GcSafeCodeContains(HeapObject* code, Address addr);
// =============================================================================
#ifdef VERIFY_HEAP
@@ -1522,7 +1613,7 @@ class Heap {
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
- void* result = v8::internal::GetRandomMmapAddr();
+ void* result = base::OS::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
@@ -1533,7 +1624,7 @@ class Heap {
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
uintptr_t offset =
- reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
@@ -1546,7 +1637,6 @@ class Heap {
private:
class SkipStoreBufferScope;
- class PretenuringScope;
typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
Object** pointer);
@@ -1642,11 +1732,7 @@ class Heap {
static const int kInitialFeedbackCapacity = 256;
-#ifdef V8_TARGET_ARCH_ARM
- static const int kMaxScavengerTasks = 2;
-#else
static const int kMaxScavengerTasks = 8;
-#endif
Heap();
@@ -1723,6 +1809,7 @@ class Heap {
AllocationSite* allocation_site);
bool CreateInitialMaps();
+ void CreateInternalAccessorInfoObjects();
void CreateInitialObjects();
// These five Create*EntryStub functions are here and forced to not be inlined
@@ -1782,6 +1869,8 @@ class Heap {
inline void UpdateAllocationsHash(uint32_t value);
void PrintAllocationsHash();
+ int NextStressMarkingLimit();
+
void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);
@@ -1918,10 +2007,7 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
- bool CanExpandOldGeneration(size_t size) {
- if (force_oom_) return false;
- return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
- }
+ bool CanExpandOldGeneration(size_t size);
bool IsCloseToOutOfMemory(size_t slack) {
return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
@@ -1957,12 +2043,13 @@ class Heap {
// Properties and elements are copied too.
// Optionally takes an AllocationSite to be appended in an AllocationMemento.
MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
- AllocationSite* site = NULL);
+ AllocationSite* site = nullptr);
// Allocates a JS Map in the heap.
MUST_USE_RESULT AllocationResult
AllocateMap(InstanceType instance_type, int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ int inobject_properties = 0);
// Allocates and initializes a new JavaScript object based on a
// constructor.
@@ -1970,22 +2057,20 @@ class Heap {
// that points to the site.
MUST_USE_RESULT AllocationResult AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ AllocationSite* allocation_site = nullptr);
// Allocates and initializes a new JavaScript object based on a map.
// Passing an allocation site means that a memento will be created that
// points to the site.
MUST_USE_RESULT AllocationResult
AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ AllocationSite* allocation_site = nullptr);
// Allocates a HeapNumber from value.
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateBigInt(int length,
- bool zero_initialize,
- PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult AllocateBigInt(int length);
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
@@ -1996,11 +2081,18 @@ class Heap {
AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
int parameter_count, FixedArray* constant_pool);
- MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code,
+ CodeDataContainer* data_container);
MUST_USE_RESULT AllocationResult
CopyBytecodeArray(BytecodeArray* bytecode_array);
+ // Allocates a fixed array-like object with given map and initialized with
+ // undefined values.
+ MUST_USE_RESULT inline AllocationResult AllocateFixedArrayWithMap(
+ RootListIndex map_root_index, int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocates a fixed array initialized with undefined values
MUST_USE_RESULT inline AllocationResult AllocateFixedArray(
int length, PretenureFlag pretenure = NOT_TENURED);
@@ -2033,8 +2125,8 @@ class Heap {
// Allocates a heap object based on the map.
MUST_USE_RESULT AllocationResult
- Allocate(Map* map, AllocationSpace space,
- AllocationSite* allocation_site = NULL);
+ Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site = nullptr);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT AllocationResult
@@ -2055,8 +2147,8 @@ class Heap {
// Allocate an initialized fixed array with the given filler value.
MUST_USE_RESULT AllocationResult
- AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
- Object* filler);
+ AllocateFixedArrayWithFiller(RootListIndex map_root_index, int length,
+ PretenureFlag pretenure, Object* filler);
// Allocate and partially initializes a String. There are two String
// encodings: one-byte and two-byte. These functions allocate a string of
@@ -2182,8 +2274,20 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
+ // Allocates a new code object (mostly uninitialized). Can only be used when
+ // code space is unprotected and requires manual initialization by the caller.
+ MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
+ Movability movability);
+
+ // Allocates a new code object (fully initialized). All header fields of the
+ // returned object are immutable and the code object is write protected.
MUST_USE_RESULT AllocationResult
- AllocateCode(int object_size, bool immovable);
+ AllocateCode(const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, ByteArray* reloc_info,
+ CodeDataContainer* data_container, HandlerTable* handler_table,
+ ByteArray* source_position_table, DeoptimizationData* deopt_data,
+ Movability movability, uint32_t stub_key, bool is_turbofanned,
+ int stack_slots, int safepoint_table_offset);
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2192,9 +2296,12 @@ class Heap {
// ===========================================================================
void AddRetainer(HeapObject* retainer, HeapObject* object);
+ void AddEphemeralRetainer(HeapObject* retainer, HeapObject* object);
void AddRetainingRoot(Root root, HeapObject* object);
- bool IsRetainingPathTarget(HeapObject* object);
- void PrintRetainingPath(HeapObject* object);
+ // Returns true if the given object is a target of retaining path tracking.
+ // Stores the option corresponding to the object in the provided *option.
+ bool IsRetainingPathTarget(HeapObject* object, RetainingPathOption* option);
+ void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
// The amount of external memory registered through the API.
int64_t external_memory_;
@@ -2256,6 +2363,14 @@ class Heap {
LargeObjectSpace* lo_space_;
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
+
+ // Determines whether code space is write-protected. This is essentially a
+ // race-free copy of the {FLAG_write_protect_code_memory} flag.
+ bool write_protect_code_memory_;
+
+ // Holds the number of open CodeSpaceMemoryModificationScopes.
+ uintptr_t code_space_memory_modification_scope_depth_;
+
HeapState gc_state_;
int gc_post_processing_depth_;
@@ -2268,6 +2383,10 @@ class Heap {
// Running hash over allocations performed.
uint32_t raw_allocations_hash_;
+ // Starts marking when stress_marking_percentage_% of the marking start limit
+ // is reached.
+ int stress_marking_percentage_;
+
// How many mark-sweep collections happened.
unsigned int ms_count_;
@@ -2345,6 +2464,8 @@ class Heap {
MarkCompactCollector* mark_compact_collector_;
MinorMarkCompactCollector* minor_mark_compact_collector_;
+ ArrayBufferCollector* array_buffer_collector_;
+
MemoryAllocator* memory_allocator_;
StoreBuffer* store_buffer_;
@@ -2433,6 +2554,12 @@ class Heap {
std::map<HeapObject*, HeapObject*> retainer_;
std::map<HeapObject*, Root> retaining_root_;
+ // If an object is retained by an ephemeron, then the retaining key of the
+ // ephemeron is stored in this map.
+ std::map<HeapObject*, HeapObject*> ephemeral_retainer_;
+ // For each index inthe retaining_path_targets_ array this map
+ // stores the option of the corresponding target.
+ std::map<int, RetainingPathOption> retaining_path_target_option_;
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
@@ -2440,22 +2567,23 @@ class Heap {
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapIterator;
- template <typename ConcreteVisitor>
- friend class MarkingVisitor;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
+ template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+ friend class MarkingVisitor;
friend class MarkCompactCollector;
friend class MarkCompactCollectorBase;
friend class MinorMarkCompactCollector;
- friend class MarkCompactMarkingVisitor;
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpace;
friend class Scavenger;
friend class StoreBuffer;
+ friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;
// The allocator interface.
@@ -2513,6 +2641,28 @@ class AlwaysAllocateScope {
Heap* heap_;
};
+class CodeSpaceMemoryModificationScope {
+ public:
+ explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
+ inline ~CodeSpaceMemoryModificationScope();
+
+ private:
+ Heap* heap_;
+};
+
+class CodePageMemoryModificationScope {
+ public:
+ explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
+ inline ~CodePageMemoryModificationScope();
+
+ private:
+ MemoryChunk* chunk_;
+ bool scope_active_;
+
+ // Disallow any GCs inside this scope, as a relocation of the underlying
+ // object would change the {MemoryChunk} that this scope targets.
+ DisallowHeapAllocation no_heap_allocation_;
+};
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
@@ -2631,7 +2781,7 @@ class WeakObjectRetainer {
public:
virtual ~WeakObjectRetainer() {}
- // Return whether this object should be retained. If NULL is returned the
+ // Return whether this object should be retained. If nullptr is returned the
// object has no references. Otherwise the address of the retained object
// should be returned as in some GC situations the object has been moved.
virtual Object* RetainAs(Object* object) = 0;
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 44777bbafa..8acbd31ec7 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -35,7 +35,7 @@ void IncrementalMarkingJob::Task::Step(Heap* heap) {
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_COMPLETION, i::StepOrigin::kTask);
+ i::StepOrigin::kTask);
heap->FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index b286289254..a046dff4b0 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -15,6 +15,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/sweeper.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
@@ -23,6 +24,11 @@
namespace v8 {
namespace internal {
+using IncrementalMarkingMarkingVisitor =
+ MarkingVisitor<FixedArrayVisitationMode::kIncremental,
+ TraceRetainingPathMode::kDisabled,
+ IncrementalMarking::MarkingState>;
+
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
Heap* heap = incremental_marking_.heap();
@@ -45,14 +51,14 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
}
}
-IncrementalMarking::IncrementalMarking(Heap* heap)
+IncrementalMarking::IncrementalMarking(
+ Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist)
: heap_(heap),
- marking_worklist_(nullptr),
+ marking_worklist_(marking_worklist),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
+ bytes_marked_concurrently_(0),
unscanned_bytes_of_large_object_(0),
- idle_marking_delay_counter_(0),
- incremental_marking_finalization_rounds_(0),
is_compacting_(false),
should_hurry_(false),
was_activated_(false),
@@ -62,6 +68,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
request_type_(NONE),
new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
+ DCHECK_NOT_NULL(marking_worklist_);
SetState(STOPPED);
}
@@ -86,7 +93,7 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
Object* value) {
- if (BaseRecordWrite(obj, value) && slot != NULL) {
+ if (BaseRecordWrite(obj, value) && slot != nullptr) {
// Object is not going to be rescanned we need to record the slot.
heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
}
@@ -189,112 +196,6 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
}
}
-class IncrementalMarkingMarkingVisitor final
- : public MarkingVisitor<IncrementalMarkingMarkingVisitor> {
- public:
- typedef MarkingVisitor<IncrementalMarkingMarkingVisitor> Parent;
-
- static const int kProgressBarScanningChunk = 32 * 1024;
-
- explicit IncrementalMarkingMarkingVisitor(MarkCompactCollector* collector)
- : MarkingVisitor<IncrementalMarkingMarkingVisitor>(collector->heap(),
- collector),
- incremental_marking_(collector->heap()->incremental_marking()) {}
-
- V8_INLINE int VisitFixedArray(Map* map, FixedArray* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- DCHECK(!FLAG_use_marking_progress_bar ||
- chunk->owner()->identity() == LO_SPACE);
- // When using a progress bar for large fixed arrays, scan only a chunk of
- // the array and try to push it onto the marking deque again until it is
- // fully scanned. Fall back to scanning it through to the end in case this
- // fails because of a full deque.
- int start_offset =
- Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
- if (start_offset < object_size) {
- // Ensure that the object is either grey or black before pushing it
- // into marking worklist.
- incremental_marking_->marking_state()->WhiteToGrey(object);
- if (FLAG_concurrent_marking) {
- incremental_marking_->marking_worklist()->PushBailout(object);
- } else {
- incremental_marking_->marking_worklist()->Push(object);
- }
- DCHECK(incremental_marking_->marking_state()->IsGrey(object) ||
- incremental_marking_->marking_state()->IsBlack(object));
-
- int end_offset =
- Min(object_size, start_offset + kProgressBarScanningChunk);
- int already_scanned_offset = start_offset;
- VisitPointers(object, HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- chunk->set_progress_bar(start_offset);
- if (start_offset < object_size) {
- incremental_marking_->NotifyIncompleteScanOfObject(
- object_size - (start_offset - already_scanned_offset));
- }
- }
- } else {
- FixedArray::BodyDescriptor::IterateBody(object, object_size, this);
- }
- return object_size;
- }
-
- V8_INLINE int VisitNativeContext(Map* map, Context* context) {
- // We will mark cache black with a separate pass when we finish marking.
- // Note that GC can happen when the context is not fully initialized,
- // so the cache can be undefined.
- Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
- if (!cache->IsUndefined(map->GetIsolate())) {
- if (cache->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(cache);
- // Mark the object grey if it is white, do not enque it into the marking
- // deque.
- incremental_marking_->marking_state()->WhiteToGrey(heap_obj);
- }
- }
- return Parent::VisitNativeContext(map, context);
- }
-
- V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
- Object* target = *p;
- if (target->IsHeapObject()) {
- collector_->RecordSlot(host, p, target);
- MarkObject(host, target);
- }
- }
-
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
- Object* target = *p;
- if (target->IsHeapObject()) {
- collector_->RecordSlot(host, p, target);
- MarkObject(host, target);
- }
- }
- }
-
- // Marks the object grey and pushes it on the marking stack.
- V8_INLINE void MarkObject(HeapObject* host, Object* obj) {
- incremental_marking_->WhiteToGreyAndPush(HeapObject::cast(obj));
- }
-
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, Object* obj) {
- HeapObject* heap_object = HeapObject::cast(obj);
- return incremental_marking_->marking_state()->WhiteToBlack(heap_object);
- }
-
- private:
- IncrementalMarking* const incremental_marking_;
-};
-
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
public:
explicit IncrementalMarkingRootMarkingVisitor(
@@ -411,45 +312,8 @@ bool IncrementalMarking::CanBeActivated() {
}
-void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
- DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
-
- if (!IsMarking()) {
- // Initially stub is generated in STORE_BUFFER_ONLY mode thus
- // we don't need to do anything if incremental marking is
- // not active.
- } else if (IsCompacting()) {
- RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
- } else {
- RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
- }
-}
-
-static void PatchIncrementalMarkingRecordWriteStubs(
- Heap* heap, RecordWriteStub::Mode mode) {
- UnseededNumberDictionary* stubs = heap->code_stubs();
-
- int capacity = stubs->Capacity();
- Isolate* isolate = heap->isolate();
- for (int i = 0; i < capacity; i++) {
- Object* k = stubs->KeyAt(i);
- if (stubs->IsKey(isolate, k)) {
- uint32_t key = NumberToUint32(k);
-
- if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
- Object* e = stubs->ValueAt(i);
- if (e->IsCode()) {
- RecordWriteStub::Patch(Code::cast(e), mode);
- }
- }
- }
- }
-}
-
void IncrementalMarking::Deactivate() {
DeactivateIncrementalWriteBarrier();
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
}
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
@@ -485,6 +349,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
bytes_allocated_ = 0;
bytes_marked_ahead_of_schedule_ = 0;
+ bytes_marked_concurrently_ = 0;
should_hurry_ = false;
was_activated_ = true;
@@ -539,12 +404,6 @@ void IncrementalMarking::StartMarking() {
heap_->local_embedder_heap_tracer()->TracePrologue();
}
- RecordWriteStub::Mode mode = is_compacting_
- ? RecordWriteStub::INCREMENTAL_COMPACTION
- : RecordWriteStub::INCREMENTAL;
-
- PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
-
ActivateIncrementalWriteBarrier();
// Marking bits are cleared by the sweeper.
@@ -708,34 +567,11 @@ void IncrementalMarking::FinalizeIncrementally() {
// do not need processing during GC.
MarkRoots();
- if (incremental_marking_finalization_rounds_ == 0) {
- // Map retaining is needed for perfromance, not correctness,
- // so we can do it only once at the beginning of the finalization.
- RetainMaps();
- }
+ // Map retaining is needed for perfromance, not correctness,
+ // so we can do it only once at the beginning of the finalization.
+ RetainMaps();
- int marking_progress =
- heap_->mark_compact_collector()->marking_worklist()->Size() +
- static_cast<int>(
- heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
-
- double end = heap_->MonotonicallyIncreasingTimeInMs();
- double delta = end - start;
- if (FLAG_trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Finalize incrementally round %d, "
- "spent %d ms, marking progress %d.\n",
- static_cast<int>(delta), incremental_marking_finalization_rounds_,
- marking_progress);
- }
-
- ++incremental_marking_finalization_rounds_;
- if ((incremental_marking_finalization_rounds_ >=
- FLAG_max_incremental_marking_finalization_rounds) ||
- (marking_progress <
- FLAG_min_progress_during_incremental_marking_finalization)) {
- finalize_marking_completed_ = true;
- }
+ finalize_marking_completed_ = true;
if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
!black_allocation_) {
@@ -743,6 +579,13 @@ void IncrementalMarking::FinalizeIncrementally() {
// progress.
StartBlackAllocation();
}
+
+ if (FLAG_trace_incremental_marking) {
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
+ double delta = end - start;
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
+ }
}
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
@@ -802,6 +645,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
});
}
+void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
+ size_t dead_bytes_in_new_space) {
+ if (!IsMarking()) return;
+ bytes_marked_ahead_of_schedule_ -=
+ Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
+}
+
bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
if (!obj->IsFixedArray()) return false;
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
@@ -822,7 +672,8 @@ int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
- IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector());
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
+ marking_state());
return visitor.Visit(map, obj);
}
@@ -841,15 +692,22 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) {
}
Map* map = obj->map();
WhiteToGreyAndPush(map);
- IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector());
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
+ marking_state());
visitor.Visit(map, obj);
}
+template <WorklistToProcess worklist_to_process>
intptr_t IncrementalMarking::ProcessMarkingWorklist(
intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
- HeapObject* obj = marking_worklist()->Pop();
+ HeapObject* obj;
+ if (worklist_to_process == WorklistToProcess::kBailout) {
+ obj = marking_worklist()->PopBailout();
+ } else {
+ obj = marking_worklist()->Pop();
+ }
if (obj == nullptr) break;
// Left trimming may result in white, grey, or black filler objects on the
// marking deque. Ignore these objects.
@@ -898,18 +756,6 @@ void IncrementalMarking::Hurry() {
}
}
}
-
- Object* context = heap_->native_contexts_list();
- while (!context->IsUndefined(heap_->isolate())) {
- // GC can happen when the context is not fully initialized,
- // so the cache can be undefined.
- HeapObject* cache = HeapObject::cast(
- Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
- if (!cache->IsUndefined(heap_->isolate())) {
- marking_state()->GreyToBlack(cache);
- }
- context = Context::cast(context)->next_context_link();
- }
}
@@ -987,12 +833,11 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
void IncrementalMarking::Epilogue() {
was_activated_ = false;
finalize_marking_completed_ = false;
- incremental_marking_finalization_rounds_ = 0;
}
double IncrementalMarking::AdvanceIncrementalMarking(
double deadline_in_ms, CompletionAction completion_action,
- ForceCompletionAction force_completion, StepOrigin step_origin) {
+ StepOrigin step_origin) {
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
@@ -1023,8 +868,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
DO_NOT_FORCE_COMPLETION));
}
} else {
- Step(step_size_in_bytes, completion_action, force_completion,
- step_origin);
+ Step(step_size_in_bytes, completion_action, step_origin);
}
trace_wrappers_toggle_ = !trace_wrappers_toggle_;
remaining_time_in_ms =
@@ -1039,7 +883,7 @@ void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
- !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
+ !heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -1091,43 +935,50 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
kMaxStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, max_step_size);
-
- if (FLAG_concurrent_marking && marking_worklist()->IsBailoutEmpty()) {
- // The number of background tasks + the main thread.
- size_t tasks = heap()->concurrent_marking()->TaskCount() + 1;
- bytes_to_process = Max(IncrementalMarking::kMinStepSizeInBytes,
- bytes_to_process / tasks);
- }
-
size_t bytes_processed = 0;
+ if (FLAG_concurrent_marking) {
+ bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+ StepOrigin::kV8, WorklistToProcess::kBailout);
+ bytes_to_process = (bytes_processed >= bytes_to_process)
+ ? 0
+ : bytes_to_process - bytes_processed;
+ size_t current_bytes_marked_concurrently =
+ heap()->concurrent_marking()->TotalMarkedBytes();
+      // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
+      // short period of time when a concurrent marking task is finishing.
+ if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
+ bytes_marked_ahead_of_schedule_ +=
+ current_bytes_marked_concurrently - bytes_marked_concurrently_;
+ bytes_marked_concurrently_ = current_bytes_marked_concurrently;
+ }
+ }
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
- // Steps performed in tasks have put us ahead of schedule.
- // We skip processing of marking dequeue here and thus
- // shift marking time from inside V8 to standalone tasks.
+ // Steps performed in tasks and concurrently have put us ahead of
+      // schedule. We skip processing of marking deque here and thus shift
+ // marking time from inside V8 to standalone tasks.
bytes_marked_ahead_of_schedule_ -= bytes_to_process;
- bytes_processed = bytes_to_process;
- } else {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
- bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
- FORCE_COMPLETION, StepOrigin::kV8);
+ bytes_processed += bytes_to_process;
+ bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
}
+ bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
+ StepOrigin::kV8, WorklistToProcess::kAll);
bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
}
size_t IncrementalMarking::Step(size_t bytes_to_process,
- CompletionAction action,
- ForceCompletionAction completion,
- StepOrigin step_origin) {
+ CompletionAction action, StepOrigin step_origin,
+ WorklistToProcess worklist_to_process) {
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1149,7 +1000,15 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
}
- bytes_processed = ProcessMarkingWorklist(bytes_to_process);
+
+ if (worklist_to_process == WorklistToProcess::kBailout) {
+ bytes_processed =
+ ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
+ } else {
+ bytes_processed =
+ ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
+ }
+
if (step_origin == StepOrigin::kTask) {
bytes_marked_ahead_of_schedule_ += bytes_processed;
}
@@ -1157,15 +1016,10 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
if (marking_worklist()->IsEmpty()) {
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
- if (completion == FORCE_COMPLETION ||
- IsIdleMarkingDelayCounterLimitReached()) {
- if (!finalize_marking_completed_) {
- FinalizeMarking(action);
- } else {
- MarkingComplete(action);
- }
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
} else {
- IncrementIdleMarkingDelayCounter();
+ MarkingComplete(action);
}
} else {
heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
@@ -1196,20 +1050,5 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
return bytes_processed;
}
-
-bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
- return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
-}
-
-
-void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
- idle_marking_delay_counter_++;
-}
-
-
-void IncrementalMarking::ClearIdleMarkingDelayCounter() {
- idle_marking_delay_counter_ = 0;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 0579c9c676..87a1751fd9 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -20,28 +20,7 @@ class Object;
class PagedSpace;
enum class StepOrigin { kV8, kTask };
-
-// This marking state is used when concurrent marking is running.
-class IncrementalMarkingState final
- : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
- public:
- Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
- }
-
- // Concurrent marking uses local live bytes.
- void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_ += by;
- }
-
- intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->live_byte_count_;
- }
-
- void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_ = value;
- }
-};
+enum class WorklistToProcess { kAll, kBailout };
class V8_EXPORT_PRIVATE IncrementalMarking {
public:
@@ -57,7 +36,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
using MarkingState = IncrementalMarkingState;
#else
using MarkingState = MajorNonAtomicMarkingState;
-#endif
+#endif // V8_CONCURRENT_MARKING
using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
@@ -95,10 +74,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const int kStepSizeInMs = 1;
static const int kMaxStepSizeInMs = 5;
- // This is the upper bound for how many times we allow finalization of
- // incremental marking to be postponed.
- static const int kMaxIdleMarkingDelayCounter = 3;
-
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
#else
@@ -111,7 +86,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
#endif
- explicit IncrementalMarking(Heap* heap);
+ IncrementalMarking(Heap* heap,
+ MarkCompactCollector::MarkingWorklist* marking_worklist);
MarkingState* marking_state() { return &marking_state_; }
@@ -189,6 +165,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeIncrementally();
void UpdateMarkingWorklistAfterScavenge();
+ void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
@@ -207,13 +184,13 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// anymore because a single step would exceed the deadline.
double AdvanceIncrementalMarking(double deadline_in_ms,
CompletionAction completion_action,
- ForceCompletionAction force_completion,
StepOrigin step_origin);
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
- ForceCompletionAction completion, StepOrigin step_origin);
+ StepOrigin step_origin,
+ WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
inline void RestartIfNotMarking();
@@ -260,10 +237,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
- void ClearIdleMarkingDelayCounter();
-
- bool IsIdleMarkingDelayCounterLimitReached();
-
void ProcessBlackAllocatedObject(HeapObject* obj);
Heap* heap() const { return heap_; }
@@ -282,16 +255,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void AbortBlackAllocation();
- MarkCompactCollector::MarkingWorklist* marking_worklist() {
- SLOW_DCHECK(marking_worklist_ != nullptr);
+ MarkCompactCollector::MarkingWorklist* marking_worklist() const {
return marking_worklist_;
}
- void set_marking_worklist(
- MarkCompactCollector::MarkingWorklist* marking_worklist) {
- marking_worklist_ = marking_worklist;
- }
-
void Deactivate();
private:
@@ -331,6 +298,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
void DeactivateIncrementalWriteBarrier();
+ template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
V8_INLINE intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
@@ -354,22 +322,23 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
heap_->SetIsMarkingFlag(s >= MARKING);
}
- Heap* heap_;
- MarkCompactCollector::MarkingWorklist* marking_worklist_;
+ Heap* const heap_;
+ MarkCompactCollector::MarkingWorklist* const marking_worklist_;
double start_time_ms_;
size_t initial_old_generation_size_;
size_t old_generation_allocation_counter_;
size_t bytes_allocated_;
size_t bytes_marked_ahead_of_schedule_;
+ // A sample of concurrent_marking()->TotalMarkedBytes() at the last
+ // incremental marking step. It is used for updating
+ // bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
+ size_t bytes_marked_concurrently_;
size_t unscanned_bytes_of_large_object_;
// Must use SetState() above to update state_
State state_;
- int idle_marking_delay_counter_;
- int incremental_marking_finalization_rounds_;
-
bool is_compacting_;
bool should_hurry_;
bool was_activated_;
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 0508c83ae1..2f21b382b6 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -73,17 +73,6 @@ class LocalAllocator {
}
}
- void AnnounceLockedPage(MemoryChunk* chunk) {
- const AllocationSpace space = chunk->owner()->identity();
- // There are no allocations on large object and map space and hence we
- // cannot announce that we locked a page there.
- if (space == LO_SPACE || space == MAP_SPACE) return;
-
- DCHECK(space != NEW_SPACE);
- compaction_spaces_.Get(space)->AnnounceLockedPage(
- reinterpret_cast<Page*>(chunk));
- }
-
private:
AllocationResult AllocateInNewSpace(int object_size,
AllocationAlignment alignment) {
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index e914ec1f6c..a6bbecd88e 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -7,13 +7,325 @@
#include "src/base/bits.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
namespace v8 {
namespace internal {
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::MarkingVisitor(MarkCompactCollector* collector,
+ MarkingState* marking_state)
+ : heap_(collector->heap()),
+ collector_(collector),
+ marking_state_(marking_state) {}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitAllocationSite(Map* map,
+ AllocationSite* object) {
+ int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
+ AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitBytecodeArray(Map* map,
+ BytecodeArray* array) {
+ int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
+ BytecodeArray::BodyDescriptor::IterateBody(array, size, this);
+ array->MakeOlder();
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
+ VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
+ int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
+ CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitFixedArray(Map* map,
+ FixedArray* object) {
+ return (fixed_array_mode == FixedArrayVisitationMode::kRegular)
+ ? Parent::VisitFixedArray(map, object)
+ : VisitFixedArrayIncremental(map, object);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSApiObject(Map* map, JSObject* object) {
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ DCHECK(object->IsJSObject());
+ heap_->TracePossibleWrapper(object);
+ }
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ JSObject::BodyDescriptor::IterateBody(object, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSFunction(Map* map,
+ JSFunction* object) {
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
+ VisitJSWeakCollection(Map* map, JSWeakCollection* weak_collection) {
+ // Enqueue weak collection in linked list of encountered weak collections.
+ if (weak_collection->next() == heap_->undefined_value()) {
+ weak_collection->set_next(heap_->encountered_weak_collections());
+ heap_->set_encountered_weak_collections(weak_collection);
+ }
+
+ // Skip visiting the backing hash table containing the mappings and the
+ // pointer to the other enqueued weak collections, both are post-processed.
+ int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
+ JSWeakCollection::BodyDescriptorWeak::IterateBody(weak_collection, size,
+ this);
+
+ // Partially initialized weak collection is enqueued, but table is ignored.
+ if (!weak_collection->table()->IsHashTable()) return size;
+
+ // Mark the backing hash table without pushing it on the marking stack.
+ Object** slot =
+ HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
+ HeapObject* obj = HeapObject::cast(*slot);
+ collector_->RecordSlot(weak_collection, slot, obj);
+ MarkObjectWithoutPush(weak_collection, obj);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitMap(Map* map, Map* object) {
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (object->CanTransition()) {
+ MarkMapContents(object);
+ } else {
+ VisitPointers(object,
+ HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+ }
+ return Map::BodyDescriptor::SizeOf(map, object);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitNativeContext(Map* map,
+ Context* context) {
+ int size = Context::BodyDescriptorWeak::SizeOf(map, context);
+ Context::BodyDescriptorWeak::IterateBody(context, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitTransitionArray(Map* map,
+ TransitionArray* array) {
+ int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
+ TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ collector_->AddTransitionArray(array);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitWeakCell(Map* map, WeakCell* weak_cell) {
+ // Enqueue weak cell in linked list of encountered weak collections.
+ // We can ignore weak cells with cleared values because they will always
+ // contain smi zero.
+ if (!weak_cell->cleared()) {
+ HeapObject* value = HeapObject::cast(weak_cell->value());
+ if (marking_state()->IsBlackOrGrey(value)) {
+ // Weak cells with live values are directly processed here to reduce
+ // the processing time of weak cells during the main GC pause.
+ Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+ collector_->RecordSlot(weak_cell, slot, *slot);
+ } else {
+ // If we do not know about liveness of values of weak cells, we have to
+ // process them when we know the liveness of the whole transitive
+ // closure.
+ collector_->AddWeakCell(weak_cell);
+ }
+ }
+ return WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitPointer(HeapObject* host, Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+ HeapObject* target_object = HeapObject::cast(*p);
+ collector_->RecordSlot(host, p, target_object);
+ MarkObject(host, target_object);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitPointers(HeapObject* host,
+ Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitEmbeddedPointer(Code* host,
+ RelocInfo* rinfo) {
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ collector_->RecordRelocSlot(host, rinfo, object);
+ if (!host->IsWeakObject(object)) {
+ MarkObject(host, object);
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitCodeTarget(Code* host,
+ RelocInfo* rinfo) {
+ DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ collector_->RecordRelocSlot(host, rinfo, target);
+ MarkObject(host, target);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+bool MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::MarkObjectWithoutPush(HeapObject* host,
+ HeapObject* object) {
+ if (marking_state()->WhiteToBlack(object)) {
+ if (retaining_path_mode == TraceRetainingPathMode::kEnabled &&
+ V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainer(host, object);
+ }
+ return true;
+ }
+ return false;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::MarkObject(HeapObject* host,
+ HeapObject* object) {
+ if (marking_state()->WhiteToGrey(object)) {
+ marking_worklist()->Push(object);
+ if (retaining_path_mode == TraceRetainingPathMode::kEnabled &&
+ V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainer(host, object);
+ }
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
+ VisitFixedArrayIncremental(Map* map, FixedArray* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ DCHECK(!FLAG_use_marking_progress_bar ||
+ chunk->owner()->identity() == LO_SPACE);
+ // When using a progress bar for large fixed arrays, scan only a chunk of
+ // the array and try to push it onto the marking deque again until it is
+ // fully scanned. Fall back to scanning it through to the end in case this
+ // fails because of a full deque.
+ int start_offset =
+ Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+ if (start_offset < object_size) {
+ // Ensure that the object is either grey or black before pushing it
+ // into marking worklist.
+ marking_state()->WhiteToGrey(object);
+ if (FLAG_concurrent_marking) {
+ marking_worklist()->PushBailout(object);
+ } else {
+ marking_worklist()->Push(object);
+ }
+ DCHECK(marking_state()->IsGrey(object) ||
+ marking_state()->IsBlack(object));
+
+ int end_offset =
+ Min(object_size, start_offset + kProgressBarScanningChunk);
+ int already_scanned_offset = start_offset;
+ VisitPointers(object, HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+ chunk->set_progress_bar(start_offset);
+ if (start_offset < object_size) {
+ heap_->incremental_marking()->NotifyIncompleteScanOfObject(
+ object_size - (start_offset - already_scanned_offset));
+ }
+ }
+ } else {
+ FixedArray::BodyDescriptor::IterateBody(object, object_size, this);
+ }
+ return object_size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::MarkMapContents(Map* map) {
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a non-empty
+ // descriptor array is marked, its header is also visited. The slot holding
+ // the descriptor array will be implicitly recorded when the pointer fields of
+ // this map are visited. Prototype maps don't keep track of transitions, so
+ // just mark the entire descriptor array.
+ if (!map->is_prototype_map()) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ if (MarkObjectWithoutPush(map, descriptors) && descriptors->length() > 0) {
+ VisitPointers(descriptors, descriptors->GetFirstElementAddress(),
+ descriptors->GetDescriptorEndSlot(0));
+ }
+ int start = 0;
+ int end = map->NumberOfOwnDescriptors();
+ if (start < end) {
+ VisitPointers(descriptors, descriptors->GetDescriptorStartSlot(start),
+ descriptors->GetDescriptorEndSlot(end));
+ }
+ }
+
+ // Mark the pointer fields of the Map. Since the transitions array has
+ // been marked already, it is fine that one of these fields contains a
+ // pointer to it.
+ VisitPointers(map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+}
+
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
- if (atomic_marking_state()->WhiteToGrey(obj)) {
+ if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
@@ -22,7 +334,7 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
}
void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
- if (atomic_marking_state()->WhiteToGrey(obj)) {
+ if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
@@ -31,7 +343,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
}
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
- if (atomic_marking_state()->WhiteToGrey(obj)) {
+ if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
@@ -90,13 +402,13 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
HeapObject* object = nullptr;
int size = 0;
while (current_cell_ != 0) {
- uint32_t trailing_zeros = base::bits::CountTrailingZeros32(current_cell_);
+ uint32_t trailing_zeros = base::bits::CountTrailingZeros(current_cell_);
Address addr = cell_base_ + trailing_zeros * kPointerSize;
// Clear the first bit of the found object..
current_cell_ &= ~(1u << trailing_zeros);
- uint32_t second_bit_index = 1u << (trailing_zeros + 1);
+ uint32_t second_bit_index = 0;
if (trailing_zeros >= Bitmap::kBitIndexMask) {
second_bit_index = 0x1;
// The overlapping case; there has to exist a cell after the current
@@ -111,6 +423,8 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
}
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
+ } else {
+ second_bit_index = 1u << (trailing_zeros + 1);
}
Map* map = nullptr;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 3d28a18c7a..4ae9dce439 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -6,6 +6,7 @@
#include <unordered_map>
+#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -13,6 +14,7 @@
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
+#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
@@ -24,11 +26,13 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
+#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -149,7 +153,7 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->map_space());
LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
if (marking_state_->IsBlackOrGrey(obj)) {
obj->Iterate(this);
}
@@ -348,6 +352,11 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================
+using MarkCompactMarkingVisitor =
+ MarkingVisitor<FixedArrayVisitationMode::kRegular,
+ TraceRetainingPathMode::kEnabled,
+ MarkCompactCollector::MarkingState>;
+
namespace {
// This root visitor walks all roots and creates items bundling objects that
@@ -454,10 +463,12 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
black_allocation_(false),
have_code_to_deoptimize_(false),
marking_worklist_(heap),
- sweeper_(heap, non_atomic_marking_state()) {
+ sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
old_to_new_slots_ = -1;
}
+MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
+
void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
@@ -559,7 +570,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->new_space());
LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
MemoryChunk::FromAddress(obj->address())));
@@ -568,7 +579,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
HeapObjectIterator code_iterator(heap()->code_space());
- for (HeapObject* obj = code_iterator.Next(); obj != NULL;
+ for (HeapObject* obj = code_iterator.Next(); obj != nullptr;
obj = code_iterator.Next()) {
Code* code = Code::cast(obj);
if (!code->is_optimized_code()) continue;
@@ -600,147 +611,10 @@ void MarkCompactCollector::ClearMarkbits() {
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}
-class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
- public:
- SweeperTask(Isolate* isolate, Sweeper* sweeper,
- base::Semaphore* pending_sweeper_tasks,
- base::AtomicNumber<intptr_t>* num_sweeping_tasks,
- AllocationSpace space_to_start)
- : CancelableTask(isolate),
- sweeper_(sweeper),
- pending_sweeper_tasks_(pending_sweeper_tasks),
- num_sweeping_tasks_(num_sweeping_tasks),
- space_to_start_(space_to_start) {}
-
- virtual ~SweeperTask() {}
-
- private:
- void RunInternal() final {
- DCHECK_GE(space_to_start_, FIRST_SPACE);
- DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
- for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
- DCHECK_GE(space_id, FIRST_SPACE);
- DCHECK_LE(space_id, LAST_PAGED_SPACE);
- sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
- }
- num_sweeping_tasks_->Decrement(1);
- pending_sweeper_tasks_->Signal();
- }
-
- Sweeper* const sweeper_;
- base::Semaphore* const pending_sweeper_tasks_;
- base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
- AllocationSpace space_to_start_;
-
- DISALLOW_COPY_AND_ASSIGN(SweeperTask);
-};
-
-void MarkCompactCollector::Sweeper::StartSweeping() {
- sweeping_in_progress_ = true;
- NonAtomicMarkingState* marking_state =
- heap_->mark_compact_collector()->non_atomic_marking_state();
- ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
- std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
- [marking_state](Page* a, Page* b) {
- return marking_state->live_bytes(a) <
- marking_state->live_bytes(b);
- });
- });
-}
-
-void MarkCompactCollector::Sweeper::StartSweeperTasks() {
- DCHECK_EQ(0, num_tasks_);
- DCHECK_EQ(0, num_sweeping_tasks_.Value());
- if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
- ForAllSweepingSpaces([this](AllocationSpace space) {
- if (space == NEW_SPACE) return;
- num_sweeping_tasks_.Increment(1);
- SweeperTask* task = new SweeperTask(heap_->isolate(), this,
- &pending_sweeper_tasks_semaphore_,
- &num_sweeping_tasks_, space);
- DCHECK_LT(num_tasks_, kMaxSweeperTasks);
- task_ids_[num_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- });
- }
-}
-
-void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
- Page* page) {
- if (!page->SweepingDone()) {
- ParallelSweepPage(page, page->owner()->identity());
- if (!page->SweepingDone()) {
- // We were not able to sweep that page, i.e., a concurrent
- // sweeper thread currently owns this page. Wait for the sweeper
- // thread to be done with this page.
- page->WaitUntilSweepingCompleted();
- }
- }
-}
-
-void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
- if (FLAG_concurrent_sweeping && sweeper().sweeping_in_progress()) {
- sweeper().ParallelSweepSpace(space->identity(), 0);
- space->RefillFreeList();
- }
-}
-
-Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- SweptList& list = swept_list_[space->identity()];
- if (!list.empty()) {
- auto last_page = list.back();
- list.pop_back();
- return last_page;
- }
- return nullptr;
-}
-
-void MarkCompactCollector::Sweeper::EnsureCompleted() {
- if (!sweeping_in_progress_) return;
-
- // If sweeping is not completed or not running at all, we try to complete it
- // here.
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
-
- if (FLAG_concurrent_sweeping) {
- for (int i = 0; i < num_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_sweeper_tasks_semaphore_.Wait();
- }
- }
- num_tasks_ = 0;
- num_sweeping_tasks_.SetValue(0);
- }
-
- ForAllSweepingSpaces([this](AllocationSpace space) {
- if (space == NEW_SPACE) {
- swept_list_[NEW_SPACE].clear();
- }
- DCHECK(sweeping_list_[space].empty());
- });
- sweeping_in_progress_ = false;
-}
-
-void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
- if (!sweeping_in_progress_) return;
- if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
- for (Page* p : *heap_->new_space()) {
- SweepOrWaitUntilSweepingCompleted(p);
- }
- }
-}
-
void MarkCompactCollector::EnsureSweepingCompleted() {
- if (!sweeper().sweeping_in_progress()) return;
+ if (!sweeper()->sweeping_in_progress()) return;
- sweeper().EnsureCompleted();
+ sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
@@ -756,10 +630,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
-bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
- return num_sweeping_tasks_.Value() != 0;
-}
-
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
@@ -808,7 +678,6 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
}
}
-
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
@@ -855,6 +724,16 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
AddEvacuationCandidate(p);
}
}
+ } else if (FLAG_stress_compaction_random) {
+ double fraction = isolate()->fuzzer_rng()->NextDouble();
+ size_t pages_to_mark_count =
+ static_cast<size_t>(fraction * (pages.size() + 1));
+ for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
+ pages.size(), pages_to_mark_count)) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ AddEvacuationCandidate(pages[i].second);
+ }
} else if (FLAG_stress_compaction) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
@@ -1002,7 +881,7 @@ void MarkCompactCollector::Prepare() {
}
PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next(); space != NULL;
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->PrepareForMarkCompact();
}
@@ -1045,9 +924,7 @@ void MarkCompactCollector::Finish() {
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
- if (!heap()->delay_sweeper_tasks_for_testing_) {
- sweeper().StartSweeperTasks();
- }
+ sweeper()->StartSweeperTasks();
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash();
@@ -1073,82 +950,8 @@ void MarkCompactCollector::Finish() {
Deoptimizer::DeoptimizeMarkedCode(isolate());
have_code_to_deoptimize_ = false;
}
-
- heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
}
-
-// -------------------------------------------------------------------------
-// Phase 1: tracing and marking live objects.
-// before: all objects are in normal state.
-// after: a live object's map pointer is marked as '00'.
-
-// Marking all live objects in the heap as part of mark-sweep or mark-compact
-// collection. Before marking, all objects are in their normal state. After
-// marking, live objects' map pointers are marked indicating that the object
-// has been found reachable.
-//
-// The marking algorithm is a (mostly) depth-first (because of possible stack
-// overflow) traversal of the graph of objects reachable from the roots. It
-// uses an explicit stack of pointers rather than recursion. The young
-// generation's inactive ('from') space is used as a marking stack. The
-// objects in the marking stack are the ones that have been reached and marked
-// but their children have not yet been visited.
-//
-// The marking stack can overflow during traversal. In that case, we set an
-// overflow flag. When the overflow flag is set, we continue marking objects
-// reachable from the objects on the marking stack, but no longer push them on
-// the marking stack. Instead, we mark them as both marked and overflowed.
-// When the stack is in the overflowed state, objects marked as overflowed
-// have been reached and marked but their children have not been visited yet.
-// After emptying the marking stack, we clear the overflow flag and traverse
-// the heap looking for objects marked as overflowed, push them on the stack,
-// and continue with marking. This process repeats until all reachable
-// objects have been marked.
-
-class MarkCompactMarkingVisitor final
- : public MarkingVisitor<MarkCompactMarkingVisitor> {
- public:
- explicit MarkCompactMarkingVisitor(MarkCompactCollector* collector)
- : MarkingVisitor<MarkCompactMarkingVisitor>(collector->heap(),
- collector) {}
-
- V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
- MarkObjectByPointer(host, p);
- }
-
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(host, p);
- }
- }
-
- // Marks the object black and pushes it on the marking stack.
- V8_INLINE void MarkObject(HeapObject* host, HeapObject* object) {
- collector_->MarkObject(host, object);
- }
-
- // Marks the object black without pushing it on the marking stack. Returns
- // true if object needed marking and false otherwise.
- V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object) {
- if (collector_->atomic_marking_state()->WhiteToBlack(object)) {
- if (V8_UNLIKELY(FLAG_track_retaining_path)) {
- heap_->AddRetainer(host, object);
- }
- return true;
- }
- return false;
- }
-
- V8_INLINE void MarkObjectByPointer(HeapObject* host, Object** p) {
- if (!(*p)->IsHeapObject()) return;
- HeapObject* target_object = HeapObject::cast(*p);
- collector_->RecordSlot(host, p, target_object);
- collector_->MarkObject(host, target_object);
- }
-};
-
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
for (Page* p : sweep_to_iterate_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
@@ -1207,9 +1010,6 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
// VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
- // Skip the weak next code link in a code object.
- void VisitNextCodeLink(Code* host, Object** p) override {}
-
private:
void MarkObject(HeapObject* host, Object* object) {
if (!object->IsHeapObject()) return;
@@ -1370,7 +1170,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
marking_state_->WhiteToBlack(site);
return object;
} else {
- return NULL;
+ return nullptr;
}
}
@@ -1800,7 +1600,7 @@ void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
- if (atomic_marking_state()->WhiteToBlack(string_table)) {
+ if (marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
string_table->IteratePrefix(custom_root_body_visitor);
}
@@ -1819,13 +1619,13 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
void MarkCompactCollector::ProcessMarkingWorklist() {
HeapObject* object;
- MarkCompactMarkingVisitor visitor(this);
+ MarkCompactMarkingVisitor visitor(this, marking_state());
while ((object = marking_worklist()->Pop()) != nullptr) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(atomic_marking_state()->IsWhite(object)));
- atomic_marking_state()->GreyToBlack(object);
+ DCHECK(!(marking_state()->IsWhite(object)));
+ marking_state()->GreyToBlack(object);
Map* map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
@@ -2131,7 +1931,7 @@ class PageMarkingItem : public MarkingItem {
virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
void Process(YoungGenerationMarkingTask* task) override {
- base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
+ base::LockGuard<base::Mutex> guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
}
@@ -2319,8 +2119,13 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
&IsUnmarkedObjectForYoungGeneration);
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &root_visitor);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ &root_visitor, &IsUnmarkedObjectForYoungGeneration);
ProcessMarkingWorklist();
}
}
@@ -2341,7 +2146,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
- heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+ heap()->mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
CleanupSweepToIteratePages();
}
@@ -2547,7 +2352,8 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
- if (FLAG_concurrent_marking) {
+ if (FLAG_parallel_marking) {
+ DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
ProcessMarkingWorklist();
@@ -2877,7 +2683,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
- MarkCompactMarkingVisitor visitor(this);
+ MarkCompactMarkingVisitor visitor(this, marking_state());
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
@@ -2893,7 +2699,12 @@ void MarkCompactCollector::ProcessWeakCollections() {
RecordSlot(table, key_slot, *key_slot);
Object** value_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- visitor.MarkObjectByPointer(table, value_slot);
+ if (V8_UNLIKELY(FLAG_track_retaining_path) &&
+ (*value_slot)->IsHeapObject()) {
+ heap()->AddEphemeralRetainer(heap_object,
+ HeapObject::cast(*value_slot));
+ }
+ visitor.VisitPointer(table, value_slot);
}
}
}
@@ -3004,7 +2815,7 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == NULL ||
+ (rinfo->host() == nullptr ||
!source_page->ShouldSkipEvacuationSlotRecording())) {
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
@@ -3113,6 +2924,7 @@ void MarkCompactCollector::EvacuatePrologue() {
}
void MarkCompactCollector::EvacuateEpilogue() {
+ aborted_evacuation_candidates_.clear();
// New space.
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Deallocate unmarked large objects.
@@ -3161,7 +2973,6 @@ class Evacuator : public Malloced {
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
local_allocator_(heap_),
- compaction_spaces_(heap_),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(heap_, &local_allocator_, record_visitor,
&local_pretenuring_feedback_),
@@ -3204,7 +3015,6 @@ class Evacuator : public Malloced {
// Locally cached collector data.
LocalAllocator local_allocator_;
- CompactionSpaceCollection compaction_spaces_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
// Visitors for the corresponding spaces.
@@ -3547,149 +3357,6 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
-int MarkCompactCollector::Sweeper::RawSweep(
- Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode) {
- Space* space = p->owner();
- DCHECK_NOT_NULL(space);
- DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
- space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
- DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
-
- // TODO(ulan): we don't have to clear type old-to-old slots in code space
- // because the concurrent marker doesn't mark code objects. This requires
- // the write barrier for code objects to check the color of the code object.
- bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
- p->typed_slot_set<OLD_TO_OLD>() != nullptr;
-
- // The free ranges map is used for filtering typed slots.
- std::map<uint32_t, uint32_t> free_ranges;
-
- // Before we sweep objects on the page, we free dead array buffers which
- // requires valid mark bits.
- ArrayBufferTracker::FreeDead(p, marking_state_);
-
- Address free_start = p->area_start();
- DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
-
- // If we use the skip list for code space pages, we have to lock the skip
- // list because it could be accessed concurrently by the runtime or the
- // deoptimizer.
- const bool rebuild_skip_list =
- space->identity() == CODE_SPACE && p->skip_list() != nullptr;
- SkipList* skip_list = p->skip_list();
- if (rebuild_skip_list) {
- skip_list->Clear();
- }
-
- intptr_t live_bytes = 0;
- intptr_t freed_bytes = 0;
- intptr_t max_freed_bytes = 0;
- int curr_region = -1;
-
- // Set the allocated_bytes counter to area_size. The free operations below
- // will decrease the counter to actual live bytes.
- p->ResetAllocatedBytes();
-
- for (auto object_and_size :
- LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
- DCHECK(marking_state_->IsBlack(object));
- Address free_end = object->address();
- if (free_end != free_start) {
- CHECK_GT(free_end, free_start);
- size_t size = static_cast<size_t>(free_end - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
- }
- if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- } else {
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
- }
- RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- if (non_empty_typed_slots) {
- free_ranges.insert(std::pair<uint32_t, uint32_t>(
- static_cast<uint32_t>(free_start - p->address()),
- static_cast<uint32_t>(free_end - p->address())));
- }
- }
- Map* map = object->synchronized_map();
- int size = object->SizeFromMap(map);
- live_bytes += size;
- if (rebuild_skip_list) {
- int new_region_start = SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region || new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
- }
- free_start = free_end + size;
- }
-
- if (free_start != p->area_end()) {
- CHECK_GT(p->area_end(), free_start);
- size_t size = static_cast<size_t>(p->area_end() - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
- }
- if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- } else {
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
- }
-
- RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
- SlotSet::KEEP_EMPTY_BUCKETS);
- if (non_empty_typed_slots) {
- free_ranges.insert(std::pair<uint32_t, uint32_t>(
- static_cast<uint32_t>(free_start - p->address()),
- static_cast<uint32_t>(p->area_end() - p->address())));
- }
- }
-
- // Clear invalid typed slots after collection all free ranges.
- if (!free_ranges.empty()) {
- TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
- if (old_to_new != nullptr) {
- old_to_new->RemoveInvaldSlots(free_ranges);
- }
- TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
- if (old_to_old != nullptr) {
- old_to_old->RemoveInvaldSlots(free_ranges);
- }
- }
-
- marking_state_->bitmap(p)->Clear();
- if (free_list_mode == IGNORE_FREE_LIST) {
- marking_state_->SetLiveBytes(p, 0);
- // We did not free memory, so have to adjust allocated bytes here.
- intptr_t freed_bytes = p->area_size() - live_bytes;
- p->DecreaseAllocatedBytes(freed_bytes);
- } else {
- // Keep the old live bytes counter of the page until RefillFreeList, where
- // the space size is refined.
- // The allocated_bytes() counter is precisely the total size of objects.
- DCHECK_EQ(live_bytes, p->allocated_bytes());
- }
- p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
- if (free_list_mode == IGNORE_FREE_LIST) return 0;
- return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
-}
-
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
return code->is_optimized_code() && code->marked_for_deoptimization();
@@ -3774,12 +3441,6 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
marking_state->SetLiveBytes(chunk, new_live_size);
}
-void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
- Page* page) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[space->identity()].push_back(page);
-}
-
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
@@ -3816,12 +3477,12 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper().AddPage(p->owner()->identity(), p);
+ sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- sweeper().AddPage(p->owner()->identity(), p);
+ sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
}
}
new_space_evacuation_pages_.clear();
@@ -3831,9 +3492,9 @@ void MarkCompactCollector::Evacuate() {
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
- if (list != NULL) list->Clear();
+ if (list != nullptr) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- sweeper().AddPage(p->owner()->identity(), p);
+ sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
@@ -3845,7 +3506,7 @@ void MarkCompactCollector::Evacuate() {
}
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
+ if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
FullEvacuationVerifier verifier(heap());
verifier.Run();
}
@@ -3934,7 +3595,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
virtual ~RememberedSetUpdatingItem() {}
void Process() override {
- base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
+ base::LockGuard<base::Mutex> guard(chunk_->mutex());
UpdateUntypedPointers();
UpdateTypedPointers();
}
@@ -4107,16 +3768,28 @@ class GlobalHandlesUpdatingItem : public UpdatingItem {
// using a lock.
class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
public:
- explicit ArrayBufferTrackerUpdatingItem(Page* page) : page_(page) {}
+ enum EvacuationState { kRegular, kAborted };
+
+ explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
+ : page_(page), state_(state) {}
virtual ~ArrayBufferTrackerUpdatingItem() {}
void Process() override {
- ArrayBufferTracker::ProcessBuffers(
- page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ switch (state_) {
+ case EvacuationState::kRegular:
+ ArrayBufferTracker::ProcessBuffers(
+ page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ break;
+ case EvacuationState::kAborted:
+ ArrayBufferTracker::ProcessBuffers(
+ page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ break;
+ }
}
private:
- Page* page_;
+ Page* const page_;
+ const EvacuationState state_;
};
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
@@ -4162,32 +3835,58 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
return pages;
}
-void MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
ItemParallelJob* job) {
+ int pages = 0;
for (Page* p : new_space_evacuation_pages_) {
if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
- job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ if (p->local_tracker() == nullptr) continue;
+
+ pages++;
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(
+ p, ArrayBufferTrackerUpdatingItem::kRegular));
}
}
+ return pages;
}
-void MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
ItemParallelJob* job) {
+ int pages = 0;
for (Page* p : new_space_evacuation_pages_) {
if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
- job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ if (p->local_tracker() == nullptr) continue;
+
+ pages++;
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(
+ p, ArrayBufferTrackerUpdatingItem::kRegular));
}
}
+ return pages;
}
-void MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
+int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
ItemParallelJob* job) {
+ int pages = 0;
for (Page* p : old_space_evacuation_pages_) {
if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
p->IsEvacuationCandidate()) {
- job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ if (p->local_tracker() == nullptr) continue;
+
+ pages++;
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(
+ p, ArrayBufferTrackerUpdatingItem::kRegular));
}
}
+ for (auto object_and_page : aborted_evacuation_candidates_) {
+ Page* p = object_and_page.second;
+ if (p->local_tracker() == nullptr) continue;
+
+ pages++;
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(
+ p, ArrayBufferTrackerUpdatingItem::kAborted));
+ }
+ return pages;
}
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
@@ -4207,9 +3906,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
- CollectNewSpaceArrayBufferTrackerItems(&updating_job);
- CollectOldSpaceArrayBufferTrackerItems(&updating_job);
-
int remembered_set_pages = 0;
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
@@ -4231,25 +3927,34 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
{
- // Update pointers in map space in a separate phase to avoid data races
- // with Map->LayoutDescriptor edge.
+ // - Update pointers in map space in a separate phase to avoid data races
+ // with Map->LayoutDescriptor edge.
+ // - Update array buffer trackers in the second phase to have access to
+ // byte length which is potentially a HeapNumber.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
+ int array_buffer_pages = 0;
+ array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);
+
int remembered_set_pages = 0;
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
- const int num_tasks = remembered_set_pages == 0
- ? 0
- : NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
+ const int remembered_set_tasks =
+ remembered_set_pages == 0
+ ? 0
+ : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
+ old_to_new_slots_);
+ const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
updating_job.Run();
+ heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
}
@@ -4307,6 +4012,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run();
+ heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
{
@@ -4327,7 +4033,6 @@ void MarkCompactCollector::ReportAbortedEvacuationCandidate(
HeapObject* failed_object, Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
}
@@ -4335,7 +4040,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
for (auto object_and_page : aborted_evacuation_candidates_) {
HeapObject* failed_object = object_and_page.first;
Page* page = object_and_page.second;
- DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place.
@@ -4352,13 +4057,10 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
&record_visitor,
LiveObjectVisitor::kKeepMarking);
- // Fix up array buffers.
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ // Array buffers will be processed during pointer updating.
}
const int aborted_pages =
static_cast<int>(aborted_evacuation_candidates_.size());
- aborted_evacuation_candidates_.clear();
int aborted_pages_verified = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
@@ -4391,93 +4093,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
compacting_ = false;
}
-int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
- int required_freed_bytes,
- int max_pages) {
- int max_freed = 0;
- int pages_freed = 0;
- Page* page = nullptr;
- while ((page = GetSweepingPageSafe(identity)) != nullptr) {
- int freed = ParallelSweepPage(page, identity);
- pages_freed += 1;
- DCHECK_GE(freed, 0);
- max_freed = Max(max_freed, freed);
- if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
- return max_freed;
- if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
- }
- return max_freed;
-}
-
-int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
- AllocationSpace identity) {
- // Early bailout for pages that are swept outside of the regular sweeping
- // path. This check here avoids taking the lock first, avoiding deadlocks.
- if (page->SweepingDone()) return 0;
-
- int max_freed = 0;
- {
- base::LockGuard<base::RecursiveMutex> guard(page->mutex());
- // If this page was already swept in the meantime, we can return here.
- if (page->SweepingDone()) return 0;
- DCHECK_EQ(Page::kSweepingPending,
- page->concurrent_sweeping_state().Value());
- page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- const FreeSpaceTreatmentMode free_space_mode =
- Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
- if (identity == NEW_SPACE) {
- RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
- } else {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
- }
- DCHECK(page->SweepingDone());
-
- // After finishing sweeping of a page we clean up its remembered set.
- TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
- if (typed_slot_set) {
- typed_slot_set->FreeToBeFreedChunks();
- }
- SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
- if (slot_set) {
- slot_set->FreeToBeFreedBuckets();
- }
- }
-
- {
- base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].push_back(page);
- }
- return max_freed;
-}
-
-void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
- DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
- PrepareToBeSweptPage(space, page);
- sweeping_list_[space].push_back(page);
-}
-
-void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
- Page* page) {
- page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
- DCHECK_GE(page->area_size(),
- static_cast<size_t>(marking_state_->live_bytes(page)));
- if (space != NEW_SPACE) {
- heap_->paged_space(space)->IncreaseAllocatedBytes(
- marking_state_->live_bytes(page), page);
- }
-}
-
-Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
- AllocationSpace space) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- Page* page = nullptr;
- if (!sweeping_list_[space].empty()) {
- page = sweeping_list_[space].front();
- sweeping_list_[space].pop_front();
- }
- return page;
-}
-
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
@@ -4501,10 +4116,10 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- sweeper().RawSweep(p, Sweeper::IGNORE_FREE_LIST,
- Heap::ShouldZapGarbage()
- ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
- : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
+ sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
+ Heap::ShouldZapGarbage()
+ ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
+ : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
continue;
}
@@ -4523,7 +4138,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
unused_page_present = true;
}
- sweeper().AddPage(space->identity(), p);
+ sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
will_be_swept++;
}
@@ -4555,7 +4170,7 @@ void MarkCompactCollector::StartSweepSpaces() {
GCTracer::Scope::MC_SWEEP_MAP);
StartSweepSpace(heap()->map_space());
}
- sweeper().StartSweeping();
+ sweeper()->StartSweeping();
}
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 1784a32e16..a68be9b241 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -9,7 +9,9 @@
#include <vector>
#include "src/heap/marking.h"
+#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
+#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
namespace v8 {
@@ -242,7 +244,6 @@ class LiveObjectVisitor : AllStatic {
};
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
-enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
@@ -404,7 +405,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
UpdatingItem* CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
- void CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+ int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
int NumberOfParallelMarkingTasks(int pages);
@@ -422,6 +423,28 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
friend class YoungGenerationMarkingVisitor;
};
+// This marking state is used when concurrent marking is running.
+class IncrementalMarkingState final
+ : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
+ public:
+ Bitmap* bitmap(const MemoryChunk* chunk) const {
+ return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ }
+
+ // Concurrent marking uses local live bytes.
+ void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
+ chunk->live_byte_count_ += by;
+ }
+
+ intptr_t live_bytes(MemoryChunk* chunk) const {
+ return chunk->live_byte_count_;
+ }
+
+ void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
+ chunk->live_byte_count_ = value;
+ }
+};
+
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
@@ -476,7 +499,11 @@ struct WeakObjects {
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
- using AtomicMarkingState = MajorAtomicMarkingState;
+#ifdef V8_CONCURRENT_MARKING
+ using MarkingState = IncrementalMarkingState;
+#else
+ using MarkingState = MajorNonAtomicMarkingState;
+#endif // V8_CONCURRENT_MARKING
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
static const int kMainThread = 0;
@@ -514,6 +541,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
return nullptr;
}
+ HeapObject* PopBailout() {
+ HeapObject* result;
+#ifdef V8_CONCURRENT_MARKING
+ if (bailout_.Pop(kMainThread, &result)) return result;
+#endif
+ return nullptr;
+ }
+
void Clear() {
bailout_.Clear();
shared_.Clear();
@@ -590,88 +625,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
- class Sweeper {
- public:
- enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
- enum ClearOldToNewSlotsMode {
- DO_NOT_CLEAR,
- CLEAR_REGULAR_SLOTS,
- CLEAR_TYPED_SLOTS
- };
-
- typedef std::deque<Page*> SweepingList;
- typedef std::vector<Page*> SweptList;
-
- int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode);
-
- explicit Sweeper(Heap* heap,
- MarkCompactCollector::NonAtomicMarkingState* marking_state)
- : heap_(heap),
- marking_state_(marking_state),
- num_tasks_(0),
- pending_sweeper_tasks_semaphore_(0),
- sweeping_in_progress_(false),
- num_sweeping_tasks_(0) {}
-
- bool sweeping_in_progress() { return sweeping_in_progress_; }
-
- void AddPage(AllocationSpace space, Page* page);
-
- int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
- int max_pages = 0);
- int ParallelSweepPage(Page* page, AllocationSpace identity);
-
- // After calling this function sweeping is considered to be in progress
- // and the main thread can sweep lazily, but the background sweeper tasks
- // are not running yet.
- void StartSweeping();
- void StartSweeperTasks();
- void EnsureCompleted();
- void EnsureNewSpaceCompleted();
- bool AreSweeperTasksRunning();
- void SweepOrWaitUntilSweepingCompleted(Page* page);
-
- void AddSweptPageSafe(PagedSpace* space, Page* page);
- Page* GetSweptPageSafe(PagedSpace* space);
-
- private:
- class SweeperTask;
-
- static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
- static const int kMaxSweeperTasks = kAllocationSpaces;
-
- template <typename Callback>
- void ForAllSweepingSpaces(Callback callback) {
- for (int i = 0; i < kAllocationSpaces; i++) {
- callback(static_cast<AllocationSpace>(i));
- }
- }
-
- Page* GetSweepingPageSafe(AllocationSpace space);
-
- void PrepareToBeSweptPage(AllocationSpace space, Page* page);
-
- Heap* const heap_;
- MarkCompactCollector::NonAtomicMarkingState* marking_state_;
- int num_tasks_;
- CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
- base::Semaphore pending_sweeper_tasks_semaphore_;
- base::Mutex mutex_;
- SweptList swept_list_[kAllocationSpaces];
- SweepingList sweeping_list_[kAllocationSpaces];
- bool sweeping_in_progress_;
- // Counter is actively maintained by the concurrent tasks to avoid querying
- // the semaphore for maintaining a task counter on the main thread.
- base::AtomicNumber<intptr_t> num_sweeping_tasks_;
- };
-
enum IterationMode {
kKeepMarking,
kClearMarkbits,
};
- AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
+ MarkingState* marking_state() { return &marking_state_; }
NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
@@ -718,14 +677,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Note: Can only be called safely from main thread.
void EnsureSweepingCompleted();
- // Help out in sweeping the corresponding space and refill memory that has
- // been regained.
- //
- // Note: Thread-safe.
- void SweepAndRefill(CompactionSpace* space);
-
// Checks if sweeping is in progress right now on any space.
- bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
+ bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
@@ -743,7 +696,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
- Sweeper& sweeper() { return sweeper_; }
+ Sweeper* sweeper() { return sweeper_; }
#ifdef DEBUG
// Checks whether performing mark-compact collection.
@@ -762,6 +715,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
private:
explicit MarkCompactCollector(Heap* heap);
+ ~MarkCompactCollector();
bool WillBeDeoptimized(Code* code);
@@ -877,8 +831,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
UpdatingItem* CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
- void CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
- void CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+ int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+ int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);
void ReleaseEvacuationCandidates();
void PostProcessEvacuationCandidates();
@@ -927,18 +881,78 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::vector<Page*> new_space_evacuation_pages_;
std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
- Sweeper sweeper_;
+ Sweeper* sweeper_;
- AtomicMarkingState atomic_marking_state_;
+ MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
friend class FullEvacuator;
friend class Heap;
- friend class IncrementalMarkingMarkingVisitor;
- friend class MarkCompactMarkingVisitor;
friend class RecordMigratedSlotVisitor;
};
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+class MarkingVisitor final
+ : public HeapVisitor<
+ int,
+ MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
+ public:
+ typedef HeapVisitor<
+ int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
+ Parent;
+
+ V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
+ MarkingState* marking_state);
+
+ V8_INLINE bool ShouldVisitMapPointer() { return false; }
+
+ V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
+ V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
+ V8_INLINE int VisitCodeDataContainer(Map* map, CodeDataContainer* object);
+ V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
+ V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
+ V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
+ V8_INLINE int VisitMap(Map* map, Map* object);
+ V8_INLINE int VisitNativeContext(Map* map, Context* object);
+ V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
+ V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
+
+ // ObjectVisitor implementation.
+ V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final;
+ V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
+
+ private:
+ // Granularity in which FixedArrays are scanned if |fixed_array_mode|
+ // is true.
+ static const int kProgressBarScanningChunk = 32 * 1024;
+
+ V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);
+
+ V8_INLINE void MarkMapContents(Map* map);
+
+ // Marks the object black without pushing it on the marking work list. Returns
+ // true if the object needed marking and false otherwise.
+ V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object);
+
+ // Marks the object grey and pushes it on the marking work list.
+ V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
+
+ MarkingState* marking_state() { return marking_state_; }
+
+ MarkCompactCollector::MarkingWorklist* marking_worklist() const {
+ return collector_->marking_worklist();
+ }
+
+ Heap* const heap_;
+ MarkCompactCollector* const collector_;
+ MarkingState* const marking_state_;
+};
+
class EvacuationScope {
public:
explicit EvacuationScope(MarkCompactCollector* collector)
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index a269873024..cc1030846a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -79,7 +79,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
kIncrementalMarkingDelayMs;
heap()->incremental_marking()->AdvanceIncrementalMarking(
deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
+ StepOrigin::kTask);
heap()->FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 0ffe75c84a..d8fe9fe7d8 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -5,8 +5,10 @@
#include "src/heap/object-stats.h"
#include "src/assembler-inl.h"
+#include "src/base/bits.h"
#include "src/compilation-cache.h"
#include "src/counters.h"
+#include "src/globals.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects/compilation-cache-inl.h"
@@ -209,6 +211,59 @@ void ObjectStats::CheckpointObjectStats() {
ClearObjectStats();
}
+namespace {
+
+int Log2ForSize(size_t size) {
+ DCHECK_GT(size, 0);
+ return kSizetSize * 8 - 1 - base::bits::CountLeadingZeros(size);
+}
+
+} // namespace
+
+int ObjectStats::HistogramIndexFromSize(size_t size) {
+ if (size == 0) return 0;
+ return Min(Max(Log2ForSize(size) + 1 - kFirstBucketShift, 0),
+ kLastValueBucketIndex);
+}
+
+void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
+ DCHECK_LE(type, LAST_TYPE);
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ size_histogram_[type][HistogramIndexFromSize(size)]++;
+}
+
+void ObjectStats::RecordCodeSubTypeStats(int code_sub_type, size_t size) {
+ int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+ DCHECK_GE(code_sub_type_index, FIRST_CODE_KIND_SUB_TYPE);
+ DCHECK_LT(code_sub_type_index, FIRST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[code_sub_type_index]++;
+ object_sizes_[code_sub_type_index] += size;
+ size_histogram_[code_sub_type_index][HistogramIndexFromSize(size)]++;
+}
+
+bool ObjectStats::RecordFixedArraySubTypeStats(FixedArrayBase* array,
+ int array_sub_type, size_t size,
+ size_t over_allocated) {
+ auto it = visited_fixed_array_sub_types_.insert(array);
+ if (!it.second) return false;
+ DCHECK_LE(array_sub_type, LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
+ size_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
+ [HistogramIndexFromSize(size)]++;
+ if (over_allocated > 0) {
+ InstanceType type =
+ array->IsHashTable() ? HASH_TABLE_TYPE : FIXED_ARRAY_TYPE;
+ over_allocated_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] +=
+ over_allocated;
+ over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
+ [HistogramIndexFromSize(over_allocated)]++;
+ over_allocated_[type] += over_allocated;
+ over_allocated_histogram_[type][HistogramIndexFromSize(over_allocated)]++;
+ }
+ return true;
+}
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
@@ -305,14 +360,10 @@ void ObjectStatsCollector::CollectGlobalStatistics() {
}
static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
- return (array->map()->instance_type() == FIXED_ARRAY_TYPE ||
- array->map()->instance_type() == HASH_TABLE_TYPE) &&
- array->map() != heap->fixed_double_array_map() &&
+ return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
array != heap->empty_fixed_array() &&
- array != heap->empty_byte_array() &&
array != heap->empty_sloppy_arguments_elements() &&
array != heap->empty_slow_element_dictionary() &&
- array != heap->empty_descriptor_array() &&
array != heap->empty_property_dictionary();
}
@@ -367,7 +418,7 @@ void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
FixedArrayBase* elements = object->elements();
if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
if (elements->IsDictionary() && SameLiveness(object, elements)) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements);
+ NumberDictionary* dict = NumberDictionary::cast(elements);
RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
} else {
if (IsHoleyElementsKind(object->GetElementsKind())) {
@@ -480,8 +531,8 @@ void ObjectStatsCollector::RecordCodeDetails(Code* code) {
RecordFixedArrayHelper(code, code->deoptimization_data(),
DEOPTIMIZATION_DATA_SUB_TYPE, 0);
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
+ DeoptimizationData* input_data =
+ DeoptimizationData::cast(code->deoptimization_data());
if (input_data->length() > 0) {
RecordFixedArrayHelper(code->deoptimization_data(),
input_data->LiteralArray(),
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 885cbd866b..18bbaaaa43 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -37,44 +37,10 @@ class ObjectStats {
void PrintJSON(const char* key);
void Dump(std::stringstream& stream);
- void RecordObjectStats(InstanceType type, size_t size) {
- DCHECK(type <= LAST_TYPE);
- object_counts_[type]++;
- object_sizes_[type] += size;
- size_histogram_[type][HistogramIndexFromSize(size)]++;
- }
-
- void RecordCodeSubTypeStats(int code_sub_type, size_t size) {
- int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
- DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
- code_sub_type_index < FIRST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[code_sub_type_index]++;
- object_sizes_[code_sub_type_index] += size;
- const int idx = HistogramIndexFromSize(size);
- size_histogram_[code_sub_type_index][idx]++;
- }
-
+ void RecordObjectStats(InstanceType type, size_t size);
+ void RecordCodeSubTypeStats(int code_sub_type, size_t size);
bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
- size_t size, size_t over_allocated) {
- auto it = visited_fixed_array_sub_types_.insert(array);
- if (!it.second) return false;
- DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
- size_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(size)]++;
- if (over_allocated > 0) {
- InstanceType type =
- array->IsHashTable() ? HASH_TABLE_TYPE : FIXED_ARRAY_TYPE;
- over_allocated_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] +=
- over_allocated;
- over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(over_allocated)]++;
- over_allocated_[type] += over_allocated;
- over_allocated_histogram_[type][HistogramIndexFromSize(over_allocated)]++;
- }
- return true;
- }
+ size_t size, size_t over_allocated);
size_t object_count_last_gc(size_t index) {
return object_counts_last_time_[index];
@@ -88,11 +54,12 @@ class ObjectStats {
Heap* heap() { return heap_; }
private:
- static const int kFirstBucketShift = 5; // <=32
- static const int kLastBucketShift = 19; // >512k
+ static const int kFirstBucketShift = 5; // <32
+ static const int kLastBucketShift = 20; // >=1M
static const int kFirstBucket = 1 << kFirstBucketShift;
static const int kLastBucket = 1 << kLastBucketShift;
static const int kNumberOfBuckets = kLastBucketShift - kFirstBucketShift + 1;
+ static const int kLastValueBucketIndex = kLastBucketShift - kFirstBucketShift;
void PrintKeyAndId(const char* key, int gc_count);
// The following functions are excluded from inline to reduce the overall
@@ -102,12 +69,7 @@ class ObjectStats {
V8_NOINLINE void DumpInstanceTypeData(std::stringstream& stream,
const char* name, int index);
- int HistogramIndexFromSize(size_t size) {
- if (size == 0) return 0;
- int idx = static_cast<int>(base::ieee754::log2(static_cast<double>(size))) -
- kFirstBucketShift;
- return idx < 0 ? 0 : idx;
- }
+ int HistogramIndexFromSize(size_t size);
Heap* heap_;
// Object counts and used memory by InstanceType.
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index dbd1e3b370..0a8c866979 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -17,6 +17,12 @@ namespace v8 {
namespace internal {
template <typename ResultType, typename ConcreteVisitor>
+template <typename T>
+T* HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject* object) {
+ return T::cast(object);
+}
+
+template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
return Visit(object->map(), object);
}
@@ -25,24 +31,29 @@ template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
HeapObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- switch (static_cast<VisitorId>(map->visitor_id())) {
-#define CASE(type) \
- case kVisit##type: \
- return visitor->Visit##type(map, type::cast(object));
+ switch (map->visitor_id()) {
+#define CASE(type) \
+ case kVisit##type: \
+ return visitor->Visit##type(map, \
+ ConcreteVisitor::template Cast<type>(object));
TYPED_VISITOR_ID_LIST(CASE)
#undef CASE
case kVisitShortcutCandidate:
- return visitor->VisitShortcutCandidate(map, ConsString::cast(object));
+ return visitor->VisitShortcutCandidate(
+ map, ConcreteVisitor::template Cast<ConsString>(object));
case kVisitNativeContext:
- return visitor->VisitNativeContext(map, Context::cast(object));
+ return visitor->VisitNativeContext(
+ map, ConcreteVisitor::template Cast<Context>(object));
case kVisitDataObject:
- return visitor->VisitDataObject(map, HeapObject::cast(object));
+ return visitor->VisitDataObject(map, object);
case kVisitJSObjectFast:
- return visitor->VisitJSObjectFast(map, JSObject::cast(object));
+ return visitor->VisitJSObjectFast(
+ map, ConcreteVisitor::template Cast<JSObject>(object));
case kVisitJSApiObject:
- return visitor->VisitJSApiObject(map, JSObject::cast(object));
+ return visitor->VisitJSApiObject(
+ map, ConcreteVisitor::template Cast<JSObject>(object));
case kVisitStruct:
- return visitor->VisitStruct(map, HeapObject::cast(object));
+ return visitor->VisitStruct(map, object);
case kVisitFreeSpace:
return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
case kVisitorIdCount:
@@ -78,13 +89,7 @@ TYPED_VISITOR_ID_LIST(VISIT)
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
Map* map, ConsString* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = ConsString::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object->map_slot());
- ConsString::BodyDescriptor::IterateBody(object, size, visitor);
- return static_cast<ResultType>(size);
+ return static_cast<ConcreteVisitor*>(this)->VisitConsString(map, object);
}
template <typename ResultType, typename ConcreteVisitor>
@@ -181,197 +186,6 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
return visitor->VisitJSObject(map, object);
}
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
- JSFunction* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
- return size;
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitTransitionArray(
- Map* map, TransitionArray* array) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
- TransitionArray::BodyDescriptor::IterateBody(array, size, visitor);
- collector_->AddTransitionArray(array);
- return size;
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitWeakCell(Map* map,
- WeakCell* weak_cell) {
- // Enqueue weak cell in linked list of encountered weak collections.
- // We can ignore weak cells with cleared values because they will always
- // contain smi zero.
- if (!weak_cell->cleared()) {
- HeapObject* value = HeapObject::cast(weak_cell->value());
- if (heap_->incremental_marking()->marking_state()->IsBlackOrGrey(value)) {
- // Weak cells with live values are directly processed here to reduce
- // the processing time of weak cells during the main GC pause.
- Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- collector_->RecordSlot(weak_cell, slot, *slot);
- } else {
- // If we do not know about liveness of values of weak cells, we have to
- // process them when we know the liveness of the whole transitive
- // closure.
- collector_->AddWeakCell(weak_cell);
- }
- }
- return WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
- Context* context) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = Context::BodyDescriptorWeak::SizeOf(map, context);
- Context::BodyDescriptorWeak::IterateBody(context, size, visitor);
- return size;
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitJSWeakCollection(
- Map* map, JSWeakCollection* weak_collection) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
-
- // Enqueue weak collection in linked list of encountered weak collections.
- if (weak_collection->next() == heap_->undefined_value()) {
- weak_collection->set_next(heap_->encountered_weak_collections());
- heap_->set_encountered_weak_collections(weak_collection);
- }
-
- // Skip visiting the backing hash table containing the mappings and the
- // pointer to the other enqueued weak collections, both are post-processed.
- int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
- JSWeakCollection::BodyDescriptorWeak::IterateBody(weak_collection, size,
- visitor);
-
- // Partially initialized weak collection is enqueued, but table is ignored.
- if (!weak_collection->table()->IsHashTable()) return size;
-
- // Mark the backing hash table without pushing it on the marking stack.
- Object** slot =
- HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
- HeapObject* obj = HeapObject::cast(*slot);
- collector_->RecordSlot(weak_collection, slot, obj);
- visitor->MarkObjectWithoutPush(weak_collection, obj);
- return size;
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitBytecodeArray(Map* map,
- BytecodeArray* array) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
- BytecodeArray::BodyDescriptor::IterateBody(array, size, visitor);
- array->MakeOlder();
- return size;
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitCode(Map* map, Code* code) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = Code::BodyDescriptor::SizeOf(map, code);
- Code::BodyDescriptor::IterateBody(code, size, visitor);
- return size;
-}
-
-template <typename ConcreteVisitor>
-void MarkingVisitor<ConcreteVisitor>::MarkMapContents(Map* map) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- // Since descriptor arrays are potentially shared, ensure that only the
- // descriptors that belong to this map are marked. The first time a non-empty
- // descriptor array is marked, its header is also visited. The slot holding
- // the descriptor array will be implicitly recorded when the pointer fields of
- // this map are visited. Prototype maps don't keep track of transitions, so
- // just mark the entire descriptor array.
- if (!map->is_prototype_map()) {
- DescriptorArray* descriptors = map->instance_descriptors();
- if (visitor->MarkObjectWithoutPush(map, descriptors) &&
- descriptors->length() > 0) {
- visitor->VisitPointers(descriptors, descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
- }
- int start = 0;
- int end = map->NumberOfOwnDescriptors();
- if (start < end) {
- visitor->VisitPointers(descriptors,
- descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
- }
- }
-
- // Mark the pointer fields of the Map. Since the transitions array has
- // been marked already, it is fine that one of these fields contains a
- // pointer to it.
- visitor->VisitPointers(
- map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitMap(Map* map, Map* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
-
- // When map collection is enabled we have to mark through map's transitions
- // and back pointers in a special way to make these links weak.
- if (object->CanTransition()) {
- MarkMapContents(object);
- } else {
- visitor->VisitPointers(
- object, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
- }
- return Map::BodyDescriptor::SizeOf(map, object);
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
- JSObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (heap_->local_embedder_heap_tracer()->InUse()) {
- DCHECK(object->IsJSObject());
- heap_->TracePossibleWrapper(object);
- }
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- JSObject::BodyDescriptor::IterateBody(object, size, visitor);
- return size;
-}
-
-template <typename ConcreteVisitor>
-int MarkingVisitor<ConcreteVisitor>::VisitAllocationSite(
- Map* map, AllocationSite* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
- AllocationSite::BodyDescriptorWeak::IterateBody(object, size, visitor);
- return size;
-}
-
-template <typename ConcreteVisitor>
-void MarkingVisitor<ConcreteVisitor>::VisitEmbeddedPointer(Code* host,
- RelocInfo* rinfo) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- collector_->RecordRelocSlot(host, rinfo, object);
- if (!host->IsWeakObject(object)) {
- visitor->MarkObject(host, object);
- }
-}
-
-template <typename ConcreteVisitor>
-void MarkingVisitor<ConcreteVisitor>::VisitCodeTarget(Code* host,
- RelocInfo* rinfo) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- collector_->RecordRelocSlot(host, rinfo, target);
- visitor->MarkObject(host, target);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 93bbd0f524..c7c384f52a 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -29,7 +29,7 @@ template <class T>
Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
- T* tail = NULL;
+ T* tail = nullptr;
bool record_slots = MustRecordSlots(heap);
while (list != undefined) {
@@ -37,18 +37,23 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
T* candidate = reinterpret_cast<T*>(list);
Object* retained = retainer->RetainAs(list);
- if (retained != NULL) {
+
+ // Move to the next element before the WeakNext is cleared.
+ list = WeakListVisitor<T>::WeakNext(candidate);
+
+ if (retained != nullptr) {
if (head == undefined) {
// First element in the list.
head = retained;
} else {
// Subsequent elements in the list.
- DCHECK(tail != NULL);
+ DCHECK_NOT_NULL(tail);
WeakListVisitor<T>::SetWeakNext(tail, retained);
if (record_slots) {
- Object** next_slot =
- HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
- MarkCompactCollector::RecordSlot(tail, next_slot, retained);
+ HeapObject* slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
+ int slot_offset = WeakListVisitor<T>::WeakNextOffset();
+ Object** slot = HeapObject::RawField(slot_holder, slot_offset);
+ MarkCompactCollector::RecordSlot(slot_holder, slot, retained);
}
}
// Retained object is new tail.
@@ -62,13 +67,10 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
} else {
WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
}
-
- // Move to next element in the list.
- list = WeakListVisitor<T>::WeakNext(candidate);
}
// Terminate the list if there is one or more elements.
- if (tail != NULL) WeakListVisitor<T>::SetWeakNext(tail, undefined);
+ if (tail != nullptr) WeakListVisitor<T>::SetWeakNext(tail, undefined);
return head;
}
@@ -86,16 +88,27 @@ static void ClearWeakList(Heap* heap, Object* list) {
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code* code, Object* next) {
- code->set_next_code_link(next, UPDATE_WEAK_WRITE_BARRIER);
+ code->code_data_container()->set_next_code_link(next,
+ UPDATE_WEAK_WRITE_BARRIER);
}
- static Object* WeakNext(Code* code) { return code->next_code_link(); }
+ static Object* WeakNext(Code* code) {
+ return code->code_data_container()->next_code_link();
+ }
- static int WeakNextOffset() { return Code::kNextCodeLinkOffset; }
+ static HeapObject* WeakNextHolder(Code* code) {
+ return code->code_data_container();
+ }
+
+ static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
- static void VisitPhantomObject(Heap*, Code*) {}
+ static void VisitPhantomObject(Heap* heap, Code* code) {
+ // Even though the code is dying, its code_data_container can still be
+ // alive. Clear the next_code_link slot to avoid a dangling pointer.
+ SetWeakNext(code, heap->undefined_value());
+ }
};
@@ -109,6 +122,8 @@ struct WeakListVisitor<Context> {
return context->next_context_link();
}
+ static HeapObject* WeakNextHolder(Context* context) { return context; }
+
static int WeakNextOffset() {
return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
}
@@ -161,6 +176,8 @@ struct WeakListVisitor<AllocationSite> {
static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); }
+ static HeapObject* WeakNextHolder(AllocationSite* obj) { return obj; }
+
static int WeakNextOffset() { return AllocationSite::kWeakNextOffset; }
static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 01708e7655..39ebdd2cbd 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -11,11 +11,15 @@
#include "src/objects.h"
#include "src/objects/hash-table.h"
#include "src/objects/string.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
class BigInt;
+class BytecodeArray;
+class JSArrayBuffer;
+class JSRegExp;
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
@@ -24,6 +28,7 @@ class BigInt;
V(BytecodeArray) \
V(Cell) \
V(Code) \
+ V(CodeDataContainer) \
V(ConsString) \
V(FeedbackVector) \
V(FixedArray) \
@@ -87,6 +92,9 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE ResultType VisitJSApiObject(Map* map, JSObject* object);
V8_INLINE ResultType VisitStruct(Map* map, HeapObject* object);
V8_INLINE ResultType VisitFreeSpace(Map* map, FreeSpace* object);
+
+ template <typename T>
+ static V8_INLINE T* Cast(HeapObject* object);
};
template <typename ConcreteVisitor>
@@ -111,38 +119,6 @@ class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
}
};
-template <typename ConcreteVisitor>
-class MarkingVisitor : public HeapVisitor<int, ConcreteVisitor> {
- public:
- explicit MarkingVisitor(Heap* heap, MarkCompactCollector* collector)
- : heap_(heap), collector_(collector) {}
-
- V8_INLINE bool ShouldVisitMapPointer() { return false; }
-
- V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
- V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
- V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
- V8_INLINE int VisitNativeContext(Map* map, Context* object);
- V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
- V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
- V8_INLINE int VisitCode(Map* map, Code* object);
- V8_INLINE int VisitMap(Map* map, Map* object);
- V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
- V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
-
- // ObjectVisitor implementation.
- V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
- V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
- // Skip weak next code link.
- V8_INLINE void VisitNextCodeLink(Code* host, Object** p) final {}
-
- protected:
- V8_INLINE void MarkMapContents(Map* map);
-
- Heap* heap_;
- MarkCompactCollector* collector_;
-};
-
class WeakObjectRetainer;
// A weak list is single linked list where each element has a weak pointer to
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index d17a355247..cd9c45141d 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -327,14 +327,14 @@ class UpdateTypedSlotHelper {
Callback callback) {
switch (slot_type) {
case CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, nullptr);
return UpdateCodeTarget(&rinfo, callback);
}
case CODE_ENTRY_SLOT: {
return UpdateCodeEntry(addr, callback);
}
case EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, nullptr);
return UpdateEmbeddedPointer(&rinfo, callback);
}
case OBJECT_SLOT: {
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 1ea2f3493c..99e1a8004e 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -116,7 +116,7 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
}
*slot = target;
- if (!ContainsOnlyData(static_cast<VisitorId>(map->visitor_id()))) {
+ if (!ContainsOnlyData(map->visitor_id())) {
promotion_list_.Push(ObjectAndSize(target, object_size));
}
promoted_size_ += object_size;
@@ -206,7 +206,7 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
int size = source->SizeFromMap(map);
// Cannot use ::cast() below because that would add checks in debug mode
// that require re-reading the map.
- switch (static_cast<VisitorId>(map->visitor_id())) {
+ switch (map->visitor_id()) {
case kVisitThinString:
EvacuateThinString(map, slot, reinterpret_cast<ThinString*>(source),
size);
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index fc70f60483..231a8f5074 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -9,6 +9,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
+#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
namespace v8 {
@@ -86,6 +87,33 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
target->IterateBody(target->map()->instance_type(), size, &visitor);
}
+void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
+ AllocationSpace space = page->owner()->identity();
+ if ((space == OLD_SPACE) && !page->SweepingDone()) {
+ heap()->mark_compact_collector()->sweeper()->AddPage(
+ space, reinterpret_cast<Page*>(page),
+ Sweeper::READD_TEMPORARY_REMOVED_PAGE);
+ }
+}
+
+void Scavenger::ScavengePage(MemoryChunk* page) {
+ CodePageMemoryModificationScope memory_modification_scope(page);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ page,
+ [this](Address addr) { return CheckAndScavengeObject(heap_, addr); },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ page, [this](SlotType type, Address host_addr, Address addr) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ heap_->isolate(), type, addr, [this](Object** addr) {
+ return CheckAndScavengeObject(heap(),
+ reinterpret_cast<Address>(addr));
+ });
+ });
+
+ AddPageToSweeperIfNecessary(page);
+}
+
void Scavenger::Process(OneshotBarrier* barrier) {
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 1437092874..75b24fe282 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -28,14 +28,9 @@ class Scavenger {
Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
PromotionList* promotion_list, int task_id);
- // Scavenges an object |object| referenced from slot |p|. |object| is required
- // to be in from space.
- inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
- // Potentially scavenges an object referenced from |slot_address| if it is
- // indeed a HeapObject and resides in from space.
- inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
- Address slot_address);
+ // Entry point for scavenging an old generation page. For scavenging single
+ // objects see RootScavengingVisitor and ScavengeVisitor below.
+ void ScavengePage(MemoryChunk* page);
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
@@ -47,10 +42,6 @@ class Scavenger {
size_t bytes_copied() const { return copied_size_; }
size_t bytes_promoted() const { return promoted_size_; }
- void AnnounceLockedPage(MemoryChunk* chunk) {
- allocator_.AnnounceLockedPage(chunk);
- }
-
private:
// Number of objects to process before interrupting for potentially waking
// up other tasks.
@@ -61,6 +52,17 @@ class Scavenger {
inline void PageMemoryFence(Object* object);
+ void AddPageToSweeperIfNecessary(MemoryChunk* page);
+
+ // Potentially scavenges an object referenced from |slot_address| if it is
+ // indeed a HeapObject and resides in from space.
+ inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+ Address slot_address);
+
+ // Scavenges an object |object| referenced from slot |p|. |object| is required
+ // to be in from space.
+ inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
int size);
@@ -106,6 +108,8 @@ class Scavenger {
const bool is_compacting_;
friend class IterateAndScavengePromotedObjectsVisitor;
+ friend class RootScavengeVisitor;
+ friend class ScavengeVisitor;
};
// Helper class for turning the scavenger into an object visitor that is also
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 592fb53a7f..8831417ce2 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -4,12 +4,14 @@
#include "src/setup-isolate.h"
+#include "src/accessors.h"
#include "src/ast/context-slot-cache.h"
#include "src/compilation-cache.h"
#include "src/contexts.h"
#include "src/factory.h"
#include "src/heap-symbols.h"
#include "src/heap/heap.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/layout-descriptor.h"
#include "src/lookup-cache.h"
@@ -35,10 +37,11 @@ bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
+ CreateApiObjects();
// Create initial objects
CreateInitialObjects();
+ CreateInternalAccessorInfoObjects();
CHECK_EQ(0u, gc_count_);
set_native_contexts_list(undefined_value());
@@ -107,6 +110,9 @@ bool Heap::CreateInitialMaps() {
fixed_cow_array)
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
+ ALLOCATE_PARTIAL_MAP(DESCRIPTOR_ARRAY_TYPE, kVariableSizeSentinel,
+ descriptor_array)
+
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
@@ -164,10 +170,12 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty descriptor array.
{
+ STATIC_ASSERT(DescriptorArray::kFirstIndex != 0);
AllocationResult allocation =
AllocateUninitializedFixedArray(DescriptorArray::kFirstIndex, TENURED);
if (!allocation.To(&obj)) return false;
}
+ obj->set_map_no_write_barrier(descriptor_array_map());
set_empty_descriptor_array(DescriptorArray::cast(obj));
DescriptorArray::cast(obj)->set(DescriptorArray::kDescriptorLengthIndex,
Smi::kZero);
@@ -178,6 +186,7 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(this, meta_map());
FinalizePartialMap(this, fixed_array_map());
FinalizePartialMap(this, fixed_cow_array_map());
+ FinalizePartialMap(this, descriptor_array_map());
FinalizePartialMap(this, undefined_map());
undefined_map()->set_is_undetectable();
FinalizePartialMap(this, null_map());
@@ -283,8 +292,13 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, unseeded_number_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_set)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, name_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, global_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
@@ -302,6 +316,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
+ ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
+ code_data_container)
+
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
external_map()->set_is_extensible(false);
@@ -358,18 +375,16 @@ bool Heap::CreateInitialMaps() {
return true;
}
-bool Heap::CreateApiObjects() {
- HandleScope scope(isolate());
- set_message_listeners(*TemplateList::New(isolate(), 2));
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
- if (!allocation.To(&obj)) return false;
- }
- InterceptorInfo* info = InterceptorInfo::cast(obj);
+void Heap::CreateApiObjects() {
+ Isolate* isolate = this->isolate();
+ HandleScope scope(isolate);
+
+ set_message_listeners(*TemplateList::New(isolate, 2));
+
+ Handle<InterceptorInfo> info = Handle<InterceptorInfo>::cast(
+ isolate->factory()->NewStruct(INTERCEPTOR_INFO_TYPE, TENURED));
info->set_flags(0);
- set_noop_interceptor_info(info);
- return true;
+ set_noop_interceptor_info(*info);
}
void Heap::CreateInitialObjects() {
@@ -452,7 +467,7 @@ void Heap::CreateInitialObjects() {
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
+ set_code_stubs(*NumberDictionary::New(isolate(), 128));
{
HandleScope scope(isolate());
@@ -540,13 +555,12 @@ void Heap::CreateInitialObjects() {
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
weak_new_space_object_to_code_list()->SetLength(0);
- set_code_coverage_list(undefined_value());
+ set_feedback_vectors_for_profiling_tools(undefined_value());
set_script_list(Smi::kZero);
- Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::New(isolate(), 1, TENURED,
- USE_CUSTOM_MINIMUM_CAPACITY);
+ Handle<NumberDictionary> slow_element_dictionary =
+ NumberDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
@@ -557,15 +571,25 @@ void Heap::CreateInitialObjects() {
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
set_next_template_serial_number(Smi::kZero);
- // Allocate the empty OrderedHashTable.
- Handle<FixedArray> empty_ordered_hash_table =
+ // Allocate the empty OrderedHashMap.
+ Handle<FixedArray> empty_ordered_hash_map =
factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
- empty_ordered_hash_table->set_map_no_write_barrier(
- *factory->ordered_hash_table_map());
- for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
- empty_ordered_hash_table->set(i, Smi::kZero);
+ empty_ordered_hash_map->set_map_no_write_barrier(
+ *factory->ordered_hash_map_map());
+ for (int i = 0; i < empty_ordered_hash_map->length(); ++i) {
+ empty_ordered_hash_map->set(i, Smi::kZero);
}
- set_empty_ordered_hash_table(*empty_ordered_hash_table);
+ set_empty_ordered_hash_map(*empty_ordered_hash_map);
+
+ // Allocate the empty OrderedHashSet.
+ Handle<FixedArray> empty_ordered_hash_set =
+ factory->NewFixedArray(OrderedHashSet::kHashTableStartIndex, TENURED);
+ empty_ordered_hash_set->set_map_no_write_barrier(
+ *factory->ordered_hash_set_map());
+ for (int i = 0; i < empty_ordered_hash_set->length(); ++i) {
+ empty_ordered_hash_set->set(i, Smi::kZero);
+ }
+ set_empty_ordered_hash_set(*empty_ordered_hash_set);
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -578,7 +602,7 @@ void Heap::CreateInitialObjects() {
Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_protector(*cell);
+ set_no_elements_protector(*cell);
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(the_hole_value());
@@ -615,6 +639,11 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(Smi::kZero);
+ STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
+ set_deserialize_lazy_handler(Smi::kZero);
+ set_deserialize_lazy_handler_wide(Smi::kZero);
+ set_deserialize_lazy_handler_extra_wide(Smi::kZero);
+
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
@@ -625,5 +654,17 @@ void Heap::CreateInitialObjects() {
isolate_->compilation_cache()->Clear();
}
+void Heap::CreateInternalAccessorInfoObjects() {
+ Isolate* isolate = this->isolate();
+ HandleScope scope(isolate);
+ Handle<AccessorInfo> acessor_info;
+
+#define INIT_ACCESSOR_INFO(accessor_name, AccessorName) \
+ acessor_info = Accessors::Make##AccessorName##Info(isolate); \
+ roots_[k##AccessorName##AccessorRootIndex] = *acessor_info;
+ ACCESSOR_INFO_LIST(INIT_ACCESSOR_INFO)
+#undef INIT_ACCESSOR_INFO
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 8f3cd2250e..f1edb6f2fb 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -200,7 +200,7 @@ class SlotSet : public Malloced {
uint32_t old_cell = cell;
uint32_t mask = 0;
while (cell) {
- int bit_offset = base::bits::CountTrailingZeros32(cell);
+ int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
if (callback(page_start_ + slot) == KEEP_SLOT) {
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index a33d22f80c..fb78b99c2f 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -8,6 +8,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/msan.h"
+#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
@@ -369,10 +370,13 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
- if (top() < top_on_previous_step_) {
- // Generated code decreased the top() pointer to do folded allocations
- DCHECK_EQ(Page::FromAddress(top()),
- Page::FromAddress(top_on_previous_step_));
+ if (top_on_previous_step_ && top() < top_on_previous_step_ &&
+ SupportsInlineAllocation()) {
+ // Generated code decreased the top() pointer to do folded allocations.
+ // The top_on_previous_step_ can be one byte beyond the current page.
+ DCHECK_NOT_NULL(top());
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index f654c6689e..7657e1e6ec 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -15,6 +15,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
+#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
@@ -56,7 +57,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Page* cur_page = *(current_page_++);
Heap* heap = space_->heap();
- heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
+ heap->mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(
cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
@@ -71,14 +72,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
space->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
space->ResumeAllocationObservers();
}
}
@@ -119,21 +120,21 @@ bool CodeRange::SetUp(size_t requested) {
VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
- requested,
- Max(kCodeRangeAreaAlignment,
- static_cast<size_t>(base::OS::AllocateAlignment())),
- v8::internal::GetRandomMmapAddr(), &reservation)) {
+ requested, Max(kCodeRangeAreaAlignment, base::OS::AllocatePageSize()),
+ base::OS::GetRandomMmapAddr(), &reservation)) {
return false;
}
// We are sure that we have mapped a block of requested addresses.
- DCHECK(reservation.size() == requested);
+ DCHECK_GE(reservation.size(), requested);
Address base = reinterpret_cast<Address>(reservation.address());
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space.
if (reserved_area > 0) {
- if (!reservation.Commit(base, reserved_area, true)) return false;
+ if (!reservation.SetPermissions(base, reserved_area,
+ base::OS::MemoryPermission::kReadWrite))
+ return false;
base += reserved_area;
}
@@ -198,23 +199,22 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
Address CodeRange::AllocateRawMemory(const size_t requested_size,
const size_t commit_size,
size_t* allocated) {
- // request_size includes guards while committed_size does not. Make sure
- // callers know about the invariant.
- CHECK_LE(commit_size,
- requested_size - 2 * MemoryAllocator::CodePageGuardSize());
+ // requested_size includes the header and two guard regions, while commit_size
+ // only includes the header.
+ DCHECK_LE(commit_size,
+ requested_size - 2 * MemoryAllocator::CodePageGuardSize());
FreeBlock current;
if (!ReserveBlock(requested_size, &current)) {
*allocated = 0;
- return NULL;
+ return nullptr;
}
*allocated = current.size;
- DCHECK(*allocated <= current.size);
DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
&virtual_memory_, current.start, commit_size, *allocated)) {
*allocated = 0;
ReleaseBlock(&current);
- return NULL;
+ return nullptr;
}
return current.start;
}
@@ -227,7 +227,8 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
bool CodeRange::UncommitRawMemory(Address start, size_t length) {
- return virtual_memory_.Uncommit(start, length);
+ return virtual_memory_.SetPermissions(start, length,
+ base::OS::MemoryPermission::kNoAccess);
}
@@ -235,7 +236,8 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.emplace_back(address, length);
- virtual_memory_.Uncommit(address, length);
+ virtual_memory_.SetPermissions(address, length,
+ base::OS::MemoryPermission::kNoAccess);
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
@@ -304,7 +306,7 @@ void MemoryAllocator::TearDown() {
capacity_ = 0;
if (last_chunk_.IsReserved()) {
- last_chunk_.Release();
+ last_chunk_.Free();
}
delete code_range_;
@@ -411,12 +413,13 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
return !chunk->InNewSpace() || mc == nullptr ||
- !mc->sweeper().sweeping_in_progress();
+ !mc->sweeper()->sweeping_in_progress();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
- if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
+ if (!base::OS::SetPermissions(base, size,
+ base::OS::MemoryPermission::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
@@ -427,27 +430,25 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
- DCHECK(code_range() == NULL ||
+ DCHECK(code_range() == nullptr ||
!code_range()->contains(static_cast<Address>(reservation->address())));
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
reservation->size() <= Page::kPageSize);
- reservation->Release();
+ reservation->Free();
}
void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
- if (code_range() != NULL &&
+ if (code_range() != nullptr &&
code_range()->contains(static_cast<Address>(base))) {
DCHECK(executable == EXECUTABLE);
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
- bool result = base::OS::ReleaseRegion(base, size);
- USE(result);
- DCHECK(result);
+ CHECK(base::OS::Free(base, size));
}
}
@@ -458,15 +459,10 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
return nullptr;
- const Address base =
- ::RoundUp(static_cast<Address>(reservation.address()), alignment);
- if (base + size != reservation.end()) {
- const Address unused_start = ::RoundUp(base + size, GetCommitPageSize());
- reservation.ReleasePartial(unused_start);
- }
+ Address result = static_cast<Address>(reservation.address());
size_.Increment(reservation.size());
controller->TakeControl(&reservation);
- return base;
+ return result;
}
Address MemoryAllocator::AllocateAlignedMemory(
@@ -476,27 +472,28 @@ Address MemoryAllocator::AllocateAlignedMemory(
VirtualMemory reservation;
Address base =
ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
- if (base == NULL) return NULL;
+ if (base == nullptr) return nullptr;
if (executable == EXECUTABLE) {
if (!CommitExecutableMemory(&reservation, base, commit_size,
reserve_size)) {
- base = NULL;
+ base = nullptr;
}
} else {
- if (reservation.Commit(base, commit_size, false)) {
+ if (reservation.SetPermissions(base, commit_size,
+ base::OS::MemoryPermission::kReadWrite)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
- base = NULL;
+ base = nullptr;
}
}
- if (base == NULL) {
- // Failed to commit the body. Release the mapping and any partially
- // committed regions inside it.
- reservation.Release();
+ if (base == nullptr) {
+ // Failed to commit the body. Free the mapping and any partially committed
+ // regions inside it.
+ reservation.Free();
size_.Decrement(reserve_size);
- return NULL;
+ return nullptr;
}
controller->TakeControl(&reservation);
@@ -528,6 +525,50 @@ void MemoryChunk::InitializationMemoryFence() {
#endif
}
+void MemoryChunk::SetReadAndExecutable() {
+ DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+ // Decrementing the write_unprotect_counter_ and changing the page
+ // protection mode has to be atomic.
+ base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+ if (write_unprotect_counter_ == 0) {
+ // This is a corner case that may happen when we have a
+ // CodeSpaceMemoryModificationScope open and this page was newly
+ // added.
+ return;
+ }
+ write_unprotect_counter_--;
+ DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+ if (write_unprotect_counter_ == 0) {
+ Address protect_start =
+ address() + MemoryAllocator::CodePageAreaStartOffset();
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAddressAligned(protect_start, page_size));
+ size_t protect_size = RoundUp(area_size(), page_size);
+ CHECK(base::OS::SetPermissions(protect_start, protect_size,
+ base::OS::MemoryPermission::kReadExecute));
+ }
+}
+
+void MemoryChunk::SetReadAndWritable() {
+ DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+ // Incrementing the write_unprotect_counter_ and changing the page
+ // protection mode has to be atomic.
+ base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+ write_unprotect_counter_++;
+ DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+ if (write_unprotect_counter_ == 1) {
+ Address unprotect_start =
+ address() + MemoryAllocator::CodePageAreaStartOffset();
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAddressAligned(unprotect_start, page_size));
+ size_t unprotect_size = RoundUp(area_size(), page_size);
+ CHECK(base::OS::SetPermissions(unprotect_start, unprotect_size,
+ base::OS::MemoryPermission::kReadWrite));
+ }
+}
+
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
@@ -554,7 +595,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
- chunk->mutex_ = new base::RecursiveMutex();
+ chunk->page_protection_change_mutex_ = new base::Mutex();
+ chunk->write_unprotect_counter_ = 0;
+ chunk->mutex_ = new base::Mutex();
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
@@ -568,6 +611,17 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
+ if (heap->write_protect_code_memory()) {
+ chunk->write_unprotect_counter_ =
+ heap->code_space_memory_modification_scope_depth();
+ } else {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAddressAligned(area_start, page_size));
+ size_t area_size = RoundUp(area_end - area_start, page_size);
+ CHECK(base::OS::SetPermissions(
+ area_start, area_size,
+ base::OS::MemoryPermission::kReadWriteExecute));
+ }
}
if (reservation != nullptr) {
@@ -641,56 +695,6 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
- size_t guard_size =
- IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
- size_t header_size = area_start() - address() - guard_size;
- size_t commit_size =
- ::RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
- size_t committed_size = ::RoundUp(header_size + (area_end() - area_start()),
- MemoryAllocator::GetCommitPageSize());
-
- if (commit_size > committed_size) {
- // Commit size should be less or equal than the reserved size.
- DCHECK(commit_size <= size() - 2 * guard_size);
- // Append the committed area.
- Address start = address() + committed_size + guard_size;
- size_t length = commit_size - committed_size;
- if (reservation_.IsReserved()) {
- Executability executable =
- IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- if (!heap()->memory_allocator()->CommitMemory(start, length,
- executable)) {
- return false;
- }
- } else {
- CodeRange* code_range = heap_->memory_allocator()->code_range();
- DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->CommitRawMemory(start, length)) return false;
- }
-
- if (Heap::ShouldZapGarbage()) {
- heap_->memory_allocator()->ZapBlock(start, length);
- }
- } else if (commit_size < committed_size) {
- DCHECK_LT(0, commit_size);
- // Shrink the committed area.
- size_t length = committed_size - commit_size;
- Address start = address() + committed_size + guard_size - length;
- if (reservation_.IsReserved()) {
- if (!reservation_.Uncommit(start, length)) return false;
- } else {
- CodeRange* code_range = heap_->memory_allocator()->code_range();
- DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
- if (!code_range->UncommitRawMemory(start, length)) return false;
- }
- }
-
- area_end_ = area_start_ + requested;
- return true;
-}
-
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
@@ -712,8 +716,8 @@ void MemoryChunk::Unlink() {
MemoryChunk* prev_element = prev_chunk();
next_element->set_prev_chunk(prev_element);
prev_element->set_next_chunk(next_element);
- set_prev_chunk(NULL);
- set_next_chunk(NULL);
+ set_prev_chunk(nullptr);
+ set_next_chunk(nullptr);
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
@@ -761,15 +765,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
//
if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- GetCommitPageSize()) +
- CodePageGuardSize();
+ chunk_size = ::RoundUp(
+ CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
+ GetCommitPageSize());
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
- // Allocate executable memory either from code range or from the
- // OS.
+// Allocate executable memory either from code range or from the OS.
#ifdef V8_TARGET_ARCH_MIPS64
// Use code range only for large object space on mips64 to keep address
// range within 256-MB memory region.
@@ -781,7 +784,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
DCHECK(
IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
- if (base == NULL) return NULL;
+ if (base == nullptr) return nullptr;
size_.Increment(chunk_size);
// Update executable memory size.
size_executable_.Increment(chunk_size);
@@ -789,7 +792,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
address_hint, &reservation);
- if (base == NULL) return NULL;
+ if (base == nullptr) return nullptr;
// Update executable memory size.
size_executable_.Increment(reservation.size());
}
@@ -811,7 +814,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
- if (base == NULL) return NULL;
+ if (base == nullptr) return nullptr;
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
@@ -949,16 +952,19 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
chunk->size_ -= bytes_to_free;
chunk->area_end_ = new_area_end;
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ // Add guard page at the end.
+ size_t page_size = GetCommitPageSize();
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
- static_cast<uintptr_t>(GetCommitPageSize()));
+ static_cast<uintptr_t>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + CodePageGuardSize());
- reservation->Guard(chunk->area_end_);
+ reservation->SetPermissions(chunk->area_end_, page_size,
+ base::OS::MemoryPermission::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
// partially starting at |start_free| will also release the potentially
// unused part behind the current page.
- const size_t released_bytes = reservation->ReleasePartial(start_free);
+ const size_t released_bytes = reservation->Release(start_free);
DCHECK_GE(size_.Value(), released_bytes);
size_.Decrement(released_bytes);
isolate_->counters()->memory_allocated()->Decrement(
@@ -1105,7 +1111,9 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!base::OS::UncommitRegion(start, size)) return false;
+ if (!base::OS::SetPermissions(start, size,
+ base::OS::MemoryPermission::kNoAccess))
+ return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -1132,9 +1140,7 @@ size_t MemoryAllocator::CodePageGuardStartOffset() {
return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
}
-size_t MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(GetCommitPageSize());
-}
+size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }
size_t MemoryAllocator::CodePageAreaStartOffset() {
// We are guarding code pages: the first OS page after the header
@@ -1160,27 +1166,40 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
size_t reserved_size) {
- // Commit page header (not executable).
- Address header = start;
- size_t header_size = CodePageGuardStartOffset();
- if (vm->Commit(header, header_size, false)) {
- // Create guard page after the header.
- if (vm->Guard(start + CodePageGuardStartOffset())) {
- // Commit page body (executable).
- Address body = start + CodePageAreaStartOffset();
- size_t body_size = commit_size - CodePageGuardStartOffset();
- if (vm->Commit(body, body_size, true)) {
- // Create guard page before the end.
- if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
- UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
- commit_size -
- CodePageGuardStartOffset());
+ const size_t page_size = GetCommitPageSize();
+ // All addresses and sizes must be aligned to the commit page size.
+ DCHECK(IsAddressAligned(start, page_size));
+ DCHECK_EQ(0, commit_size % page_size);
+ DCHECK_EQ(0, reserved_size % page_size);
+ const size_t guard_size = CodePageGuardSize();
+ const size_t pre_guard_offset = CodePageGuardStartOffset();
+ const size_t code_area_offset = CodePageAreaStartOffset();
+ // reserved_size includes two guard regions, commit_size does not.
+ DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
+ const Address pre_guard_page = start + pre_guard_offset;
+ const Address code_area = start + code_area_offset;
+ const Address post_guard_page = start + reserved_size - guard_size;
+ // Commit the non-executable header, from start to pre-code guard page.
+ if (vm->SetPermissions(start, pre_guard_offset,
+ base::OS::MemoryPermission::kReadWrite)) {
+ // Create the pre-code guard page, following the header.
+ if (vm->SetPermissions(pre_guard_page, page_size,
+ base::OS::MemoryPermission::kNoAccess)) {
+ // Commit the executable code body.
+ if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
+ base::OS::MemoryPermission::kReadWrite)) {
+ // Create the post-code guard page.
+ if (vm->SetPermissions(post_guard_page, page_size,
+ base::OS::MemoryPermission::kNoAccess)) {
+ UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
- vm->Uncommit(body, body_size);
+ vm->SetPermissions(code_area, commit_size,
+ base::OS::MemoryPermission::kNoAccess);
}
}
- vm->Uncommit(header, header_size);
+ vm->SetPermissions(start, pre_guard_offset,
+ base::OS::MemoryPermission::kNoAccess);
}
return false;
}
@@ -1202,6 +1221,10 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete mutex_;
mutex_ = nullptr;
}
+ if (page_protection_change_mutex_ != nullptr) {
+ delete page_protection_change_mutex_;
+ page_protection_change_mutex_ = nullptr;
+ }
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -1379,7 +1402,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
: Space(heap, space, executable),
anchor_(this),
free_list_(this),
- locked_page_(nullptr),
top_on_previous_step_(0) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -1416,7 +1438,7 @@ void PagedSpace::RefillFreeList() {
size_t added = 0;
{
Page* p = nullptr;
- while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
+ while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -1702,7 +1724,7 @@ void PagedSpace::EmptyAllocationInfo() {
nullptr, 0);
top_on_previous_step_ = 0;
}
- SetTopAndLimit(NULL, NULL);
+ SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
Free(current_top, current_limit - current_top);
}
@@ -1722,8 +1744,8 @@ void PagedSpace::ReleasePage(Page* page) {
}
// If page is still in a list, unlink it from that list.
- if (page->next_chunk() != NULL) {
- DCHECK(page->prev_chunk() != NULL);
+ if (page->next_chunk() != nullptr) {
+ DCHECK_NOT_NULL(page->prev_chunk());
page->Unlink();
}
AccountUncommitted(page->size());
@@ -1731,6 +1753,20 @@ void PagedSpace::ReleasePage(Page* page) {
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
+void PagedSpace::SetReadAndExecutable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ page->SetReadAndExecutable();
+ }
+}
+
+void PagedSpace::SetReadAndWritable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ page->SetReadAndWritable();
+ }
+}
+
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}
@@ -1752,7 +1788,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ for (HeapObject* object = it.Next(); object != nullptr;
+ object = it.Next()) {
CHECK(end_of_previous_object <= object->address());
// The first word should be a map, and we expect all map pointers to
@@ -1791,7 +1828,8 @@ void PagedSpace::VerifyLiveBytes() {
CHECK(page->SweepingDone());
HeapObjectIterator it(page);
int black_size = 0;
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ for (HeapObject* object = it.Next(); object != nullptr;
+ object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
black_size += object->Size();
@@ -1811,7 +1849,8 @@ void PagedSpace::VerifyCountersAfterSweeping() {
total_capacity += page->area_size();
HeapObjectIterator it(page);
size_t real_allocated = 0;
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ for (HeapObject* object = it.Next(); object != nullptr;
+ object = it.Next()) {
if (!object->IsFiller()) {
real_allocated += object->Size();
}
@@ -1883,11 +1922,11 @@ bool NewSpace::SetUp(size_t initial_semispace_capacity,
void NewSpace::TearDown() {
if (allocated_histogram_) {
DeleteArray(allocated_histogram_);
- allocated_histogram_ = NULL;
+ allocated_histogram_ = nullptr;
}
if (promoted_histogram_) {
DeleteArray(promoted_histogram_);
- promoted_histogram_ = NULL;
+ promoted_histogram_ = nullptr;
}
allocation_info_.Reset(nullptr, nullptr);
@@ -2176,7 +2215,7 @@ void NewSpace::ResumeAllocationObservers() {
// TODO(ofrobots): refactor into SpaceWithLinearArea
void PagedSpace::ResumeAllocationObservers() {
- DCHECK(top_on_previous_step_ == 0);
+ DCHECK_NULL(top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
@@ -2333,7 +2372,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
- DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+ DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
@@ -2377,7 +2416,7 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+ DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;
@@ -2564,7 +2603,7 @@ static int CollectHistogramInfo(HeapObject* obj) {
Isolate* isolate = obj->GetIsolate();
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
- DCHECK(isolate->heap_histograms()[type].name() != NULL);
+ DCHECK_NOT_NULL(isolate->heap_histograms()[type].name());
isolate->heap_histograms()[type].increment_number(1);
isolate->heap_histograms()[type].increment_bytes(obj->Size());
@@ -2624,7 +2663,7 @@ void NewSpace::ClearHistograms() {
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next())
RecordAllocation(obj);
}
@@ -2784,9 +2823,9 @@ void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
void FreeListCategory::RepairFreeList(Heap* heap) {
FreeSpace* n = top();
- while (n != NULL) {
+ while (n != nullptr) {
Map** map_location = reinterpret_cast<Map**>(n->address());
- if (*map_location == NULL) {
+ if (*map_location == nullptr) {
*map_location = heap->free_space_map();
} else {
DCHECK(*map_location == heap->free_space_map());
@@ -3063,7 +3102,7 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace* cur = top();
- while (cur != NULL) {
+ while (cur != nullptr) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
sum += cur->relaxed_read_size();
cur = cur->next();
@@ -3074,7 +3113,7 @@ size_t FreeListCategory::SumFreeList() {
int FreeListCategory::FreeListLength() {
int length = 0;
FreeSpace* cur = top();
- while (cur != NULL) {
+ while (cur != nullptr) {
length++;
cur = cur->next();
if (length == kVeryLongFreeList) return length;
@@ -3125,10 +3164,9 @@ size_t PagedSpace::SizeOfObjects() {
return Size() - (limit() - top());
}
-
// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterDeserialization() {
free_list_.RepairLists(heap());
@@ -3163,8 +3201,9 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- collector->SweepAndRefill(this);
+ if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
+ collector->sweeper()->ParallelSweepSpace(identity(), 0);
+ RefillFreeList();
return free_list_.Allocate(size_in_bytes);
}
return false;
@@ -3190,7 +3229,7 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_local() &&
- !collector->sweeper().AreSweeperTasksRunning()) {
+ !collector->sweeper()->AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
@@ -3201,15 +3240,8 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Retry the free list allocation.
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
- if (locked_page_ != nullptr) {
- DCHECK_EQ(locked_page_->owner()->identity(), identity());
- collector->sweeper().ParallelSweepPage(locked_page_, identity());
- locked_page_ = nullptr;
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
- }
-
// If sweeping is still in progress try to sweep pages.
- int max_freed = collector->sweeper().ParallelSweepSpace(
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
@@ -3248,7 +3280,7 @@ void PagedSpace::ReportStatistics() {
heap()->mark_compact_collector()->EnsureSweepingCompleted();
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
+ for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next())
CollectHistogramInfo(obj);
ReportHistogram(heap()->isolate(), true);
}
@@ -3293,7 +3325,7 @@ LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
HeapObject* LargeObjectIterator::Next() {
- if (current_ == NULL) return NULL;
+ if (current_ == nullptr) return nullptr;
HeapObject* object = current_->GetObject();
current_ = current_->next_page();
@@ -3319,7 +3351,7 @@ bool LargeObjectSpace::SetUp() {
}
void LargeObjectSpace::TearDown() {
- while (first_page_ != NULL) {
+ while (first_page_ != nullptr) {
LargePage* page = first_page_;
first_page_ = first_page_->next_page();
LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
@@ -3340,7 +3372,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
- if (page == NULL) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Retry(identity());
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
@@ -3388,7 +3420,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
// GC support
Object* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
- if (page != NULL) {
+ if (page != nullptr) {
return page->GetObject();
}
return Smi::kZero; // Signaling not found.
@@ -3417,7 +3449,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
@@ -3523,7 +3555,7 @@ std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
- for (LargePage* chunk = first_page_; chunk != NULL;
+ for (LargePage* chunk = first_page_; chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
@@ -3588,7 +3620,7 @@ void LargeObjectSpace::Verify() {
void LargeObjectSpace::Print() {
OFStream os(stdout);
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
obj->Print(os);
}
}
@@ -3599,7 +3631,7 @@ void LargeObjectSpace::ReportStatistics() {
int num_objects = 0;
ClearHistograms(heap()->isolate());
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
num_objects++;
CollectHistogramInfo(obj);
}
@@ -3619,7 +3651,7 @@ void Page::Print() {
printf(" --------------------------------------\n");
HeapObjectIterator objects(this);
unsigned mark_size = 0;
- for (HeapObject* object = objects.Next(); object != NULL;
+ for (HeapObject* object = objects.Next(); object != nullptr;
object = objects.Next()) {
bool is_marked =
heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index d386d11425..3fb3c39496 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -364,8 +364,10 @@ class MemoryChunk {
+ kPointerSize // InvalidatedSlots* invalidated_slots_
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // AtomicValue high_water_mark_
- + kPointerSize // base::RecursiveMutex* mutex_
+ + kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
+ + kPointerSize // base::Mutex* page_protection_change_mutex_
 + + kPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kPointerSize // AtomicValue next_chunk_
@@ -398,6 +400,10 @@ class MemoryChunk {
static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
+ // Maximum number of nested code memory modification scopes.
+ // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
+ static const int kMaxWriteUnprotectCounter = 4;
+
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -425,7 +431,7 @@ class MemoryChunk {
return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
}
- base::RecursiveMutex* mutex() { return mutex_; }
+ base::Mutex* mutex() { return mutex_; }
bool Contains(Address addr) {
return addr >= area_start() && addr < area_end();
@@ -456,6 +462,12 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+ template <RememberedSetType type>
+ bool ContainsSlots() {
+ return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
+ invalidated_slots() != nullptr;
+ }
+
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
if (access_mode == AccessMode::ATOMIC)
@@ -498,8 +510,6 @@ class MemoryChunk {
Address area_end() { return area_end_; }
size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
- bool CommitArea(size_t requested);
-
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory();
@@ -627,6 +637,9 @@ class MemoryChunk {
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
+ void SetReadAndExecutable();
+ void SetReadAndWritable();
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -675,10 +688,26 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
base::AtomicValue<intptr_t> high_water_mark_;
- base::RecursiveMutex* mutex_;
+ base::Mutex* mutex_;
base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
+ base::Mutex* page_protection_change_mutex_;
+
+ // This field is only relevant for code pages. It depicts the number of
+ // times a component requested this page to be read+writeable. The
+ // counter is decremented when a component resets to read+executable.
+ // If Value() == 0 => The memory is read and executable.
+ // If Value() >= 1 => The Memory is read and writable (and maybe executable).
+ // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
+ // excessive nesting of scopes.
+ // All executable MemoryChunks are allocated rw based on the assumption that
 + // they will be used immediately for an allocation. They are initialized
+ // with the number of open CodeSpaceMemoryModificationScopes. The caller
+ // that triggers the page allocation is responsible for decrementing the
+ // counter.
+ uintptr_t write_unprotect_counter_;
+
// Byte allocated on the page, which includes all objects on the page
// and the linear allocation area.
size_t allocated_bytes_;
@@ -703,6 +732,7 @@ class MemoryChunk {
friend class ConcurrentMarkingState;
friend class IncrementalMarkingState;
friend class MajorAtomicMarkingState;
+ friend class MajorMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
friend class MemoryChunkValidator;
@@ -1004,7 +1034,7 @@ class CodeRange {
public:
explicit CodeRange(Isolate* isolate);
~CodeRange() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Release();
+ if (virtual_memory_.IsReserved()) virtual_memory_.Free();
}
// Reserves a range of virtual memory, but does not commit any of it.
@@ -1124,7 +1154,7 @@ class SkipList {
static void Update(Address addr, int size) {
Page* page = Page::FromAddress(addr);
SkipList* list = page->skip_list();
- if (list == NULL) {
+ if (list == nullptr) {
list = new SkipList();
page->set_skip_list(list);
}
@@ -1363,19 +1393,19 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
size_t bytes_to_free, Address new_area_end);
// Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not NULL, the size is greater than zero, and that the
+ // the address is not nullptr, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
bool CommitBlock(Address start, size_t size, Executability executable);
// Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not NULL, the size is greater than zero, and the
+ // start is not nullptr, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
bool UncommitBlock(Address start, size_t size);
// Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-NULL bit pattern.
+ // filling it up with a recognizable non-nullptr bit pattern.
void ZapBlock(Address start, size_t size);
MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
@@ -1565,13 +1595,13 @@ class AllocationInfo {
}
INLINE(void set_top(Address top)) {
- SLOW_DCHECK(top == NULL ||
+ SLOW_DCHECK(top == nullptr ||
(reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
top_ = top;
}
INLINE(Address top()) const {
- SLOW_DCHECK(top_ == NULL ||
+ SLOW_DCHECK(top_ == nullptr ||
(reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
return top_;
}
@@ -2115,6 +2145,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
+ void SetReadAndExecutable();
+ void SetReadAndWritable();
+
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
@@ -2179,11 +2212,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
- // Sets the page that is currently locked by the task using the space. This
- // page will be preferred for sweeping to avoid a potential deadlock where
- // multiple tasks hold locks on pages while trying to sweep each others pages.
- void AnnounceLockedPage(Page* page) { locked_page_ = page; }
-
Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
void SetAllocationInfo(Address top, Address limit);
@@ -2260,7 +2288,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
- Page* locked_page_;
Address top_on_previous_step_;
friend class IncrementalMarking;
@@ -2926,7 +2953,7 @@ class LargeObjectSpace : public Space {
// Takes the chunk_map_mutex_ and calls FindPage after that.
LargePage* FindPageThreadSafe(Address a);
- // Finds a large object page containing the given address, returns NULL
+ // Finds a large object page containing the given address, returns nullptr
// if such a page doesn't exist.
LargePage* FindPage(Address a);
@@ -2947,7 +2974,7 @@ class LargeObjectSpace : public Space {
bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
// Checks whether the space is empty.
- bool IsEmpty() { return first_page_ == NULL; }
+ bool IsEmpty() { return first_page_ == nullptr; }
LargePage* first_page() { return first_page_; }
@@ -3000,7 +3027,7 @@ class MemoryChunkIterator BASE_EMBEDDED {
public:
inline explicit MemoryChunkIterator(Heap* heap);
- // Return NULL when the iterator is done.
+ // Return nullptr when the iterator is done.
inline MemoryChunk* next();
private:
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index ccefd1a058..4613b705fa 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -56,9 +56,9 @@ void StoreBuffer::SetUp() {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask);
}
- if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
- kStoreBufferSize * kStoreBuffers,
- false)) { // Not executable.
+ if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
+ kStoreBufferSize * kStoreBuffers,
+ base::OS::MemoryPermission::kReadWrite)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
@@ -68,7 +68,7 @@ void StoreBuffer::SetUp() {
void StoreBuffer::TearDown() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Release();
+ if (virtual_memory_.IsReserved()) virtual_memory_.Free();
top_ = nullptr;
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
new file mode 100644
index 0000000000..17375aad97
--- /dev/null
+++ b/deps/v8/src/heap/sweeper.cc
@@ -0,0 +1,498 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/sweeper.h"
+
+#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/remembered-set.h"
+#include "src/objects-inl.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
+ : sweeper_(sweeper) {
+ sweeper_->stop_sweeper_tasks_.SetValue(true);
+ if (!sweeper_->sweeping_in_progress()) return;
+
+ sweeper_->AbortAndWaitForTasks();
+
+ // Complete sweeping if there's nothing more to do.
+ if (sweeper_->IsDoneSweeping()) {
+ sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
+ DCHECK(!sweeper_->sweeping_in_progress());
+ } else {
+ // Unless sweeping is complete the flag still indicates that the sweeper
+ // is enabled. It just cannot use tasks anymore.
+ DCHECK(sweeper_->sweeping_in_progress());
+ }
+}
+
+Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
+ sweeper_->stop_sweeper_tasks_.SetValue(false);
+ if (!sweeper_->sweeping_in_progress()) return;
+
+ sweeper_->StartSweeperTasks();
+}
+
+Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
+ Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
+ : sweeper_(sweeper),
+ pause_or_complete_scope_(pause_or_complete_scope),
+ sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
+ USE(pause_or_complete_scope_);
+ if (!sweeping_in_progress_) return;
+
+ old_space_sweeping_list_ = std::move(sweeper_->sweeping_list_[OLD_SPACE]);
+ sweeper_->sweeping_list_[OLD_SPACE].clear();
+}
+
+Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
+ DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
+ if (!sweeping_in_progress_) return;
+
+ sweeper_->sweeping_list_[OLD_SPACE] = std::move(old_space_sweeping_list_);
+ // old_space_sweeping_list_ does not need to be cleared as we don't use it.
+}
+
+class Sweeper::SweeperTask final : public CancelableTask {
+ public:
+ SweeperTask(Isolate* isolate, Sweeper* sweeper,
+ base::Semaphore* pending_sweeper_tasks,
+ base::AtomicNumber<intptr_t>* num_sweeping_tasks,
+ AllocationSpace space_to_start)
+ : CancelableTask(isolate),
+ sweeper_(sweeper),
+ pending_sweeper_tasks_(pending_sweeper_tasks),
+ num_sweeping_tasks_(num_sweeping_tasks),
+ space_to_start_(space_to_start) {}
+
+ virtual ~SweeperTask() {}
+
+ private:
+ void RunInternal() final {
+ DCHECK_GE(space_to_start_, FIRST_SPACE);
+ DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
+ const int offset = space_to_start_ - FIRST_SPACE;
+ const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
+ for (int i = 0; i < num_spaces; i++) {
+ const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+ // Do not sweep code space concurrently.
+ if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
+ DCHECK_GE(space_id, FIRST_SPACE);
+ DCHECK_LE(space_id, LAST_PAGED_SPACE);
+ sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
+ }
+ num_sweeping_tasks_->Decrement(1);
+ pending_sweeper_tasks_->Signal();
+ }
+
+ Sweeper* const sweeper_;
+ base::Semaphore* const pending_sweeper_tasks_;
+ base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
+ AllocationSpace space_to_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
+
+class Sweeper::IncrementalSweeperTask final : public CancelableTask {
+ public:
+ IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
+ : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
+
+ virtual ~IncrementalSweeperTask() {}
+
+ private:
+ void RunInternal() final {
+ VMState<GC> state(isolate_);
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
+
+ sweeper_->incremental_sweeper_pending_ = false;
+
+ if (sweeper_->sweeping_in_progress()) {
+ if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
+ sweeper_->ScheduleIncrementalSweepingTask();
+ }
+ }
+ }
+
+ Isolate* const isolate_;
+ Sweeper* const sweeper_;
+ DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
+};
+
+void Sweeper::StartSweeping() {
+ CHECK(!stop_sweeper_tasks_.Value());
+ sweeping_in_progress_ = true;
+ MajorNonAtomicMarkingState* marking_state =
+ heap_->mark_compact_collector()->non_atomic_marking_state();
+ ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
+ std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
+ [marking_state](Page* a, Page* b) {
+ return marking_state->live_bytes(a) <
+ marking_state->live_bytes(b);
+ });
+ });
+}
+
+void Sweeper::StartSweeperTasks() {
+ DCHECK_EQ(0, num_tasks_);
+ DCHECK_EQ(0, num_sweeping_tasks_.Value());
+ if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
+ !heap_->delay_sweeper_tasks_for_testing_) {
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (space == NEW_SPACE) return;
+ num_sweeping_tasks_.Increment(1);
+ SweeperTask* task = new SweeperTask(heap_->isolate(), this,
+ &pending_sweeper_tasks_semaphore_,
+ &num_sweeping_tasks_, space);
+ DCHECK_LT(num_tasks_, kMaxSweeperTasks);
+ task_ids_[num_tasks_++] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ });
+ ScheduleIncrementalSweepingTask();
+ }
+}
+
+void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
+ if (!page->SweepingDone()) {
+ ParallelSweepPage(page, page->owner()->identity());
+ if (!page->SweepingDone()) {
+ // We were not able to sweep that page, i.e., a concurrent
+ // sweeper thread currently owns this page. Wait for the sweeper
+ // thread to be done with this page.
+ page->WaitUntilSweepingCompleted();
+ }
+ }
+}
+
+Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ SweptList& list = swept_list_[space->identity()];
+ if (!list.empty()) {
+ auto last_page = list.back();
+ list.pop_back();
+ return last_page;
+ }
+ return nullptr;
+}
+
+void Sweeper::AbortAndWaitForTasks() {
+ if (!FLAG_concurrent_sweeping) return;
+
+ for (int i = 0; i < num_tasks_; i++) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_sweeper_tasks_semaphore_.Wait();
+ } else {
+ // Aborted case.
+ num_sweeping_tasks_.Decrement(1);
+ }
+ }
+ num_tasks_ = 0;
+ DCHECK_EQ(0, num_sweeping_tasks_.Value());
+}
+
+void Sweeper::EnsureCompleted() {
+ if (!sweeping_in_progress_) return;
+
+ // If sweeping is not completed or not running at all, we try to complete it
+ // here.
+ ForAllSweepingSpaces(
+ [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
+
+ AbortAndWaitForTasks();
+
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (space == NEW_SPACE) {
+ swept_list_[NEW_SPACE].clear();
+ }
+ DCHECK(sweeping_list_[space].empty());
+ });
+ sweeping_in_progress_ = false;
+}
+
+void Sweeper::EnsureNewSpaceCompleted() {
+ if (!sweeping_in_progress_) return;
+ if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
+ for (Page* p : *heap_->new_space()) {
+ SweepOrWaitUntilSweepingCompleted(p);
+ }
+ }
+}
+
+bool Sweeper::AreSweeperTasksRunning() {
+ return num_sweeping_tasks_.Value() != 0;
+}
+
+int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode) {
+ Space* space = p->owner();
+ DCHECK_NOT_NULL(space);
+ DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
+ space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
+ DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+
+ // TODO(ulan): we don't have to clear type old-to-old slots in code space
+ // because the concurrent marker doesn't mark code objects. This requires
+ // the write barrier for code objects to check the color of the code object.
+ bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
+ p->typed_slot_set<OLD_TO_OLD>() != nullptr;
+
+ // The free ranges map is used for filtering typed slots.
+ std::map<uint32_t, uint32_t> free_ranges;
+
+ // Before we sweep objects on the page, we free dead array buffers which
+ // requires valid mark bits.
+ ArrayBufferTracker::FreeDead(p, marking_state_);
+
+ Address free_start = p->area_start();
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
+
+ // If we use the skip list for code space pages, we have to lock the skip
+ // list because it could be accessed concurrently by the runtime or the
+ // deoptimizer.
+ const bool rebuild_skip_list =
+ space->identity() == CODE_SPACE && p->skip_list() != nullptr;
+ SkipList* skip_list = p->skip_list();
+ if (rebuild_skip_list) {
+ skip_list->Clear();
+ }
+
+ intptr_t live_bytes = 0;
+ intptr_t freed_bytes = 0;
+ intptr_t max_freed_bytes = 0;
+ int curr_region = -1;
+
+ // Set the allocated_bytes counter to area_size. The free operations below
+ // will decrease the counter to actual live bytes.
+ p->ResetAllocatedBytes();
+
+ for (auto object_and_size :
+ LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(marking_state_->IsBlack(object));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ CHECK_GT(free_end, free_start);
+ size_t size = static_cast<size_t>(free_end - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
+ }
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
+ free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ } else {
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+ RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ if (non_empty_typed_slots) {
+ free_ranges.insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - p->address()),
+ static_cast<uint32_t>(free_end - p->address())));
+ }
+ }
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ live_bytes += size;
+ if (rebuild_skip_list) {
+ int new_region_start = SkipList::RegionNumber(free_end);
+ int new_region_end =
+ SkipList::RegionNumber(free_end + size - kPointerSize);
+ if (new_region_start != curr_region || new_region_end != curr_region) {
+ skip_list->AddObject(free_end, size);
+ curr_region = new_region_end;
+ }
+ }
+ free_start = free_end + size;
+ }
+
+ if (free_start != p->area_end()) {
+ CHECK_GT(p->area_end(), free_start);
+ size_t size = static_cast<size_t>(p->area_end() - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
+ }
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
+ free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ } else {
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+
+ RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ if (non_empty_typed_slots) {
+ free_ranges.insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - p->address()),
+ static_cast<uint32_t>(p->area_end() - p->address())));
+ }
+ }
+
+ // Clear invalid typed slots after collection all free ranges.
+ if (!free_ranges.empty()) {
+ TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
+ if (old_to_new != nullptr) {
+ old_to_new->RemoveInvaldSlots(free_ranges);
+ }
+ TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
+ if (old_to_old != nullptr) {
+ old_to_old->RemoveInvaldSlots(free_ranges);
+ }
+ }
+
+ marking_state_->bitmap(p)->Clear();
+ if (free_list_mode == IGNORE_FREE_LIST) {
+ marking_state_->SetLiveBytes(p, 0);
+ // We did not free memory, so have to adjust allocated bytes here.
+ intptr_t freed_bytes = p->area_size() - live_bytes;
+ p->DecreaseAllocatedBytes(freed_bytes);
+ } else {
+ // Keep the old live bytes counter of the page until RefillFreeList, where
+ // the space size is refined.
+ // The allocated_bytes() counter is precisely the total size of objects.
+ DCHECK_EQ(live_bytes, p->allocated_bytes());
+ }
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+ if (free_list_mode == IGNORE_FREE_LIST) return 0;
+ return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
+}
+
+void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
+ Page* page = nullptr;
+ while (!stop_sweeper_tasks_.Value() &&
+ ((page = GetSweepingPageSafe(identity)) != nullptr)) {
+ ParallelSweepPage(page, identity);
+ }
+}
+
+bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
+ if (Page* page = GetSweepingPageSafe(identity)) {
+ ParallelSweepPage(page, identity);
+ }
+ return sweeping_list_[identity].empty();
+}
+
+int Sweeper::ParallelSweepSpace(AllocationSpace identity,
+ int required_freed_bytes, int max_pages) {
+ int max_freed = 0;
+ int pages_freed = 0;
+ Page* page = nullptr;
+ while ((page = GetSweepingPageSafe(identity)) != nullptr) {
+ int freed = ParallelSweepPage(page, identity);
+ pages_freed += 1;
+ DCHECK_GE(freed, 0);
+ max_freed = Max(max_freed, freed);
+ if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
+ return max_freed;
+ if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
+ }
+ return max_freed;
+}
+
+int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
+ // Early bailout for pages that are swept outside of the regular sweeping
+ // path. This check here avoids taking the lock first, avoiding deadlocks.
+ if (page->SweepingDone()) return 0;
+
+ int max_freed = 0;
+ {
+ base::LockGuard<base::Mutex> guard(page->mutex());
+ // If this page was already swept in the meantime, we can return here.
+ if (page->SweepingDone()) return 0;
+
+ // If the page is a code page, the CodePageMemoryModificationScope changes
+ // the page protection mode from rx -> rw while sweeping.
+ CodePageMemoryModificationScope code_page_scope(page);
+
+ DCHECK_EQ(Page::kSweepingPending,
+ page->concurrent_sweeping_state().Value());
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
+ const FreeSpaceTreatmentMode free_space_mode =
+ Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
+ if (identity == NEW_SPACE) {
+ RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
+ } else {
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
+ }
+ DCHECK(page->SweepingDone());
+
+ // After finishing sweeping of a page we clean up its remembered set.
+ TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
+ if (typed_slot_set) {
+ typed_slot_set->FreeToBeFreedChunks();
+ }
+ SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
+ if (slot_set) {
+ slot_set->FreeToBeFreedBuckets();
+ }
+ }
+
+ {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ swept_list_[identity].push_back(page);
+ }
+ return max_freed;
+}
+
+void Sweeper::ScheduleIncrementalSweepingTask() {
+ if (!incremental_sweeper_pending_) {
+ incremental_sweeper_pending_ = true;
+ IncrementalSweeperTask* task =
+ new IncrementalSweeperTask(heap_->isolate(), this);
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
+ V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
+ }
+}
+
+void Sweeper::AddPage(AllocationSpace space, Page* page,
+ Sweeper::AddPageMode mode) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
+ if (mode == Sweeper::REGULAR) {
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+ PrepareToBeSweptPage(space, page);
+ } else {
+ // Page has been temporarily removed from the sweeper. Accounting already
+ // happened when the page was initially added, so it is skipped here.
+ DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
+ }
+ DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
+ sweeping_list_[space].push_back(page);
+}
+
+void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ DCHECK_GE(page->area_size(),
+ static_cast<size_t>(marking_state_->live_bytes(page)));
+ if (space != NEW_SPACE) {
+ heap_->paged_space(space)->IncreaseAllocatedBytes(
+ marking_state_->live_bytes(page), page);
+ }
+}
+
+Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ Page* page = nullptr;
+ if (!sweeping_list_[space].empty()) {
+ page = sweeping_list_[space].front();
+ sweeping_list_[space].pop_front();
+ }
+ return page;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
new file mode 100644
index 0000000000..9a8eef3115
--- /dev/null
+++ b/deps/v8/src/heap/sweeper.h
@@ -0,0 +1,167 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SWEEPER_H_
+#define V8_HEAP_SWEEPER_H_
+
+#include <deque>
+#include <vector>
+
+#include "src/base/platform/semaphore.h"
+#include "src/cancelable-task.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class MajorNonAtomicMarkingState;
+class Page;
+class PagedSpace;
+
+enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+
+class Sweeper {
+ public:
+ typedef std::deque<Page*> SweepingList;
+ typedef std::vector<Page*> SweptList;
+
+ // Pauses the sweeper tasks or completes sweeping.
+ class PauseOrCompleteScope final {
+ public:
+ explicit PauseOrCompleteScope(Sweeper* sweeper);
+ ~PauseOrCompleteScope();
+
+ private:
+ Sweeper* const sweeper_;
+ };
+
+ // Temporary filters old space sweeping lists. Requires the concurrent
+ // sweeper to be paused. Allows for pages to be added to the sweeper while
+ // in this scope. Note that the original list of sweeping pages is restored
+ // after exiting this scope.
+ class FilterSweepingPagesScope final {
+ public:
+ explicit FilterSweepingPagesScope(
+ Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
+ ~FilterSweepingPagesScope();
+
+ template <typename Callback>
+ void FilterOldSpaceSweepingPages(Callback callback) {
+ if (!sweeping_in_progress_) return;
+
+ SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
+ // Iteration here is from most free space to least free space.
+ for (auto it = old_space_sweeping_list_.begin();
+ it != old_space_sweeping_list_.end(); it++) {
+ if (callback(*it)) {
+ sweeper_list->push_back(*it);
+ }
+ }
+ }
+
+ private:
+ Sweeper* const sweeper_;
+ SweepingList old_space_sweeping_list_;
+ const PauseOrCompleteScope& pause_or_complete_scope_;
+ bool sweeping_in_progress_;
+ };
+
+ enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
+ enum ClearOldToNewSlotsMode {
+ DO_NOT_CLEAR,
+ CLEAR_REGULAR_SLOTS,
+ CLEAR_TYPED_SLOTS
+ };
+ enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
+
+ Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
+ : heap_(heap),
+ marking_state_(marking_state),
+ num_tasks_(0),
+ pending_sweeper_tasks_semaphore_(0),
+ incremental_sweeper_pending_(false),
+ sweeping_in_progress_(false),
+ num_sweeping_tasks_(0),
+ stop_sweeper_tasks_(false) {}
+
+ bool sweeping_in_progress() const { return sweeping_in_progress_; }
+
+ void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
+
+ int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
+ int max_pages = 0);
+ int ParallelSweepPage(Page* page, AllocationSpace identity);
+
+ void ScheduleIncrementalSweepingTask();
+
+ int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode);
+
+ // After calling this function sweeping is considered to be in progress
+ // and the main thread can sweep lazily, but the background sweeper tasks
+ // are not running yet.
+ void StartSweeping();
+ void StartSweeperTasks();
+ void EnsureCompleted();
+ void EnsureNewSpaceCompleted();
+ bool AreSweeperTasksRunning();
+ void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+ Page* GetSweptPageSafe(PagedSpace* space);
+
+ private:
+ class IncrementalSweeperTask;
+ class SweeperTask;
+
+ static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
+ static const int kMaxSweeperTasks = kAllocationSpaces;
+
+ template <typename Callback>
+ void ForAllSweepingSpaces(Callback callback) {
+ for (int i = 0; i < kAllocationSpaces; i++) {
+ callback(static_cast<AllocationSpace>(i));
+ }
+ }
+
+ // Can only be called on the main thread when no tasks are running.
+ bool IsDoneSweeping() const {
+ for (int i = 0; i < kAllocationSpaces; i++) {
+ if (!sweeping_list_[i].empty()) return false;
+ }
+ return true;
+ }
+
+ void SweepSpaceFromTask(AllocationSpace identity);
+
+ // Sweeps incrementally one page from the given space. Returns true if
+ // there are no more pages to sweep in the given space.
+ bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
+
+ void AbortAndWaitForTasks();
+
+ Page* GetSweepingPageSafe(AllocationSpace space);
+
+ void PrepareToBeSweptPage(AllocationSpace space, Page* page);
+
+ Heap* const heap_;
+ MajorNonAtomicMarkingState* marking_state_;
+ int num_tasks_;
+ CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
+ base::Semaphore pending_sweeper_tasks_semaphore_;
+ base::Mutex mutex_;
+ SweptList swept_list_[kAllocationSpaces];
+ SweepingList sweeping_list_[kAllocationSpaces];
+ bool incremental_sweeper_pending_;
+ bool sweeping_in_progress_;
+ // Counter is actively maintained by the concurrent tasks to avoid querying
+ // the semaphore for maintaining a task counter on the main thread.
+ base::AtomicNumber<intptr_t> num_sweeping_tasks_;
+ // Used by PauseOrCompleteScope to signal early bailout to tasks.
+ base::AtomicValue<bool> stop_sweeper_tasks_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_SWEEPER_H_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 6ef0c25905..ebc9f49dd9 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -56,7 +56,8 @@ static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
- if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
+ if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
+ rmode_ == RelocInfo::JS_TO_WASM_CALL) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
} else if (IsInternalReference(rmode_)) {
@@ -68,14 +69,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
@@ -107,7 +107,7 @@ void RelocInfo::set_target_object(HeapObject* target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
@@ -150,7 +150,7 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
void RelocInfo::WipeOut(Isolate* isolate) {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(isolate, pc_, host_,
@@ -262,15 +262,15 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target);
+ Address constant_pool = code ? code->constant_pool() : nullptr;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
Address Assembler::target_address_from_return_address(Address pc) {
@@ -318,8 +318,8 @@ void Assembler::deserialization_set_target_internal_reference_at(
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK(len_ == 1);
- DCHECK((scale & -4) == 0);
+ DCHECK_EQ(len_, 1);
+ DCHECK_EQ(scale & -4, 0);
// Use SIB with no index register only for base esp.
DCHECK(index != esp || base == esp);
buf_[1] = scale << 6 | index.code() << 3 | base.code();
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index d7fbce907a..99f52031ed 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -175,7 +175,7 @@ void Displacement::init(Label* L, Type type) {
int next = 0;
if (L->is_linked()) {
next = L->pos();
- DCHECK(next > 0); // Displacements must be at positions > 0
+ DCHECK_GT(next, 0); // Displacements must be at positions > 0
}
// Ensure that we _never_ overflow the next field.
DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize));
@@ -186,9 +186,9 @@ void Displacement::init(Label* L, Type type) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::JS_TO_WASM_CALL;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -223,6 +223,18 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
}
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -283,12 +295,6 @@ Operand::Operand(Register index,
}
-bool Operand::is_reg(Register reg) const {
- return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
- && ((buf_[0] & 0x07) == reg.code()); // register codes match.
-}
-
-
bool Operand::is_reg_only() const {
return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
}
@@ -328,7 +334,7 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
-// existing code in it; see CodePatcher::CodePatcher(...).
+// existing code in it.
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size_); // int3
@@ -531,7 +537,7 @@ void Assembler::push(const Operand& src) {
void Assembler::pop(Register dst) {
- DCHECK(reloc_info_writer.last_pc() != NULL);
+ DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
EMIT(0x58 | dst.code());
}
@@ -818,7 +824,7 @@ void Assembler::add(const Operand& dst, Register src) {
void Assembler::add(const Operand& dst, const Immediate& x) {
- DCHECK(reloc_info_writer.last_pc() != NULL);
+ DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
emit_arith(0, dst, x);
}
@@ -1503,14 +1509,14 @@ void Assembler::ud2() {
// to be generated; pos() is the position of the 32bit
// Displacement of the last instruction using the label.
-
-void Assembler::print(Label* L) {
+void Assembler::print(const Label* L) {
if (L->is_unused()) {
PrintF("unused label\n");
} else if (L->is_bound()) {
PrintF("bound label to %d\n", L->pos());
} else if (L->is_linked()) {
- Label l = *L;
+ Label l;
+ l.link_to(L->pos());
PrintF("unbound label");
while (l.is_linked()) {
Displacement disp = disp_at(&l);
@@ -1539,7 +1545,7 @@ void Assembler::bind_to(Label* L, int pos) {
long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
} else {
if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
- DCHECK(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
+ DCHECK_EQ(byte_at(fixup_pos - 1), 0xE9); // jmp expected
}
// Relative address, relative to point after address.
int imm32 = pos - (fixup_pos + sizeof(int32_t));
@@ -1551,7 +1557,7 @@ void Assembler::bind_to(Label* L, int pos) {
int fixup_pos = L->near_link_pos();
int offset_to_next =
static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- DCHECK(offset_to_next <= 0);
+ DCHECK_LE(offset_to_next, 0);
// Relative address, relative to point after address.
int disp = pos - fixup_pos - sizeof(int8_t);
CHECK(0 <= disp && disp <= 127);
@@ -1608,7 +1614,7 @@ void Assembler::call(Label* L) {
if (L->is_bound()) {
const int long_size = 5;
int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
+ DCHECK_LE(offs, 0);
// 1110 1000 #32-bit disp.
EMIT(0xE8);
emit(offs - long_size);
@@ -1631,6 +1637,11 @@ void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
}
}
+void Assembler::wasm_call(Address entry, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xE8);
+ emit(reinterpret_cast<intptr_t>(entry), rmode);
+}
int Assembler::CallSize(const Operand& adr) {
// Call size is 1 (opcode) + adr.len_ (operand).
@@ -1668,7 +1679,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
const int short_size = 2;
const int long_size = 5;
int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
+ DCHECK_LE(offs, 0);
if (is_int8(offs - short_size)) {
// 1110 1011 #8-bit disp.
EMIT(0xEB);
@@ -1700,7 +1711,6 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
}
}
-
void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(!RelocInfo::IsCodeTarget(rmode));
@@ -1735,7 +1745,7 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
const int short_size = 2;
const int long_size = 6;
int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
+ DCHECK_LE(offs, 0);
if (is_int8(offs - short_size)) {
// 0111 tttn #8-bit disp
EMIT(0x70 | cc);
@@ -3248,7 +3258,7 @@ void Assembler::GrowBuffer() {
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
DCHECK(is_uint8(imm8));
- DCHECK((op1 & 0x01) == 0); // should be 8bit operation
+ DCHECK_EQ(op1 & 0x01, 0); // should be 8bit operation
EMIT(op1);
EMIT(op2 | dst.code());
EMIT(imm8);
@@ -3275,7 +3285,7 @@ void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
void Assembler::emit_operand(Register reg, const Operand& adr) {
const unsigned length = adr.len_;
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
// Emit updated ModRM byte containing the given register.
pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
@@ -3347,7 +3357,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index e2d88dc851..83e30df4f5 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -87,22 +87,6 @@ namespace internal {
V(xmm6) \
V(xmm7)
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 8;
-
-// Caller-saved registers
-const RegList kJSCallerSaved =
- 1 << 0 | // eax
- 1 << 1 | // ecx
- 1 << 2 | // edx
- 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
- 1 << 7; // edi - callee function
-
-const int kNumJSCallerSaved = 5;
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 8;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
@@ -156,6 +140,21 @@ DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
+// Note that the bit values must match those used in actual instruction encoding
+constexpr int kNumRegs = 8;
+
+// Caller-saved registers
+constexpr RegList kJSCallerSaved =
+ Register::ListOf<eax, ecx, edx,
+ ebx, // used as a caller-saved register in JavaScript code
+ edi // callee function
+ >();
+
+constexpr int kNumJSCallerSaved = 5;
+
+// Number of registers for which space is reserved in safepoints.
+constexpr int kNumSafepointRegisters = 8;
+
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -393,7 +392,8 @@ class Operand BASE_EMBEDDED {
}
// Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
+ bool is_reg(Register reg) const { return is_reg(reg.code()); }
+ bool is_reg(XMMRegister reg) const { return is_reg(reg.code()); }
// Returns true if this Operand is a wrapper for one register.
bool is_reg_only() const;
@@ -406,7 +406,7 @@ class Operand BASE_EMBEDDED {
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
inline void set_modrm(int mod, Register rm) {
- DCHECK((mod & -4) == 0);
+ DCHECK_EQ(mod & -4, 0);
buf_[0] = mod << 6 | rm.code();
len_ = 1;
}
@@ -421,6 +421,11 @@ class Operand BASE_EMBEDDED {
rmode_ = rmode;
}
+ inline bool is_reg(int reg_code) const {
+ return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
+ && ((buf_[0] & 0x07) == reg_code); // register codes match.
+ }
+
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
@@ -500,14 +505,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -857,6 +863,7 @@ class Assembler : public AssemblerBase {
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void call(CodeStub* stub);
+ void wasm_call(Address address, RelocInfo::Mode rmode);
// Jumps
// unconditional jump to L
@@ -1758,7 +1765,7 @@ class Assembler : public AssemblerBase {
LeadingOpcode m, VexW w);
// labels
- void print(Label* L);
+ void print(const Label* L);
void bind_to(Label* L, int pos);
// displacements
@@ -1788,7 +1795,6 @@ class Assembler : public AssemblerBase {
bool is_optimizable_farjmp(int idx);
- friend class CodePatcher;
friend class EnsureSpace;
// Internal reference positions, required for (potential) patching in
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 46c386b149..8ca0b5989f 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -9,11 +9,9 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -21,8 +19,6 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/ia32/code-stubs-ia32.h" // Cannot be the first include.
-
namespace v8 {
namespace internal {
@@ -39,89 +35,24 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ pushad();
- if (save_doubles()) {
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movsd(Operand(esp, i * kDoubleSize), reg);
- }
- }
- const int argument_count = 1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, ecx);
- __ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movsd(reg, Operand(esp, i * kDoubleSize));
- }
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- }
- __ popad();
- __ ret(0);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in register number. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-};
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
- Register input_reg = this->source();
Register final_result_reg = this->destination();
- DCHECK(is_truncating());
-
- Label check_negative, process_64_bits, done, done_no_stash;
- int double_offset = offset();
+ Label check_negative, process_64_bits, done;
- // Account for return address and saved regs if input is esp.
- if (input_reg == esp) double_offset += 3 * kPointerSize;
+ // Account for return address and saved regs.
+ const int kArgumentOffset = 3 * kPointerSize;
- MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
- MemOperand exponent_operand(MemOperand(input_reg,
- double_offset + kDoubleSize / 2));
+ MemOperand mantissa_operand(MemOperand(esp, kArgumentOffset));
+ MemOperand exponent_operand(
+ MemOperand(esp, kArgumentOffset + kDoubleSize / 2));
Register scratch1 = no_reg;
{
Register scratch_candidates[3] = { ebx, edx, edi };
for (int i = 0; i < 3; i++) {
scratch1 = scratch_candidates[i];
- if (final_result_reg != scratch1 && input_reg != scratch1) break;
+ if (final_result_reg != scratch1) break;
}
}
// Since we must use ecx for shifts below, use some other register (eax)
@@ -134,7 +65,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ push(scratch1);
__ push(save_reg);
- bool stash_exponent_copy = input_reg != esp;
__ mov(scratch1, mantissa_operand);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
@@ -142,7 +72,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ fld_d(mantissa_operand);
}
__ mov(ecx, exponent_operand);
- if (stash_exponent_copy) __ push(ecx);
__ and_(ecx, HeapNumber::kExponentMask);
__ shr(ecx, HeapNumber::kExponentShift);
@@ -165,28 +94,18 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ bind(&process_64_bits);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
- if (stash_exponent_copy) {
- // Already a copy of the exponent on the stack, overwrite it.
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- __ sub(esp, Immediate(kDoubleSize / 2));
- } else {
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(kDoubleSize)); // Nolint.
- }
+ // Reserve space for 64 bit answer.
+ __ sub(esp, Immediate(kDoubleSize)); // Nolint.
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
__ add(esp, Immediate(kDoubleSize));
- __ jmp(&done_no_stash);
+ __ jmp(&done);
} else {
// Result must be extracted from shifted 32-bit mantissa
__ sub(ecx, Immediate(delta));
__ neg(ecx);
- if (stash_exponent_copy) {
- __ mov(result_reg, MemOperand(esp, 0));
- } else {
- __ mov(result_reg, exponent_operand);
- }
+ __ mov(result_reg, exponent_operand);
__ and_(result_reg,
Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
__ add(result_reg,
@@ -201,19 +120,11 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ bind(&check_negative);
__ mov(result_reg, scratch1);
__ neg(result_reg);
- if (stash_exponent_copy) {
- __ cmp(MemOperand(esp, 0), Immediate(0));
- } else {
- __ cmp(exponent_operand, Immediate(0));
- }
- __ cmov(greater, result_reg, scratch1);
+ __ cmp(exponent_operand, Immediate(0));
+ __ cmov(greater, result_reg, scratch1);
// Restore registers
__ bind(&done);
- if (stash_exponent_copy) {
- __ add(esp, Immediate(kDoubleSize / 2));
- }
- __ bind(&done_no_stash);
if (final_result_reg != result_reg) {
DCHECK(final_result_reg == ecx);
__ mov(final_result_reg, result_reg);
@@ -224,78 +135,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ JumpIfSmi(number, &load_smi, Label::kNear);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(equal, &load_float_eax, Label::kNear);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ Cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ Cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done, Label::kNear);
- __ bind(&load_float_eax);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Test if both operands are floats or smi -> scratch=k_is_float;
- // Otherwise scratch = k_not_float.
- __ JumpIfSmi(edx, &test_other, Label::kNear);
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ JumpIfSmi(eax, &done, Label::kNear);
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == eax);
@@ -444,15 +283,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
+Movability CEntryStub::NeedsImmovableCode() { return kMovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@@ -556,10 +390,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -593,9 +425,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&skip);
// Compute the handler entry address and jump to it.
- __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
- __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
- __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+ __ mov(edi, Operand::StaticVariable(pending_handler_entrypoint_address));
__ jmp(edi);
}
@@ -689,508 +519,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ mov(length, FieldOperand(left, String::kLengthOffset));
- __ cmp(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ bind(&strings_not_equal);
- __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(length, length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter, Label::kNear);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, length_delta);
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- Label compare_lengths;
- __ test(min_length, min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare characters.
- Label result_not_equal;
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, length_delta);
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- Label result_greater;
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
- __ bind(&result_not_equal);
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-
- // Result is LESS.
- __ Move(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch, Label* chars_not_equal,
- Label::Distance chars_not_equal_near) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ inc(index);
- __ j(not_zero, &loop);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a unique name and receiver must be a heap object.
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<Name> name,
- Register r0) {
- DCHECK(name->IsUniqueName());
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(index,
- Immediate(Smi::FromInt(name->Hash() +
- NameDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ cmp(entity_name, Handle<Name>(name));
- __ j(equal, miss);
-
- Label good;
- // Check for the hole and skip.
- __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &good, Label::kNear);
-
- // Check if the entry name is not a unique name.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ JumpIfNotUniqueNameInstanceType(
- FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
- __ bind(&good);
- }
-
- NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
- NEGATIVE_LOOKUP);
- __ push(Immediate(name));
- __ push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ test(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
- // Registers:
- // dictionary_: NameDictionary to probe.
- // result_: used as scratch.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result();
-
- __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
- __ dec(scratch);
- __ SmiUntag(scratch);
- __ push(scratch);
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- if (i > 0) {
- __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(esp, 0));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(scratch, isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(scratch, Operand(esp, 3 * kPointerSize));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // If we hit a key that is not a unique name during negative
- // lookup we have to bailout as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not a unique name.
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotUniqueNameInstanceType(
- FieldOperand(scratch, Map::kInstanceTypeOffset),
- &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ mov(result(), Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ mov(result(), Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ mov(result(), Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
- stub.GetCode();
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- DCHECK(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm,
- kReturnOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
-
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental, need_incremental_pop_object;
-
-#ifndef V8_CONCURRENT_MARKING
- Label object_is_black;
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &object_is_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ ret(0);
- }
-
- __ bind(&object_is_black);
-#endif
-
- // Get the value from the slot.
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- not_zero,
- &ensure_not_white,
- Label::kNear);
-
- __ jmp(&need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object, Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->CallStub(&stub);
}
@@ -1215,7 +552,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ push(eax);
// Call the entry hook.
- DCHECK(isolate()->function_entry_hook() != NULL);
+ DCHECK_NOT_NULL(isolate()->function_entry_hook());
__ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
RelocInfo::RUNTIME_ENTRY);
__ add(esp, Immediate(2 * kPointerSize));
@@ -1392,7 +729,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
@@ -1489,7 +826,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
@@ -1552,8 +889,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
Operand thunk_last_arg, int stack_space,
Operand* stack_space_operand,
- Operand return_value_operand,
- Operand* context_restore_operand) {
+ Operand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
@@ -1629,14 +965,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ mov(esi, *context_restore_operand);
- }
if (stack_space_operand != nullptr) {
__ mov(ebx, *stack_space_operand);
}
- __ LeaveApiExitFrame(!restore_context);
+ __ LeaveApiExitFrame();
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
@@ -1709,7 +1041,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- edi : callee
// -- ebx : call_data
// -- ecx : holder
// -- edx : api_function_address
@@ -1720,22 +1051,17 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- esp[argc * 4] : first argument
// -- esp[(argc + 1) * 4] : receiver
- // -- esp[(argc + 2) * 4] : accessor_holder
// -----------------------------------
- Register callee = edi;
Register call_data = ebx;
Register holder = ecx;
Register api_function_address = edx;
- Register context = esi;
Register return_address = eax;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1747,56 +1073,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // context save.
- __ push(context);
-
- // callee
- __ push(callee);
-
// call data
__ push(call_data);
// return value
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// return value default
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// isolate
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
+ __ push(Immediate(ExternalReference::isolate_address(isolate())));
// holder
__ push(holder);
- // enter a new context
Register scratch = call_data;
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- esp[0] : holder
- // -- ...
- // -- esp[(FCA::kArgsLength - 1) * 4] : new_target
- // -- esp[FCA::kArgsLength * 4] : last argument
- // -- ...
- // -- esp[(FCA::kArgsLength + argc - 1) * 4] : first argument
- // -- esp[(FCA::kArgsLength + argc) * 4] : receiver
- // -- esp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
- // -----------------------------------------------------------
-
- // load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ mov(accessor_holder,
- MemOperand(esp, (argc() + FCA::kArgsLength + 1) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ mov(scratch, FieldOperand(accessor_holder, HeapObject::kMapOffset));
- __ test_b(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(not_zero, &skip_looking_for_constructor, Label::kNear);
- __ GetMapConstructor(context, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ mov(context, FieldOperand(context, JSFunction::kContextOffset));
- } else {
- // load context from callee
- __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
- }
__ mov(scratch, esp);
@@ -1830,22 +1119,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
- Operand context_restore_operand(ebp,
- (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
Operand return_value_operand(ebp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
Operand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
ApiParameterOperand(1), stack_space,
- stack_space_operand, return_value_operand,
- &context_restore_operand);
+ stack_space_operand, return_value_operand);
}
@@ -1916,8 +1197,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Operand return_value_operand(
ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
- kStackUnwindSpace, nullptr, return_value_operand,
- NULL);
+ kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
deleted file mode 100644
index 15e40600af..0000000000
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_CODE_STUBS_IA32_H_
-#define V8_IA32_CODE_STUBS_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one byte strings and returns result in eax.
- static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat one byte strings for equality and returns result in eax.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch, Label* chars_not_equal,
- Label::Distance chars_not_equal_near = Label::kFar);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
- Register result, Register index, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = DictionaryBits::encode(dictionary.code()) |
- ResultBits::encode(result.code()) |
- IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<Name> name,
- Register r0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- Register dictionary() const {
- return Register::from_code(DictionaryBits::decode(minor_key_));
- }
-
- Register result() const {
- return Register::from_code(ResultBits::decode(minor_key_));
- }
-
- Register index() const {
- return Register::from_code(IndexBits::decode(minor_key_));
- }
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class DictionaryBits: public BitField<int, 0, 3> {};
- class ResultBits: public BitField<int, 3, 3> {};
- class IndexBits: public BitField<int, 6, 3> {};
- class LookupModeBits: public BitField<LookupMode, 9, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always ecx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
- if (scratch0 == ecx) {
- scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
- }
- if (object == ecx) {
- object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
- }
- if (address == ecx) {
- address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
- }
- DCHECK(!AreAliased(scratch0_, object_, address_, ecx));
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(address_orig_ != object_);
- DCHECK(object_ == object_orig_ || address_ == address_orig_);
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (scratch0_ != scratch0_orig_) masm->push(scratch0_);
- if (ecx != scratch0_orig_ && ecx != object_orig_ &&
- ecx != address_orig_) {
- masm->push(ecx);
- }
- masm->push(scratch1_);
- if (address_ != address_orig_) {
- masm->push(address_);
- masm->mov(address_, address_orig_);
- }
- if (object_ != object_orig_) {
- masm->push(object_);
- masm->mov(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with ecx.
- if (object_ != object_orig_) {
- masm->mov(object_orig_, object_);
- masm->pop(object_);
- }
- if (address_ != address_orig_) {
- masm->mov(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (ecx != scratch0_orig_ && ecx != object_orig_ &&
- ecx != address_orig_) {
- masm->pop(ecx);
- }
- if (scratch0_ != scratch0_orig_) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The caller saved
- // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always ecx.
-
- Register GetRegThatIsNotEcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(i)) {
- Register candidate = Register::from_code(i);
- if (candidate != ecx && candidate != r1 && candidate != r2 &&
- candidate != r3) {
- return candidate;
- }
- }
- }
- UNREACHABLE();
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 3> {};
- class ValueBits: public BitField<int, 3, 3> {};
- class AddressBits: public BitField<int, 6, 3> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
-
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 66da29ebb0..a66334e3a0 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ia32/codegen-ia32.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
@@ -16,14 +14,13 @@ namespace internal {
#define __ masm.
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
- size_t actual_size;
- // Allocate buffer in executable space.
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
@@ -41,8 +38,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -134,12 +132,12 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
- size_t actual_size;
- // Allocate buffer in executable space.
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
LabelConverter conv(buffer);
@@ -453,8 +451,9 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
@@ -463,123 +462,6 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ test(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ and_(result, Immediate(kStringRepresentationMask));
- __ cmp(result, Immediate(kConsStringTag));
- __ j(equal, &cons_string, Label::kNear);
- __ cmp(result, Immediate(kThinStringTag));
- __ j(equal, &thin_string, Label::kNear);
-
- // Handle slices.
- __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ SmiUntag(result);
- __ add(index, result);
- __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ mov(string, FieldOperand(string, ThinString::kActualOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(factory->empty_string()));
- __ j(not_equal, call_runtime);
- __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
- __ jmp(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label one_byte_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, kExternalStringExpectedButNotFound);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(result, Immediate(kShortExternalStringMask));
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(result, Immediate(kStringEncodingMask));
- __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &one_byte_external, Label::kNear);
- // Two-byte string.
- __ movzx_w(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&one_byte_external);
- // One-byte string.
- __ movzx_b(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: one-byte or two-byte.
- Label one_byte;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &one_byte, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- __ movzx_w(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // One-byte string.
- // Load the byte into the result register.
- __ bind(&one_byte);
- __ movzx_b(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
deleted file mode 100644
index 685157ddb1..0000000000
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_CODEGEN_IA32_H_
-#define V8_IA32_CODEGEN_IA32_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index f2588a8e16..b072784441 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -5,7 +5,6 @@
#if V8_TARGET_ARCH_IA32
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index c2f199b83f..39c6ff0d5c 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -583,7 +583,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
byte modrm = *++data;
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case 0:
mnem = "test";
@@ -626,7 +626,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
int imm8 = -1;
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case kROL:
mnem = "rol";
@@ -688,7 +688,7 @@ int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
const char* mnem = jump_conditional_mnem[cond];
AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
+ if (comment != nullptr) {
AppendToBuffer(", %s", comment);
}
return 6; // includes 0x0F
@@ -702,7 +702,7 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
byte* dest = data + static_cast<int8_t>(b) + 2;
const char* mnem = jump_conditional_mnem[cond];
AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
+ if (comment != nullptr) {
AppendToBuffer(", %s", comment);
}
return 2;
@@ -1393,9 +1393,8 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
return 2;
}
-
// Mnemonics for instructions 0xF0 byte.
-// Returns NULL if the instruction is not handled here.
+// Returns nullptr if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x0B:
@@ -1432,7 +1431,8 @@ static const char* F0Mnem(byte f0byte) {
return "bsf";
case 0xBD:
return "bsr";
- default: return NULL;
+ default:
+ return nullptr;
}
}
@@ -1443,7 +1443,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
tmp_buffer_pos_ = 0; // starting to write as position 0
byte* data = instr;
// Check for hints.
- const char* branch_hint = NULL;
+ const char* branch_hint = nullptr;
// We use these two prefixes only with branch prediction
if (*data == 0x3E /*ds*/) {
branch_hint = "predicted taken";
@@ -1753,7 +1753,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
{ data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case esi: mnem = "push"; break;
case eax: mnem = "inc"; break;
@@ -1791,7 +1791,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
{ data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case 5: mnem = "subb"; break;
case 7: mnem = "cmpb"; break;
@@ -2438,7 +2438,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (instr_len == 0) {
printf("%02x", *data);
}
- DCHECK(instr_len > 0); // Ensure progress.
+ DCHECK_GT(instr_len, 0); // Ensure progress.
int outp = 0;
// Instruction bytes.
diff --git a/deps/v8/src/ia32/frame-constants-ia32.cc b/deps/v8/src/ia32/frame-constants-ia32.cc
index 9cf76604df..32c2caf139 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/ia32/frame-constants-ia32.cc
@@ -22,6 +22,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 95c1dc4a5e..f0f9ec0a30 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -57,9 +57,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
const Register StoreTransitionDescriptor::MapRegister() { return edi; }
-const Register StringCompareDescriptor::LeftRegister() { return edx; }
-const Register StringCompareDescriptor::RightRegister() { return eax; }
-
const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
@@ -78,7 +75,7 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// SharedFunctionInfo, vector, slot index.
Register registers[] = {ebx, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
// static
@@ -87,13 +84,13 @@ const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
@@ -204,7 +201,7 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
@@ -218,7 +215,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {edi, edx, eax, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -228,7 +225,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// edi -- function
// ebx -- allocation site with elements kind
Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -238,7 +235,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// edi -- function
// ebx -- allocation site with elements kind
Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -248,26 +245,26 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
// edi -- function
// ebx -- allocation site with elements kind
Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
@@ -284,10 +281,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- edi, // callee
- ebx, // call_data
- ecx, // holder
- edx, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -336,8 +333,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // the value to pass to the generator
- ebx, // the JSGeneratorObject to resume
- edx // the resume mode (tagged)
+ edx // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index fe2fcffdd7..850424293a 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -9,7 +9,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
@@ -178,49 +178,8 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
- Label* condition_met,
- Label::Distance distance) {
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
- condition_met, distance);
-}
-
-void MacroAssembler::RememberedSetHelper(
- Register object, // Only used for debug checks.
- Register addr, Register scratch, SaveFPRegsMode save_fp) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(scratch, Operand::StaticVariable(store_buffer));
- // Store pointer to buffer.
- mov(Operand(scratch, 0), addr);
- // Increment buffer top.
- add(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- mov(Operand::StaticVariable(store_buffer), scratch);
- // Call stub on end of buffer.
- // Check for end of buffer.
- test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
- Label buffer_overflowed;
- j(equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
- CallStub(&store_buffer_overflow);
- ret(0);
-}
-
-void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
- Register input_reg, int offset) {
- CallStubDelayed(
- new (zone) DoubleToIStub(nullptr, input_reg, result_reg, offset, true));
+void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg) {
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result_reg));
}
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
@@ -301,7 +260,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
push(Register::from_code(i));
@@ -310,7 +269,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
if ((registers >> i) & 1u) {
pop(Register::from_code(i));
@@ -400,13 +359,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
&done,
Label::kNear);
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
bind(&done);
@@ -530,7 +483,7 @@ void MacroAssembler::CmpObjectType(Register heap_object,
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
+ cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
void MacroAssembler::AssertSmi(Register object) {
@@ -776,17 +729,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
leave();
}
- LeaveExitFrameEpilogue(true);
+ LeaveExitFrameEpilogue();
}
-
-void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
+void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(IsolateAddressId::kContextAddress,
isolate());
- if (restore_context) {
- mov(esi, Operand::StaticVariable(context_address));
- }
+ mov(esi, Operand::StaticVariable(context_address));
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -797,12 +747,11 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}
-
-void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
+void MacroAssembler::LeaveApiExitFrame() {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue(restore_context);
+ LeaveExitFrameEpilogue();
}
@@ -830,19 +779,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp) {
- Label done, loop;
- mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done, Label::kNear);
- CmpObjectType(result, MAP_TYPE, temp);
- j(not_equal, &done, Label::kNear);
- mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
- jmp(&loop);
- bind(&done);
-}
-
void MacroAssembler::CallStub(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET);
@@ -925,7 +861,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
void TurboAssembler::PrepareForTailCall(
const ParameterCount& callee_args_count, Register caller_args_count_reg,
- Register scratch0, Register scratch1, ReturnAddressState ra_state,
+ Register scratch0, Register scratch1,
int number_of_temp_values_after_return_address) {
#if DEBUG
if (callee_args_count.is_reg()) {
@@ -934,8 +870,6 @@ void TurboAssembler::PrepareForTailCall(
} else {
DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
}
- DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
- number_of_temp_values_after_return_address == 0);
#endif
// Calculate the destination address where we will put the return address
@@ -964,15 +898,9 @@ void TurboAssembler::PrepareForTailCall(
// to avoid its trashing and let the following loop copy it to the right
// place.
Register tmp_reg = scratch1;
- if (ra_state == ReturnAddressState::kOnStack) {
- mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
- tmp_reg);
- } else {
- DCHECK(ReturnAddressState::kNotOnStack == ra_state);
- DCHECK_EQ(0, number_of_temp_values_after_return_address);
- Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- }
+ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
+ tmp_reg);
// Restore caller's frame pointer now as it could be overwritten by
// the copying loop.
@@ -1201,18 +1129,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
return kNumSafepointRegisters - reg_code - 1;
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- mov(value, cell);
- mov(value, FieldOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
void TurboAssembler::Ret() { ret(0); }
void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
@@ -1260,7 +1176,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
- unsigned cnt = base::bits::CountPopulation32(src);
+ unsigned cnt = base::bits::CountPopulation(src);
unsigned nlz = base::bits::CountLeadingZeros32(src);
unsigned ntz = base::bits::CountTrailingZeros32(src);
if (nlz + cnt + ntz == 32) {
@@ -1286,7 +1202,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
} else {
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- unsigned cnt = base::bits::CountPopulation64(src);
+ unsigned cnt = base::bits::CountPopulation(src);
unsigned nlz = base::bits::CountLeadingZeros64(src);
unsigned ntz = base::bits::CountTrailingZeros64(src);
if (nlz + cnt + ntz == 64) {
@@ -1337,6 +1253,34 @@ void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
}
}
+void TurboAssembler::Psignb(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsignb(dst, dst, src);
+ return;
+ }
+ if (CpuFeatures::IsSupported(SSSE3)) {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ psignb(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::Psignw(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsignw(dst, dst, src);
+ return;
+ }
+ if (CpuFeatures::IsSupported(SSSE3)) {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ psignw(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
void TurboAssembler::Psignd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1479,7 +1423,7 @@ void TurboAssembler::Popcnt(Register dst, const Operand& src) {
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand operand = Operand::StaticVariable(ExternalReference(counter));
if (value == 1) {
@@ -1492,7 +1436,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand operand = Operand::StaticVariable(ExternalReference(counter));
if (value == 1) {
@@ -1537,7 +1481,7 @@ void TurboAssembler::CheckStackAlignment() {
void TurboAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -1564,35 +1508,6 @@ void TurboAssembler::Abort(BailoutReason reason) {
}
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- mov(dst, FieldOperand(dst, offset));
-}
-
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
- Label* not_unique_name,
- Label::Distance distance) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- j(zero, &succeed);
- cmpb(operand, Immediate(SYMBOL_TYPE));
- j(not_equal, not_unique_name, distance);
-
- bind(&succeed);
-}
-
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -1661,26 +1576,6 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
- : address_(address),
- size_(size),
- masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- Assembler::FlushICache(masm_.isolate(), address_, size_);
-
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
@@ -1699,86 +1594,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
- 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
- add(mask_scratch, mask_scratch); // Shift left 1 by adding.
- j(zero, &word_boundary, Label::kNear);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- jmp(&other_color, Label::kNear);
-
- bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
- Immediate(1));
-
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
- mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- and_(bitmap_reg, addr_reg);
- mov(ecx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shr(ecx, shift);
- and_(ecx,
- (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
-
- add(bitmap_reg, ecx);
- mov(ecx, addr_reg);
- shr(ecx, kPointerSizeLog2);
- and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
- mov(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Label* value_is_white,
- Label::Distance distance) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, value_is_white, Label::kNear);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 745055ecda..342281d0b3 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -38,8 +38,6 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
-enum class ReturnAddressState { kOnStack, kNotOnStack };
-
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
@@ -140,18 +138,17 @@ class TurboAssembler : public Assembler {
void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // |ra_state| defines whether return address is already pushed to stack or
- // not. Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed. |number_of_temp_values_after_return_address| specifies
- // the number of words pushed to the stack after the return address. This is
- // to allow "allocation" of scratch registers that this function requires
- // by saving their values on the stack.
+ // Removes current frame and its arguments from the stack preserving the
+ // arguments and a return address pushed to the stack for the next call. Both
+ // |callee_args_count| and |caller_args_count_reg| do not include receiver.
+ // |callee_args_count| is not modified, |caller_args_count_reg| is trashed.
+ // |number_of_temp_values_after_return_address| specifies the number of words
+ // pushed to the stack after the return address. This is to allow "allocation"
+ // of scratch registers that this function requires by saving their values on
+ // the stack.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
- Register scratch1, ReturnAddressState ra_state,
+ Register scratch1,
int number_of_temp_values_after_return_address);
// Before calling a C-function from generated code, align arguments on stack.
@@ -240,6 +237,8 @@ class TurboAssembler : public Assembler {
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, const Operand&)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
+ AVX_OP3_XO(Psubb, psubb)
+ AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
AVX_OP3_XO(Pxor, pxor)
@@ -249,6 +248,11 @@ class TurboAssembler : public Assembler {
// Non-SSE2 instructions.
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
void Pshufb(XMMRegister dst, const Operand& src);
+
+ void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
+ void Psignb(XMMRegister dst, const Operand& src);
+ void Psignw(XMMRegister dst, XMMRegister src) { Psignw(dst, Operand(src)); }
+ void Psignw(XMMRegister dst, const Operand& src);
void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
void Psignd(XMMRegister dst, const Operand& src);
@@ -276,10 +280,7 @@ class TurboAssembler : public Assembler {
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
- void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
- Register input_reg,
- int offset = HeapNumber::kValueOffset -
- kHeapObjectTag);
+ void SlowTruncateToIDelayed(Zone* zone, Register result_reg);
void Push(Register src) { push(src); }
void Push(const Operand& src) { push(src); }
@@ -378,41 +379,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, zero, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_zero, branch, distance);
- }
-
- // Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, Label::Distance has_color_distance,
- int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Label* value_is_white, Label::Distance distance);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -424,17 +390,6 @@ class MacroAssembler : public TurboAssembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a
- // Smi. All registers are clobbered by the operation RecordWriteArray
- // filters out smis so it does not update the write barrier if the
- // value is a smi.
- void RecordWriteArray(
- Register array, Register value, Register index, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
// object being stored. The address and value registers are clobbered by the
@@ -463,7 +418,7 @@ class MacroAssembler : public TurboAssembler {
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame(bool restore_context);
+ void LeaveApiExitFrame();
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
@@ -475,12 +430,6 @@ class MacroAssembler : public TurboAssembler {
void PushSafepointRegisters() { pushad(); }
void PopSafepointRegisters() { popad(); }
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -548,10 +497,6 @@ class MacroAssembler : public TurboAssembler {
j(not_zero, smi_label, distance);
}
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template<typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
@@ -596,13 +541,6 @@ class MacroAssembler : public TurboAssembler {
void PopStackHandler();
// ---------------------------------------------------------------------------
- // Support functions.
-
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done.
- void GetMapConstructor(Register result, Register map, Register temp);
-
- // ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub. Generate the code if necessary.
@@ -638,24 +576,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Utilities
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp (on ia32 it's at least return address).
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 1) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- DCHECK_LT(parameter_index, Descriptor::kParameterCount);
- DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
- parameter_index);
- int offset = (Descriptor::kParameterCount - parameter_index - 1 +
- sp_to_ra_offset_in_words) *
- kPointerSize;
- mov(reg, Operand(esp, offset));
- }
-
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
@@ -672,18 +592,6 @@ class MacroAssembler : public TurboAssembler {
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
- // ---------------------------------------------------------------------------
- // String utilities.
-
- // Checks if the given register or operand is a unique name
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
- Label::Distance distance = Label::kFar) {
- JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
- }
-
- void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
-
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -701,20 +609,13 @@ class MacroAssembler : public TurboAssembler {
void EnterExitFramePrologue(StackFrame::Type frame_type);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue(bool restore_context);
+ void LeaveExitFrameEpilogue();
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses ecx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -723,25 +624,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. Is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(Isolate* isolate, byte* address, int size);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/ic/access-compiler-data.h b/deps/v8/src/ic/access-compiler-data.h
deleted file mode 100644
index 28bdfd378b..0000000000
--- a/deps/v8/src/ic/access-compiler-data.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_ACCESS_COMPILER_DATA_H_
-#define V8_IC_ACCESS_COMPILER_DATA_H_
-
-#include <memory>
-
-#include "src/allocation.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-class AccessCompilerData {
- public:
- AccessCompilerData() {}
-
- bool IsInitialized() const { return load_calling_convention_ != nullptr; }
- void Initialize(int load_register_count, const Register* load_registers,
- int store_register_count, const Register* store_registers) {
- load_calling_convention_.reset(
- NewArray<Register>(load_register_count, no_reg));
- for (int i = 0; i < load_register_count; ++i) {
- load_calling_convention_[i] = load_registers[i];
- }
- store_calling_convention_.reset(
- NewArray<Register>(store_register_count, no_reg));
- for (int i = 0; i < store_register_count; ++i) {
- store_calling_convention_[i] = store_registers[i];
- }
- }
-
- Register* load_calling_convention() { return load_calling_convention_.get(); }
- Register* store_calling_convention() {
- return store_calling_convention_.get();
- }
-
- private:
- std::unique_ptr<Register[]> load_calling_convention_;
- std::unique_ptr<Register[]> store_calling_convention_;
-
- DISALLOW_COPY_AND_ASSIGN(AccessCompilerData);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_IC_ACCESS_COMPILER_DATA_H_
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
deleted file mode 100644
index f338619d5e..0000000000
--- a/deps/v8/src/ic/access-compiler.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ic/access-compiler.h"
-#include "src/assembler-inl.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
- Builtins::Name name) {
- Handle<Code> code(masm->isolate()->builtins()->builtin(name));
- GenerateTailCall(masm, code);
-}
-
-Register* PropertyAccessCompiler::GetCallingConvention(Isolate* isolate,
- Type type) {
- AccessCompilerData* data = isolate->access_compiler_data();
- if (!data->IsInitialized()) {
- InitializePlatformSpecific(data);
- }
- switch (type) {
- case LOAD:
- return data->load_calling_convention();
- case STORE:
- return data->store_calling_convention();
- }
- UNREACHABLE();
- return data->store_calling_convention();
-}
-
-
-Register PropertyAccessCompiler::slot() const {
- switch (type_) {
- case LOAD:
- return LoadDescriptor::SlotRegister();
- case STORE:
- return StoreWithVectorDescriptor::SlotRegister();
- }
- UNREACHABLE();
- return StoreWithVectorDescriptor::SlotRegister();
-}
-
-Register PropertyAccessCompiler::vector() const {
- switch (type_) {
- case LOAD:
- return LoadWithVectorDescriptor::VectorRegister();
- case STORE:
- return StoreWithVectorDescriptor::VectorRegister();
- }
- UNREACHABLE();
- return StoreWithVectorDescriptor::VectorRegister();
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
deleted file mode 100644
index d6ddd54a7f..0000000000
--- a/deps/v8/src/ic/access-compiler.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_ACCESS_COMPILER_H_
-#define V8_IC_ACCESS_COMPILER_H_
-
-#include "src/code-stubs.h"
-#include "src/ic/access-compiler-data.h"
-#include "src/macro-assembler.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-class PropertyAccessCompiler BASE_EMBEDDED {
- public:
- enum Type { LOAD, STORE };
-
- static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
-
- protected:
- PropertyAccessCompiler(Isolate* isolate, Type type)
- : registers_(GetCallingConvention(isolate, type)),
- type_(type),
- isolate_(isolate),
- masm_(isolate, NULL, 256, CodeObjectRequired::kYes) {
- // TODO(yangguo): remove this once we can serialize IC stubs.
- masm_.enable_serializer();
- }
-
- Type type() const { return type_; }
-
- MacroAssembler* masm() { return &masm_; }
- Isolate* isolate() const { return isolate_; }
- Factory* factory() const { return isolate()->factory(); }
-
- Register receiver() const { return registers_[0]; }
- Register name() const { return registers_[1]; }
- Register slot() const;
- Register vector() const;
- Register scratch1() const { return registers_[2]; }
- Register scratch2() const { return registers_[3]; }
-
- Register* registers_;
-
- static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
-
- private:
- static Register* GetCallingConvention(Isolate* isolate, Type type);
- static void InitializePlatformSpecific(AccessCompilerData* data);
-
- Type type_;
- Isolate* isolate_;
- MacroAssembler masm_;
- // Ensure that MacroAssembler has a reasonable size.
- STATIC_ASSERT(sizeof(MacroAssembler) < 128 * kPointerSize);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_IC_ACCESS_COMPILER_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 2472febd03..c4852d860d 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -169,7 +169,8 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
Label* rebox_double,
ExitPoint* exit_point) {
Comment("field_load");
- Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
+ Node* index = DecodeWord<LoadHandler::FieldIndexBits>(handler_word);
+ Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
Label inobject(this), out_of_object(this);
Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
@@ -206,6 +207,18 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
}
}
+Node* AccessorAssembler::LoadDescriptorValue(Node* map, Node* descriptor) {
+ Node* descriptors = LoadMapDescriptors(map);
+ Node* scaled_descriptor =
+ IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
+ Node* value_index = IntPtrAdd(
+ scaled_descriptor, IntPtrConstant(DescriptorArray::kFirstIndex +
+ DescriptorArray::kEntryValueIndex));
+ CSA_ASSERT(this, UintPtrLessThan(descriptor, LoadAndUntagFixedArrayBaseLength(
+ descriptors)));
+ return LoadFixedArrayElement(descriptors, value_index);
+}
+
void AccessorAssembler::HandleLoadICSmiHandlerCase(
const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
ExitPoint* exit_point, bool throw_reference_error_if_nonexistent,
@@ -216,10 +229,13 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Node* handler_word = SmiUntag(smi_handler);
Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
if (support_elements == kSupportElements) {
- Label property(this);
- GotoIfNot(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
- &property);
+ Label if_element(this), if_indexed_string(this), if_property(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
+ &if_element);
+ Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kIndexedString)),
+ &if_indexed_string, &if_property);
+ BIND(&if_element);
Comment("element_load");
Node* intptr_index = TryToIntptr(p->name, miss);
Node* elements = LoadElements(holder);
@@ -227,12 +243,12 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
Node* elements_kind =
DecodeWord32FromWord<LoadHandler::ElementsKindBits>(handler_word);
- Label if_hole(this), unimplemented_elements_kind(this);
- Label* out_of_bounds = miss;
+ Label if_hole(this), unimplemented_elements_kind(this),
+ if_oob(this, Label::kDeferred);
EmitElementLoad(holder, elements, elements_kind, intptr_index,
is_jsarray_condition, &if_hole, &rebox_double,
- &var_double_value, &unimplemented_elements_kind,
- out_of_bounds, miss, exit_point);
+ &var_double_value, &unimplemented_elements_kind, &if_oob,
+ miss, exit_point);
BIND(&unimplemented_elements_kind);
{
@@ -242,22 +258,70 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Goto(miss);
}
+ BIND(&if_oob);
+ {
+ Comment("out of bounds elements access");
+ Label return_undefined(this);
+
+ // Negative indices aren't valid array indices (according to
+ // the ECMAScript specification), and are stored as properties
+ // in V8, not elements. So we cannot handle them here.
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), miss);
+
+ // Check if we're allowed to handle OOB accesses.
+ Node* allow_out_of_bounds =
+ IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
+ GotoIfNot(allow_out_of_bounds, miss);
+
+ // For typed arrays we never lookup elements in the prototype chain.
+ GotoIf(IsJSTypedArray(holder), &return_undefined);
+
+ // For all other receivers we need to check that the prototype chain
+ // doesn't contain any elements.
+ BranchIfPrototypesHaveNoElements(LoadMap(holder), &return_undefined,
+ miss);
+
+ BIND(&return_undefined);
+ exit_point->Return(UndefinedConstant());
+ }
+
BIND(&if_hole);
{
Comment("convert hole");
GotoIfNot(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
- GotoIf(IsArrayProtectorCellInvalid(), miss);
+ GotoIf(IsNoElementsProtectorCellInvalid(), miss);
exit_point->Return(UndefinedConstant());
}
- BIND(&property);
+ BIND(&if_indexed_string);
+ {
+ Label if_oob(this, Label::kDeferred);
+
+ Comment("indexed string");
+ Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* length = LoadStringLengthAsWord(holder);
+ GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
+ Node* code = StringCharCodeAt(holder, intptr_index);
+ Node* result = StringFromCharCode(code);
+ Return(result);
+
+ BIND(&if_oob);
+ Node* allow_out_of_bounds =
+ IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
+ GotoIfNot(allow_out_of_bounds, miss);
+ GotoIf(IsNoElementsProtectorCellInvalid(), miss);
+ Return(UndefinedConstant());
+ }
+
+ BIND(&if_property);
Comment("property_load");
}
Label constant(this), field(this), normal(this, Label::kDeferred),
interceptor(this, Label::kDeferred), nonexistent(this),
accessor(this, Label::kDeferred), global(this, Label::kDeferred),
- module_export(this, Label::kDeferred), proxy(this, Label::kDeferred);
+ module_export(this, Label::kDeferred), proxy(this, Label::kDeferred),
+ native_data_property(this), api_getter(this);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
@@ -272,6 +336,17 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kAccessor)),
&accessor);
+ GotoIf(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNativeDataProperty)),
+ &native_data_property);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
+ &api_getter);
+
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
+ &api_getter);
+
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)),
&global);
@@ -296,28 +371,10 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&constant);
{
Comment("constant_load");
- Node* descriptors = LoadMapDescriptors(LoadMap(holder));
Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
- Node* scaled_descriptor =
- IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
- Node* value_index =
- IntPtrAdd(scaled_descriptor,
- IntPtrConstant(DescriptorArray::kFirstIndex +
- DescriptorArray::kEntryValueIndex));
- CSA_ASSERT(this,
- UintPtrLessThan(descriptor,
- LoadAndUntagFixedArrayBaseLength(descriptors)));
- Node* value = LoadFixedArrayElement(descriptors, value_index);
+ Node* value = LoadDescriptorValue(LoadMap(holder), descriptor);
- Label if_accessor_info(this, Label::kDeferred);
- GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
- &if_accessor_info);
exit_point->Return(value);
-
- BIND(&if_accessor_info);
- Callable callable = CodeFactory::ApiGetter(isolate());
- exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
- value);
}
BIND(&normal);
@@ -343,18 +400,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&accessor);
{
Comment("accessor_load");
- Node* descriptors = LoadMapDescriptors(LoadMap(holder));
Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
- Node* scaled_descriptor =
- IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
- Node* value_index =
- IntPtrAdd(scaled_descriptor,
- IntPtrConstant(DescriptorArray::kFirstIndex +
- DescriptorArray::kEntryValueIndex));
- CSA_ASSERT(this,
- UintPtrLessThan(descriptor,
- LoadAndUntagFixedArrayBaseLength(descriptors)));
- Node* accessor_pair = LoadFixedArrayElement(descriptors, value_index);
+ Node* accessor_pair = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_ASSERT(this, IsAccessorPair(accessor_pair));
Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
@@ -363,6 +410,51 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
exit_point->Return(CallJS(callable, p->context, getter, p->receiver));
}
+ BIND(&native_data_property);
+ {
+ Comment("native_data_property_load");
+ Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
+ Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
+
+ Callable callable = CodeFactory::ApiGetter(isolate());
+ exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+ accessor_info);
+ }
+
+ BIND(&api_getter);
+ {
+ Comment("api_getter");
+ Node* context = LoadWeakCellValueUnchecked(
+ LoadObjectField(holder, Tuple2::kValue1Offset));
+ Node* call_handler_info = LoadWeakCellValueUnchecked(
+ LoadObjectField(holder, Tuple2::kValue2Offset));
+
+ Node* foreign =
+ LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset);
+ Node* callback = LoadObjectField(foreign, Foreign::kForeignAddressOffset,
+ MachineType::Pointer());
+ Node* data =
+ LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
+
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
+ Label load(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
+ &load);
+
+ CSA_ASSERT(
+ this,
+ WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
+
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
+ Goto(&load);
+
+ BIND(&load);
+ Callable callable = CodeFactory::CallApiCallback(isolate(), 0);
+ exit_point->Return(CallStub(callable, nullptr, context, data,
+ api_holder.value(), callback, p->receiver));
+ }
+
BIND(&proxy);
{
VARIABLE(var_index, MachineType::PointerRepresentation());
@@ -454,7 +546,7 @@ void AccessorAssembler::HandleLoadICProtoHandlerCase(
DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
// IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+ STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kDataOffset);
DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
LoadHandler::kSmiHandlerOffset);
DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
@@ -500,8 +592,7 @@ void AccessorAssembler::HandleLoadICProtoHandlerCase(
}
BIND(&check_prototypes);
- Node* maybe_holder_cell =
- LoadObjectField(handler, LoadHandler::kHolderCellOffset);
+ Node* maybe_holder_cell = LoadObjectField(handler, LoadHandler::kDataOffset);
Label array_handler(this), tuple_handler(this);
Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
@@ -509,18 +600,29 @@ void AccessorAssembler::HandleLoadICProtoHandlerCase(
{
Label load_from_cached_holder(this), done(this);
- Branch(WordEqual(maybe_holder_cell, NullConstant()), &done,
- &load_from_cached_holder);
+ Branch(IsNull(maybe_holder_cell), &done, &load_from_cached_holder);
BIND(&load_from_cached_holder);
{
- // For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+ Label unwrap_cell(this), bind_holder(this);
+ Branch(IsWeakCell(maybe_holder_cell), &unwrap_cell, &bind_holder);
- var_holder->Bind(holder);
- Goto(&done);
+ BIND(&unwrap_cell);
+ {
+ // For regular holders, having passed the receiver map check and the
+ // validity cell check implies that |holder| is alive. However, for
+ // global object receivers, the |maybe_holder_cell| may be cleared.
+ Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+
+ var_holder->Bind(holder);
+ Goto(&done);
+ }
+
+ BIND(&bind_holder);
+ {
+ var_holder->Bind(maybe_holder_cell);
+ Goto(&done);
+ }
}
BIND(&done);
@@ -589,18 +691,32 @@ Node* AccessorAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Node* maybe_holder_cell =
- LoadFixedArrayElement(handler, LoadHandler::kHolderCellIndex);
+ LoadFixedArrayElement(handler, LoadHandler::kDataIndex);
VARIABLE(var_holder, MachineRepresentation::kTagged, p->receiver);
Label done(this);
- GotoIf(WordEqual(maybe_holder_cell, NullConstant()), &done);
+ GotoIf(IsNull(maybe_holder_cell), &done);
{
- // For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- var_holder.Bind(LoadWeakCellValue(maybe_holder_cell, miss));
- Goto(&done);
+ Label unwrap_cell(this), bind_holder(this);
+ Branch(IsWeakCell(maybe_holder_cell), &unwrap_cell, &bind_holder);
+
+ BIND(&unwrap_cell);
+ {
+ // For regular holders, having passed the receiver map check and the
+ // validity cell check implies that |holder| is alive. However, for
+ // global object receivers, the |maybe_holder_cell| may be cleared.
+ Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+
+ var_holder.Bind(holder);
+ Goto(&done);
+ }
+
+ BIND(&bind_holder);
+ {
+ var_holder.Bind(maybe_holder_cell);
+ Goto(&done);
+ }
}
BIND(&done);
@@ -639,6 +755,18 @@ void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
// Fall through if it's an accessor property.
}
+void AccessorAssembler::HandleStoreICNativeDataProperty(
+ const StoreICParameters* p, Node* holder, Node* handler_word) {
+ Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
+ Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
+ CSA_CHECK(this, IsAccessorInfo(accessor_info));
+
+ Node* language_mode = GetLanguageMode(p->vector, p->slot);
+
+ TailCallRuntime(Runtime::kStoreCallbackProperty, p->context, p->receiver,
+ holder, accessor_info, p->name, p->value, language_mode);
+}
+
void AccessorAssembler::HandleStoreICHandlerCase(
const StoreICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
@@ -657,19 +785,18 @@ void AccessorAssembler::HandleStoreICHandlerCase(
Label if_fast_smi(this), if_proxy(this);
- STATIC_ASSERT(StoreHandler::kStoreGlobalProxy + 1 ==
- StoreHandler::kStoreNormal);
- STATIC_ASSERT(StoreHandler::kStoreNormal + 1 == StoreHandler::kProxy);
+ STATIC_ASSERT(StoreHandler::kGlobalProxy + 1 == StoreHandler::kNormal);
+ STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kProxy);
STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber);
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
GotoIf(IntPtrLessThan(handler_kind,
- IntPtrConstant(StoreHandler::kStoreGlobalProxy)),
+ IntPtrConstant(StoreHandler::kGlobalProxy)),
&if_fast_smi);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)),
&if_proxy);
- CSA_ASSERT(this, WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreNormal)));
+ CSA_ASSERT(this,
+ WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)));
Node* properties = LoadSlowProperties(holder);
VARIABLE(var_name_index, MachineType::PointerRepresentation());
@@ -692,8 +819,27 @@ void AccessorAssembler::HandleStoreICHandlerCase(
}
BIND(&if_fast_smi);
- // Handle non-transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+ {
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+
+ Label data(this), accessor(this), native_data_property(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kAccessor)),
+ &accessor);
+ Branch(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kNativeDataProperty)),
+ &native_data_property, &data);
+
+ BIND(&accessor);
+ HandleStoreAccessor(p, holder, handler_word);
+
+ BIND(&native_data_property);
+ HandleStoreICNativeDataProperty(p, holder, handler_word);
+
+ BIND(&data);
+ // Handle non-transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr,
+ miss);
+ }
BIND(&if_proxy);
HandleStoreToProxy(p, holder, miss, support_elements);
@@ -751,14 +897,26 @@ void AccessorAssembler::HandleStoreICElementHandlerCase(
p->value, p->slot, p->vector);
}
+void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
+ Node* holder, Node* handler_word) {
+ Comment("accessor_store");
+ Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
+ Node* accessor_pair = LoadDescriptorValue(LoadMap(holder), descriptor);
+ CSA_ASSERT(this, IsAccessorPair(accessor_pair));
+ Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+ CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
+
+ Callable callable = CodeFactory::Call(isolate());
+ Return(CallJS(callable, p->context, setter, p->receiver, p->value));
+}
+
void AccessorAssembler::HandleStoreICProtoHandler(
const StoreICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
Comment("HandleStoreICProtoHandler");
// IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset ==
- StoreHandler::kTransitionOrHolderCellOffset);
+ STATIC_ASSERT(FixedArray::kLengthOffset == StoreHandler::kDataOffset);
DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
StoreHandler::kSmiHandlerOffset);
DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
@@ -779,18 +937,13 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Node* smi_or_code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
Node* maybe_transition_cell =
- LoadObjectField(handler, StoreHandler::kTransitionOrHolderCellOffset);
- Label array_handler(this), tuple_handler(this);
- Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+ LoadObjectField(handler, StoreHandler::kDataOffset);
+ Label array_handler(this), do_store(this);
- VARIABLE(var_transition_map_or_holder, MachineRepresentation::kTagged);
- Label do_store(this), if_transition_map(this), if_holder_object(this);
- BIND(&tuple_handler);
- {
- Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition_map_or_holder.Bind(transition);
- Goto(&do_store);
- }
+ VARIABLE(var_transition_map_or_holder, MachineRepresentation::kTagged,
+ maybe_transition_cell);
+
+ Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &do_store);
BIND(&array_handler);
{
@@ -829,17 +982,27 @@ void AccessorAssembler::HandleStoreICProtoHandler(
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
- Node* maybe_transition_cell = LoadFixedArrayElement(
- handler, StoreHandler::kTransitionMapOrHolderCellIndex);
- Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition_map_or_holder.Bind(transition);
+ Node* maybe_transition_cell =
+ LoadFixedArrayElement(handler, StoreHandler::kDataIndex);
+ var_transition_map_or_holder.Bind(maybe_transition_cell);
Goto(&do_store);
}
+ Label if_transition_map(this), if_holder_object(this);
+
BIND(&do_store);
{
- Node* transition = var_transition_map_or_holder.value();
- Branch(IsMap(transition), &if_transition_map, &if_holder_object);
+ Node* maybe_transition_cell = var_transition_map_or_holder.value();
+
+ Label unwrap_cell(this);
+ Branch(IsWeakCell(maybe_transition_cell), &unwrap_cell, &if_holder_object);
+
+ BIND(&unwrap_cell);
+ {
+ Node* maybe_transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition_map_or_holder.Bind(maybe_transition);
+ Branch(IsMap(maybe_transition), &if_transition_map, &if_holder_object);
+ }
}
BIND(&if_transition_map);
@@ -870,11 +1033,12 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Node* handler_word = SmiUntag(smi_handler);
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kStoreNormal)),
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)),
&if_store_normal);
GotoIf(WordEqual(handler_kind,
IntPtrConstant(StoreHandler::kTransitionToConstant)),
&if_transition_to_constant);
+
CSA_ASSERT(this,
WordEqual(handler_kind,
IntPtrConstant(StoreHandler::kTransitionToField)));
@@ -887,18 +1051,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
{
// Check that constant matches value.
Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
- Node* scaled_descriptor =
- IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
- Node* value_index =
- IntPtrAdd(scaled_descriptor,
- IntPtrConstant(DescriptorArray::kFirstIndex +
- DescriptorArray::kEntryValueIndex));
- Node* descriptors = LoadMapDescriptors(transition_map);
- CSA_ASSERT(
- this, UintPtrLessThan(descriptor,
- LoadAndUntagFixedArrayBaseLength(descriptors)));
-
- Node* constant = LoadFixedArrayElement(descriptors, value_index);
+ Node* constant = LoadDescriptorValue(transition_map, descriptor);
GotoIf(WordNotEqual(p->value, constant), miss);
StoreMap(p->receiver, transition_map);
@@ -943,7 +1096,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
}
BIND(&if_holder_object);
{
- Label if_store_global_proxy(this);
+ Label if_store_global_proxy(this), if_api_setter(this), if_accessor(this),
+ if_native_data_property(this);
Node* holder = var_transition_map_or_holder.value();
Node* smi_handler = smi_or_code;
@@ -951,13 +1105,67 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Node* handler_word = SmiUntag(smi_handler);
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreGlobalProxy)),
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
&if_store_global_proxy);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kAccessor)),
+ &if_accessor);
+
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kNativeDataProperty)),
+ &if_native_data_property);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)),
+ &if_api_setter);
+
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)),
+ &if_api_setter);
+
CSA_ASSERT(this,
WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)));
HandleStoreToProxy(p, holder, miss, support_elements);
+ BIND(&if_accessor);
+ HandleStoreAccessor(p, holder, handler_word);
+
+ BIND(&if_native_data_property);
+ HandleStoreICNativeDataProperty(p, holder, handler_word);
+
+ BIND(&if_api_setter);
+ {
+ Comment("api_setter");
+ Node* context = LoadWeakCellValueUnchecked(
+ LoadObjectField(holder, Tuple2::kValue1Offset));
+ Node* call_handler_info = LoadWeakCellValueUnchecked(
+ LoadObjectField(holder, Tuple2::kValue2Offset));
+
+ Node* foreign = LoadObjectField(call_handler_info,
+ CallHandlerInfo::kJsCallbackOffset);
+ Node* callback = LoadObjectField(foreign, Foreign::kForeignAddressOffset,
+ MachineType::Pointer());
+ Node* data =
+ LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
+
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
+ Label store(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)),
+ &store);
+
+ CSA_ASSERT(
+ this,
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)));
+
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
+ Goto(&store);
+
+ BIND(&store);
+ Callable callable = CodeFactory::CallApiCallback(isolate(), 1);
+ Return(CallStub(callable, nullptr, context, data, api_holder.value(),
+ callback, p->receiver, p->value));
+ }
+
BIND(&if_store_global_proxy);
{
ExitPoint direct_exit(this);
@@ -966,20 +1174,27 @@ void AccessorAssembler::HandleStoreICProtoHandler(
}
}
+Node* AccessorAssembler::GetLanguageMode(Node* vector, Node* slot) {
+ VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
+ SmiConstant(LanguageMode::kStrict));
+ Label language_mode_determined(this);
+ BranchIfStrictMode(vector, slot, &language_mode_determined);
+ var_language_mode.Bind(SmiConstant(LanguageMode::kSloppy));
+ Goto(&language_mode_determined);
+ BIND(&language_mode_determined);
+ return var_language_mode.value();
+}
+
void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Node* proxy, Label* miss,
ElementSupport support_elements) {
VARIABLE(var_index, MachineType::PointerRepresentation());
VARIABLE(var_unique, MachineRepresentation::kTagged);
- VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
- SmiConstant(STRICT));
- Label if_index(this), if_unique_name(this), language_mode_determined(this),
+ Label if_index(this), if_unique_name(this),
to_name_failed(this, Label::kDeferred);
- BranchIfStrictMode(p->vector, p->slot, &language_mode_determined);
- var_language_mode.Bind(SmiConstant(SLOPPY));
- Goto(&language_mode_determined);
- BIND(&language_mode_determined);
+
+ Node* language_mode = GetLanguageMode(p->vector, p->slot);
if (support_elements == kSupportElements) {
TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
@@ -987,8 +1202,7 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
BIND(&if_unique_name);
CallBuiltin(Builtins::kProxySetProperty, p->context, proxy,
- var_unique.value(), p->value, p->receiver,
- var_language_mode.value());
+ var_unique.value(), p->value, p->receiver, language_mode);
Return(p->value);
// The index case is handled earlier by the runtime.
@@ -999,11 +1213,11 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
BIND(&to_name_failed);
TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context, proxy,
- p->name, p->value, p->receiver, var_language_mode.value());
+ p->name, p->value, p->receiver, language_mode);
} else {
Node* name = ToName(p->context, p->name);
TailCallBuiltin(Builtins::kProxySetProperty, p->context, proxy, name,
- p->value, p->receiver, var_language_mode.value());
+ p->value, p->receiver, language_mode);
}
}
@@ -1012,7 +1226,6 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
Node* transition,
Label* miss) {
Comment(transition ? "transitioning field store" : "field store");
-
#ifdef DEBUG
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
if (transition) {
@@ -1026,14 +1239,13 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
} else {
if (FLAG_track_constant_fields) {
CSA_ASSERT(
- this,
- Word32Or(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreField)),
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreConstField))));
+ this, Word32Or(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kField)),
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kConstField))));
} else {
- CSA_ASSERT(this, WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreField)));
+ CSA_ASSERT(this,
+ WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)));
}
}
#endif
@@ -1138,22 +1350,12 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
// Skip field type check in favor of constant value check when storing
// to constant field.
GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
- IntPtrConstant(StoreHandler::kStoreConstField)),
+ IntPtrConstant(StoreHandler::kConstField)),
&done);
}
Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
- Node* scaled_descriptor =
- IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
- Node* value_index =
- IntPtrAdd(scaled_descriptor,
- IntPtrConstant(DescriptorArray::kFirstIndex +
- DescriptorArray::kEntryValueIndex));
- Node* descriptors =
- LoadMapDescriptors(transition ? transition : LoadMap(holder));
- CSA_ASSERT(this,
- UintPtrLessThan(descriptor,
- LoadAndUntagFixedArrayBaseLength(descriptors)));
- Node* maybe_field_type = LoadFixedArrayElement(descriptors, value_index);
+ Node* maybe_field_type = LoadDescriptorValue(
+ transition ? transition : LoadMap(holder), descriptor);
GotoIf(TaggedIsSmi(maybe_field_type), &done);
// Check that value type matches the field type.
@@ -1222,7 +1424,8 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
// Previous property deletion could have left behind unused backing store
// capacity even for a map that think it doesn't have any unused fields.
// Perform a bounds check to see if we actually have to grow the array.
- Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+ Node* index = DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
+ Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
Node* size = ElementOffsetFromIndex(var_length.value(), PACKED_ELEMENTS,
mode, FixedArray::kHeaderSize);
GotoIf(UintPtrLessThan(offset, size), &done);
@@ -1278,7 +1481,8 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
property_storage = LoadFastProperties(object);
}
- Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+ Node* index = DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
+ Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
if (representation.IsDouble()) {
if (!FLAG_unbox_double_fields || !is_inobject) {
if (transition_to_field) {
@@ -1299,7 +1503,7 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
if (FLAG_track_constant_fields && !transition_to_field) {
Label done(this);
GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
- IntPtrConstant(StoreHandler::kStoreConstField)),
+ IntPtrConstant(StoreHandler::kConstField)),
&done);
{
if (store_value_as_double) {
@@ -1431,19 +1635,17 @@ void AccessorAssembler::EmitElementLoad(
GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
VARIABLE(var_entry, MachineType::PointerRepresentation());
Label if_found(this);
- NumberDictionaryLookup<SeededNumberDictionary>(
- elements, intptr_index, &if_found, &var_entry, if_hole);
+ NumberDictionaryLookup(elements, intptr_index, &if_found, &var_entry,
+ if_hole);
BIND(&if_found);
// Check that the value is a data property.
- Node* index = EntryToIndex<SeededNumberDictionary>(var_entry.value());
- Node* details =
- LoadDetailsByKeyIndex<SeededNumberDictionary>(elements, index);
+ Node* index = EntryToIndex<NumberDictionary>(var_entry.value());
+ Node* details = LoadDetailsByKeyIndex<NumberDictionary>(elements, index);
Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
// TODO(jkummerow): Support accessors without missing?
GotoIfNot(Word32Equal(kind, Int32Constant(kData)), miss);
// Finally, load the value.
- exit_point->Return(
- LoadValueByKeyIndex<SeededNumberDictionary>(elements, index));
+ exit_point->Return(LoadValueByKeyIndex<NumberDictionary>(elements, index));
}
BIND(&if_typed_array);
@@ -1624,16 +1826,15 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
ExitPoint direct_exit(this);
- Label if_element_hole(this), if_oob(this);
+ Label if_custom(this), if_element_hole(this), if_oob(this);
// Receivers requiring non-standard element accesses (interceptors, access
// checks, strings and string wrappers, proxies) are handled in the runtime.
GotoIf(Int32LessThanOrEqual(instance_type,
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
- slow);
+ &if_custom);
Node* elements = LoadElements(receiver);
Node* elements_kind = LoadMapElementsKind(receiver_map);
- Node* is_jsarray_condition =
- Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+ Node* is_jsarray_condition = InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
@@ -1666,6 +1867,18 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
BIND(&return_undefined);
Return(UndefinedConstant());
}
+
+ BIND(&if_custom);
+ {
+ Comment("check if string");
+ GotoIfNot(IsStringInstanceType(instance_type), slow);
+ Comment("load string character");
+ Node* length = LoadAndUntagObjectField(receiver, String::kLengthOffset);
+ GotoIfNot(UintPtrLessThan(index, length), slow);
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+ TailCallBuiltin(Builtins::kStringCharAt, NoContextConstant(), receiver,
+ index);
+ }
}
void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
@@ -1777,8 +1990,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
BIND(&loop);
{
// Bailout if it can be an integer indexed exotic case.
- GotoIf(Word32Equal(var_holder_instance_type.value(),
- Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ GotoIf(InstanceTypeEqual(var_holder_instance_type.value(),
+ JS_TYPED_ARRAY_TYPE),
slow);
Node* proto = LoadMapPrototype(var_holder_map.value());
GotoIf(WordEqual(proto, NullConstant()), &return_undefined);
@@ -1810,7 +2023,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
BIND(&special_receiver);
{
// TODO(jkummerow): Consider supporting JSModuleNamespace.
- GotoIfNot(Word32Equal(instance_type, Int32Constant(JS_PROXY_TYPE)), slow);
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), slow);
direct_exit.ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
@@ -2076,7 +2289,7 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
}
void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
- Label miss(this);
+ Label miss(this, Label::kDeferred);
Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
@@ -2090,13 +2303,21 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
{
// Special case for Function.prototype load, because it's very common
// for ICs that are only executed once (MyFunc.prototype.foo = ...).
- Label not_function_prototype(this);
- GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)),
- &not_function_prototype);
+ Label not_function_prototype(this, Label::kDeferred);
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
+ &not_function_prototype);
GotoIfNot(IsPrototypeString(p->name), &not_function_prototype);
- Node* bit_field = LoadMapBitField(receiver_map);
- GotoIf(IsSetWord32(bit_field, 1 << Map::kHasNonInstancePrototype),
- &not_function_prototype);
+
+ // if (!(has_prototype_slot() && !has_non_instance_prototype())) use generic
+ // property loading mechanism.
+ int has_prototype_slot_mask = 1 << Map::kHasPrototypeSlot;
+ int has_non_instance_prototype_mask = 1 << Map::kHasNonInstancePrototype;
+ GotoIfNot(
+ Word32Equal(Word32And(LoadMapBitField(receiver_map),
+ Int32Constant(has_prototype_slot_mask |
+ has_non_instance_prototype_mask)),
+ Int32Constant(has_prototype_slot_mask)),
+ &not_function_prototype);
Return(LoadJSFunctionPrototype(receiver, &miss));
BIND(&not_function_prototype);
}
@@ -2278,15 +2499,47 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a fixed array in the next slot.
+ Node* name = p->name;
Comment("KeyedLoadIC_try_polymorphic_name");
- GotoIfNot(WordEqual(feedback, p->name), &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- Node* array = LoadFeedbackVectorSlot(p->vector, p->slot, kPointerSize,
- SMI_PARAMETERS);
- HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
- 1);
+ VARIABLE(var_name, MachineRepresentation::kTagged, name);
+ VARIABLE(var_index, MachineType::PointerRepresentation());
+ Label if_polymorphic_name(this, &var_name), if_internalized(this),
+ if_notinternalized(this, Label::kDeferred);
+
+ // Fast-case: The recorded {feedback} matches the {name}.
+ GotoIf(WordEqual(feedback, name), &if_polymorphic_name);
+
+ // Try to internalize the {name} if it isn't already.
+ TryToName(name, &miss, &var_index, &if_internalized, &var_name, &miss,
+ &if_notinternalized);
+
+ BIND(&if_internalized);
+ {
+ // The {var_name} now contains a unique name.
+ Branch(WordEqual(feedback, var_name.value()), &if_polymorphic_name,
+ &miss);
+ }
+
+ BIND(&if_notinternalized);
+ {
+ // Try to internalize the {name}.
+ Node* function = ExternalConstant(
+ ExternalReference::try_internalize_string_function(isolate()));
+ var_name.Bind(CallCFunction1(MachineType::AnyTagged(),
+ MachineType::AnyTagged(), function, name));
+ Goto(&if_internalized);
+ }
+
+ BIND(&if_polymorphic_name);
+ {
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ Node* name = var_name.value();
+ TailCallBuiltin(Builtins::kKeyedLoadIC_PolymorphicName, p->context,
+ p->receiver, name, p->slot, p->vector);
+ }
}
+
BIND(&miss);
{
Comment("KeyedLoadIC_miss");
@@ -2325,14 +2578,26 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
BIND(&if_notunique);
{
if (FLAG_internalize_on_the_fly) {
- Label not_in_string_table(this);
- TryInternalizeString(p->name, &if_index, &var_index, &if_unique_name,
- &var_unique, &not_in_string_table, &slow);
-
- BIND(&not_in_string_table);
- // If the string was not found in the string table, then no object can
- // have a property with that name.
- Return(UndefinedConstant());
+ // Ideally we could return undefined directly here if the name is not
+ // found in the string table, i.e. it was never internalized, but that
+ // invariant doesn't hold with named property interceptors (at this
+ // point), so we take the {slow} path instead.
+ Label if_in_string_table(this);
+ TryInternalizeString(p->name, &if_index, &var_index, &if_in_string_table,
+ &var_unique, &slow, &slow);
+
+ BIND(&if_in_string_table);
+ {
+ // TODO(bmeurer): We currently use a version of GenericPropertyLoad
+ // here, where we don't try to probe the megamorphic stub cache after
+ // successfully internalizing the incoming string. Past experiments
+ // with this have shown that it causes too much traffic on the stub
+ // cache. We may want to re-evaluate that in the future.
+ LoadICParameters pp = *p;
+ pp.name = var_unique.value();
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, &slow,
+ kDontUseStubCache);
+ }
} else {
Goto(&slow);
}
@@ -2348,6 +2613,46 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
+void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
+ VARIABLE(var_handler, MachineRepresentation::kTagged);
+ Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
+
+ Node* receiver = p->receiver;
+ Node* receiver_map = LoadReceiverMap(receiver);
+ Node* name = p->name;
+ Node* vector = p->vector;
+ Node* slot = p->slot;
+ Node* context = p->context;
+
+ // When we get here, we know that the {name} matches the recorded
+ // feedback name in the {vector} and can safely be used for the
+ // LoadIC handler logic below.
+ CSA_ASSERT(this, IsName(name));
+ CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map)));
+ CSA_ASSERT(this, WordEqual(name, LoadFeedbackVectorSlot(vector, slot, 0,
+ SMI_PARAMETERS)));
+
+ // Check if we have a matching handler for the {receiver_map}.
+ Node* array =
+ LoadFeedbackVectorSlot(vector, slot, kPointerSize, SMI_PARAMETERS);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+ 1);
+
+ BIND(&if_handler);
+ {
+ ExitPoint direct_exit(this);
+ HandleLoadICHandlerCase(p, var_handler.value(), &miss, &direct_exit,
+ kOnlyProperties);
+ }
+
+ BIND(&miss);
+ {
+ Comment("KeyedLoadIC_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot,
+ vector);
+ }
+}
+
void AccessorAssembler::StoreIC(const StoreICParameters* p) {
VARIABLE(var_handler, MachineRepresentation::kTagged);
Label if_handler(this, &var_handler), try_polymorphic(this, Label::kDeferred),
@@ -2702,6 +3007,19 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
KeyedLoadICGeneric(&p);
}
+void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadICPolymorphicName(&p);
+}
+
void AccessorAssembler::GenerateStoreIC() {
typedef StoreWithVectorDescriptor Descriptor;
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 4fe1c0bbf9..b11ff738c1 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -31,6 +31,7 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateKeyedLoadIC();
void GenerateKeyedLoadICTrampoline();
void GenerateKeyedLoadIC_Megamorphic();
+ void GenerateKeyedLoadIC_PolymorphicName();
void GenerateStoreIC();
void GenerateStoreICTrampoline();
@@ -108,12 +109,15 @@ class AccessorAssembler : public CodeStubAssembler {
Node* feedback, Variable* var_handler,
Label* if_handler, Label* miss, ExitPoint* exit_point);
+ Node* LoadDescriptorValue(Node* map, Node* descriptor);
+
void LoadIC_Uninitialized(const LoadICParameters* p);
void LoadICProtoArray(const LoadICParameters* p, Node* handler,
bool throw_reference_error_if_nonexistent);
void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
void KeyedLoadIC(const LoadICParameters* p);
void KeyedLoadICGeneric(const LoadICParameters* p);
+ void KeyedLoadICPolymorphicName(const LoadICParameters* p);
void StoreIC(const StoreICParameters* p);
void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
ExitPoint* exit_point, Label* miss);
@@ -182,9 +186,15 @@ class AccessorAssembler : public CodeStubAssembler {
Representation representation, Node* value,
Node* transition, Label* miss);
+ void HandleStoreICNativeDataProperty(const StoreICParameters* p, Node* holder,
+ Node* handler_word);
+
void HandleStoreToProxy(const StoreICParameters* p, Node* proxy, Label* miss,
ElementSupport support_elements);
+ void HandleStoreAccessor(const StoreICParameters* p, Node* holder,
+ Node* handler_word);
+
// KeyedLoadIC_Generic implementation.
void GenericElementLoad(Node* receiver, Node* receiver_map,
@@ -198,6 +208,8 @@ class AccessorAssembler : public CodeStubAssembler {
// Low-level helpers.
+ Node* GetLanguageMode(Node* vector, Node* slot);
+
Node* PrepareValueForStore(Node* handler_word, Node* holder,
Representation representation, Node* transition,
Node* value, Label* bailout);
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
deleted file mode 100644
index 627c06c858..0000000000
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/ic/access-compiler.h"
-
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, r3, r0, r4};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, r3, r4};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
deleted file mode 100644
index ac5d3ecc22..0000000000
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ /dev/null
@@ -1,434 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/assembler-inl.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(cp);
- }
- __ Ret();
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- __ push(cp);
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ldr(scratch,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver, value());
- __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER);
- __ mov(r0, Operand(1));
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(r0);
-
- // Restore context register.
- __ pop(cp);
- }
- __ Ret();
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ push(slot);
- __ push(vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ pop(vector);
- __ pop(slot);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ add(sp, sp, Operand(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- NameDictionaryLookupStub::GenerateNegativeLookup(
- masm, miss_label, &done, receiver, properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ b(ne, miss);
-}
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch_in);
- DCHECK(receiver != scratch_in);
- __ push(accessor_holder);
- __ push(receiver);
- // Write the arguments to stack frame.
- if (is_store) {
- DCHECK(receiver != store_parameter);
- DCHECK(scratch_in != store_parameter);
- __ push(store_parameter);
- }
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = r0;
- Register data = r4;
- Register holder = r2;
- Register api_function_address = r1;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ ldr(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ ldr(data,
- FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ ldr(data,
- FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
- __ mov(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Operand(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ ldr(scratch1, NativeContextMemOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ cmp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ b(eq, &done);
-
- // Compare security tokens of current and expected native contexts.
- __ ldr(scratch1,
- ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ ldr(scratch2,
- ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ cmp(scratch1, scratch2);
- }
- __ b(ne, miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ mov(scratch1, Operand(validity_cell));
- __ ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- __ b(ne, miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- __ bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
-
- __ push(receiver()); // receiver
- __ push(holder_reg);
-
- {
- UseScratchRegisterScope temps(masm());
- Register scratch = temps.Acquire();
-
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ mov(scratch, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ mov(scratch, Operand(cell));
- }
- __ push(scratch);
- __ mov(scratch, Operand(name));
- __ Push(scratch, value());
- }
- __ Push(Smi::FromInt(language_mode));
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
deleted file mode 100644
index 1b58e5c697..0000000000
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/ic/access-compiler.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-// TODO(all): The so-called scratch registers are significant in some cases. For
-// example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3) is
-// actually
-// used for KeyedStoreCompiler::transition_map(). We should verify which
-// registers are actually scratch registers, and which are important. For now,
-// we use the same assignments as ARM to remain on the safe side.
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, x3, x0, x4};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, x3, x4};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
deleted file mode 100644
index ee3a5b9245..0000000000
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ Push(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ Pop(vector, slot);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ Drop(2);
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(!AreAliased(receiver, scratch0, scratch1));
- DCHECK(name->IsUniqueName());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(scratch0, kInterceptorOrAccessCheckNeededMask,
- miss_label);
-
- // Check that receiver is a JSObject.
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Cmp(scratch0, FIRST_JS_RECEIVER_TYPE);
- __ B(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ Ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- // Check that the properties array is a dictionary.
- __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
-
- NameDictionaryLookupStub::GenerateNegativeLookup(
- masm, miss_label, &done, receiver, properties, name, scratch1);
- __ Bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
-}
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(!AreAliased(accessor_holder, scratch));
- DCHECK(!AreAliased(receiver, scratch));
-
- MacroAssembler::PushPopQueue queue(masm);
- queue.Queue(accessor_holder);
- queue.Queue(receiver);
- // Write the arguments to the stack frame.
- if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch.is(store_parameter));
- queue.Queue(store_parameter);
- }
- queue.PushQueued();
-
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = x0;
- Register data = x4;
- Register holder = x2;
- Register api_function_address = x1;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Mov(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ Ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ Ldr(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ Ldr(data,
- FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ Ldr(data,
- FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(
- &fun, ExternalReference::DIRECT_API_CALL, masm->isolate());
- __ Mov(api_function_address, ref);
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- Label miss;
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context and value registers, so we can restore them later.
- __ Push(cp, value());
-
- if (accessor_index >= 0) {
- DCHECK(!AreAliased(holder, scratch));
- DCHECK(!AreAliased(receiver, scratch));
- DCHECK(!AreAliased(value(), scratch));
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(scratch,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver, value());
- __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER);
- __ Mov(x0, 1);
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- // Also, restore the context register.
- __ Pop(x0, cp);
- }
- __ Ret();
-}
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ Pop(cp);
- }
- __ Ret();
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ Bind(label);
- __ Mov(this->name(), Operand(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ Ldr(scratch1, NativeContextMemOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ Cmp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ B(eq, &done);
-
- // Compare security tokens of current and expected native contexts.
- __ Ldr(scratch1,
- ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ Ldr(scratch2,
- ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ Cmp(scratch1, scratch2);
- }
- __ B(ne, miss);
-
- __ Bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // object_reg and holder_reg registers can alias.
- DCHECK(!AreAliased(object_reg, scratch1, scratch2));
- DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ Mov(scratch1, Operand(validity_cell));
- __ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- // Compare scratch1 against Map::kPrototypeChainValid.
- static_assert(Map::kPrototypeChainValid == 0,
- "Map::kPrototypeChainValid has unexpected value");
- __ Cbnz(scratch1, miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
- name) == NameDictionary::kNotFound));
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- __ Bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
-
- __ Bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
-
- __ Bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
- Register holder_reg = Frontend(name);
-
- // Stub never generated for non-global objects that require access checks.
- DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
-
- // receiver() and holder_reg can alias.
- DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
- DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ Mov(scratch1(), Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ Mov(scratch1(), Operand(cell));
- }
- __ Mov(scratch2(), Operand(name));
- {
- UseScratchRegisterScope temps(this->masm());
- Register temp = temps.AcquireX();
- __ Mov(temp, Smi::FromInt(language_mode));
- __ Push(receiver(), holder_reg, scratch1(), scratch2(), value(), temp);
- }
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index cfe7317884..717c38e356 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -133,8 +133,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
Label if_lhsisoddball(this), if_lhsisnotoddball(this);
Node* lhs_instance_type = LoadInstanceType(lhs);
- Node* lhs_is_oddball =
- Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE));
+ Node* lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
BIND(&if_lhsisoddball);
@@ -155,10 +154,8 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&lhs_is_bigint);
{
- // Label "bigint" handles BigInt + {anything except string}.
- GotoIf(TaggedIsSmi(rhs), &bigint);
- Branch(IsStringInstanceType(LoadInstanceType(rhs)),
- &call_with_any_feedback, &bigint);
+ GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
+ Branch(IsBigInt(rhs), &bigint, &call_with_any_feedback);
}
BIND(&lhs_is_string);
@@ -186,8 +183,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
// Check if rhs is an oddball. At this point we know lhs is either a
// Smi or number or oddball and rhs is not a number or Smi.
Node* rhs_instance_type = LoadInstanceType(rhs);
- Node* rhs_is_oddball =
- Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE));
+ Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
Branch(IsBigIntInstanceType(rhs_instance_type), &bigint,
&call_with_any_feedback);
@@ -197,7 +193,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(Token::ADD)));
+ SmiConstant(Operation::kAdd)));
Goto(&end);
}
@@ -228,7 +224,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
- Token::Value opcode, bool rhs_is_smi) {
+ Operation op, bool rhs_is_smi) {
Label do_float_operation(this), end(this), call_stub(this),
check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
if_lhsisnotnumber(this, Label::kDeferred),
@@ -322,31 +318,39 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_lhsisnotnumber);
{
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
- // Check if lhs is an oddball.
+ Label if_left_bigint(this), if_left_oddball(this);
Node* lhs_instance_type = LoadInstanceType(lhs);
- GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_bigint);
- Node* lhs_is_oddball =
- Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
+ GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint);
+ Node* lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
+ Branch(lhs_is_oddball, &if_left_oddball, &call_with_any_feedback);
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
+ BIND(&if_left_oddball);
{
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_stub);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ BIND(&if_rhsissmi);
+ {
+ var_type_feedback.Bind(
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ Goto(&call_stub);
+ }
+
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+
+ var_type_feedback.Bind(
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ Goto(&call_stub);
+ }
}
- BIND(&if_rhsisnotsmi);
+ BIND(&if_left_bigint);
{
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
-
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_stub);
+ GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
+ Branch(IsBigInt(rhs), &if_bigint, &call_with_any_feedback);
}
}
@@ -356,8 +360,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
// Smi or number or oddball and rhs is not a number or Smi.
Node* rhs_instance_type = LoadInstanceType(rhs);
GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint);
- Node* rhs_is_oddball =
- Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE));
+ Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
var_type_feedback.Bind(
@@ -370,7 +373,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(opcode)));
+ SmiConstant(op)));
Goto(&end);
}
@@ -383,17 +386,17 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&call_stub);
{
Node* result;
- switch (opcode) {
- case Token::SUB:
+ switch (op) {
+ case Operation::kSubtract:
result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
break;
- case Token::MUL:
+ case Operation::kMultiply:
result = CallBuiltin(Builtins::kMultiply, context, lhs, rhs);
break;
- case Token::DIV:
+ case Operation::kDivide:
result = CallBuiltin(Builtins::kDivide, context, lhs, rhs);
break;
- case Token::MOD:
+ case Operation::kModulus:
result = CallBuiltin(Builtins::kModulus, context, lhs, rhs);
break;
default:
@@ -452,7 +455,7 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
};
return Generate_BinaryOperationWithFeedback(
context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
- Token::SUB, rhs_is_smi);
+ Operation::kSubtract, rhs_is_smi);
}
Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
@@ -471,7 +474,7 @@ Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
};
return Generate_BinaryOperationWithFeedback(
context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
- Token::MUL, rhs_is_smi);
+ Operation::kMultiply, rhs_is_smi);
}
Node* BinaryOpAssembler::Generate_DivideWithFeedback(
@@ -505,7 +508,7 @@ Node* BinaryOpAssembler::Generate_DivideWithFeedback(
};
return Generate_BinaryOperationWithFeedback(
context, dividend, divisor, slot_id, feedback_vector, smiFunction,
- floatFunction, Token::DIV, rhs_is_smi);
+ floatFunction, Operation::kDivide, rhs_is_smi);
}
Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
@@ -523,7 +526,16 @@ Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
};
return Generate_BinaryOperationWithFeedback(
context, dividend, divisor, slot_id, feedback_vector, smiFunction,
- floatFunction, Token::MOD, rhs_is_smi);
+ floatFunction, Operation::kModulus, rhs_is_smi);
+}
+
+Node* BinaryOpAssembler::Generate_ExponentiateWithFeedback(
+ Node* context, Node* base, Node* exponent, Node* slot_id,
+ Node* feedback_vector, bool rhs_is_smi) {
+ // We currently don't optimize exponentiation based on feedback.
+ Node* dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
+ UpdateFeedback(dummy_feedback, feedback_vector, slot_id);
+ return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
}
} // namespace internal
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index 625dfce79a..d7afd7b655 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -42,6 +42,11 @@ class BinaryOpAssembler : public CodeStubAssembler {
Node* divisor, Node* slot_id,
Node* feedback_vector, bool rhs_is_smi);
+ Node* Generate_ExponentiateWithFeedback(Node* context, Node* dividend,
+ Node* divisor, Node* slot_id,
+ Node* feedback_vector,
+ bool rhs_is_smi);
+
private:
typedef std::function<Node*(Node*, Node*, Variable*)> SmiOperation;
typedef std::function<Node*(Node*, Node*)> FloatOperation;
@@ -49,7 +54,7 @@ class BinaryOpAssembler : public CodeStubAssembler {
Node* Generate_BinaryOperationWithFeedback(
Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
- Token::Value opcode, bool rhs_is_smi);
+ Operation op, bool rhs_is_smi);
};
} // namespace internal
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 975f789596..c8705bc6c9 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -20,6 +20,21 @@ CallOptimization::CallOptimization(Handle<Object> function) {
}
}
+Context* CallOptimization::GetAccessorContext(Map* holder_map) const {
+ if (is_constant_call()) {
+ return constant_function_->context()->native_context();
+ }
+ JSFunction* constructor = JSFunction::cast(holder_map->GetConstructor());
+ return constructor->context()->native_context();
+}
+
+bool CallOptimization::IsCrossContextLazyAccessorPair(Context* native_context,
+ Map* holder_map) const {
+ DCHECK(native_context->IsNativeContext());
+ if (is_constant_call()) return false;
+ return native_context != GetAccessorContext(holder_map);
+}
+
Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
Handle<Map> object_map, HolderLookup* holder_lookup) const {
DCHECK(is_simple_api_call());
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index 8ca8cde112..ee421355e6 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -6,7 +6,6 @@
#define V8_IC_CALL_OPTIMIZATION_H_
#include "src/code-stubs.h"
-#include "src/ic/access-compiler.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
@@ -17,6 +16,10 @@ class CallOptimization BASE_EMBEDDED {
public:
explicit CallOptimization(Handle<Object> function);
+ Context* GetAccessorContext(Map* holder_map) const;
+ bool IsCrossContextLazyAccessorPair(Context* native_context,
+ Map* holder_map) const;
+
bool is_constant_call() const { return !constant_function_.is_null(); }
Handle<JSFunction> constant_function() const {
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
deleted file mode 100644
index 749f2fa963..0000000000
--- a/deps/v8/src/ic/handler-compiler.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/assembler-inl.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/handler-configuration-inl.h"
-#include "src/ic/ic-inl.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<Code> PropertyHandlerCompiler::GetCode(Handle<Name> name) {
- // Create code object in the heap.
- CodeDesc desc;
- masm()->GetCode(isolate(), &desc);
- Handle<Code> code =
- factory()->NewCode(desc, Code::STUB, masm()->CodeObject());
- DCHECK(code->is_stub());
- code->set_stub_key(CodeStub::NoCacheKey());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
- char* raw_name = !name.is_null() && name->IsString()
- ? String::cast(*name)->ToCString().get()
- : nullptr;
- CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
- OFStream os(trace_scope.file());
- code->Disassemble(raw_name, os);
- }
-#endif
-
- PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
- AbstractCode::cast(*code), *name));
-
-#ifdef DEBUG
- code->VerifyEmbeddedObjects();
-#endif
- return code;
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
- Handle<Name> name,
- Label* miss) {
- if (map()->IsPrimitiveMap() || map()->IsJSGlobalProxyMap()) {
- // If the receiver is a global proxy and if we get to this point then
- // the compile-time (current) native context has access to global proxy's
- // native context. Since access rights revocation is not supported at all,
- // we can generate a check that an execution-time native context is either
- // the same as compile-time native context or has the same access token.
- Handle<Context> native_context = isolate()->native_context();
- Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
-
- bool compare_native_contexts_only = map()->IsPrimitiveMap();
- GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss,
- compare_native_contexts_only);
- }
-
- // Check that the maps starting from the prototype haven't changed.
- return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
- miss);
-}
-
-
-// Frontend for store uses the name register. It has to be restored before a
-// miss.
-Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
- Handle<Name> name,
- Label* miss) {
- if (map()->IsJSGlobalProxyMap()) {
- Handle<Context> native_context = isolate()->native_context();
- Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
- GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss, false);
- }
-
- return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
- miss);
-}
-
-// The ICs that don't pass slot and vector through the stack have to
-// save/restore them in the dispatcher.
-bool PropertyHandlerCompiler::ShouldPushPopSlotAndVector() {
- switch (type()) {
- case LOAD:
- return true;
- case STORE:
- return !StoreWithVectorDescriptor::kPassLastArgsOnStack;
- }
- UNREACHABLE();
- return false;
-}
-
-Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
- Label miss;
- if (ShouldPushPopSlotAndVector()) PushVectorAndSlot();
- Register reg = FrontendHeader(receiver(), name, &miss);
- FrontendFooter(name, &miss);
- // The footer consumes the vector and slot from the stack if miss occurs.
- if (ShouldPushPopSlotAndVector()) DiscardVectorAndSlot();
- return reg;
-}
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, const CallOptimization& call_optimization,
- int accessor_index, Handle<Code> slow_stub) {
- DCHECK(call_optimization.is_simple_api_call());
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- GenerateTailCall(masm(), slow_stub);
- }
- Register holder = Frontend(name);
- GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
- scratch2(), false, no_reg, holder, accessor_index);
- return GetCode(name);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
- Handle<JSObject> object, Handle<Name> name, int accessor_index,
- int expected_arguments) {
- Register holder = Frontend(name);
- GenerateStoreViaSetter(masm(), map(), receiver(), holder, accessor_index,
- expected_arguments, scratch2());
-
- return GetCode(name);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization, int accessor_index,
- Handle<Code> slow_stub) {
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- GenerateTailCall(masm(), slow_stub);
- }
- Register holder = Frontend(name);
- if (Descriptor::kPassLastArgsOnStack) {
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
- }
- GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
- receiver(), scratch2(), true, value(), holder,
- accessor_index);
- return GetCode(name);
-}
-
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
deleted file mode 100644
index 2fe5870ef1..0000000000
--- a/deps/v8/src/ic/handler-compiler.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_HANDLER_COMPILER_H_
-#define V8_IC_HANDLER_COMPILER_H_
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-class CallOptimization;
-
-class PropertyHandlerCompiler : public PropertyAccessCompiler {
- protected:
- PropertyHandlerCompiler(Isolate* isolate, Type type, Handle<Map> map,
- Handle<JSObject> holder)
- : PropertyAccessCompiler(isolate, type), map_(map), holder_(holder) {}
-
- virtual ~PropertyHandlerCompiler() {}
-
- // The ICs that don't pass slot and vector through the stack have to
- // save/restore them in the dispatcher.
- bool ShouldPushPopSlotAndVector();
-
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss) {
- UNREACHABLE();
- }
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
-
- // Frontend loads from receiver(), returns holder register which may be
- // different.
- Register Frontend(Handle<Name> name);
-
- // When FLAG_vector_ics is true, handlers that have the possibility of missing
- // will need to save and pass these to miss handlers.
- void PushVectorAndSlot() { PushVectorAndSlot(vector(), slot()); }
- void PushVectorAndSlot(Register vector, Register slot);
- void PopVectorAndSlot() { PopVectorAndSlot(vector(), slot()); }
- void PopVectorAndSlot(Register vector, Register slot);
-
- void DiscardVectorAndSlot();
-
- // TODO(verwaest): Make non-static.
- static void GenerateApiAccessorCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver, Register scratch,
- bool is_store, Register store_parameter,
- Register accessor_holder,
- int accessor_index);
-
- // Helper function used to check that the dictionary doesn't contain
- // the property. This function may return false negatives, so miss_label
- // must always call a backup property check that is complete.
- // This function is safe to call if the receiver has fast properties.
- // Name must be unique and receiver must be a heap object.
- static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name, Register r0,
- Register r1);
-
- // Generate code to check that a global property cell is empty. Create
- // the property cell at compilation time if no cell exists for the
- // property.
- static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<JSGlobalObject> global,
- Handle<Name> name, Register scratch,
- Label* miss);
-
- // Generates check that current native context has the same access rights
- // as the given |native_context_cell|.
- // If |compare_native_contexts_only| is true then access check is considered
- // passed if the execution-time native context is equal to contents of
- // |native_context_cell|.
- // If |compare_native_contexts_only| is false then access check is considered
- // passed if the execution-time native context is equal to contents of
- // |native_context_cell| or security tokens of both contexts are equal.
- void GenerateAccessCheck(Handle<WeakCell> native_context_cell,
- Register scratch1, Register scratch2, Label* miss,
- bool compare_native_contexts_only);
-
- // Generates code that verifies that the property holder has not changed
- // (checking maps of objects in the prototype chain for fast and global
- // objects or doing negative lookup for slow objects, ensures that the
- // property cells for global objects are still empty) and checks that the map
- // of the holder has not changed. If necessary the function also generates
- // code for security check in case of global object holders. Helps to make
- // sure that the current IC is still valid.
- //
- // The scratch and holder registers are always clobbered, but the object
- // register is only clobbered if it the same as the holder register. The
- // function returns a register containing the holder - either object_reg or
- // holder_reg.
- Register CheckPrototypes(Register object_reg, Register holder_reg,
- Register scratch1, Register scratch2,
- Handle<Name> name, Label* miss);
-
- Handle<Code> GetCode(Handle<Name> name);
- Handle<Map> map() const { return map_; }
- Handle<JSObject> holder() const { return holder_; }
-
- private:
- Handle<Map> map_;
- Handle<JSObject> holder_;
-};
-
-
-class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
- public:
- NamedLoadHandlerCompiler(Isolate* isolate, Handle<Map> map,
- Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, LOAD, map, holder) {}
-
- virtual ~NamedLoadHandlerCompiler() {}
-
- Handle<Code> CompileLoadCallback(Handle<Name> name,
- const CallOptimization& call_optimization,
- int accessor_index, Handle<Code> slow_stub);
-
- static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm);
-
- protected:
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss);
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss);
-
- private:
- Register scratch3() { return registers_[4]; }
-};
-
-
-class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
- public:
- // All store handlers use StoreWithVectorDescriptor calling convention.
- typedef StoreWithVectorDescriptor Descriptor;
-
- explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<Map> map,
- Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, STORE, map, holder) {
-#ifdef DEBUG
- if (Descriptor::kPassLastArgsOnStack) {
- ZapStackArgumentsRegisterAliases();
- }
-#endif
- }
-
- virtual ~NamedStoreHandlerCompiler() {}
-
- void ZapStackArgumentsRegisterAliases();
-
- Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- Handle<AccessorInfo> callback,
- LanguageMode language_mode);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization,
- int accessor_index, Handle<Code> slow_stub);
- Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
- int accessor_index,
- int expected_arguments);
-
- static void GenerateStoreViaSetter(MacroAssembler* masm, Handle<Map> map,
- Register receiver, Register holder,
- int accessor_index, int expected_arguments,
- Register scratch);
-
- static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
- GenerateStoreViaSetter(masm, Handle<Map>::null(), no_reg, no_reg, -1, -1,
- no_reg);
- }
-
- protected:
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss);
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss);
- void GenerateRestoreName(Label* label, Handle<Name> name);
-
- private:
- static Register value();
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_IC_HANDLER_COMPILER_H_
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index dc1d595723..5c8e0511cf 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -37,19 +37,17 @@ Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
int config = KindBits::encode(kField) |
IsInobjectBits::encode(field_index.is_inobject()) |
IsDoubleBits::encode(field_index.is_double()) |
- FieldOffsetBits::encode(field_index.offset());
+ FieldIndexBits::encode(field_index.index());
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadConstant(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kConstant) | IsAccessorInfoBits::encode(false) |
- DescriptorBits::encode(descriptor);
+ int config = KindBits::encode(kConstant) | DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadAccessor(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kAccessor) | IsAccessorInfoBits::encode(false) |
- DescriptorBits::encode(descriptor);
+ int config = KindBits::encode(kAccessor) | DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
@@ -58,12 +56,20 @@ Handle<Smi> LoadHandler::LoadProxy(Isolate* isolate) {
return handle(Smi::FromInt(config), isolate);
}
-Handle<Smi> LoadHandler::LoadApiGetter(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kConstant) | IsAccessorInfoBits::encode(true) |
+Handle<Smi> LoadHandler::LoadNativeDataProperty(Isolate* isolate,
+ int descriptor) {
+ int config = KindBits::encode(kNativeDataProperty) |
DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
+Handle<Smi> LoadHandler::LoadApiGetter(Isolate* isolate,
+ bool holder_is_receiver) {
+ int config = KindBits::encode(
+ holder_is_receiver ? kApiGetter : kApiGetterHolderIsPrototype);
+ return handle(Smi::FromInt(config), isolate);
+}
+
Handle<Smi> LoadHandler::LoadModuleExport(Isolate* isolate, int index) {
int config =
KindBits::encode(kModuleExport) | ExportsIndexBits::encode(index);
@@ -100,21 +106,32 @@ Handle<Smi> LoadHandler::LoadNonExistent(Isolate* isolate) {
Handle<Smi> LoadHandler::LoadElement(Isolate* isolate,
ElementsKind elements_kind,
bool convert_hole_to_undefined,
- bool is_js_array) {
- int config = KindBits::encode(kElement) |
- ElementsKindBits::encode(elements_kind) |
- ConvertHoleBits::encode(convert_hole_to_undefined) |
- IsJsArrayBits::encode(is_js_array);
+ bool is_js_array,
+ KeyedAccessLoadMode load_mode) {
+ int config =
+ KindBits::encode(kElement) |
+ AllowOutOfBoundsBits::encode(load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) |
+ ElementsKindBits::encode(elements_kind) |
+ ConvertHoleBits::encode(convert_hole_to_undefined) |
+ IsJsArrayBits::encode(is_js_array);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
+ KeyedAccessLoadMode load_mode) {
+ int config =
+ KindBits::encode(kIndexedString) |
+ AllowOutOfBoundsBits::encode(load_mode == LOAD_IGNORE_OUT_OF_BOUNDS);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
- int config = KindBits::encode(kStoreGlobalProxy);
+ int config = KindBits::encode(kGlobalProxy);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) {
- int config = KindBits::encode(kStoreNormal);
+ int config = KindBits::encode(kNormal);
return handle(Smi::FromInt(config), isolate);
}
@@ -134,35 +151,35 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation,
bool extend_storage) {
- StoreHandler::FieldRepresentation field_rep;
+ FieldRepresentation field_rep;
switch (representation.kind()) {
case Representation::kSmi:
- field_rep = StoreHandler::kSmi;
+ field_rep = kSmi;
break;
case Representation::kDouble:
- field_rep = StoreHandler::kDouble;
+ field_rep = kDouble;
break;
case Representation::kHeapObject:
- field_rep = StoreHandler::kHeapObject;
+ field_rep = kHeapObject;
break;
case Representation::kTagged:
- field_rep = StoreHandler::kTagged;
+ field_rep = kTagged;
break;
default:
UNREACHABLE();
}
- DCHECK(kind == kStoreField || kind == kTransitionToField ||
- (kind == kStoreConstField && FLAG_track_constant_fields));
+ DCHECK(kind == kField || kind == kTransitionToField ||
+ (kind == kConstField && FLAG_track_constant_fields));
DCHECK_IMPLIES(extend_storage, kind == kTransitionToField);
DCHECK_IMPLIES(field_index.is_inobject(), !extend_storage);
- int config = StoreHandler::KindBits::encode(kind) |
- StoreHandler::ExtendStorageBits::encode(extend_storage) |
- StoreHandler::IsInobjectBits::encode(field_index.is_inobject()) |
- StoreHandler::FieldRepresentationBits::encode(field_rep) |
- StoreHandler::DescriptorBits::encode(descriptor) |
- StoreHandler::FieldOffsetBits::encode(field_index.offset());
+ int config = KindBits::encode(kind) |
+ ExtendStorageBits::encode(extend_storage) |
+ IsInobjectBits::encode(field_index.is_inobject()) |
+ FieldRepresentationBits::encode(field_rep) |
+ DescriptorBits::encode(descriptor) |
+ FieldIndexBits::encode(field_index.index());
return handle(Smi::FromInt(config), isolate);
}
@@ -171,7 +188,7 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
PropertyConstness constness,
Representation representation) {
DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
- Kind kind = constness == kMutable ? kStoreField : kStoreConstField;
+ Kind kind = constness == kMutable ? kField : kConstField;
return StoreField(isolate, kind, descriptor, field_index, representation,
false);
}
@@ -187,31 +204,48 @@ Handle<Smi> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
Handle<Smi> StoreHandler::TransitionToConstant(Isolate* isolate,
int descriptor) {
DCHECK(!FLAG_track_constant_fields);
- int config =
- StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
- StoreHandler::DescriptorBits::encode(descriptor);
+ int config = KindBits::encode(kTransitionToConstant) |
+ DescriptorBits::encode(descriptor);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Smi> StoreHandler::StoreNativeDataProperty(Isolate* isolate,
+ int descriptor) {
+ int config = KindBits::encode(kNativeDataProperty) |
+ DescriptorBits::encode(descriptor);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Smi> StoreHandler::StoreAccessor(Isolate* isolate, int descriptor) {
+ int config = KindBits::encode(kAccessor) | DescriptorBits::encode(descriptor);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Smi> StoreHandler::StoreApiSetter(Isolate* isolate,
+ bool holder_is_receiver) {
+ int config = KindBits::encode(
+ holder_is_receiver ? kApiSetter : kApiSetterHolderIsPrototype);
return handle(Smi::FromInt(config), isolate);
}
// static
WeakCell* StoreHandler::GetTransitionCell(Object* handler) {
if (handler->IsTuple3()) {
- STATIC_ASSERT(kTransitionOrHolderCellOffset == Tuple3::kValue1Offset);
+ STATIC_ASSERT(kDataOffset == Tuple3::kValue1Offset);
WeakCell* cell = WeakCell::cast(Tuple3::cast(handler)->value1());
DCHECK(!cell->cleared());
return cell;
}
- DCHECK(handler->IsFixedArray());
- WeakCell* cell = WeakCell::cast(
- FixedArray::cast(handler)->get(kTransitionMapOrHolderCellIndex));
+ DCHECK(handler->IsFixedArrayExact());
+ WeakCell* cell = WeakCell::cast(FixedArray::cast(handler)->get(kDataIndex));
DCHECK(!cell->cleared());
return cell;
}
// static
bool StoreHandler::IsHandler(Object* maybe_handler) {
- return maybe_handler->IsFixedArray() || maybe_handler->IsTuple3();
+ return maybe_handler->IsFixedArrayExact() || maybe_handler->IsTuple3();
}
} // namespace internal
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index b294c864a9..077bdb49e1 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -99,25 +99,6 @@ int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
Handle<FixedArray>(), 0);
}
-enum class HolderCellRequest {
- kGlobalPropertyCell,
- kHolder,
-};
-
-Handle<WeakCell> HolderCell(Isolate* isolate, Handle<JSReceiver> holder,
- Handle<Name> name, HolderCellRequest request) {
- if (request == HolderCellRequest::kGlobalPropertyCell) {
- DCHECK(holder->IsJSGlobalObject());
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(holder);
- GlobalDictionary* dict = global->global_dictionary();
- int number = dict->FindEntry(name);
- DCHECK_NE(NameDictionary::kNotFound, number);
- Handle<PropertyCell> cell(dict->CellAt(number), isolate);
- return isolate->factory()->NewWeakCell(cell);
- }
- return Map::GetOrCreatePrototypeWeakCell(holder, isolate);
-}
-
} // namespace
// static
@@ -125,7 +106,8 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
Handle<Map> receiver_map,
Handle<JSReceiver> holder,
Handle<Name> name,
- Handle<Smi> smi_handler) {
+ Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data) {
int checks_count =
GetPrototypeCheckCount(isolate, receiver_map, holder, name);
DCHECK_LE(0, checks_count);
@@ -144,23 +126,20 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
DCHECK(!validity_cell.is_null());
- // LoadIC dispatcher expects PropertyCell as a "holder" in case of kGlobal
- // handler kind.
- HolderCellRequest request = GetHandlerKind(*smi_handler) == kGlobal
- ? HolderCellRequest::kGlobalPropertyCell
- : HolderCellRequest::kHolder;
-
- Handle<WeakCell> holder_cell = HolderCell(isolate, holder, name, request);
+ Handle<Object> data;
+ if (!maybe_data.ToHandle(&data)) {
+ data = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
+ }
if (checks_count == 0) {
- return isolate->factory()->NewTuple3(holder_cell, smi_handler,
- validity_cell, TENURED);
+ return isolate->factory()->NewTuple3(data, smi_handler, validity_cell,
+ TENURED);
}
Handle<FixedArray> handler_array(isolate->factory()->NewFixedArray(
kFirstPrototypeIndex + checks_count, TENURED));
handler_array->set(kSmiHandlerIndex, *smi_handler);
handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kHolderCellIndex, *holder_cell);
+ handler_array->set(kDataIndex, *data);
InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
kFirstPrototypeIndex);
return handler_array;
@@ -203,13 +182,27 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
handler_array->set(kSmiHandlerIndex, *smi_handler);
handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kHolderCellIndex, *holder);
+ handler_array->set(kDataIndex, *holder);
InitPrototypeChecks(isolate, receiver_map, end, name, handler_array,
kFirstPrototypeIndex);
return handler_array;
}
// static
+KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(Object* handler) {
+ DisallowHeapAllocation no_gc;
+ if (handler->IsSmi()) {
+ int const raw_handler = Smi::cast(handler)->value();
+ Kind const kind = KindBits::decode(raw_handler);
+ if ((kind == kElement || kind == kIndexedString) &&
+ AllowOutOfBoundsBits::decode(raw_handler)) {
+ return LOAD_IGNORE_OUT_OF_BOUNDS;
+ }
+ }
+ return STANDARD_LOAD;
+}
+
+// static
Handle<Object> StoreHandler::StoreElementTransition(
Isolate* isolate, Handle<Map> receiver_map, Handle<Map> transition,
KeyedAccessStoreMode store_mode) {
@@ -228,69 +221,39 @@ Handle<Object> StoreHandler::StoreElementTransition(
return isolate->factory()->NewTuple3(cell, stub, validity_cell, TENURED);
}
-// static
-Handle<Object> StoreHandler::StoreTransition(Isolate* isolate,
- Handle<Map> receiver_map,
- Handle<JSObject> holder,
- Handle<HeapObject> transition,
- Handle<Name> name) {
- Handle<Smi> smi_handler;
- Handle<WeakCell> transition_cell;
-
- if (transition->IsMap()) {
- Handle<Map> transition_map = Handle<Map>::cast(transition);
- if (transition_map->is_dictionary_map()) {
- smi_handler = StoreNormal(isolate);
- } else {
- int descriptor = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors(
- transition_map->instance_descriptors());
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- // Declarative handlers don't support access checks.
- DCHECK(!transition_map->is_access_check_needed());
-
- DCHECK_EQ(kData, details.kind());
- if (details.location() == kDescriptor) {
- smi_handler = TransitionToConstant(isolate, descriptor);
+Handle<Smi> StoreHandler::StoreTransition(Isolate* isolate,
+ Handle<Map> transition_map) {
+ int descriptor = transition_map->LastAdded();
+ Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
- } else {
- DCHECK_EQ(kField, details.location());
- bool extend_storage = Map::cast(transition_map->GetBackPointer())
- ->unused_property_fields() == 0;
-
- FieldIndex index =
- FieldIndex::ForDescriptor(*transition_map, descriptor);
- smi_handler = TransitionToField(isolate, descriptor, index,
- representation, extend_storage);
- }
- }
- // |holder| is either a receiver if the property is non-existent or
- // one of the prototypes.
- DCHECK(!holder.is_null());
- bool is_nonexistent = holder->map() == transition_map->GetBackPointer();
- if (is_nonexistent) holder = Handle<JSObject>::null();
- transition_cell = Map::WeakCellForMap(transition_map);
+ // Declarative handlers don't support access checks.
+ DCHECK(!transition_map->is_access_check_needed());
- } else {
- DCHECK(transition->IsPropertyCell());
- if (receiver_map->IsJSGlobalObjectMap()) {
- // TODO(ishell): this must be handled by StoreGlobalIC once it's finished.
- return StoreGlobal(isolate, Handle<PropertyCell>::cast(transition));
- } else {
- DCHECK(receiver_map->IsJSGlobalProxyMap());
- smi_handler = StoreGlobalProxy(isolate);
- transition_cell = isolate->factory()->NewWeakCell(transition);
- }
+ DCHECK_EQ(kData, details.kind());
+ if (details.location() == PropertyLocation::kDescriptor) {
+ return TransitionToConstant(isolate, descriptor);
}
+ DCHECK_EQ(PropertyLocation::kField, details.location());
+ bool extend_storage =
+ Map::cast(transition_map->GetBackPointer())->UnusedPropertyFields() == 0;
+ FieldIndex index = FieldIndex::ForDescriptor(*transition_map, descriptor);
+ return TransitionToField(isolate, descriptor, index, representation,
+ extend_storage);
+}
+
+// static
+Handle<Object> StoreHandler::StoreThroughPrototype(
+ Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
+ Handle<Name> name, Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data) {
int checks_count =
GetPrototypeCheckCount(isolate, receiver_map, holder, name);
DCHECK_LE(0, checks_count);
- DCHECK(!receiver_map->IsJSGlobalObjectMap());
if (receiver_map->is_access_check_needed()) {
DCHECK(!receiver_map->is_dictionary_map());
@@ -305,16 +268,20 @@ Handle<Object> StoreHandler::StoreTransition(Isolate* isolate,
validity_cell = handle(Smi::kZero, isolate);
}
+ Handle<Object> data;
+ if (!maybe_data.ToHandle(&data)) {
+ data = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
+ }
+
Factory* factory = isolate->factory();
if (checks_count == 0) {
- return factory->NewTuple3(transition_cell, smi_handler, validity_cell,
- TENURED);
+ return factory->NewTuple3(data, smi_handler, validity_cell, TENURED);
}
Handle<FixedArray> handler_array(
factory->NewFixedArray(kFirstPrototypeIndex + checks_count, TENURED));
handler_array->set(kSmiHandlerIndex, *smi_handler);
handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kTransitionMapOrHolderCellIndex, *transition_cell);
+ handler_array->set(kDataIndex, *data);
InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
kFirstPrototypeIndex);
return handler_array;
@@ -333,40 +300,10 @@ Handle<Object> StoreHandler::StoreProxy(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<Name> name) {
Handle<Smi> smi_handler = StoreProxy(isolate);
-
if (receiver.is_identical_to(proxy)) return smi_handler;
-
- int checks_count = GetPrototypeCheckCount(isolate, receiver_map, proxy, name);
-
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
- }
-
- Handle<Object> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- if (validity_cell.is_null()) {
- DCHECK_EQ(0, checks_count);
- validity_cell = handle(Smi::kZero, isolate);
- }
-
- Factory* factory = isolate->factory();
- Handle<WeakCell> holder_cell = factory->NewWeakCell(proxy);
-
- if (checks_count == 0) {
- return factory->NewTuple3(holder_cell, smi_handler, validity_cell, TENURED);
- }
- Handle<FixedArray> handler_array(
- factory->NewFixedArray(kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kTransitionMapOrHolderCellIndex, *holder_cell);
- InitPrototypeChecks(isolate, receiver_map, proxy, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ Handle<WeakCell> holder_cell = isolate->factory()->NewWeakCell(proxy);
+ return StoreThroughPrototype(isolate, receiver_map, proxy, name, smi_handler,
+ holder_cell);
}
Object* StoreHandler::ValidHandlerOrNull(Object* raw_handler, Name* name,
@@ -388,7 +325,7 @@ Object* StoreHandler::ValidHandlerOrNull(Object* raw_handler, Name* name,
}
} else {
- DCHECK(raw_handler->IsFixedArray());
+ DCHECK(raw_handler->IsFixedArrayExact());
FixedArray* handler = FixedArray::cast(raw_handler);
// Check validity cell.
@@ -415,9 +352,8 @@ Object* StoreHandler::ValidHandlerOrNull(Object* raw_handler, Name* name,
int number = dict->FindEntry(isolate, name_handle);
if (number != NameDictionary::kNotFound) {
PropertyDetails details = dict->DetailsAt(number);
- if (details.IsReadOnly() || details.kind() == kAccessor) {
- return nullptr;
- }
+ if (details.IsReadOnly()) return nullptr;
+ if (details.kind() == PropertyKind::kAccessor) return nullptr;
break;
}
}
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 87ff45a46a..3d0990e826 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -19,11 +19,15 @@ class LoadHandler {
public:
enum Kind {
kElement,
+ kIndexedString,
kNormal,
kGlobal,
kField,
kConstant,
kAccessor,
+ kNativeDataProperty,
+ kApiGetter,
+ kApiGetterHolderIsPrototype,
kInterceptor,
kProxy,
kNonExistent,
@@ -47,10 +51,8 @@ class LoadHandler {
// Encoding when KindBits contains kForConstants.
//
- class IsAccessorInfoBits
- : public BitField<bool, LookupOnReceiverBits::kNext, 1> {};
// Index of a value entry in the descriptor array.
- class DescriptorBits : public BitField<unsigned, IsAccessorInfoBits::kNext,
+ class DescriptorBits : public BitField<unsigned, LookupOnReceiverBits::kNext,
kDescriptorIndexBitCount> {};
// Make sure we don't overflow the smi.
STATIC_ASSERT(DescriptorBits::kNext <= kSmiValueSize);
@@ -62,16 +64,21 @@ class LoadHandler {
};
class IsDoubleBits : public BitField<bool, IsInobjectBits::kNext, 1> {};
// +1 here is to cover all possible JSObject header sizes.
- class FieldOffsetBits
- : public BitField<unsigned, IsDoubleBits::kNext,
- kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+ class FieldIndexBits : public BitField<unsigned, IsDoubleBits::kNext,
+ kDescriptorIndexBitCount + 1> {};
// Make sure we don't overflow the smi.
- STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+ STATIC_ASSERT(FieldIndexBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kElement or kIndexedString.
+ //
+ class AllowOutOfBoundsBits : public BitField<bool, KindBits::kNext, 1> {};
//
// Encoding when KindBits contains kElement.
//
- class IsJsArrayBits : public BitField<bool, KindBits::kNext, 1> {};
+ class IsJsArrayBits : public BitField<bool, AllowOutOfBoundsBits::kNext, 1> {
+ };
class ConvertHoleBits : public BitField<bool, IsJsArrayBits::kNext, 1> {};
class ElementsKindBits
: public BitField<ElementsKind, ConvertHoleBits::kNext, 8> {};
@@ -87,7 +94,7 @@ class LoadHandler {
// The layout of an Tuple3 handler representing a load of a field from
// prototype when prototype chain checks do not include non-existing lookups
// or access checks.
- static const int kHolderCellOffset = Tuple3::kValue1Offset;
+ static const int kDataOffset = Tuple3::kValue1Offset;
static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
static const int kValidityCellOffset = Tuple3::kValue3Offset;
@@ -96,7 +103,7 @@ class LoadHandler {
// access checks.
static const int kSmiHandlerIndex = 0;
static const int kValidityCellIndex = 1;
- static const int kHolderCellIndex = 2;
+ static const int kDataIndex = 2;
static const int kFirstPrototypeIndex = 3;
// Decodes kind from Smi-handler.
@@ -124,8 +131,13 @@ class LoadHandler {
// Creates a Smi-handler for calling a getter on a proxy.
static inline Handle<Smi> LoadProxy(Isolate* isolate);
- // Creates a Smi-handler for loading an Api getter property from fast object.
- static inline Handle<Smi> LoadApiGetter(Isolate* isolate, int descriptor);
+ // Creates a Smi-handler for loading a native data property from fast object.
+ static inline Handle<Smi> LoadNativeDataProperty(Isolate* isolate,
+ int descriptor);
+
+ // Creates a Smi-handler for calling a native getter on a fast object.
+ static inline Handle<Smi> LoadApiGetter(Isolate* isolate,
+ bool holder_is_receiver);
// Creates a Smi-handler for loading a Module export.
// |index| is the index to the "value" slot in the Module's "exports"
@@ -143,11 +155,10 @@ class LoadHandler {
// Creates a data handler that represents a prototype chain check followed
// by given Smi-handler that encoded a load from the holder.
// Can be used only if GetPrototypeCheckCount() returns non negative value.
- static Handle<Object> LoadFromPrototype(Isolate* isolate,
- Handle<Map> receiver_map,
- Handle<JSReceiver> holder,
- Handle<Name> name,
- Handle<Smi> smi_handler);
+ static Handle<Object> LoadFromPrototype(
+ Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
+ Handle<Name> name, Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data = MaybeHandle<Object>());
// Creates a Smi-handler for loading a non-existent property. Works only as
// a part of prototype chain check.
@@ -157,7 +168,15 @@ class LoadHandler {
static inline Handle<Smi> LoadElement(Isolate* isolate,
ElementsKind elements_kind,
bool convert_hole_to_undefined,
- bool is_js_array);
+ bool is_js_array,
+ KeyedAccessLoadMode load_mode);
+
+ // Creates a Smi-handler for loading from a String.
+ static inline Handle<Smi> LoadIndexedString(Isolate* isolate,
+ KeyedAccessLoadMode load_mode);
+
+ // Decodes the KeyedAccessLoadMode from a {handler}.
+ static KeyedAccessLoadMode GetKeyedAccessLoadMode(Object* handler);
private:
// Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
@@ -175,30 +194,34 @@ class LoadHandler {
class StoreHandler {
public:
enum Kind {
- kStoreElement,
- kStoreField,
- kStoreConstField,
+ kElement,
+ kField,
+ kConstField,
// TODO(ishell): remove once constant field tracking is done.
- kTransitionToConstant = kStoreConstField,
+ kTransitionToConstant = kConstField,
kTransitionToField,
- kStoreGlobalProxy,
- kStoreNormal,
+ kAccessor,
+ kNativeDataProperty,
+ kApiSetter,
+ kApiSetterHolderIsPrototype,
+ kGlobalProxy,
+ kNormal,
kProxy,
kKindsNumber // Keep last
};
- class KindBits : public BitField<Kind, 0, 3> {};
+ class KindBits : public BitField<Kind, 0, 4> {};
enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
static inline bool IsHandler(Object* maybe_handler);
- // Applicable to kStoreGlobalProxy, kProxy kinds.
+ // Applicable to kGlobalProxy, kProxy kinds.
// Defines whether access rights check should be done on receiver object.
class DoAccessCheckOnReceiverBits
: public BitField<bool, KindBits::kNext, 1> {};
- // Applicable to kStoreField, kTransitionToField and kTransitionToConstant
+ // Applicable to kField, kTransitionToField and kTransitionToConstant
// kinds.
// Index of a value entry in the descriptor array.
@@ -213,23 +236,23 @@ class StoreHandler {
STATIC_ASSERT(DescriptorBits::kNext <= kSmiValueSize);
//
- // Encoding when KindBits contains kStoreField or kTransitionToField.
+ // Encoding when KindBits contains kField or kTransitionToField.
//
class ExtendStorageBits : public BitField<bool, DescriptorBits::kNext, 1> {};
class IsInobjectBits : public BitField<bool, ExtendStorageBits::kNext, 1> {};
class FieldRepresentationBits
: public BitField<FieldRepresentation, IsInobjectBits::kNext, 2> {};
// +1 here is to cover all possible JSObject header sizes.
- class FieldOffsetBits
+ class FieldIndexBits
: public BitField<unsigned, FieldRepresentationBits::kNext,
- kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+ kDescriptorIndexBitCount + 1> {};
// Make sure we don't overflow the smi.
- STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+ STATIC_ASSERT(FieldIndexBits::kNext <= kSmiValueSize);
// The layout of an Tuple3 handler representing a transitioning store
// when prototype chain checks do not include non-existing lookups or access
// checks.
- static const int kTransitionOrHolderCellOffset = Tuple3::kValue1Offset;
+ static const int kDataOffset = Tuple3::kValue1Offset;
static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
static const int kValidityCellOffset = Tuple3::kValue3Offset;
@@ -241,7 +264,7 @@ class StoreHandler {
// when prototype chain checks include non-existing lookups and access checks.
static const int kSmiHandlerIndex = 0;
static const int kValidityCellIndex = 1;
- static const int kTransitionMapOrHolderCellIndex = 2;
+ static const int kDataIndex = 2;
static const int kFirstPrototypeIndex = 3;
// Creates a Smi-handler for storing a field to fast object.
@@ -250,11 +273,24 @@ class StoreHandler {
PropertyConstness constness,
Representation representation);
- static Handle<Object> StoreTransition(Isolate* isolate,
- Handle<Map> receiver_map,
- Handle<JSObject> holder,
- Handle<HeapObject> transition,
- Handle<Name> name);
+ static Handle<Smi> StoreTransition(Isolate* isolate,
+ Handle<Map> transition_map);
+
+ // Creates a Smi-handler for storing a native data property on a fast object.
+ static inline Handle<Smi> StoreNativeDataProperty(Isolate* isolate,
+ int descriptor);
+
+ // Creates a Smi-handler for calling a setter on a fast object.
+ static inline Handle<Smi> StoreAccessor(Isolate* isolate, int descriptor);
+
+ // Creates a Smi-handler for calling a native setter on a fast object.
+ static inline Handle<Smi> StoreApiSetter(Isolate* isolate,
+ bool holder_is_receiver);
+
+ static Handle<Object> StoreThroughPrototype(
+ Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
+ Handle<Name> name, Handle<Smi> smi_handler,
+ MaybeHandle<Object> data = MaybeHandle<Object>());
static Handle<Object> StoreElementTransition(Isolate* isolate,
Handle<Map> receiver_map,
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
deleted file mode 100644
index 411c744659..0000000000
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, ebx, eax, edi};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, ebx, edi};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
deleted file mode 100644
index 9e9a9c58a7..0000000000
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ /dev/null
@@ -1,449 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-
- // Restore context register.
- __ pop(esi);
- }
- __ ret(0);
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ push(slot);
- __ push(vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ pop(vector);
- __ pop(slot);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ add(esp, Immediate(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
- properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-// Generate call to api function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that should will be removed
-// when api call ICs are generated in hydrogen.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch);
- // Copy return value.
- __ pop(scratch);
-
- if (is_store) {
- // Discard stack arguments.
- __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
- kPointerSize));
- }
- // Write the receiver and arguments to stack frame.
- __ push(accessor_holder);
- __ push(receiver);
- if (is_store) {
- DCHECK(!AreAliased(receiver, scratch, store_parameter));
- __ push(store_parameter);
- }
- __ push(scratch);
- // Stack now matches JSFunction abi.
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = edi;
- Register data = ebx;
- Register holder = ecx;
- Register api_function_address = edx;
- scratch = no_reg;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ mov(data, Immediate(isolate->factory()->undefined_value()));
- } else {
- if (optimization.is_constant_call()) {
- __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ mov(api_function_address, Immediate(function_address));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(isolate->factory()->the_hole_value()));
- __ j(not_equal, miss);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[12] : value
- // -- esp[8] : slot
- // -- esp[4] : vector
- // -- esp[0] : return address
- // -----------------------------------
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- __ push(esi);
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- __ mov(scratch,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ push(receiver);
- __ push(value());
- __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
- __ Set(eax, 1);
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
- // Restore context register.
- __ pop(esi);
- }
- if (accessor_index >= 0) {
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
- } else {
- // If we generate a global code snippet for deoptimization only, don't try
- // to drop stack arguments for the StoreIC because they are not a part of
- // expression stack and deoptimizer does not reconstruct them.
- __ ret(0);
- }
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Immediate(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ mov(scratch1, NativeContextOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ cmp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ j(equal, &done);
-
- // Compare security tokens of current and expected native contexts.
- __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ cmp(scratch1, scratch2);
- }
- __ j(not_equal, miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ mov(scratch1, validity_cell);
- __ cmp(FieldOperand(scratch1, Cell::kValueOffset),
- Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
- __ j(not_equal, miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- __ bind(miss);
- if (ShouldPushPopSlotAndVector()) {
- DCHECK_EQ(LOAD, type());
- PopVectorAndSlot();
- }
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- GenerateRestoreName(miss, name);
- DCHECK(!ShouldPushPopSlotAndVector());
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- // Zap register aliases of the arguments passed on the stack to ensure they
- // are properly loaded by the handler (debug-only).
- STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
- STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
- __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
- __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
- __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-
- __ pop(scratch1()); // remove the return address
- // Discard stack arguments.
- __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
- kPointerSize));
- __ push(receiver());
- __ push(holder_reg);
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ Push(callback);
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ Push(cell);
- }
- __ Push(name);
- __ push(value());
- __ push(Immediate(Smi::FromInt(language_mode)));
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index d73ea2a759..e705d38679 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -26,7 +26,7 @@ Address IC::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
return raw_constant_pool();
} else {
- return NULL;
+ return nullptr;
}
}
@@ -35,15 +35,15 @@ Address IC::raw_constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
return *constant_pool_address_;
} else {
- return NULL;
+ return nullptr;
}
}
bool IC::IsHandler(Object* object) {
return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
- object->IsTuple3() || object->IsFixedArray() || object->IsWeakCell() ||
- object->IsCode();
+ object->IsTuple3() || object->IsFixedArrayExact() ||
+ object->IsWeakCell() || object->IsCode();
}
bool IC::AddressIsDeoptimizedCode() const {
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 09920241ee..7e3e6556a1 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -9,13 +9,11 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
#include "src/conversions.h"
#include "src/execution.h"
#include "src/field-type.h"
#include "src/frames-inl.h"
#include "src/ic/call-optimization.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic-stats.h"
@@ -52,8 +50,14 @@ char IC::TransitionMarkFromState(IC::State state) {
UNREACHABLE();
}
+namespace {
-const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
+const char* GetModifier(KeyedAccessLoadMode mode) {
+ if (mode == LOAD_IGNORE_OUT_OF_BOUNDS) return ".IGNORE_OOB";
+ return "";
+}
+
+const char* GetModifier(KeyedAccessStoreMode mode) {
if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
return ".IGNORE_OOB";
@@ -62,6 +66,8 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
return "";
}
+} // namespace
+
#define TRACE_GENERIC_IC(reason) set_slow_stub_reason(reason);
void IC::TraceIC(const char* type, Handle<Object> name) {
@@ -72,36 +78,6 @@ void IC::TraceIC(const char* type, Handle<Object> name) {
}
}
-Address IC::GetAbstractPC(int* line, int* column) const {
- JavaScriptFrameIterator it(isolate());
-
- JavaScriptFrame* frame = it.frame();
- DCHECK(!frame->is_builtin());
- int position = frame->position();
-
- Object* maybe_script = frame->function()->shared()->script();
- if (maybe_script->IsScript()) {
- Handle<Script> script(Script::cast(maybe_script), isolate());
- Script::PositionInfo info;
- Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
- *line = info.line + 1;
- *column = info.column + 1;
- } else {
- *line = position;
- *column = -1;
- }
-
- if (frame->is_interpreted()) {
- InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
- Address bytecode_start =
- reinterpret_cast<Address>(iframe->GetBytecodeArray()) - kHeapObjectTag +
- BytecodeArray::kHeaderSize;
- return bytecode_start + iframe->GetBytecodeOffset();
- }
-
- return frame->pc();
-}
-
void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
if (V8_LIKELY(!FLAG_ic_stats)) return;
@@ -112,18 +88,19 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
}
const char* modifier = "";
- if (IsKeyedStoreIC()) {
+ if (IsKeyedLoadIC()) {
+ KeyedAccessLoadMode mode =
+ casted_nexus<KeyedLoadICNexus>()->GetKeyedAccessLoadMode();
+ modifier = GetModifier(mode);
+ } else if (IsKeyedStoreIC()) {
KeyedAccessStoreMode mode =
casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
- modifier = GetTransitionMarkModifier(mode);
+ modifier = GetModifier(mode);
}
if (!(FLAG_ic_stats &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- int line;
- int column;
- Address pc = GetAbstractPC(&line, &column);
- LOG(isolate(), ICEvent(type, is_keyed(), pc, line, column, map, *name,
+ LOG(isolate(), ICEvent(type, is_keyed(), map, *name,
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state), modifier,
slow_stub_reason_));
@@ -181,7 +158,7 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
- Address* constant_pool = NULL;
+ Address* constant_pool = nullptr;
if (FLAG_enable_embedded_constant_pool) {
constant_pool = reinterpret_cast<Address*>(
entry + ExitFrameConstants::kConstantPoolOffset);
@@ -288,7 +265,7 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
if (maybe_handler_.is_null()) {
if (!receiver_map()->IsJSObjectMap()) return false;
Map* first_map = FirstTargetMap();
- if (first_map == NULL) return false;
+ if (first_map == nullptr) return false;
Handle<Map> old_map(first_map);
if (old_map->is_deprecated()) return true;
return IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
@@ -341,18 +318,43 @@ MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
- JSFunction* host_function) {
+ FeedbackSlot slot, JSFunction* host_function,
+ const char* reason) {
if (FLAG_trace_opt_verbose) {
// TODO(leszeks): The host function is only needed for this print, we could
// remove it as a parameter if we're of with removing this trace (or only
// tracing the feedback vector, not the function name).
if (vector->profiler_ticks() != 0) {
PrintF("[resetting ticks for ");
- host_function->PrintName();
- PrintF(" due from %d due to IC change]\n", vector->profiler_ticks());
+ host_function->ShortPrint();
+ PrintF(" due from %d due to IC change: %s]\n", vector->profiler_ticks(),
+ reason);
}
}
vector->set_profiler_ticks(0);
+
+#ifdef V8_TRACE_FEEDBACK_UPDATES
+ if (FLAG_trace_feedback_updates) {
+ int slot_count = vector->metadata()->slot_count();
+
+ OFStream os(stdout);
+ if (slot.IsInvalid()) {
+ os << "[Feedback slots in ";
+ } else {
+ os << "[Feedback slot " << slot.ToInt() << "/" << slot_count << " in ";
+ }
+ vector->shared_function_info()->ShortPrint(os);
+ if (slot.IsInvalid()) {
+ os << " updated - ";
+ } else {
+ os << " updated to ";
+ vector->FeedbackSlotPrint(os, slot);
+ os << " - ";
+ }
+ os << reason << "]" << std::endl;
+ }
+#endif
+
isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -367,18 +369,25 @@ static bool MigrateDeprecated(Handle<Object> object) {
return true;
}
-void IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
+bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
+ bool changed = true;
if (new_state == PREMONOMORPHIC) {
nexus()->ConfigurePremonomorphic();
} else if (new_state == MEGAMORPHIC) {
DCHECK_IMPLIES(!is_keyed(), key->IsName());
- nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
+ // Even though we don't change the feedback data, we still want to reset the
+ // profiler ticks. Real-world observations suggest that optimizing these
+ // functions doesn't improve performance.
+ changed = nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
} else {
UNREACHABLE();
}
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), GetHostFunction());
+ OnFeedbackChanged(
+ isolate(), *vector(), slot(), GetHostFunction(),
+ new_state == PREMONOMORPHIC ? "Premonomorphic" : "Megamorphic");
+ return changed;
}
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
@@ -393,7 +402,8 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
}
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), GetHostFunction());
+ OnFeedbackChanged(isolate(), *vector(), slot(), GetHostFunction(),
+ IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
}
void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
@@ -404,14 +414,15 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
nexus()->ConfigurePolymorphic(name, maps, handlers);
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), GetHostFunction());
+ OnFeedbackChanged(isolate(), *vector(), slot(), GetHostFunction(),
+ "Polymorphic");
}
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsNullOrUndefined(isolate())) {
- if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+ if (FLAG_use_ic && state() != PREMONOMORPHIC) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
update_receiver_map(object);
@@ -635,11 +646,6 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
}
}
-Handle<Smi> LoadIC::SimpleFieldLoad(Isolate* isolate, FieldIndex index) {
- TRACE_HANDLER_STATS(isolate, LoadIC_LoadFieldDH);
- return LoadHandler::LoadField(isolate, index);
-}
-
void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
@@ -705,38 +711,25 @@ void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
}
}
-Handle<Object> IC::ComputeHandler(LookupIterator* lookup) {
- // Try to find a globally shared handler stub.
- Handle<Object> shared_handler = GetMapIndependentHandler(lookup);
- if (!shared_handler.is_null()) {
- DCHECK(IC::IsHandler(*shared_handler));
- return shared_handler;
- }
-
- return CompileHandler(lookup);
-}
-
-Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
+Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
if (receiver->IsString() &&
*lookup->name() == isolate()->heap()->length_string()) {
- FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
- return SimpleFieldLoad(isolate(), index);
+ TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
+ return BUILTIN_CODE(isolate(), LoadIC_StringLength);
}
if (receiver->IsStringWrapper() &&
*lookup->name() == isolate()->heap()->length_string()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
- return BUILTIN_CODE(isolate(), LoadIC_StringLength);
+ TRACE_HANDLER_STATS(isolate(), LoadIC_StringWrapperLength);
+ return BUILTIN_CODE(isolate(), LoadIC_StringWrapperLength);
}
// Use specialized code for getting prototype of functions.
if (receiver->IsJSFunction() &&
*lookup->name() == isolate()->heap()->prototype_string() &&
- receiver->IsConstructor() &&
- !Handle<JSFunction>::cast(receiver)
- ->map()
- ->has_non_instance_prototype()) {
+ JSFunction::cast(*receiver)->has_prototype_slot() &&
+ !JSFunction::cast(*receiver)->map()->has_non_instance_prototype()) {
Handle<Code> stub;
TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
return BUILTIN_CODE(isolate(), LoadIC_FunctionPrototype);
@@ -779,11 +772,10 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
// Use simple field loads for some well-known callback properties.
// The method will only return true for absolute truths based on the
// receiver maps.
- int object_offset;
- if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
- &object_offset)) {
- FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
- return SimpleFieldLoad(isolate(), index);
+ FieldIndex index;
+ if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(), &index)) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
+ return LoadHandler::LoadField(isolate(), index);
}
if (holder->IsJSModuleNamespace()) {
Handle<ObjectHashTable> exports(
@@ -792,7 +784,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
int entry = exports->FindEntry(isolate(), lookup->name(),
Smi::ToInt(lookup->name()->GetHash()));
// We found the accessor, so the entry must exist.
- DCHECK(entry != ObjectHashTable::kNotFound);
+ DCHECK_NE(entry, ObjectHashTable::kNotFound);
int index = ObjectHashTable::EntryToValueIndex(entry);
return LoadHandler::LoadModuleExport(isolate(), index);
}
@@ -817,6 +809,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
return slow_stub();
}
+ Handle<Smi> smi_handler;
+
CallOptimization call_optimization(getter);
if (call_optimization.is_simple_api_call()) {
if (!call_optimization.IsCompatibleReceiverMap(map, holder) ||
@@ -824,38 +818,46 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
- break;
- }
- // FunctionTemplate isn't yet supported as smi-handler.
- if (getter->IsFunctionTemplateInfo()) {
- if (!holder->HasFastProperties()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
- return slow_stub();
- }
- break;
+ CallOptimization::HolderLookup holder_lookup;
+ call_optimization.LookupHolderOfExpectedType(map, &holder_lookup);
+
+ smi_handler = LoadHandler::LoadApiGetter(
+ isolate(), holder_lookup == CallOptimization::kHolderIsReceiver);
+
+ Handle<Context> context(
+ call_optimization.GetAccessorContext(holder->map()));
+ Handle<WeakCell> context_cell =
+ isolate()->factory()->NewWeakCell(context);
+ Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
+ call_optimization.api_call_info());
+ Handle<Tuple2> data =
+ isolate()->factory()->NewTuple2(context_cell, data_cell, TENURED);
+
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
+ return LoadHandler::LoadFromPrototype(
+ isolate(), map, holder, lookup->name(), smi_handler, data);
}
- Handle<Smi> smi_handler;
if (holder->HasFastProperties()) {
smi_handler =
LoadHandler::LoadAccessor(isolate(), lookup->GetAccessorIndex());
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadAccessorDH);
- return smi_handler;
- }
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadAccessorDH);
+ if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadAccessorFromPrototypeDH);
} else if (holder->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalFromPrototypeDH);
smi_handler = LoadHandler::LoadGlobal(isolate());
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
+ return LoadHandler::LoadFromPrototype(
+ isolate(), map, holder, lookup->name(), smi_handler, cell);
} else {
smi_handler = LoadHandler::LoadNormal(isolate());
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
- return smi_handler;
- }
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
+ if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
}
@@ -873,11 +875,12 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
return slow_stub();
}
- Handle<Smi> smi_handler =
- LoadHandler::LoadApiGetter(isolate(), lookup->GetAccessorIndex());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+ Handle<Smi> smi_handler = LoadHandler::LoadNativeDataProperty(
+ isolate(), lookup->GetAccessorIndex());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNativeDataPropertyDH);
if (receiver_is_holder) return smi_handler;
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
+ TRACE_HANDLER_STATS(isolate(),
+ LoadIC_LoadNativeDataPropertyFromPrototypeDH);
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
lookup->name(), smi_handler);
}
@@ -886,31 +889,26 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
DCHECK_EQ(kData, lookup->property_details().kind());
Handle<Smi> smi_handler;
if (lookup->is_dictionary_holder()) {
- smi_handler = LoadHandler::LoadNormal(isolate());
- if (receiver_is_holder) {
- if (holder->IsJSGlobalObject()) {
- // TODO(verwaest): This is a workaround for code that leaks the
- // global object.
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalDH);
- smi_handler = LoadHandler::LoadGlobal(isolate());
- return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
- }
- DCHECK(!holder->IsJSGlobalObject());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
- return smi_handler;
- }
-
if (holder->IsJSGlobalObject()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalFromPrototypeDH);
+ // TODO(verwaest): Also supporting the global object as receiver is a
+ // workaround for code that leaks the global object.
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalDH);
smi_handler = LoadHandler::LoadGlobal(isolate());
- } else {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
+ return LoadHandler::LoadFromPrototype(
+ isolate(), map, holder, lookup->name(), smi_handler, cell);
}
+ smi_handler = LoadHandler::LoadNormal(isolate());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
+ if (receiver_is_holder) return smi_handler;
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
+
} else if (lookup->property_details().location() == kField) {
FieldIndex field = lookup->GetFieldIndex();
- smi_handler = SimpleFieldLoad(isolate(), field);
+ smi_handler = LoadHandler::LoadField(isolate(), field);
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
} else {
@@ -946,28 +944,6 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
return Handle<Code>::null();
}
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup) {
- DCHECK_EQ(LookupIterator::ACCESSOR, lookup->state());
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- Handle<Map> map = receiver_map();
-
- Handle<Object> accessors = lookup->GetAccessors();
- DCHECK(accessors->IsAccessorPair());
- DCHECK(holder->HasFastProperties());
- DCHECK(!GetHostFunction()->shared()->HasBreakInfo());
- Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
- isolate());
- CallOptimization call_optimization(getter);
- NamedLoadHandlerCompiler compiler(isolate(), map, holder);
- DCHECK(call_optimization.is_simple_api_call());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadCallback);
- int index = lookup->GetAccessorIndex();
- Handle<Code> code = compiler.CompileLoadCallback(
- lookup->name(), call_optimization, index, slow_stub());
- return code;
-}
-
-
static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
// This helper implements a few common fast cases for converting
// non-smi keys of keyed loads/stores to a smi or a string.
@@ -989,14 +965,21 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
return key;
}
-void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
+bool KeyedLoadIC::CanChangeToAllowOutOfBounds(Handle<Map> receiver_map) {
+ Handle<Object> handler;
+ return nexus()->FindHandlerForMap(receiver_map).ToHandle(&handler) &&
+ LoadHandler::GetKeyedAccessLoadMode(*handler) == STANDARD_LOAD;
+}
+
+void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
+ KeyedAccessLoadMode load_mode) {
Handle<Map> receiver_map(receiver->map(), isolate());
DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE); // Checked by caller.
MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.empty()) {
- Handle<Object> handler = LoadElementHandler(receiver_map);
+ Handle<Object> handler = LoadElementHandler(receiver_map, load_mode);
return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
@@ -1024,7 +1007,7 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
Handle<JSObject>::cast(receiver)->GetElementsKind())) {
- Handle<Object> handler = LoadElementHandler(receiver_map);
+ Handle<Object> handler = LoadElementHandler(receiver_map, load_mode);
return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
@@ -1033,10 +1016,16 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
- // If the miss wasn't due to an unseen map, a polymorphic stub
- // won't help, use the generic stub.
- TRACE_GENERIC_IC("same map added twice");
- return;
+ // If the {receiver_map} previously had a handler that didn't handle
+ // out-of-bounds access, but can generally handle it, we can just go
+ // on and update the handler appropriately below.
+ if (load_mode != LOAD_IGNORE_OUT_OF_BOUNDS ||
+ !CanChangeToAllowOutOfBounds(receiver_map)) {
+ // If the miss wasn't due to an unseen map, a polymorphic stub
+ // won't help, use the generic stub.
+ TRACE_GENERIC_IC("same map added twice");
+ return;
+ }
}
// If the maximum number of receiver maps has been exceeded, use the generic
@@ -1048,7 +1037,7 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
ObjectHandles handlers;
handlers.reserve(target_receiver_maps.size());
- LoadElementPolymorphicHandlers(&target_receiver_maps, &handlers);
+ LoadElementPolymorphicHandlers(&target_receiver_maps, &handlers, load_mode);
DCHECK_LE(1, target_receiver_maps.size());
if (target_receiver_maps.size() == 1) {
ConfigureVectorState(Handle<Name>(), target_receiver_maps[0], handlers[0]);
@@ -1057,7 +1046,8 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
}
}
-Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
+Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
+ KeyedAccessLoadMode load_mode) {
if (receiver_map->has_indexed_interceptor() &&
!receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(
isolate()) &&
@@ -1065,11 +1055,11 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedInterceptorStub);
return LoadIndexedInterceptorStub(isolate()).GetCode();
}
- if (receiver_map->IsStringMap()) {
- TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedStringStub);
- return BUILTIN_CODE(isolate(), KeyedLoadIC_IndexedString);
- }
InstanceType instance_type = receiver_map->instance_type();
+ if (instance_type < FIRST_NONSTRING_TYPE) {
+ TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedStringDH);
+ return LoadHandler::LoadIndexedString(isolate(), load_mode);
+ }
if (instance_type < FIRST_JS_RECEIVER_TYPE) {
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_SlowStub);
return BUILTIN_CODE(isolate(), KeyedLoadIC_Slow);
@@ -1087,7 +1077,7 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
if (elements_kind == DICTIONARY_ELEMENTS) {
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadElementDH);
return LoadHandler::LoadElement(isolate(), elements_kind, false,
- is_js_array);
+ is_js_array, load_mode);
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
@@ -1098,11 +1088,13 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
isolate()->raw_native_context()->GetInitialJSArrayMap(elements_kind);
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadElementDH);
return LoadHandler::LoadElement(isolate(), elements_kind,
- convert_hole_to_undefined, is_js_array);
+ convert_hole_to_undefined, is_js_array,
+ load_mode);
}
-void KeyedLoadIC::LoadElementPolymorphicHandlers(MapHandles* receiver_maps,
- ObjectHandles* handlers) {
+void KeyedLoadIC::LoadElementPolymorphicHandlers(
+ MapHandles* receiver_maps, ObjectHandles* handlers,
+ KeyedAccessLoadMode load_mode) {
// Filter out deprecated maps to ensure their instances get migrated.
receiver_maps->erase(
std::remove_if(
@@ -1120,10 +1112,61 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(MapHandles* receiver_maps,
receiver_map->NotifyLeafMapLayoutChange();
}
}
- handlers->push_back(LoadElementHandler(receiver_map));
+ handlers->push_back(LoadElementHandler(receiver_map, load_mode));
+ }
+}
+
+namespace {
+
+bool IsOutOfBoundsAccess(Handle<Object> receiver, uint32_t index) {
+ uint32_t length = 0;
+ if (receiver->IsJSArray()) {
+ JSArray::cast(*receiver)->length()->ToArrayLength(&length);
+ } else if (receiver->IsString()) {
+ length = String::cast(*receiver)->length();
+ } else if (receiver->IsJSObject()) {
+ length = JSObject::cast(*receiver)->elements()->length();
+ } else {
+ return false;
+ }
+ return index >= length;
+}
+
+KeyedAccessLoadMode GetLoadMode(Handle<Object> receiver, uint32_t index) {
+ if (IsOutOfBoundsAccess(receiver, index)) {
+ if (receiver->IsJSTypedArray()) {
+ // For JSTypedArray we never lookup elements in the prototype chain.
+ return LOAD_IGNORE_OUT_OF_BOUNDS;
+ }
+
+ // For other {receiver}s we need to check the "no elements" protector.
+ Isolate* isolate = Handle<HeapObject>::cast(receiver)->GetIsolate();
+ if (isolate->IsNoElementsProtectorIntact()) {
+ if (receiver->IsString()) {
+ // ToObject(receiver) will have the initial String.prototype.
+ return LOAD_IGNORE_OUT_OF_BOUNDS;
+ }
+ if (receiver->IsJSObject()) {
+ // For other JSObjects (including JSArrays) we can only continue if
+ // the {receiver}s prototype is either the initial Object.prototype
+ // or the initial Array.prototype, which are both guarded by the
+ // "no elements" protector checked above.
+ Handle<Object> receiver_prototype(
+ JSObject::cast(*receiver)->map()->prototype(), isolate);
+ if (isolate->IsInAnyContext(*receiver_prototype,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate->IsInAnyContext(*receiver_prototype,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
+ return LOAD_IGNORE_OUT_OF_BOUNDS;
+ }
+ }
+ }
}
+ return STANDARD_LOAD;
}
+} // namespace
+
MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Handle<Object> key) {
if (MigrateDeprecated(object)) {
@@ -1149,9 +1192,10 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!object->IsJSValue()) {
- if ((object->IsJSReceiver() && key->IsSmi()) ||
- (object->IsString() && key->IsNumber())) {
- UpdateLoadElement(Handle<HeapObject>::cast(object));
+ if ((object->IsJSReceiver() || object->IsString()) &&
+ key->ToArrayIndex(&index)) {
+ KeyedAccessLoadMode load_mode = GetLoadMode(object, index);
+ UpdateLoadElement(Handle<HeapObject>::cast(object), load_mode);
if (is_vector_set()) {
TRACE_IC("LoadIC", key);
}
@@ -1301,7 +1345,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// If the object is undefined or null it's illegal to try to set any
// properties on it; throw a TypeError in that case.
if (object->IsNullOrUndefined(isolate())) {
- if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+ if (FLAG_use_ic && state() != PREMONOMORPHIC) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
update_receiver_map(object);
@@ -1353,6 +1397,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (created_new_transition_) {
// The first time a transition is performed, there's a good chance that
// it won't be taken again, so don't bother creating a handler.
+ TRACE_GENERIC_IC("new transition");
TRACE_IC("StoreIC", lookup->name());
return;
}
@@ -1366,7 +1411,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
TRACE_IC("StoreIC", lookup->name());
}
-Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
+Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
@@ -1375,9 +1420,20 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (store_target->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransitionDH);
- Handle<Object> handler = StoreHandler::StoreTransition(
- isolate(), receiver_map(), store_target, lookup->transition_cell(),
- lookup->name());
+ if (receiver_map()->IsJSGlobalObject()) {
+ DCHECK(IsStoreGlobalIC());
+ DCHECK_EQ(*lookup->GetReceiver(), *holder);
+ DCHECK_EQ(*store_target, *holder);
+ return StoreHandler::StoreGlobal(isolate(),
+ lookup->transition_cell());
+ }
+
+ Handle<Smi> smi_handler = StoreHandler::StoreGlobalProxy(isolate());
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(lookup->transition_cell());
+ Handle<Object> handler = StoreHandler::StoreThroughPrototype(
+ isolate(), receiver_map(), store_target, lookup->name(),
+ smi_handler, cell);
return handler;
}
// Currently not handled by CompileStoreTransition.
@@ -1389,9 +1445,19 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
DCHECK(lookup->IsCacheableTransition());
Handle<Map> transition = lookup->transition_map();
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
- Handle<Object> handler = StoreHandler::StoreTransition(
- isolate(), receiver_map(), holder, transition, lookup->name());
+
+ Handle<Smi> smi_handler;
+ if (transition->is_dictionary_map()) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormalDH);
+ smi_handler = StoreHandler::StoreNormal(isolate());
+ } else {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+ smi_handler = StoreHandler::StoreTransition(isolate(), transition);
+ }
+
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Handle<Object> handler = StoreHandler::StoreThroughPrototype(
+ isolate(), receiver_map(), holder, lookup->name(), smi_handler, cell);
TransitionsAccessor(receiver_map())
.UpdateHandler(*lookup->name(), *handler);
return handler;
@@ -1438,7 +1504,16 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
- break; // Custom-compiled handler.
+
+ Handle<Smi> smi_handler = StoreHandler::StoreNativeDataProperty(
+ isolate(), lookup->GetAccessorIndex());
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNativeDataPropertyDH);
+ if (receiver.is_identical_to(holder)) return smi_handler;
+ TRACE_HANDLER_STATS(isolate(),
+ StoreIC_StoreNativeDataPropertyOnPrototypeDH);
+ return StoreHandler::StoreThroughPrototype(
+ isolate(), receiver_map(), holder, lookup->name(), smi_handler);
+
} else if (accessors->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
@@ -1450,13 +1525,45 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
CallOptimization call_optimization(setter);
if (call_optimization.is_simple_api_call()) {
if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
- break; // Custom-compiled handler.
+ CallOptimization::HolderLookup holder_lookup;
+ call_optimization.LookupHolderOfExpectedType(receiver_map(),
+ &holder_lookup);
+
+ Handle<Smi> smi_handler = StoreHandler::StoreApiSetter(
+ isolate(),
+ holder_lookup == CallOptimization::kHolderIsReceiver);
+
+ Handle<Context> context(
+ call_optimization.GetAccessorContext(holder->map()));
+ Handle<WeakCell> context_cell =
+ isolate()->factory()->NewWeakCell(context);
+ Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
+ call_optimization.api_call_info());
+ Handle<Tuple2> data = isolate()->factory()->NewTuple2(
+ context_cell, data_cell, TENURED);
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreApiSetterOnPrototypeDH);
+ return StoreHandler::StoreThroughPrototype(
+ isolate(), receiver_map(), holder, lookup->name(), smi_handler,
+ data);
}
TRACE_GENERIC_IC("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
+ } else if (setter->IsFunctionTemplateInfo()) {
+ TRACE_GENERIC_IC("setter non-simple template");
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
- break; // Custom-compiled handler.
+
+ Handle<Smi> smi_handler =
+ StoreHandler::StoreAccessor(isolate(), lookup->GetAccessorIndex());
+
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreAccessorDH);
+ if (receiver.is_identical_to(holder)) return smi_handler;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreAccessorOnPrototypeDH);
+
+ return StoreHandler::StoreThroughPrototype(
+ isolate(), receiver_map(), holder, lookup->name(), smi_handler);
}
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
@@ -1518,57 +1625,6 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
return Handle<Code>::null();
}
-Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup) {
- DCHECK_EQ(LookupIterator::ACCESSOR, lookup->state());
-
- // This is currently guaranteed by checks in StoreIC::Store.
- Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
-
- DCHECK(holder->HasFastProperties());
- Handle<Object> accessors = lookup->GetAccessors();
-
- if (accessors->IsAccessorInfo()) {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
- DCHECK(v8::ToCData<Address>(info->setter()) != 0);
- DCHECK(!AccessorInfo::cast(*accessors)->is_special_data_property() ||
- lookup->HolderIsReceiverOrHiddenPrototype());
- DCHECK(
- AccessorInfo::IsCompatibleReceiverMap(isolate(), info, receiver_map()));
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- // TODO(ishell): don't hard-code language mode into the handler because
- // this handler can be re-used through megamorphic stub cache for wrong
- // language mode.
- // Better pass vector/slot to Runtime::kStoreCallbackProperty and
- // let it decode the language mode from the IC kind.
- Handle<Code> code = compiler.CompileStoreCallback(receiver, lookup->name(),
- info, language_mode());
- return code;
- }
-
- DCHECK(accessors->IsAccessorPair());
- Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
- isolate());
- DCHECK(setter->IsJSFunction() || setter->IsFunctionTemplateInfo());
- CallOptimization call_optimization(setter);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- if (call_optimization.is_simple_api_call()) {
- DCHECK(call_optimization.IsCompatibleReceiver(receiver, holder));
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
- Handle<Code> code = compiler.CompileStoreCallback(
- receiver, lookup->name(), call_optimization, lookup->GetAccessorIndex(),
- slow_stub());
- return code;
- }
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreViaSetter);
- int expected_arguments =
- JSFunction::cast(*setter)->shared()->internal_formal_parameter_count();
- return compiler.CompileStoreViaSetter(
- receiver, lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
-}
-
void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode) {
MapHandles target_receiver_maps;
@@ -1744,6 +1800,7 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
stub =
StoreFastElementStub(isolate(), is_jsarray, elements_kind, store_mode)
.GetCode();
+ if (receiver_map->has_fixed_typed_array_elements()) return stub;
} else {
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
@@ -1812,16 +1869,6 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
}
}
-bool IsOutOfBoundsAccess(Handle<JSObject> receiver, uint32_t index) {
- uint32_t length = 0;
- if (receiver->IsJSArray()) {
- JSArray::cast(*receiver)->length()->ToArrayLength(&length);
- } else {
- length = static_cast<uint32_t>(receiver->elements()->length());
- }
- return index >= length;
-}
-
static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
uint32_t index, Handle<Object> value) {
@@ -1902,16 +1949,15 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
if (vector_needs_update()) {
- ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_GENERIC_IC("unhandled internalized string key");
- TRACE_IC("StoreIC", key);
+ if (ConfigureVectorState(MEGAMORPHIC, key)) {
+ TRACE_GENERIC_IC("unhandled internalized string key");
+ TRACE_IC("StoreIC", key);
+ }
}
return store_handle;
}
- if (state() != UNINITIALIZED) {
- JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
- }
+ JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
bool use_ic = FLAG_use_ic && !object->IsStringWrapper() &&
!object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy();
@@ -2139,6 +2185,48 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
}
}
+RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<Object> object = args.at(3);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 4);
+
+ Handle<Context> native_context = isolate->native_context();
+ Handle<ScriptContextTable> script_contexts(
+ native_context->script_context_table());
+
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index);
+ if (lookup_result.mode == CONST) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstAssign, object, name));
+ }
+
+ Handle<Object> previous_value =
+ FixedArray::get(*script_context, lookup_result.slot_index, isolate);
+
+ if (previous_value->IsTheHole(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+ }
+
+ script_context->set(lookup_result.slot_index, *value);
+ return *value;
+ }
+
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Runtime::SetObjectProperty(isolate, object, name, value, language_mode));
+}
+
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
HandleScope scope(isolate);
@@ -2228,10 +2316,10 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorNameSetterCallback fun =
FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
- DCHECK(fun != NULL);
+ DCHECK_NOT_NULL(fun);
- Object::ShouldThrow should_throw =
- is_sloppy(language_mode) ? Object::DONT_THROW : Object::THROW_ON_ERROR;
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
*holder, should_throw);
custom_args.Call(fun, name, value);
@@ -2258,7 +2346,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
InterceptorInfo* interceptor = holder->GetNamedInterceptor();
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
- *holder, Object::DONT_THROW);
+ *holder, kDontThrow);
v8::GenericNamedPropertyGetterCallback getter =
v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
@@ -2314,7 +2402,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
DCHECK(!interceptor->non_masking());
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
- *receiver, Object::DONT_THROW);
+ *receiver, kDontThrow);
v8::GenericNamedPropertySetterCallback setter =
v8::ToCData<v8::GenericNamedPropertySetterCallback>(
@@ -2344,12 +2432,12 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
// TODO(verwaest): This should probably get the holder and receiver as input.
HandleScope scope(isolate);
Handle<JSObject> receiver = args.at<JSObject>(0);
- DCHECK(args.smi_at(1) >= 0);
+ DCHECK_GE(args.smi_at(1), 0);
uint32_t index = args.smi_at(1);
InterceptorInfo* interceptor = receiver->GetIndexedInterceptor();
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
- *receiver, Object::DONT_THROW);
+ *receiver, kDontThrow);
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 175b1f42fd..acbfccd4c6 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -36,7 +36,7 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL);
+ IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = nullptr);
virtual ~IC() {}
State state() const { return state_; }
@@ -64,7 +64,8 @@ class IC {
// Nofity the IC system that a feedback has changed.
static void OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
- JSFunction* host_function);
+ FeedbackSlot slot, JSFunction* host_function,
+ const char* reason);
protected:
Address fp() const { return fp_; }
@@ -72,7 +73,6 @@ class IC {
void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
- Address GetAbstractPC(int* line, int* column) const;
Isolate* isolate() const { return isolate_; }
// Get the caller function object.
@@ -90,7 +90,7 @@ class IC {
}
// Configure for most states.
- void ConfigureVectorState(IC::State new_state, Handle<Object> key);
+ bool ConfigureVectorState(IC::State new_state, Handle<Object> key);
// Configure the vector for MONOMORPHIC.
void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler);
@@ -109,15 +109,6 @@ class IC {
void TraceHandlerCacheHitStats(LookupIterator* lookup);
- // Compute the handler either by compiling or by retrieving a cached version.
- Handle<Object> ComputeHandler(LookupIterator* lookup);
- virtual Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) {
- UNREACHABLE();
- }
- virtual Handle<Code> CompileHandler(LookupIterator* lookup) {
- UNREACHABLE();
- }
-
void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
bool UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code);
void UpdateMegamorphicCache(Map* map, Name* name, Object* code);
@@ -156,7 +147,7 @@ class IC {
Map* FirstTargetMap() {
FindTargetMaps();
- return !target_maps_.empty() ? *target_maps_[0] : NULL;
+ return !target_maps_.empty() ? *target_maps_[0] : nullptr;
}
Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
@@ -218,7 +209,7 @@ class CallIC : public IC {
public:
CallIC(Isolate* isolate, CallICNexus* nexus)
: IC(EXTRA_CALL_FRAME, isolate, nexus) {
- DCHECK(nexus != NULL);
+ DCHECK_NOT_NULL(nexus);
}
};
@@ -227,7 +218,7 @@ class LoadIC : public IC {
public:
LoadIC(Isolate* isolate, FeedbackNexus* nexus)
: IC(NO_EXTRA_FRAME, isolate, nexus) {
- DCHECK(nexus != NULL);
+ DCHECK_NOT_NULL(nexus);
DCHECK(IsAnyLoad());
}
@@ -251,13 +242,8 @@ class LoadIC : public IC {
// lookup result.
void UpdateCaches(LookupIterator* lookup);
- Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
-
- Handle<Code> CompileHandler(LookupIterator* lookup) override;
-
private:
- // Creates a data handler that represents a load of a field by given index.
- static Handle<Smi> SimpleFieldLoad(Isolate* isolate, FieldIndex index);
+ Handle<Object> ComputeHandler(LookupIterator* lookup);
friend class IC;
friend class NamedLoadHandlerCompiler;
@@ -280,7 +266,7 @@ class KeyedLoadIC : public LoadIC {
public:
KeyedLoadIC(Isolate* isolate, KeyedLoadICNexus* nexus)
: LoadIC(isolate, nexus) {
- DCHECK(nexus != NULL);
+ DCHECK_NOT_NULL(nexus);
}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
@@ -288,15 +274,23 @@ class KeyedLoadIC : public LoadIC {
protected:
// receiver is HeapObject because it could be a String or a JSObject
- void UpdateLoadElement(Handle<HeapObject> receiver);
+ void UpdateLoadElement(Handle<HeapObject> receiver,
+ KeyedAccessLoadMode load_mode);
private:
friend class IC;
- Handle<Object> LoadElementHandler(Handle<Map> receiver_map);
+ Handle<Object> LoadElementHandler(Handle<Map> receiver_map,
+ KeyedAccessLoadMode load_mode);
void LoadElementPolymorphicHandlers(MapHandles* receiver_maps,
- ObjectHandles* handlers);
+ ObjectHandles* handlers,
+ KeyedAccessLoadMode load_mode);
+
+ // Returns true if the receiver_map has a kElement or kIndexedString
+ // handler in the nexus currently but didn't yet allow out of bounds
+ // accesses.
+ bool CanChangeToAllowOutOfBounds(Handle<Map> receiver_map);
};
@@ -321,7 +315,7 @@ class StoreIC : public IC {
protected:
// Stub accessors.
- Handle<Code> slow_stub() const {
+ virtual Handle<Code> slow_stub() const {
// All StoreICs share the same slow stub.
return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow);
}
@@ -331,10 +325,10 @@ class StoreIC : public IC {
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode,
MaybeHandle<Object> cached_handler);
- Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
- Handle<Code> CompileHandler(LookupIterator* lookup) override;
private:
+ Handle<Object> ComputeHandler(LookupIterator* lookup);
+
friend class IC;
bool created_new_transition_ = false;
@@ -348,6 +342,11 @@ class StoreGlobalIC : public StoreIC {
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Name> name,
Handle<Object> value);
+
+ protected:
+ Handle<Code> slow_stub() const override {
+ return BUILTIN_CODE(isolate(), StoreGlobalIC_Slow);
+ }
};
enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 7ff72bb72f..4263dd8552 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -108,7 +108,7 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
{
Node* map = var_map.value();
Node* prototype = LoadMapPrototype(map);
- GotoIf(WordEqual(prototype, NullConstant()), only_fast_elements);
+ GotoIf(IsNull(prototype), only_fast_elements);
Node* prototype_map = LoadMap(prototype);
var_map.Bind(prototype_map);
Node* instance_type = LoadMapInstanceType(prototype_map);
@@ -236,8 +236,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
Node* intptr_index, Node* value, Node* context, Label* slow,
UpdateLength update_length) {
if (update_length != kDontChangeLength) {
- CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
- Int32Constant(JS_ARRAY_TYPE)));
+ CSA_ASSERT(this, InstanceTypeEqual(LoadMapInstanceType(receiver_map),
+ JS_ARRAY_TYPE));
// Check if the length property is writable. The fast check is only
// supported for fast properties.
GotoIf(IsDictionaryMap(receiver_map), slow);
@@ -437,7 +437,7 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
BIND(&if_fast);
Label if_array(this);
- GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
+ GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &if_array);
{
Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds, &if_grow);
@@ -529,6 +529,7 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
BIND(&loop);
{
Node* holder = var_holder.value();
+ GotoIf(IsNull(holder), &ok_to_write);
Node* holder_map = var_holder_map.value();
Node* instance_type = LoadMapInstanceType(holder_map);
Label next_proto(this);
@@ -593,10 +594,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
BIND(&next_proto);
// Bailout if it can be an integer indexed exotic case.
- GotoIf(Word32Equal(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
- bailout);
+ GotoIf(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), bailout);
Node* proto = LoadMapPrototype(holder_map);
- GotoIf(WordEqual(proto, NullConstant()), &ok_to_write);
+ GotoIf(IsNull(proto), &ok_to_write);
var_holder.Bind(proto);
var_holder_map.Bind(LoadMap(proto));
Goto(&loop);
@@ -686,17 +686,17 @@ void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
slow);
Node* field_index =
DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
- Node* inobject_properties = LoadMapInobjectProperties(object_map);
+ field_index =
+ IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(object_map));
+ Node* instance_size_in_words = LoadMapInstanceSizeInWords(object_map);
Label inobject(this), backing_store(this);
- Branch(UintPtrLessThan(field_index, inobject_properties), &inobject,
+ Branch(UintPtrLessThan(field_index, instance_size_in_words), &inobject,
&backing_store);
BIND(&inobject);
{
- Node* field_offset = TimesPointerSize(IntPtrAdd(
- IntPtrSub(LoadMapInstanceSize(object_map), inobject_properties),
- field_index));
+ Node* field_offset = TimesPointerSize(field_index);
Label tagged_rep(this), double_rep(this);
Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
&double_rep, &tagged_rep);
@@ -722,7 +722,7 @@ void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
BIND(&backing_store);
{
- Node* backing_store_index = IntPtrSub(field_index, inobject_properties);
+ Node* backing_store_index = IntPtrSub(field_index, instance_size_in_words);
Label tagged_rep(this), double_rep(this);
Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
&double_rep, &tagged_rep);
@@ -810,15 +810,15 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Label check_key(this, &var_transition_cell);
BIND(&tuple3);
{
- var_transition_cell.Bind(LoadObjectField(
- maybe_handler, StoreHandler::kTransitionOrHolderCellOffset));
+ var_transition_cell.Bind(
+ LoadObjectField(maybe_handler, StoreHandler::kDataOffset));
Goto(&check_key);
}
BIND(&fixedarray);
{
- var_transition_cell.Bind(LoadFixedArrayElement(
- maybe_handler, StoreHandler::kTransitionMapOrHolderCellIndex));
+ var_transition_cell.Bind(
+ LoadFixedArrayElement(maybe_handler, StoreHandler::kDataIndex));
Goto(&check_key);
}
@@ -891,8 +891,14 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
&var_accessor_pair, &var_accessor_holder,
&readonly, slow);
- Add<NameDictionary>(properties, p->name, p->value, slow);
+ Label add_dictionary_property_slow(this);
+ Add<NameDictionary>(properties, p->name, p->value,
+ &add_dictionary_property_slow);
Return(p->value);
+
+ BIND(&add_dictionary_property_slow);
+ TailCallRuntime(Runtime::kAddDictionaryProperty, p->context, p->receiver,
+ p->name, p->value);
}
}
@@ -976,7 +982,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
VARIABLE(var_index, MachineType::PointerRepresentation());
VARIABLE(var_unique, MachineRepresentation::kTagged);
var_unique.Bind(name); // Dummy initialization.
- Label if_index(this), if_unique_name(this), slow(this);
+ Label if_index(this), if_unique_name(this), not_internalized(this),
+ slow(this);
GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
@@ -987,7 +994,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
&slow);
- TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, &slow);
+ TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
+ &not_internalized);
BIND(&if_index);
{
@@ -1004,14 +1012,24 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
EmitGenericPropertyStore(receiver, receiver_map, &p, &slow);
}
+ BIND(&not_internalized);
+ {
+ if (FLAG_internalize_on_the_fly) {
+ TryInternalizeString(name, &if_index, &var_index, &if_unique_name,
+ &var_unique, &slow, &slow);
+ } else {
+ Goto(&slow);
+ }
+ }
+
BIND(&slow);
{
Comment("KeyedStoreGeneric_slow");
VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
- SmiConstant(STRICT));
+ SmiConstant(LanguageMode::kStrict));
Label call_runtime(this);
BranchIfStrictMode(vector, slot, &call_runtime);
- var_language_mode.Bind(SmiConstant(SLOPPY));
+ var_language_mode.Bind(SmiConstant(LanguageMode::kSloppy));
Goto(&call_runtime);
BIND(&call_runtime);
TailCallRuntime(Runtime::kSetProperty, context, receiver, name, value,
diff --git a/deps/v8/src/ic/mips/OWNERS b/deps/v8/src/ic/mips/OWNERS
deleted file mode 100644
index 3f8fbfc7c8..0000000000
--- a/deps/v8/src/ic/mips/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
deleted file mode 100644
index 1c97ca3cad..0000000000
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, a3, a0, t0};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, a3, t0};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
deleted file mode 100644
index d9edc30ba6..0000000000
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(cp);
- }
- __ Ret();
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context and value registers, so we can restore them later.
- __ Push(cp, value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ lw(scratch,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver, value());
- __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ li(a0, Operand(1));
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- // Restore context register.
- __ Pop(cp, v0);
- }
- __ Ret();
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ Push(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ Pop(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ Addu(sp, sp, Operand(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
-
- // Check that receiver is a JSObject.
- __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Load properties array.
- Register properties = scratch0;
- __ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- // Check that the properties array is a dictionary.
- __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ Branch(miss_label, ne, map, Operand(tmp));
-
- // Restore the temporarily used register.
- __ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- NameDictionaryLookupStub::GenerateNegativeLookup(
- masm, miss_label, &done, receiver, properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(miss, ne, scratch, Operand(at));
-}
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch_in);
- DCHECK(receiver != scratch_in);
- __ push(accessor_holder);
- __ push(receiver);
- // Write the arguments to stack frame.
- if (is_store) {
- DCHECK(receiver != store_parameter);
- DCHECK(scratch_in != store_parameter);
- __ push(store_parameter);
- }
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = a0;
- Register data = t0;
- Register holder = a2;
- Register api_function_address = a1;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ lw(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ lw(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ lw(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ lw(data,
- FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
- __ li(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ li(this->name(), Operand(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ lw(scratch1, NativeContextMemOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
-
- if (!compare_native_contexts_only) {
- __ Branch(&done, eq, scratch1, Operand(scratch2));
-
- // Compare security tokens of current and expected native contexts.
- __ lw(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ lw(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- }
- __ Branch(miss, ne, scratch1, Operand(scratch2));
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ li(scratch1, Operand(validity_cell));
- __ lw(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Branch(miss, ne, scratch1,
- Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ Branch(&success);
- __ bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ Branch(&success);
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
-
- __ Push(receiver(), holder_reg); // Receiver.
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ li(at, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ li(at, Operand(cell));
- }
- __ push(at);
- __ li(at, Operand(name));
- __ Push(at, value());
- __ Push(Smi::FromInt(language_mode));
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips64/OWNERS b/deps/v8/src/ic/mips64/OWNERS
deleted file mode 100644
index 3f8fbfc7c8..0000000000
--- a/deps/v8/src/ic/mips64/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
deleted file mode 100644
index 16d7a3d790..0000000000
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, a3, a0, a4};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, a3, a4};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
deleted file mode 100644
index f528ac9fdb..0000000000
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(cp);
- }
- __ Ret();
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context and value registers, so we can restore them later.
- __ Push(cp, value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ld(scratch,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver, value());
- __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ li(a0, Operand(1));
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- // Restore context register.
- __ Pop(cp, v0);
- }
- __ Ret();
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ Push(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ Pop(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ Daddu(sp, sp, Operand(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ Ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
-
- // Check that receiver is a JSObject.
- __ Lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Load properties array.
- Register properties = scratch0;
- __ Ld(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- // Check that the properties array is a dictionary.
- __ Ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ Branch(miss_label, ne, map, Operand(tmp));
-
- // Restore the temporarily used register.
- __ Ld(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- NameDictionaryLookupStub::GenerateNegativeLookup(
- masm, miss_label, &done, receiver, properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(miss, ne, scratch, Operand(at));
-}
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch_in);
- DCHECK(receiver != scratch_in);
- __ push(accessor_holder);
- __ push(receiver);
- // Write the arguments to stack frame.
- if (is_store) {
- DCHECK(receiver != store_parameter);
- DCHECK(scratch_in != store_parameter);
- __ push(store_parameter);
- }
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = a0;
- Register data = a4;
- Register holder = a2;
- Register api_function_address = a1;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ Ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ Ld(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ Ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ Ld(data,
- FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ Ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
- __ li(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ li(this->name(), Operand(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ Ld(scratch1, NativeContextMemOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
-
- if (!compare_native_contexts_only) {
- __ Branch(&done, eq, scratch1, Operand(scratch2));
-
- // Compare security tokens of current and expected native contexts.
- __ Ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ Ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- }
- __ Branch(miss, ne, scratch1, Operand(scratch2));
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ li(scratch1, Operand(validity_cell));
- __ Ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Branch(miss, ne, scratch1,
- Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ Branch(&success);
- __ bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ Branch(&success);
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
-
- __ Push(receiver(), holder_reg); // Receiver.
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ li(at, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ li(at, Operand(cell));
- }
- __ push(at);
- __ li(at, Operand(name));
- __ Push(at, value());
- __ Push(Smi::FromInt(language_mode));
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/ppc/OWNERS b/deps/v8/src/ic/ppc/OWNERS
deleted file mode 100644
index 752e8e3d81..0000000000
--- a/deps/v8/src/ic/ppc/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-jyan@ca.ibm.com
-dstence@us.ibm.com
-joransiu@ca.ibm.com
-mbrandy@us.ibm.com
-michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
deleted file mode 100644
index f78ef57e74..0000000000
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, r6, r3, r7};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, r6, r7};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
deleted file mode 100644
index 30686e6184..0000000000
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(cp);
- }
- __ Ret();
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- // Save value register, so we can restore it later.
- __ Push(cp, value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ LoadP(scratch,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver, value());
- __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
- __ li(r3, Operand(1));
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- // Restore context register.
- __ Pop(cp, r3);
- }
- __ Ret();
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ Push(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ Pop(slot, vector);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ addi(sp, sp, Operand(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbz(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ andi(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ bne(miss_label, cr0);
-
- // Check that receiver is a JSObject.
- __ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmpi(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
- __ blt(miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- // Check that the properties array is a dictionary.
- __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ bne(miss_label);
-
- // Restore the temporarily used register.
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- NameDictionaryLookupStub::GenerateNegativeLookup(
- masm, miss_label, &done, receiver, properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ bne(miss);
-}
-
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch_in);
- DCHECK(receiver != scratch_in);
- __ push(accessor_holder);
- __ push(receiver);
- // Write the arguments to stack frame.
- if (is_store) {
- DCHECK(receiver != store_parameter);
- DCHECK(scratch_in != store_parameter);
- __ push(store_parameter);
- }
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = r3;
- Register data = r7;
- Register holder = r5;
- Register api_function_address = r4;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ LoadP(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadP(data,
- FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ LoadP(data,
- FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
- __ mov(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Operand(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ LoadP(scratch1, NativeContextMemOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ cmp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ beq(&done);
-
- // Compare security tokens of current and expected native contexts.
- __ LoadP(scratch1,
- ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ cmp(scratch1, scratch2);
- }
- __ bne(miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ mov(scratch1, Operand(validity_cell));
- __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
- __ bne(miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- __ bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
-
- __ Push(receiver(), holder_reg); // receiver
-
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ mov(ip, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ mov(ip, Operand(cell));
- }
- __ push(ip);
- __ mov(ip, Operand(name));
- __ Push(ip, value());
- __ Push(Smi::FromInt(language_mode));
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/s390/OWNERS b/deps/v8/src/ic/s390/OWNERS
deleted file mode 100644
index 752e8e3d81..0000000000
--- a/deps/v8/src/ic/s390/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-jyan@ca.ibm.com
-dstence@us.ibm.com
-joransiu@ca.ibm.com
-mbrandy@us.ibm.com
-michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
diff --git a/deps/v8/src/ic/s390/access-compiler-s390.cc b/deps/v8/src/ic/s390/access-compiler-s390.cc
deleted file mode 100644
index ed8c089b9c..0000000000
--- a/deps/v8/src/ic/s390/access-compiler-s390.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, r5, r2, r6};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, r5, r6};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
deleted file mode 100644
index 4fd0013ac0..0000000000
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(cp);
- }
- __ Ret();
-}
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- // Save value register, so we can restore it later.
- __ Push(cp, value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ LoadP(scratch,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver, value());
- __ LoadAccessor(r3, holder, accessor_index, ACCESSOR_SETTER);
- __ LoadImmP(r2, Operand(1));
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- // Restore context register.
- __ Pop(cp, r2);
- }
- __ Ret();
-}
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ Push(slot, vector);
-}
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ Pop(slot, vector);
-}
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ la(sp, MemOperand(sp, 2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ LoadlB(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ AndP(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ bne(miss_label);
-
- // Check that receiver is a JSObject.
- // TODO(joransiu): Merge into SI compare
- __ LoadlB(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ CmpP(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
- __ blt(miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- // Check that the properties array is a dictionary.
- __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- __ CompareRoot(map, Heap::kHashTableMapRootIndex);
- __ bne(miss_label);
-
- // Restore the temporarily used register.
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- NameDictionaryLookupStub::GenerateNegativeLookup(
- masm, miss_label, &done, receiver, properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ bne(miss);
-}
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch_in);
- DCHECK(receiver != scratch_in);
- __ Push(accessor_holder);
- __ Push(receiver);
- // Write the arguments to stack frame.
- if (is_store) {
- DCHECK(receiver != store_parameter);
- DCHECK(scratch_in != store_parameter);
- __ Push(store_parameter);
- }
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = r2;
- Register data = r6;
- Register holder = r4;
- Register api_function_address = r3;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ LoadP(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadP(data,
- FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ LoadP(data,
- FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
- __ mov(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Operand(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ LoadP(scratch1, NativeContextMemOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ CmpP(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ beq(&done);
-
- // Compare security tokens of current and expected native contexts.
- __ LoadP(scratch1,
- ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ CmpP(scratch1, scratch2);
- }
- __ bne(miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ mov(scratch1, Operand(validity_cell));
- __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
- __ bne(miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- __ bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
-
- __ Push(receiver(), holder_reg); // receiver
-
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ mov(ip, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ mov(ip, Operand(cell));
- }
- __ Push(ip);
- __ mov(ip, Operand(name));
- __ Push(ip, value());
- __ Push(Smi::FromInt(language_mode));
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
deleted file mode 100644
index 4bbbba5b5a..0000000000
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/ic/access-compiler.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, rax, rbx, rdi};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, rbx, rdi};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
deleted file mode 100644
index 99b2a7a41c..0000000000
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ Push(slot);
- __ Push(vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ Pop(vector);
- __ Pop(slot);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ addp(rsp, Immediate(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(receiver != scratch0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ movp(properties,
- FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
-
- // Check that the properties array is a dictionary.
- __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss_label);
-
- Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
- properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(accessor_holder != scratch);
- DCHECK(optimization.is_simple_api_call());
-
- __ PopReturnAddressTo(scratch);
- // accessor_holder
- __ Push(accessor_holder);
- // receiver
- __ Push(receiver);
- // Write the arguments to stack frame.
- if (is_store) {
- DCHECK(receiver != store_parameter);
- DCHECK(scratch != store_parameter);
- __ Push(store_parameter);
- }
- __ PushReturnAddressFrom(scratch);
- // Stack now matches JSFunction abi.
-
- // Abi for CallApiCallbackStub.
- Register callee = rdi;
- Register data = rbx;
- Register holder = rcx;
- Register api_function_address = rdx;
- scratch = no_reg;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ movp(holder, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
- } else {
- if (optimization.is_constant_call()) {
- __ movp(data,
- FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ movp(data,
- FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ movp(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ movp(data,
- FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ Move(api_function_address, function_address,
- RelocInfo::EXTERNAL_REFERENCE);
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ Cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- isolate->factory()->the_hole_value());
- __ j(not_equal, miss);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- __ pushq(rsi);
- // Save value register, so we can restore it later.
- __ Push(value());
-
- if (accessor_index >= 0) {
- DCHECK(holder != scratch);
- DCHECK(receiver != scratch);
- DCHECK(value() != scratch);
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ movp(scratch,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ Push(receiver);
- __ Push(value());
- __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER);
- __ Set(rax, 1);
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(rax);
-
- // Restore context register.
- __ popq(rsi);
- }
- __ ret(0);
-}
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Remember the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ popq(rsi);
- }
- __ ret(0);
-}
-
-#undef __
-#define __ ACCESS_MASM((masm()))
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ Move(this->name(), name);
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ movp(scratch1, NativeContextOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ cmpp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ j(equal, &done);
-
- // Compare security tokens of current and expected native contexts.
- __ movp(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ movp(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ cmpp(scratch1, scratch2);
- }
- __ j(not_equal, miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
- DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
- scratch2 != scratch1);
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- __ Move(scratch1, validity_cell);
- __ SmiCompare(FieldOperand(scratch1, Cell::kValueOffset),
- Smi::FromInt(Map::kPrototypeChainValid));
- __ j(not_equal, miss);
- }
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- __ bind(miss);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- GenerateRestoreName(miss, name);
- PopVectorAndSlot();
- TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
-
- __ PopReturnAddressTo(scratch1());
- __ Push(receiver());
- __ Push(holder_reg);
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ Push(callback);
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ Push(cell);
- }
- __ Push(name);
- __ Push(value());
- __ Push(Smi::FromInt(language_mode));
- __ PushReturnAddressFrom(scratch1());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index 371d09046d..291cce6fe6 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -32,7 +32,7 @@ namespace internal {
#if defined(V8_INTL_SUPPORT) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
namespace {
-char* g_icu_data_ptr = NULL;
+char* g_icu_data_ptr = nullptr;
void free_icu_data_ptr() {
delete[] g_icu_data_ptr;
@@ -62,7 +62,7 @@ bool InitializeICUDefaultLocation(const char* exec_path,
free(icu_data_file_default);
return result;
#else
- return InitializeICU(NULL);
+ return InitializeICU(nullptr);
#endif
#endif
}
@@ -100,7 +100,7 @@ bool InitializeICU(const char* icu_data_file) {
g_icu_data_ptr = new char[size];
if (fread(g_icu_data_ptr, 1, size, inf) != size) {
delete[] g_icu_data_ptr;
- g_icu_data_ptr = NULL;
+ g_icu_data_ptr = nullptr;
fclose(inf);
return false;
}
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index c6222f80a3..dd9067ca96 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -220,21 +220,6 @@ InjectedScript.prototype = {
},
/**
- * @param {!Array<!Object>} array
- * @param {string} property
- * @param {string} groupName
- * @param {boolean} forceValueType
- * @param {boolean} generatePreview
- */
- wrapPropertyInArray: function(array, property, groupName, forceValueType, generatePreview)
- {
- for (var i = 0; i < array.length; ++i) {
- if (typeof array[i] === "object" && property in array[i])
- array[i][property] = this.wrapObject(array[i][property], groupName, forceValueType, generatePreview);
- }
- },
-
- /**
* @param {!Object} table
* @param {!Array.<string>|string|boolean} columns
* @return {!RuntimeAgent.RemoteObject}
@@ -395,9 +380,18 @@ InjectedScript.prototype = {
var descriptor;
try {
- descriptor = InjectedScriptHost.getOwnPropertyDescriptor(o, property);
- if (descriptor) {
- InjectedScriptHost.nullifyPrototype(descriptor);
+ var nativeAccessorDescriptor = InjectedScriptHost.nativeAccessorDescriptor(o, property);
+ if (nativeAccessorDescriptor && !nativeAccessorDescriptor.isBuiltin) {
+ descriptor = { __proto__: null };
+ if (nativeAccessorDescriptor.hasGetter)
+ descriptor.get = function nativeGetter() { return o[property]; };
+ if (nativeAccessorDescriptor.hasSetter)
+ descriptor.set = function nativeSetter(v) { o[property] = v; };
+ } else {
+ descriptor = InjectedScriptHost.getOwnPropertyDescriptor(o, property);
+ if (descriptor) {
+ InjectedScriptHost.nullifyPrototype(descriptor);
+ }
}
var isAccessorProperty = descriptor && ("get" in descriptor || "set" in descriptor);
if (accessorPropertiesOnly && !isAccessorProperty)
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 4f24d25698..a5e981cda5 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -222,7 +222,8 @@ class InjectedScript::ProtocolPromiseHandler {
.setException(wrappedValue->clone())
.build();
if (stack)
- exceptionDetails->setStackTrace(stack->buildInspectorObjectImpl());
+ exceptionDetails->setStackTrace(
+ stack->buildInspectorObjectImpl(m_inspector->debugger()));
if (stack && !stack->isEmpty())
exceptionDetails->setScriptId(toString16(stack->topScriptId()));
callback->sendSuccess(std::move(wrappedValue), std::move(exceptionDetails));
@@ -279,8 +280,9 @@ std::unique_ptr<InjectedScript> InjectedScript::create(
if (!inspectedContext->inspector()
->compileAndRunInternalScript(
context, toV8String(isolate, injectedScriptSource))
- .ToLocal(&value))
+ .ToLocal(&value)) {
return nullptr;
+ }
DCHECK(value->IsFunction());
v8::Local<v8::Object> scriptHostWrapper =
V8InjectedScriptHost::create(context, inspectedContext->inspector());
@@ -390,43 +392,6 @@ Response InjectedScript::wrapObject(
return Response::OK();
}
-Response InjectedScript::wrapObjectProperty(v8::Local<v8::Object> object,
- v8::Local<v8::Name> key,
- const String16& groupName,
- bool forceValueType,
- bool generatePreview) const {
- v8::Local<v8::Value> property;
- v8::Local<v8::Context> context = m_context->context();
- if (!object->Get(context, key).ToLocal(&property))
- return Response::InternalError();
- v8::Local<v8::Value> wrappedProperty;
- Response response = wrapValue(property, groupName, forceValueType,
- generatePreview, &wrappedProperty);
- if (!response.isSuccess()) return response;
- v8::Maybe<bool> success =
- createDataProperty(context, object, key, wrappedProperty);
- if (success.IsNothing() || !success.FromJust())
- return Response::InternalError();
- return Response::OK();
-}
-
-Response InjectedScript::wrapPropertyInArray(v8::Local<v8::Array> array,
- v8::Local<v8::String> property,
- const String16& groupName,
- bool forceValueType,
- bool generatePreview) const {
- V8FunctionCall function(m_context->inspector(), m_context->context(),
- v8Value(), "wrapPropertyInArray");
- function.appendArgument(array);
- function.appendArgument(property);
- function.appendArgument(groupName);
- function.appendArgument(forceValueType);
- function.appendArgument(generatePreview);
- bool hadException = false;
- function.call(hadException);
- return hadException ? Response::InternalError() : Response::OK();
-}
-
Response InjectedScript::wrapValue(v8::Local<v8::Value> value,
const String16& groupName,
bool forceValueType, bool generatePreview,
@@ -606,10 +571,11 @@ Response InjectedScript::createExceptionDetails(
static_cast<int>(message->GetScriptOrigin().ScriptID()->Value())));
v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
if (!stackTrace.IsEmpty() && stackTrace->GetFrameCount() > 0)
- exceptionDetails->setStackTrace(m_context->inspector()
- ->debugger()
- ->createStackTrace(stackTrace)
- ->buildInspectorObjectImpl());
+ exceptionDetails->setStackTrace(
+ m_context->inspector()
+ ->debugger()
+ ->createStackTrace(stackTrace)
+ ->buildInspectorObjectImpl(m_context->inspector()->debugger()));
}
if (!exception.IsEmpty()) {
std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index e5c393df5b..16938fb317 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -85,15 +85,6 @@ class InjectedScript final {
v8::Local<v8::Value>, const String16& groupName, bool forceValueType,
bool generatePreview,
std::unique_ptr<protocol::Runtime::RemoteObject>* result) const;
- Response wrapObjectProperty(v8::Local<v8::Object>, v8::Local<v8::Name> key,
- const String16& groupName,
- bool forceValueType = false,
- bool generatePreview = false) const;
- Response wrapPropertyInArray(v8::Local<v8::Array>,
- v8::Local<v8::String> property,
- const String16& groupName,
- bool forceValueType = false,
- bool generatePreview = false) const;
std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const;
diff --git a/deps/v8/src/inspector/injected_script_externs.js b/deps/v8/src/inspector/injected_script_externs.js
index 43d765f507..9c5555b624 100644
--- a/deps/v8/src/inspector/injected_script_externs.js
+++ b/deps/v8/src/inspector/injected_script_externs.js
@@ -101,6 +101,13 @@ InjectedScriptHostClass.prototype.getOwnPropertyNames = function(obj) {}
*/
InjectedScriptHostClass.prototype.getOwnPropertySymbols = function(obj) {}
+/**
+ * @param {!Object} obj
+ * @param {string|symbol} name
+ * @return {{isBuiltin:boolean, hasGetter:boolean, hasSetter:boolean}|undefined}
+ */
+InjectedScriptHostClass.prototype.nativeAccessorDescriptor = function(obj, name) {}
+
/** @type {!InjectedScriptHostClass} */
var InjectedScriptHost;
/** @type {!Window} */
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index 125a248919..fdb2b64b90 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -12,7 +12,7 @@
{
"domain": "Runtime",
"async": ["evaluate", "awaitPromise", "callFunctionOn", "runScript"],
- "exported": ["StackTrace", "RemoteObject", "ExecutionContextId"]
+ "exported": ["StackTrace", "StackTraceId", "RemoteObject", "ExecutionContextId"]
},
{
"domain": "Debugger",
diff --git a/deps/v8/src/inspector/js_protocol-1.3.json b/deps/v8/src/inspector/js_protocol-1.3.json
new file mode 100644
index 0000000000..ea573d11a6
--- /dev/null
+++ b/deps/v8/src/inspector/js_protocol-1.3.json
@@ -0,0 +1,1205 @@
+{
+ "version": { "major": "1", "minor": "3" },
+ "domains": [
+ {
+ "domain": "Schema",
+ "description": "This domain is deprecated.",
+ "deprecated": true,
+ "types": [
+ {
+ "id": "Domain",
+ "type": "object",
+ "description": "Description of the protocol domain.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Domain name." },
+ { "name": "version", "type": "string", "description": "Domain version." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "getDomains",
+ "description": "Returns supported domains.",
+ "handlers": ["browser", "renderer"],
+ "returns": [
+ { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Runtime",
+ "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
+ "types": [
+ {
+ "id": "ScriptId",
+ "type": "string",
+ "description": "Unique script identifier."
+ },
+ {
+ "id": "RemoteObjectId",
+ "type": "string",
+ "description": "Unique object identifier."
+ },
+ {
+ "id": "UnserializableValue",
+ "type": "string",
+ "enum": ["Infinity", "NaN", "-Infinity", "-0"],
+ "description": "Primitive value which cannot be JSON-stringified."
+ },
+ {
+ "id": "RemoteObject",
+ "type": "object",
+ "description": "Mirror object referencing original JavaScript object.",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
+ { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
+ { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
+ { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
+ { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
+ { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
+ ]
+ },
+ {
+ "id": "CustomPreview",
+ "type": "object",
+ "experimental": true,
+ "properties": [
+ { "name": "header", "type": "string"},
+ { "name": "hasBody", "type": "boolean"},
+ { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
+ { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
+ { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
+ ]
+ },
+ {
+ "id": "ObjectPreview",
+ "type": "object",
+ "experimental": true,
+ "description": "Object containing abbreviated remote object value.",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+ { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
+ { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
+ { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
+ ]
+ },
+ {
+ "id": "PropertyPreview",
+ "type": "object",
+ "experimental": true,
+ "properties": [
+ { "name": "name", "type": "string", "description": "Property name." },
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
+ { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
+ { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
+ ]
+ },
+ {
+ "id": "EntryPreview",
+ "type": "object",
+ "experimental": true,
+ "properties": [
+ { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
+ { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
+ ]
+ },
+ {
+ "id": "PropertyDescriptor",
+ "type": "object",
+ "description": "Object property descriptor.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Property name or symbol description." },
+ { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
+ { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
+ { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
+ { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
+ { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
+ { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
+ { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
+ { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
+ ]
+ },
+ {
+ "id": "InternalPropertyDescriptor",
+ "type": "object",
+ "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Conventional property name." },
+ { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
+ ]
+ },
+ {
+ "id": "CallArgument",
+ "type": "object",
+ "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
+ "properties": [
+ { "name": "value", "type": "any", "optional": true, "description": "Primitive value or serializable javascript object." },
+ { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
+ ]
+ },
+ {
+ "id": "ExecutionContextId",
+ "type": "integer",
+ "description": "Id of an execution context."
+ },
+ {
+ "id": "ExecutionContextDescription",
+ "type": "object",
+ "description": "Description of an isolated world.",
+ "properties": [
+ { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
+ { "name": "origin", "type": "string", "description": "Execution context origin." },
+ { "name": "name", "type": "string", "description": "Human readable name describing given context." },
+ { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
+ ]
+ },
+ {
+ "id": "ExceptionDetails",
+ "type": "object",
+ "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
+ "properties": [
+ { "name": "exceptionId", "type": "integer", "description": "Exception id." },
+ { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
+ { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
+ { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
+ { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
+ ]
+ },
+ {
+ "id": "Timestamp",
+ "type": "number",
+ "description": "Number of milliseconds since epoch."
+ },
+ {
+ "id": "CallFrame",
+ "type": "object",
+ "description": "Stack entry for runtime errors and assertions.",
+ "properties": [
+ { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+ { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
+ { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
+ ]
+ },
+ {
+ "id": "StackTrace",
+ "type": "object",
+ "description": "Call frames for assertions or error messages.",
+ "properties": [
+ { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
+ { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
+ { "name": "parentId", "$ref": "StackTraceId", "optional": true, "experimental": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+ ]
+ },
+ {
+ "id": "UniqueDebuggerId",
+ "type": "string",
+ "description": "Unique identifier of current debugger.",
+ "experimental": true
+ },
+ {
+ "id": "StackTraceId",
+ "type": "object",
+ "description": "If <code>debuggerId</code> is set stack trace comes from another debugger and can be resolved there. This allows to track cross-debugger calls. See <code>Runtime.StackTrace</code> and <code>Debugger.paused</code> for usages.",
+ "properties": [
+ { "name": "id", "type": "string" },
+ { "name": "debuggerId", "$ref": "UniqueDebuggerId", "optional": true }
+ ],
+ "experimental": true
+ }
+ ],
+ "commands": [
+ {
+ "name": "evaluate",
+ "parameters": [
+ { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Evaluates expression on global object."
+ },
+ {
+ "name": "awaitPromise",
+ "parameters": [
+ { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
+ ],
+ "description": "Add handler to promise with given promise object id."
+ },
+ {
+ "name": "callFunctionOn",
+ "parameters": [
+ { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Identifier of the object to call function on. Either objectId or executionContextId should be specified." },
+ { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
+ },
+ {
+ "name": "getProperties",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
+ { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
+ { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
+ ],
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
+ { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
+ },
+ {
+ "name": "releaseObject",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
+ ],
+ "description": "Releases remote object with given id."
+ },
+ {
+ "name": "releaseObjectGroup",
+ "parameters": [
+ { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
+ ],
+ "description": "Releases all remote objects that belong to a given group."
+ },
+ {
+ "name": "runIfWaitingForDebugger",
+ "description": "Tells inspected instance to run if it was waiting for debugger to attach."
+ },
+ {
+ "name": "enable",
+ "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
+ },
+ {
+ "name": "disable",
+ "description": "Disables reporting of execution contexts creation."
+ },
+ {
+ "name": "discardConsoleEntries",
+ "description": "Discards collected exceptions and console API calls."
+ },
+ {
+ "name": "setCustomObjectFormatterEnabled",
+ "parameters": [
+ {
+ "name": "enabled",
+ "type": "boolean"
+ }
+ ],
+ "experimental": true
+ },
+ {
+ "name": "compileScript",
+ "parameters": [
+ { "name": "expression", "type": "string", "description": "Expression to compile." },
+ { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
+ { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
+ ],
+ "returns": [
+ { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Compiles expression."
+ },
+ {
+ "name": "runScript",
+ "parameters": [
+ { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
+ { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Runs script with given id in a given context."
+ },
+ {
+ "name": "queryObjects",
+ "parameters": [
+ { "name": "prototypeObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the prototype to return objects for." }
+ ],
+ "returns": [
+ { "name": "objects", "$ref": "RemoteObject", "description": "Array with objects." }
+ ]
+ },
+ {
+ "name": "globalLexicalScopeNames",
+ "parameters": [
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to lookup global scope variables." }
+ ],
+ "returns": [
+ { "name": "names", "type": "array", "items": { "type": "string" } }
+ ],
+ "description": "Returns all let, const and class variables from global scope."
+ }
+ ],
+ "events": [
+ {
+ "name": "executionContextCreated",
+ "parameters": [
+ { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution context." }
+ ],
+ "description": "Issued when new execution context is created."
+ },
+ {
+ "name": "executionContextDestroyed",
+ "parameters": [
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
+ ],
+ "description": "Issued when execution context is destroyed."
+ },
+ {
+ "name": "executionContextsCleared",
+ "description": "Issued when all executionContexts were cleared in browser"
+ },
+ {
+ "name": "exceptionThrown",
+ "description": "Issued when exception was thrown and unhandled.",
+ "parameters": [
+ { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
+ ]
+ },
+ {
+ "name": "exceptionRevoked",
+ "description": "Issued when unhandled exception was revoked.",
+ "parameters": [
+ { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
+ { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionThrown</code>." }
+ ]
+ },
+ {
+ "name": "consoleAPICalled",
+ "description": "Issued when console API was called.",
+ "parameters": [
+ { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd", "count", "timeEnd"], "description": "Type of the call." },
+ { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
+ { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." },
+ { "name": "context", "type": "string", "optional": true, "experimental": true, "description": "Console context descriptor for calls on non-default console context (not console.*): 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call on named context." }
+ ]
+ },
+ {
+ "name": "inspectRequested",
+ "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
+ "parameters": [
+ { "name": "object", "$ref": "RemoteObject" },
+ { "name": "hints", "type": "object" }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Debugger",
+ "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
+ "dependencies": ["Runtime"],
+ "types": [
+ {
+ "id": "BreakpointId",
+ "type": "string",
+ "description": "Breakpoint identifier."
+ },
+ {
+ "id": "CallFrameId",
+ "type": "string",
+ "description": "Call frame identifier."
+ },
+ {
+ "id": "Location",
+ "type": "object",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
+ ],
+ "description": "Location in the source code."
+ },
+ {
+ "id": "ScriptPosition",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ { "name": "lineNumber", "type": "integer" },
+ { "name": "columnNumber", "type": "integer" }
+ ],
+ "description": "Location in the source code."
+ },
+ {
+ "id": "CallFrame",
+ "type": "object",
+ "properties": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
+ { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
+ { "name": "functionLocation", "$ref": "Location", "optional": true, "description": "Location in the source code." },
+ { "name": "location", "$ref": "Location", "description": "Location in the source code." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
+ { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
+ { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
+ ],
+ "description": "JavaScript call frame. Array of call frames form the call stack."
+ },
+ {
+ "id": "Scope",
+ "type": "object",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script", "eval", "module"], "description": "Scope type." },
+ { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
+ { "name": "name", "type": "string", "optional": true },
+ { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
+ { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
+ ],
+ "description": "Scope description."
+ },
+ {
+ "id": "SearchMatch",
+ "type": "object",
+ "description": "Search match for resource.",
+ "properties": [
+ { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
+ { "name": "lineContent", "type": "string", "description": "Line with match content." }
+ ]
+ },
+ {
+ "id": "BreakLocation",
+ "type": "object",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." },
+ { "name": "type", "type": "string", "enum": [ "debuggerStatement", "call", "return" ], "optional": true }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable",
+ "returns": [
+ { "name": "debuggerId", "$ref": "Runtime.UniqueDebuggerId", "experimental": true, "description": "Unique identifier of the debugger." }
+ ],
+ "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
+ },
+ {
+ "name": "disable",
+ "description": "Disables debugger for given page."
+ },
+ {
+ "name": "setBreakpointsActive",
+ "parameters": [
+ { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
+ ],
+ "description": "Activates / deactivates all breakpoints on the page."
+ },
+ {
+ "name": "setSkipAllPauses",
+ "parameters": [
+ { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
+ ],
+ "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
+ },
+ {
+ "name": "setBreakpointByUrl",
+ "parameters": [
+ { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
+ { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
+ { "name": "scriptHash", "type": "string", "optional": true, "description": "Script hash of the resources to set breakpoint on." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
+ { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+ ],
+ "returns": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+ { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
+ ],
+ "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
+ },
+ {
+ "name": "setBreakpoint",
+ "parameters": [
+ { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
+ { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+ ],
+ "returns": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+ { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
+ ],
+ "description": "Sets JavaScript breakpoint at a given location."
+ },
+ {
+ "name": "removeBreakpoint",
+ "parameters": [
+ { "name": "breakpointId", "$ref": "BreakpointId" }
+ ],
+ "description": "Removes JavaScript breakpoint."
+ },
+ {
+ "name": "getPossibleBreakpoints",
+ "parameters": [
+ { "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
+ { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range." },
+ { "name": "restrictToFunction", "type": "boolean", "optional": true, "description": "Only consider locations which are in the same (non-nested) function as start." }
+ ],
+ "returns": [
+ { "name": "locations", "type": "array", "items": { "$ref": "BreakLocation" }, "description": "List of the possible breakpoint locations." }
+ ],
+ "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same."
+ },
+ {
+ "name": "continueToLocation",
+ "parameters": [
+ { "name": "location", "$ref": "Location", "description": "Location to continue to." },
+ { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true }
+ ],
+ "description": "Continues execution until specific location is reached."
+ },
+ {
+ "name": "pauseOnAsyncCall",
+ "parameters": [
+ { "name": "parentStackTraceId", "$ref": "Runtime.StackTraceId", "description": "Debugger will pause when async call with given stack trace is started." }
+ ],
+ "experimental": true
+ },
+ {
+ "name": "stepOver",
+ "description": "Steps over the statement."
+ },
+ {
+ "name": "stepInto",
+ "parameters": [
+ { "name": "breakOnAsyncCall", "type": "boolean", "optional": true, "experimental": true, "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled before next pause." }
+ ],
+ "description": "Steps into the function call."
+ },
+ {
+ "name": "stepOut",
+ "description": "Steps out of the function call."
+ },
+ {
+ "name": "pause",
+ "description": "Stops on the next JavaScript statement."
+ },
+ {
+ "name": "scheduleStepIntoAsync",
+ "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and Debugger.pauseOnAsyncCall instead. Steps into next scheduled async task if any is scheduled before next pause. Returns success when async task is actually scheduled, returns error if no task was scheduled or another scheduleStepIntoAsync was called.",
+ "experimental": true
+ },
+ {
+ "name": "resume",
+ "description": "Resumes JavaScript execution."
+ },
+ {
+ "name": "getStackTrace",
+ "parameters": [
+ { "name": "stackTraceId", "$ref": "Runtime.StackTraceId" }
+ ],
+ "returns": [
+ { "name": "stackTrace", "$ref": "Runtime.StackTrace" }
+ ],
+ "description": "Returns stack trace with given <code>stackTraceId</code>.",
+ "experimental": true
+ },
+ {
+ "name": "searchInContent",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
+ { "name": "query", "type": "string", "description": "String to search for." },
+ { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
+ { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
+ ],
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
+ ],
+ "description": "Searches for given string in script content."
+ },
+ {
+ "name": "setScriptSource",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
+ { "name": "scriptSource", "type": "string", "description": "New content of the script." },
+ { "name": "dryRun", "type": "boolean", "optional": true, "description": "If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
+ ],
+ "returns": [
+ { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
+ { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
+ { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
+ ],
+ "description": "Edits JavaScript source live."
+ },
+ {
+ "name": "restartFrame",
+ "parameters": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
+ ],
+ "returns": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." }
+ ],
+ "description": "Restarts particular call frame from the beginning."
+ },
+ {
+ "name": "getScriptSource",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
+ ],
+ "returns": [
+ { "name": "scriptSource", "type": "string", "description": "Script source." }
+ ],
+ "description": "Returns source for the script with given id."
+ },
+ {
+ "name": "setPauseOnExceptions",
+ "parameters": [
+ { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
+ ],
+ "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
+ },
+ {
+ "name": "evaluateOnCallFrame",
+ "parameters": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
+ { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
+ { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Evaluates expression on a given call frame."
+ },
+ {
+ "name": "setVariableValue",
+ "parameters": [
+ { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
+ { "name": "variableName", "type": "string", "description": "Variable name." },
+ { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
+ ],
+ "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
+ },
+ {
+ "name": "setReturnValue",
+ "parameters": [
+ { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New return value." }
+ ],
+ "experimental": true,
+ "description": "Changes return value in top frame. Available only at return break position."
+ },
+ {
+ "name": "setAsyncCallStackDepth",
+ "parameters": [
+ { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
+ ],
+ "description": "Enables or disables async call stacks tracking."
+ },
+ {
+ "name": "setBlackboxPatterns",
+ "parameters": [
+ { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
+ ],
+ "experimental": true,
+ "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
+ },
+ {
+ "name": "setBlackboxedRanges",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
+ { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
+ ],
+ "experimental": true,
+ "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blackboxed scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
+ }
+ ],
+ "events": [
+ {
+ "name": "scriptParsed",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+ { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+ { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+ { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+ { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+ { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+ { "name": "hash", "type": "string", "description": "Content hash of the script."},
+ { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+ { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
+ { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
+ { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
+ { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
+ { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
+ ],
+ "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
+ },
+ {
+ "name": "scriptFailedToParse",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+ { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+ { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+ { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+ { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+ { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+ { "name": "hash", "type": "string", "description": "Content hash of the script."},
+ { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+ { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
+ { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
+ { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
+ { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
+ ],
+ "description": "Fired when virtual machine fails to parse the script."
+ },
+ {
+ "name": "breakpointResolved",
+ "parameters": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
+ { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
+ ],
+ "description": "Fired when breakpoint is resolved to an actual script and location."
+ },
+ {
+ "name": "paused",
+ "parameters": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
+ { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
+ { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
+ { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
+ { "name": "asyncCallStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Just scheduled async call will have this stack trace as parent stack during async execution. This field is available only after <code>Debugger.stepInto</code> call with <code>breakOnAsyncCall</code> flag." }
+ ],
+ "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
+ },
+ {
+ "name": "resumed",
+ "description": "Fired when the virtual machine resumed execution."
+ }
+ ]
+ },
+ {
+ "domain": "Console",
+ "description": "This domain is deprecated - use Runtime or Log instead.",
+ "dependencies": ["Runtime"],
+ "deprecated": true,
+ "types": [
+ {
+ "id": "ConsoleMessage",
+ "type": "object",
+ "description": "Console message.",
+ "properties": [
+ { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
+ { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
+ { "name": "text", "type": "string", "description": "Message text." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
+ { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
+ { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable",
+ "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
+ },
+ {
+ "name": "disable",
+ "description": "Disables console domain, prevents further console messages from being reported to the client."
+ },
+ {
+ "name": "clearMessages",
+ "description": "Does nothing."
+ }
+ ],
+ "events": [
+ {
+ "name": "messageAdded",
+ "parameters": [
+ { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
+ ],
+ "description": "Issued when new console message is added."
+ }
+ ]
+ },
+ {
+ "domain": "Profiler",
+ "dependencies": ["Runtime", "Debugger"],
+ "types": [
+ {
+ "id": "ProfileNode",
+ "type": "object",
+ "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
+ "properties": [
+ { "name": "id", "type": "integer", "description": "Unique id of the node." },
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "hitCount", "type": "integer", "optional": true, "description": "Number of samples where this node was on top of the call stack." },
+ { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
+ { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
+ { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "description": "An array of source position ticks." }
+ ]
+ },
+ {
+ "id": "Profile",
+ "type": "object",
+ "description": "Profile.",
+ "properties": [
+ { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
+ { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
+ { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
+ { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
+ { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
+ ]
+ },
+ {
+ "id": "PositionTickInfo",
+ "type": "object",
+ "description": "Specifies a number of samples attributed to a certain source position.",
+ "properties": [
+ { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
+ { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
+ ]
+ },
+ { "id": "CoverageRange",
+ "type": "object",
+ "description": "Coverage data for a source range.",
+ "properties": [
+ { "name": "startOffset", "type": "integer", "description": "JavaScript script source offset for the range start." },
+ { "name": "endOffset", "type": "integer", "description": "JavaScript script source offset for the range end." },
+ { "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
+ ]
+ },
+ { "id": "FunctionCoverage",
+ "type": "object",
+ "description": "Coverage data for a JavaScript function.",
+ "properties": [
+ { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+ { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." },
+ { "name": "isBlockCoverage", "type": "boolean", "description": "Whether coverage data for this function has block granularity." }
+ ]
+ },
+ {
+ "id": "ScriptCoverage",
+ "type": "object",
+ "description": "Coverage data for a JavaScript script.",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
+ ]
+ },
+ { "id": "TypeObject",
+ "type": "object",
+ "description": "Describes a type collected during runtime.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." }
+ ],
+ "experimental": true
+ },
+ { "id": "TypeProfileEntry",
+ "type": "object",
+ "description": "Source offset and types for a parameter or return value.",
+ "properties": [
+ { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." },
+ { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."}
+ ],
+ "experimental": true
+ },
+ {
+ "id": "ScriptTypeProfile",
+ "type": "object",
+ "description": "Type profile data collected during runtime for a JavaScript script.",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." }
+ ],
+ "experimental": true
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "setSamplingInterval",
+ "parameters": [
+ { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
+ ],
+ "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
+ },
+ {
+ "name": "start"
+ },
+ {
+ "name": "stop",
+ "returns": [
+ { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
+ ]
+ },
+ {
+ "name": "startPreciseCoverage",
+ "parameters": [
+ { "name": "callCount", "type": "boolean", "optional": true, "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'." },
+ { "name": "detailed", "type": "boolean", "optional": true, "description": "Collect block-based coverage." }
+ ],
+ "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters."
+ },
+ {
+ "name": "stopPreciseCoverage",
+ "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code."
+ },
+ {
+ "name": "takePreciseCoverage",
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
+ ],
+ "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started."
+ },
+ {
+ "name": "getBestEffortCoverage",
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
+ ],
+ "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection."
+ },
+ {
+ "name": "startTypeProfile",
+ "description": "Enable type profile.",
+ "experimental": true
+ },
+ {
+ "name": "stopTypeProfile",
+ "description": "Disable type profile. Disabling releases type profile data collected so far.",
+ "experimental": true
+ },
+ {
+ "name": "takeTypeProfile",
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." }
+ ],
+ "description": "Collect type profile.",
+ "experimental": true
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleProfileStarted",
+ "parameters": [
+ { "name": "id", "type": "string" },
+ { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
+ ],
+ "description": "Sent when new profile recording is started using console.profile() call."
+ },
+ {
+ "name": "consoleProfileFinished",
+ "parameters": [
+ { "name": "id", "type": "string" },
+ { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
+ { "name": "profile", "$ref": "Profile" },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "HeapProfiler",
+ "dependencies": ["Runtime"],
+ "experimental": true,
+ "types": [
+ {
+ "id": "HeapSnapshotObjectId",
+ "type": "string",
+ "description": "Heap snapshot object id."
+ },
+ {
+ "id": "SamplingHeapProfileNode",
+ "type": "object",
+ "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
+ "properties": [
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
+ { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
+ ]
+ },
+ {
+ "id": "SamplingHeapProfile",
+ "type": "object",
+ "description": "Profile.",
+ "properties": [
+ { "name": "head", "$ref": "SamplingHeapProfileNode" }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "startTrackingHeapObjects",
+ "parameters": [
+ { "name": "trackAllocations", "type": "boolean", "optional": true }
+ ]
+ },
+ {
+ "name": "stopTrackingHeapObjects",
+ "parameters": [
+ { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
+ ]
+ },
+ {
+ "name": "takeHeapSnapshot",
+ "parameters": [
+ { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
+ ]
+ },
+ {
+ "name": "collectGarbage"
+ },
+ {
+ "name": "getObjectByHeapObjectId",
+ "parameters": [
+ { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
+ ]
+ },
+ {
+ "name": "addInspectedHeapObject",
+ "parameters": [
+ { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
+ ],
+ "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details $x functions)."
+ },
+ {
+ "name": "getHeapObjectId",
+ "parameters": [
+ { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
+ ],
+ "returns": [
+ { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
+ ]
+ },
+ {
+ "name": "startSampling",
+ "parameters": [
+ { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
+ ]
+ },
+ {
+ "name": "stopSampling",
+ "returns": [
+ { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
+ ]
+ },
+ {
+ "name": "getSamplingProfile",
+ "returns": [
+ { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Return the sampling profile being collected." }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "addHeapSnapshotChunk",
+ "parameters": [
+ { "name": "chunk", "type": "string" }
+ ]
+ },
+ {
+ "name": "resetProfiles"
+ },
+ {
+ "name": "reportHeapSnapshotProgress",
+ "parameters": [
+ { "name": "done", "type": "integer" },
+ { "name": "total", "type": "integer" },
+ { "name": "finished", "type": "boolean", "optional": true }
+ ]
+ },
+ {
+ "name": "lastSeenObjectId",
+ "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If there were changes in the heap since the last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+ "parameters": [
+ { "name": "lastSeenObjectId", "type": "integer" },
+ { "name": "timestamp", "type": "number" }
+ ]
+ },
+ {
+ "name": "heapStatsUpdate",
+ "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
+ "parameters": [
+ { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
+ ]
+ }
+ ]
+ }]
+}
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index 2d493e5b74..ea573d11a6 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -1,9 +1,10 @@
{
- "version": { "major": "1", "minor": "2" },
+ "version": { "major": "1", "minor": "3" },
"domains": [
{
"domain": "Schema",
- "description": "Provides information about the protocol schema.",
+ "description": "This domain is deprecated.",
+ "deprecated": true,
"types": [
{
"id": "Domain",
@@ -202,8 +203,24 @@
{ "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
{ "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
- { "name": "promiseCreationFrame", "$ref": "CallFrame", "optional": true, "experimental": true, "description": "Creation frame of the Promise which produced the next synchronous trace when resolved, if available." }
+ { "name": "parentId", "$ref": "StackTraceId", "optional": true, "experimental": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
]
+ },
+ {
+ "id": "UniqueDebuggerId",
+ "type": "string",
+ "description": "Unique identifier of current debugger.",
+ "experimental": true
+ },
+ {
+ "id": "StackTraceId",
+ "type": "object",
+ "description": "If <code>debuggerId</code> is set stack trace comes from another debugger and can be resolved there. This allows to track cross-debugger calls. See <code>Runtime.StackTrace</code> and <code>Debugger.paused</code> for usages.",
+ "properties": [
+ { "name": "id", "type": "string" },
+ { "name": "debuggerId", "$ref": "UniqueDebuggerId", "optional": true }
+ ],
+ "experimental": true
}
],
"commands": [
@@ -217,7 +234,7 @@
{ "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
{ "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
{ "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
],
"returns": [
@@ -248,7 +265,7 @@
{ "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
{ "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
{ "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." },
{ "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified." },
{ "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object." }
@@ -353,8 +370,7 @@
],
"returns": [
{ "name": "objects", "$ref": "RemoteObject", "description": "Array with objects." }
- ],
- "experimental": true
+ ]
},
{
"name": "globalLexicalScopeNames",
@@ -364,8 +380,7 @@
"returns": [
{ "name": "names", "type": "array", "items": { "type": "string" } }
],
- "description": "Returns all let, const and class variables from global scope.",
- "experimental": true
+ "description": "Returns all let, const and class variables from global scope."
}
],
"events": [
@@ -466,7 +481,7 @@
"properties": [
{ "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
{ "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
- { "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
+ { "name": "functionLocation", "$ref": "Location", "optional": true, "description": "Location in the source code." },
{ "name": "location", "$ref": "Location", "description": "Location in the source code." },
{ "name": "url", "type": "string", "description": "JavaScript script name or url." },
{ "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
@@ -494,8 +509,7 @@
"properties": [
{ "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
{ "name": "lineContent", "type": "string", "description": "Line with match content." }
- ],
- "experimental": true
+ ]
},
{
"id": "BreakLocation",
@@ -505,13 +519,15 @@
{ "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
{ "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." },
{ "name": "type", "type": "string", "enum": [ "debuggerStatement", "call", "return" ], "optional": true }
- ],
- "experimental": true
+ ]
}
],
"commands": [
{
"name": "enable",
+ "returns": [
+ { "name": "debuggerId", "$ref": "Runtime.UniqueDebuggerId", "experimental": true, "description": "Unique identifier of the debugger." }
+ ],
"description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
},
{
@@ -538,7 +554,7 @@
{ "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
{ "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
{ "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
- { "name": "scriptHash", "type": "string", "optional": true, "experimental": true, "description": "Script hash of the resources to set breakpoint on." },
+ { "name": "scriptHash", "type": "string", "optional": true, "description": "Script hash of the resources to set breakpoint on." },
{ "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
{ "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
],
@@ -577,23 +593,32 @@
"returns": [
{ "name": "locations", "type": "array", "items": { "$ref": "BreakLocation" }, "description": "List of the possible breakpoint locations." }
],
- "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same.",
- "experimental": true
+ "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same."
},
{
"name": "continueToLocation",
"parameters": [
{ "name": "location", "$ref": "Location", "description": "Location to continue to." },
- { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true, "experimental": true }
+ { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true }
],
"description": "Continues execution until specific location is reached."
},
{
+ "name": "pauseOnAsyncCall",
+ "parameters": [
+ { "name": "parentStackTraceId", "$ref": "Runtime.StackTraceId", "description": "Debugger will pause when async call with given stack trace is started." }
+ ],
+ "experimental": true
+ },
+ {
"name": "stepOver",
"description": "Steps over the statement."
},
{
"name": "stepInto",
+ "parameters": [
+ { "name": "breakOnAsyncCall", "type": "boolean", "optional": true, "experimental": true, "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled before next pause." }
+ ],
"description": "Steps into the function call."
},
{
@@ -606,7 +631,7 @@
},
{
"name": "scheduleStepIntoAsync",
- "description": "Steps into next scheduled async task if any is scheduled before next pause. Returns success when async task is actually scheduled, returns error if no task were scheduled or another scheduleStepIntoAsync was called.",
+ "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and Debugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled before next pause. Returns success when async task is actually scheduled, returns error if no task were scheduled or another scheduleStepIntoAsync was called.",
"experimental": true
},
{
@@ -614,6 +639,17 @@
"description": "Resumes JavaScript execution."
},
{
+ "name": "getStackTrace",
+ "parameters": [
+ { "name": "stackTraceId", "$ref": "Runtime.StackTraceId" }
+ ],
+ "returns": [
+ { "name": "stackTrace", "$ref": "Runtime.StackTrace" }
+ ],
+ "description": "Returns stack trace with given <code>stackTraceId</code>.",
+ "experimental": true
+ },
+ {
"name": "searchInContent",
"parameters": [
{ "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
@@ -624,7 +660,6 @@
"returns": [
{ "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
],
- "experimental": true,
"description": "Searches for given string in script content."
},
{
@@ -638,6 +673,7 @@
{ "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
{ "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
{ "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
{ "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
],
"description": "Edits JavaScript source live."
@@ -649,7 +685,8 @@
],
"returns": [
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." }
],
"description": "Restarts particular call frame from the beginning."
},
@@ -680,7 +717,7 @@
{ "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
{ "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "experimental": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
+ { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
],
"returns": [
{ "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
@@ -699,6 +736,14 @@
"description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
},
{
+ "name": "setReturnValue",
+ "parameters": [
+ { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New return value." }
+ ],
+ "experimental": true,
+ "description": "Changes return value in top frame. Available only at return break position."
+ },
+ {
"name": "setAsyncCallStackDepth",
"parameters": [
{ "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
@@ -738,9 +783,9 @@
{ "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
{ "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
{ "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module.", "experimental": true },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length.", "experimental": true },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
+ { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
+ { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
{ "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
],
"description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
@@ -758,9 +803,9 @@
{ "name": "hash", "type": "string", "description": "Content hash of the script."},
{ "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
{ "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module.", "experimental": true },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length.", "experimental": true },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
+ { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
+ { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
{ "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
],
"description": "Fired when virtual machine fails to parse the script."
@@ -780,7 +825,9 @@
{ "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
{ "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
{ "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
+ { "name": "asyncCallStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Just scheduled async call will have this stack trace as parent stack during async execution. This field is available only after <code>Debugger.stepInto</code> call with <code>breakOnAsynCall</code> flag." }
],
"description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
},
@@ -845,10 +892,10 @@
"properties": [
{ "name": "id", "type": "integer", "description": "Unique id of the node." },
{ "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "hitCount", "type": "integer", "optional": true, "experimental": true, "description": "Number of samples where this node was on top of the call stack." },
+ { "name": "hitCount", "type": "integer", "optional": true, "description": "Number of samples where this node was on top of the call stack." },
{ "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
{ "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
- { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "experimental": true, "description": "An array of source position ticks." }
+ { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "description": "An array of source position ticks." }
]
},
{
@@ -866,7 +913,6 @@
{
"id": "PositionTickInfo",
"type": "object",
- "experimental": true,
"description": "Specifies a number of samples attributed to a certain source position.",
"properties": [
{ "name": "line", "type": "integer", "description": "Source line number (1-based)." },
@@ -880,8 +926,7 @@
{ "name": "startOffset", "type": "integer", "description": "JavaScript script source offset for the range start." },
{ "name": "endOffset", "type": "integer", "description": "JavaScript script source offset for the range end." },
{ "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
- ],
- "experimental": true
+ ]
},
{ "id": "FunctionCoverage",
"type": "object",
@@ -890,8 +935,7 @@
{ "name": "functionName", "type": "string", "description": "JavaScript function name." },
{ "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." },
{ "name": "isBlockCoverage", "type": "boolean", "description": "Whether coverage data for this function has block granularity." }
- ],
- "experimental": true
+ ]
},
{
"id": "ScriptCoverage",
@@ -901,8 +945,7 @@
{ "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
{ "name": "url", "type": "string", "description": "JavaScript script name or url." },
{ "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
- ],
- "experimental": true
+ ]
},
{ "id": "TypeObject",
"type": "object",
@@ -962,29 +1005,25 @@
{ "name": "callCount", "type": "boolean", "optional": true, "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'." },
{ "name": "detailed", "type": "boolean", "optional": true, "description": "Collect block-based coverage." }
],
- "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters.",
- "experimental": true
+ "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters."
},
{
"name": "stopPreciseCoverage",
- "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code.",
- "experimental": true
+ "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code."
},
{
"name": "takePreciseCoverage",
"returns": [
{ "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
],
- "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started.",
- "experimental": true
+ "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started."
},
{
"name": "getBestEffortCoverage",
"returns": [
{ "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
],
- "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection.",
- "experimental": true
+ "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection."
},
{
"name": "startTypeProfile",
@@ -1120,6 +1159,12 @@
"returns": [
{ "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
]
+ },
+ {
+ "name": "getSamplingProfile",
+ "returns": [
+ { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Return the sampling profile being collected." }
+ ]
}
],
"events": [
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 30dd7dd14c..36a0cca26c 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -24,8 +24,8 @@ bool isSpaceOrNewLine(UChar c) {
return isASCII(c) && c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
}
-int charactersToInteger(const UChar* characters, size_t length,
- bool* ok = nullptr) {
+int64_t charactersToInteger(const UChar* characters, size_t length,
+ bool* ok = nullptr) {
std::vector<char> buffer;
buffer.reserve(length + 1);
for (size_t i = 0; i < length; ++i) {
@@ -39,12 +39,9 @@ int charactersToInteger(const UChar* characters, size_t length,
char* endptr;
int64_t result =
- static_cast<int64_t>(std::strtol(buffer.data(), &endptr, 10));
- if (ok) {
- *ok = !(*endptr) && result <= std::numeric_limits<int>::max() &&
- result >= std::numeric_limits<int>::min();
- }
- return static_cast<int>(result);
+ static_cast<int64_t>(std::strtoll(buffer.data(), &endptr, 10));
+ if (ok) *ok = !(*endptr);
+ return result;
}
const UChar replacementCharacter = 0xFFFD;
@@ -430,10 +427,19 @@ String16 String16::fromDouble(double number, int precision) {
return String16(str.get());
}
-int String16::toInteger(bool* ok) const {
+int64_t String16::toInteger64(bool* ok) const {
return charactersToInteger(characters16(), length(), ok);
}
+int String16::toInteger(bool* ok) const {
+ int64_t result = toInteger64(ok);
+ if (ok && *ok) {
+ *ok = result <= std::numeric_limits<int>::max() &&
+ result >= std::numeric_limits<int>::min();
+ }
+ return static_cast<int>(result);
+}
+
String16 String16::stripWhiteSpace() const {
if (!length()) return String16();
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 1140092374..1dc9350e96 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -37,6 +37,7 @@ class String16 {
static String16 fromDouble(double);
static String16 fromDouble(double, int precision);
+ int64_t toInteger64(bool* ok = nullptr) const;
int toInteger(bool* ok = nullptr) const;
String16 stripWhiteSpace() const;
const UChar* characters16() const { return m_impl.c_str(); }
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index d591daf38e..508229365f 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -4,6 +4,7 @@
#include "src/inspector/string-util.h"
+#include "src/base/platform/platform.h"
#include "src/conversions.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/unicode-cache.h"
@@ -121,6 +122,18 @@ std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
static_cast<int>(string.length()));
}
+// static
+void StringUtil::builderAppendQuotedString(StringBuilder& builder,
+ const String& str) {
+ builder.append('"');
+ if (!str.isEmpty()) {
+ escapeWideStringForJSON(
+ reinterpret_cast<const uint16_t*>(str.characters16()),
+ static_cast<int>(str.length()), &builder);
+ }
+ builder.append('"');
+}
+
} // namespace protocol
// static
@@ -139,4 +152,19 @@ StringBufferImpl::StringBufferImpl(String16& string) {
m_string = toStringView(m_owner);
}
+String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId) {
+ const size_t kBufferSize = 35;
+
+ char buffer[kBufferSize];
+ v8::base::OS::SNPrintF(buffer, kBufferSize, "(%08" PRIX64 "%08" PRIX64 ")",
+ debuggerId.first, debuggerId.second);
+ return String16(buffer);
+}
+
+String16 stackTraceIdToString(uintptr_t id) {
+ String16Builder builder;
+ builder.appendNumber(static_cast<size_t>(id));
+ return builder.toString();
+}
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 134ff425e1..8aaf3ce850 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -49,6 +49,7 @@ class StringUtil {
static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
builder.append(s, len);
}
+ static void builderAppendQuotedString(StringBuilder&, const String&);
static void builderReserve(StringBuilder& builder, size_t capacity) {
builder.reserveCapacity(capacity);
}
@@ -86,6 +87,9 @@ class StringBufferImpl : public StringBuffer {
DISALLOW_COPY_AND_ASSIGN(StringBufferImpl);
};
+String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId);
+String16 stackTraceIdToString(uintptr_t id);
+
} // namespace v8_inspector
#endif // V8_INSPECTOR_STRINGUTIL_H_
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 5da18e538f..1129eac676 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -295,8 +295,10 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
if (m_scriptId)
exceptionDetails->setScriptId(String16::fromInteger(m_scriptId));
if (!m_url.isEmpty()) exceptionDetails->setUrl(m_url);
- if (m_stackTrace)
- exceptionDetails->setStackTrace(m_stackTrace->buildInspectorObjectImpl());
+ if (m_stackTrace) {
+ exceptionDetails->setStackTrace(
+ m_stackTrace->buildInspectorObjectImpl(inspector->debugger()));
+ }
if (m_contextId) exceptionDetails->setExecutionContextId(m_contextId);
if (exception) exceptionDetails->setException(std::move(exception));
frontend->exceptionThrown(m_timestamp, std::move(exceptionDetails));
@@ -326,7 +328,9 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
frontend->consoleAPICalled(
consoleAPITypeValue(m_type), std::move(arguments), m_contextId,
m_timestamp,
- m_stackTrace ? m_stackTrace->buildInspectorObjectImpl() : nullptr,
+ m_stackTrace
+ ? m_stackTrace->buildInspectorObjectImpl(inspector->debugger())
+ : nullptr,
std::move(consoleContext));
return;
}
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index fb535f0f24..7a0caf08a1 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -109,8 +109,11 @@ class ConsoleHelper {
return m_info[0]->BooleanValue(m_context).FromMaybe(defaultValue);
}
- String16 firstArgToString(const String16& defaultValue) {
- if (m_info.Length() < 1) return defaultValue;
+ String16 firstArgToString(const String16& defaultValue,
+ bool allowUndefined = true) {
+ if (m_info.Length() < 1 || (!allowUndefined && m_info[0]->IsUndefined())) {
+ return defaultValue;
+ }
v8::Local<v8::String> titleValue;
v8::TryCatch tryCatch(m_context->GetIsolate());
if (m_info[0]->IsObject()) {
@@ -349,7 +352,7 @@ static void timeFunction(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext,
bool timelinePrefix, V8InspectorImpl* inspector) {
ConsoleHelper helper(info, consoleContext, inspector);
- String16 protocolTitle = helper.firstArgToString("default");
+ String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
inspector->client()->consoleTime(toStringView(protocolTitle));
helper.consoleMessageStorage()->time(
@@ -361,7 +364,7 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext,
bool timelinePrefix, V8InspectorImpl* inspector) {
ConsoleHelper helper(info, consoleContext, inspector);
- String16 protocolTitle = helper.firstArgToString("default");
+ String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
inspector->client()->consoleTimeEnd(toStringView(protocolTitle));
double elapsed = helper.consoleMessageStorage()->timeEnd(
@@ -660,9 +663,10 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI(
DCHECK(success);
USE(success);
- // TODO(dgozman): this CommandLineAPIData instance leaks. Use PodArray maybe?
- v8::Local<v8::External> data =
- v8::External::New(isolate, new CommandLineAPIData(this, sessionId));
+ v8::Local<v8::ArrayBuffer> data =
+ v8::ArrayBuffer::New(isolate, sizeof(CommandLineAPIData));
+ *static_cast<CommandLineAPIData*>(data->GetContents().Data()) =
+ CommandLineAPIData(this, sessionId);
createBoundFunctionProperty(context, commandLineAPI, data, "dir",
&V8Console::call<&V8Console::Dir>,
"function dir(value) { [Command Line API] }");
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index a47a0b7cad..ba4dfe328b 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -108,14 +108,14 @@ class V8Console : public v8::debug::ConsoleDelegate {
int)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
CommandLineAPIData* data = static_cast<CommandLineAPIData*>(
- info.Data().As<v8::External>()->Value());
+ info.Data().As<v8::ArrayBuffer>()->GetContents().Data());
(data->first->*func)(info, data->second);
}
template <void (V8Console::*func)(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext&)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
CommandLineAPIData* data = static_cast<CommandLineAPIData*>(
- info.Data().As<v8::External>()->Value());
+ info.Data().As<v8::ArrayBuffer>()->GetContents().Data());
v8::debug::ConsoleCallArguments args(info);
(data->first->*func)(args, v8::debug::ConsoleContext());
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 1b15f04c1a..8e5142d36e 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -370,7 +370,9 @@ void V8DebuggerAgentImpl::enableImpl() {
}
}
-Response V8DebuggerAgentImpl::enable() {
+Response V8DebuggerAgentImpl::enable(String16* outDebuggerId) {
+ *outDebuggerId = debuggerIdToString(
+ m_debugger->debuggerIdFor(m_session->contextGroupId()));
if (enabled()) return Response::OK();
if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
@@ -715,6 +717,32 @@ Response V8DebuggerAgentImpl::continueToLocation(
protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Any));
}
+Response V8DebuggerAgentImpl::getStackTrace(
+ std::unique_ptr<protocol::Runtime::StackTraceId> inStackTraceId,
+ std::unique_ptr<protocol::Runtime::StackTrace>* outStackTrace) {
+ bool isOk = false;
+ int64_t id = inStackTraceId->getId().toInteger64(&isOk);
+ std::pair<int64_t, int64_t> debuggerId;
+ if (inStackTraceId->hasDebuggerId()) {
+ debuggerId =
+ m_debugger->debuggerIdFor(inStackTraceId->getDebuggerId(String16()));
+ } else {
+ debuggerId = m_debugger->debuggerIdFor(m_session->contextGroupId());
+ }
+ V8StackTraceId v8StackTraceId(id, debuggerId);
+ if (!isOk || v8StackTraceId.IsInvalid()) {
+ return Response::Error("Invalid stack trace id");
+ }
+ auto stack =
+ m_debugger->stackTraceFor(m_session->contextGroupId(), v8StackTraceId);
+ if (!stack) {
+ return Response::Error("Stack trace with given id is not found");
+ }
+ *outStackTrace = stack->buildInspectorObject(
+ m_debugger, m_debugger->maxAsyncCallChainDepth());
+ return Response::OK();
+}
+
bool V8DebuggerAgentImpl::isFunctionBlackboxed(const String16& scriptId,
const v8::debug::Location& start,
const v8::debug::Location& end) {
@@ -816,6 +844,7 @@ Response V8DebuggerAgentImpl::setScriptSource(
Maybe<protocol::Array<protocol::Debugger::CallFrame>>* newCallFrames,
Maybe<bool>* stackChanged,
Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
+ Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId,
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
@@ -858,13 +887,15 @@ Response V8DebuggerAgentImpl::setScriptSource(
if (!response.isSuccess()) return response;
*newCallFrames = std::move(callFrames);
*asyncStackTrace = currentAsyncStackTrace();
+ *asyncStackTraceId = currentExternalStackTrace();
return Response::OK();
}
Response V8DebuggerAgentImpl::restartFrame(
const String16& callFrameId,
std::unique_ptr<Array<CallFrame>>* newCallFrames,
- Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) {
+ Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
+ Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId) {
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
@@ -880,6 +911,7 @@ Response V8DebuggerAgentImpl::restartFrame(
response = currentCallFrames(newCallFrames);
if (!response.isSuccess()) return response;
*asyncStackTrace = currentAsyncStackTrace();
+ *asyncStackTraceId = currentExternalStackTrace();
return Response::OK();
}
@@ -951,10 +983,11 @@ Response V8DebuggerAgentImpl::stepOver() {
return Response::OK();
}
-Response V8DebuggerAgentImpl::stepInto() {
+Response V8DebuggerAgentImpl::stepInto(Maybe<bool> inBreakOnAsyncCall) {
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
- m_debugger->stepIntoStatement(m_session->contextGroupId());
+ m_debugger->stepIntoStatement(m_session->contextGroupId(),
+ inBreakOnAsyncCall.fromMaybe(false));
return Response::OK();
}
@@ -975,6 +1008,18 @@ void V8DebuggerAgentImpl::scheduleStepIntoAsync(
m_session->contextGroupId());
}
+Response V8DebuggerAgentImpl::pauseOnAsyncCall(
+ std::unique_ptr<protocol::Runtime::StackTraceId> inParentStackTraceId) {
+ bool isOk = false;
+ int64_t stackTraceId = inParentStackTraceId->getId().toInteger64(&isOk);
+ if (!isOk) {
+ return Response::Error("Invalid stack trace id");
+ }
+ m_debugger->pauseOnAsyncCall(m_session->contextGroupId(), stackTraceId,
+ inParentStackTraceId->getDebuggerId(String16()));
+ return Response::OK();
+}
+
Response V8DebuggerAgentImpl::setPauseOnExceptions(
const String16& stringPauseState) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
@@ -1058,6 +1103,7 @@ Response V8DebuggerAgentImpl::setVariableValue(
if (scopeNumber != 0) {
return Response::Error("Could not find scope with given number");
}
+
if (!scopeIterator->SetVariableValue(toV8String(m_isolate, variableName),
newValue) ||
scope.tryCatch().HasCaught()) {
@@ -1066,6 +1112,29 @@ Response V8DebuggerAgentImpl::setVariableValue(
return Response::OK();
}
+Response V8DebuggerAgentImpl::setReturnValue(
+ std::unique_ptr<protocol::Runtime::CallArgument> protocolNewValue) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
+ if (iterator->Done()) {
+ return Response::Error("Could not find top call frame");
+ }
+ if (iterator->GetReturnValue().IsEmpty()) {
+ return Response::Error(
+ "Could not update return value at non-return position");
+ }
+ InjectedScript::ContextScope scope(m_session, iterator->GetContextId());
+ Response response = scope.initialize();
+ if (!response.isSuccess()) return response;
+ v8::Local<v8::Value> newValue;
+ response = scope.injectedScript()->resolveCallArgument(protocolNewValue.get(),
+ &newValue);
+ if (!response.isSuccess()) return response;
+ v8::debug::SetReturnValue(m_isolate, newValue);
+ return Response::OK();
+}
+
Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
@@ -1177,16 +1246,19 @@ Response V8DebuggerAgentImpl::currentCallFrames(
Response res = buildScopes(scopeIterator.get(), injectedScript, &scopes);
if (!res.isSuccess()) return res;
- std::unique_ptr<RemoteObject> receiver;
+ std::unique_ptr<RemoteObject> protocolReceiver;
if (injectedScript) {
- res = injectedScript->wrapObject(iterator->GetReceiver(),
- kBacktraceObjectGroup, false, false,
- &receiver);
- if (!res.isSuccess()) return res;
- } else {
- receiver = RemoteObject::create()
- .setType(RemoteObject::TypeEnum::Undefined)
- .build();
+ v8::Local<v8::Value> receiver;
+ if (iterator->GetReceiver().ToLocal(&receiver)) {
+ res = injectedScript->wrapObject(receiver, kBacktraceObjectGroup, false,
+ false, &protocolReceiver);
+ if (!res.isSuccess()) return res;
+ }
+ }
+ if (!protocolReceiver) {
+ protocolReceiver = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Undefined)
+ .build();
}
v8::Local<v8::debug::Script> script = iterator->GetScript();
@@ -1213,7 +1285,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
.setLocation(std::move(location))
.setUrl(url)
.setScopeChain(std::move(scopes))
- .setThis(std::move(receiver))
+ .setThis(std::move(protocolReceiver))
.build();
v8::Local<v8::Function> func = iterator->GetFunction();
@@ -1245,8 +1317,17 @@ V8DebuggerAgentImpl::currentAsyncStackTrace() {
m_debugger->currentAsyncParent();
if (!asyncParent) return nullptr;
return asyncParent->buildInspectorObject(
- m_debugger->currentAsyncCreation().get(),
- m_debugger->maxAsyncCallChainDepth() - 1);
+ m_debugger, m_debugger->maxAsyncCallChainDepth() - 1);
+}
+
+std::unique_ptr<protocol::Runtime::StackTraceId>
+V8DebuggerAgentImpl::currentExternalStackTrace() {
+ V8StackTraceId externalParent = m_debugger->currentExternalParent();
+ if (externalParent.IsInvalid()) return nullptr;
+ return protocol::Runtime::StackTraceId::create()
+ .setId(stackTraceIdToString(externalParent.id))
+ .setDebuggerId(debuggerIdToString(externalParent.debugger_id))
+ .build();
}
bool V8DebuggerAgentImpl::isPaused() const {
@@ -1298,7 +1379,8 @@ void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8StackTraceImpl> stack =
V8StackTraceImpl::capture(m_inspector->debugger(), contextGroupId, 1);
std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
- stack && !stack->isEmpty() ? stack->buildInspectorObjectImpl() : nullptr;
+ stack && !stack->isEmpty() ? stack->buildInspectorObjectImpl(m_debugger)
+ : nullptr;
if (success) {
m_frontend.scriptParsed(
scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
@@ -1449,9 +1531,23 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<CallFrame>> protocolCallFrames;
Response response = currentCallFrames(&protocolCallFrames);
if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
+
+ Maybe<protocol::Runtime::StackTraceId> asyncCallStackTrace;
+ void* rawScheduledAsyncTask = m_debugger->scheduledAsyncTask();
+ if (rawScheduledAsyncTask) {
+ asyncCallStackTrace =
+ protocol::Runtime::StackTraceId::create()
+ .setId(stackTraceIdToString(
+ reinterpret_cast<uintptr_t>(rawScheduledAsyncTask)))
+ .setDebuggerId(debuggerIdToString(
+ m_debugger->debuggerIdFor(m_session->contextGroupId())))
+ .build();
+ }
+
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
- currentAsyncStackTrace());
+ currentAsyncStackTrace(), currentExternalStackTrace(),
+ std::move(asyncCallStackTrace));
}
void V8DebuggerAgentImpl::didContinue() {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 306e5fb48c..e697b700e9 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -39,7 +39,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void restore();
// Part of the protocol.
- Response enable() override;
+ Response enable(String16* outDebuggerId) override;
Response disable() override;
Response setBreakpointsActive(bool active) override;
Response setSkipAllPauses(bool skip) override;
@@ -57,6 +57,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Response removeBreakpoint(const String16& breakpointId) override;
Response continueToLocation(std::unique_ptr<protocol::Debugger::Location>,
Maybe<String16> targetCallFrames) override;
+ Response getStackTrace(
+ std::unique_ptr<protocol::Runtime::StackTraceId> inStackTraceId,
+ std::unique_ptr<protocol::Runtime::StackTrace>* outStackTrace) override;
Response searchInContent(
const String16& scriptId, const String16& query,
Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
@@ -73,21 +76,25 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Maybe<protocol::Array<protocol::Debugger::CallFrame>>* optOutCallFrames,
Maybe<bool>* optOutStackChanged,
Maybe<protocol::Runtime::StackTrace>* optOutAsyncStackTrace,
+ Maybe<protocol::Runtime::StackTraceId>* optOutAsyncStackTraceId,
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) override;
Response restartFrame(
const String16& callFrameId,
std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*
newCallFrames,
- Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) override;
+ Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
+ Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId) override;
Response getScriptSource(const String16& scriptId,
String16* scriptSource) override;
Response pause() override;
Response resume() override;
Response stepOver() override;
- Response stepInto() override;
+ Response stepInto(Maybe<bool> inBreakOnAsyncCall) override;
Response stepOut() override;
void scheduleStepIntoAsync(
std::unique_ptr<ScheduleStepIntoAsyncCallback> callback) override;
+ Response pauseOnAsyncCall(std::unique_ptr<protocol::Runtime::StackTraceId>
+ inParentStackTraceId) override;
Response setPauseOnExceptions(const String16& pauseState) override;
Response evaluateOnCallFrame(
const String16& callFrameId, const String16& expression,
@@ -100,6 +107,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
int scopeNumber, const String16& variableName,
std::unique_ptr<protocol::Runtime::CallArgument> newValue,
const String16& callFrame) override;
+ Response setReturnValue(
+ std::unique_ptr<protocol::Runtime::CallArgument> newValue) override;
Response setAsyncCallStackDepth(int depth) override;
Response setBlackboxPatterns(
std::unique_ptr<protocol::Array<String16>> patterns) override;
@@ -146,6 +155,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Response currentCallFrames(
std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*);
std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
+ std::unique_ptr<protocol::Runtime::StackTraceId> currentExternalStackTrace();
void setPauseOnExceptionsImpl(int);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 3e321a4275..8f843b54b2 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -185,6 +185,8 @@ void V8Debugger::disable() {
clearContinueToLocation();
allAsyncTasksCanceled();
m_taskWithScheduledBreak = nullptr;
+ m_taskWithScheduledBreakDebuggerId = String16();
+ m_pauseOnAsyncCall = false;
m_wasmTranslation.Clear();
v8::debug::SetDebugDelegate(m_isolate, nullptr);
v8::debug::SetOutOfMemoryCallback(m_isolate, nullptr, nullptr);
@@ -284,10 +286,12 @@ void V8Debugger::breakProgramOnAssert(int targetContextGroupId) {
v8::debug::BreakRightNow(m_isolate);
}
-void V8Debugger::stepIntoStatement(int targetContextGroupId) {
+void V8Debugger::stepIntoStatement(int targetContextGroupId,
+ bool breakOnAsyncCall) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
m_targetContextGroupId = targetContextGroupId;
+ m_pauseOnAsyncCall = breakOnAsyncCall;
v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
continueProgram(targetContextGroupId);
}
@@ -321,6 +325,19 @@ void V8Debugger::scheduleStepIntoAsync(
m_stepIntoAsyncCallback = std::move(callback);
}
+void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
+ const String16& debuggerId) {
+ DCHECK(targetContextGroupId);
+ m_targetContextGroupId = targetContextGroupId;
+
+ m_taskWithScheduledBreak = reinterpret_cast<void*>(task);
+ String16 currentDebuggerId =
+ debuggerIdToString(debuggerIdFor(targetContextGroupId));
+ if (currentDebuggerId != debuggerId) {
+ m_taskWithScheduledBreakDebuggerId = debuggerId;
+ }
+}
+
Response V8Debugger::continueToLocation(
int targetContextGroupId, V8DebuggerScript* script,
std::unique_ptr<protocol::Debugger::Location> location,
@@ -387,6 +404,9 @@ void V8Debugger::handleProgramBreak(
m_stepIntoAsyncCallback.reset();
}
m_breakRequested = false;
+ m_pauseOnAsyncCall = false;
+ m_taskWithScheduledBreak = nullptr;
+ m_taskWithScheduledBreakDebuggerId = String16();
bool scheduledOOMBreak = m_scheduledOOMBreak;
bool scheduledAssertBreak = m_scheduledAssertBreak;
@@ -515,26 +535,26 @@ bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
}
void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
- int id, int parentId,
- bool createdByUser) {
+ int id, bool isBlackboxed) {
// Async task events from Promises are given misaligned pointers to prevent
// from overlapping with other Blink task identifiers.
void* task = reinterpret_cast<void*>(id * 2 + 1);
- void* parentTask =
- parentId ? reinterpret_cast<void*>(parentId * 2 + 1) : nullptr;
switch (type) {
- case v8::debug::kDebugPromiseCreated:
- asyncTaskCreatedForStack(task, parentTask);
- if (createdByUser && parentTask) asyncTaskCandidateForStepping(task);
- break;
- case v8::debug::kDebugEnqueueAsyncFunction:
+ case v8::debug::kDebugAsyncFunctionPromiseCreated:
asyncTaskScheduledForStack("async function", task, true);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
- case v8::debug::kDebugEnqueuePromiseResolve:
- asyncTaskScheduledForStack("Promise.resolve", task, true);
+ case v8::debug::kDebugPromiseThen:
+ asyncTaskScheduledForStack("Promise.then", task, false);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
- case v8::debug::kDebugEnqueuePromiseReject:
- asyncTaskScheduledForStack("Promise.reject", task, true);
+ case v8::debug::kDebugPromiseCatch:
+ asyncTaskScheduledForStack("Promise.catch", task, false);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ break;
+ case v8::debug::kDebugPromiseFinally:
+ asyncTaskScheduledForStack("Promise.finally", task, false);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugWillHandle:
asyncTaskStartedForStack(task);
@@ -548,15 +568,12 @@ void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
}
std::shared_ptr<AsyncStackTrace> V8Debugger::currentAsyncParent() {
- // TODO(kozyatinskiy): implement creation chain as parent without hack.
- if (!m_currentAsyncCreation.empty() && m_currentAsyncCreation.back()) {
- return m_currentAsyncCreation.back();
- }
return m_currentAsyncParent.empty() ? nullptr : m_currentAsyncParent.back();
}
-std::shared_ptr<AsyncStackTrace> V8Debugger::currentAsyncCreation() {
- return nullptr;
+V8StackTraceId V8Debugger::currentExternalParent() {
+ return m_currentExternalParent.empty() ? V8StackTraceId()
+ : m_currentExternalParent.back();
}
v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
@@ -723,22 +740,79 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
}
-void V8Debugger::asyncTaskCreatedForStack(void* task, void* parentTask) {
- if (!m_maxAsyncCallStackDepth) return;
- if (parentTask) m_parentTask[task] = parentTask;
+std::shared_ptr<AsyncStackTrace> V8Debugger::stackTraceFor(
+ int contextGroupId, const V8StackTraceId& id) {
+ if (debuggerIdFor(contextGroupId) != id.debugger_id) return nullptr;
+ auto it = m_storedStackTraces.find(id.id);
+ if (it == m_storedStackTraces.end()) return nullptr;
+ return it->second.lock();
+}
+
+V8StackTraceId V8Debugger::storeCurrentStackTrace(
+ const StringView& description) {
+ if (!m_maxAsyncCallStackDepth) return V8StackTraceId();
+
v8::HandleScope scope(m_isolate);
- std::shared_ptr<AsyncStackTrace> asyncCreation =
- AsyncStackTrace::capture(this, currentContextGroupId(), String16(),
+ int contextGroupId = currentContextGroupId();
+ if (!contextGroupId) return V8StackTraceId();
+
+ std::shared_ptr<AsyncStackTrace> asyncStack =
+ AsyncStackTrace::capture(this, contextGroupId, toString16(description),
V8StackTraceImpl::maxCallStackSizeToCapture);
- // Passing one as maxStackSize forces no async chain for the new stack.
- if (asyncCreation && !asyncCreation->isEmpty()) {
- m_asyncTaskCreationStacks[task] = asyncCreation;
- m_allAsyncStacks.push_back(std::move(asyncCreation));
- ++m_asyncStacksCount;
- collectOldAsyncStacksIfNeeded();
+ if (!asyncStack) return V8StackTraceId();
+
+ uintptr_t id = AsyncStackTrace::store(this, asyncStack);
+
+ m_allAsyncStacks.push_back(std::move(asyncStack));
+ ++m_asyncStacksCount;
+ collectOldAsyncStacksIfNeeded();
+
+ asyncTaskCandidateForStepping(reinterpret_cast<void*>(id));
+
+ return V8StackTraceId(id, debuggerIdFor(contextGroupId));
+}
+
+uintptr_t V8Debugger::storeStackTrace(
+ std::shared_ptr<AsyncStackTrace> asyncStack) {
+ uintptr_t id = ++m_lastStackTraceId;
+ m_storedStackTraces[id] = asyncStack;
+ return id;
+}
+
+void V8Debugger::externalAsyncTaskStarted(const V8StackTraceId& parent) {
+ if (!m_maxAsyncCallStackDepth || parent.IsInvalid()) return;
+ m_currentExternalParent.push_back(parent);
+ m_currentAsyncParent.emplace_back();
+ m_currentTasks.push_back(reinterpret_cast<void*>(parent.id));
+
+ if (m_breakRequested) return;
+ if (!m_taskWithScheduledBreakDebuggerId.isEmpty() &&
+ reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) == parent.id &&
+ m_taskWithScheduledBreakDebuggerId ==
+ debuggerIdToString(parent.debugger_id)) {
+ v8::debug::DebugBreak(m_isolate);
}
}
+void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
+ if (!m_maxAsyncCallStackDepth || m_currentExternalParent.empty()) return;
+ m_currentExternalParent.pop_back();
+ m_currentAsyncParent.pop_back();
+ DCHECK(m_currentTasks.back() == reinterpret_cast<void*>(parent.id));
+ m_currentTasks.pop_back();
+
+ if (m_taskWithScheduledBreakDebuggerId.isEmpty() ||
+ reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) != parent.id ||
+ m_taskWithScheduledBreakDebuggerId !=
+ debuggerIdToString(parent.debugger_id)) {
+ return;
+ }
+ m_taskWithScheduledBreak = nullptr;
+ m_taskWithScheduledBreakDebuggerId = String16();
+ if (m_breakRequested) return;
+ v8::debug::CancelDebugBreak(m_isolate);
+}
+
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
asyncTaskScheduledForStack(toString16(taskName), task, recurring);
@@ -756,8 +830,8 @@ void V8Debugger::asyncTaskStarted(void* task) {
}
void V8Debugger::asyncTaskFinished(void* task) {
- asyncTaskFinishedForStack(task);
asyncTaskFinishedForStepping(task);
+ asyncTaskFinishedForStack(task);
}
void V8Debugger::asyncTaskScheduledForStack(const String16& taskName,
@@ -780,8 +854,6 @@ void V8Debugger::asyncTaskCanceledForStack(void* task) {
if (!m_maxAsyncCallStackDepth) return;
m_asyncTaskStacks.erase(task);
m_recurringTasks.erase(task);
- m_parentTask.erase(task);
- m_asyncTaskCreationStacks.erase(task);
}
void V8Debugger::asyncTaskStartedForStack(void* task) {
@@ -794,26 +866,13 @@ void V8Debugger::asyncTaskStartedForStack(void* task) {
// <-- async stack requested here -->
// - asyncTaskFinished
m_currentTasks.push_back(task);
- auto parentIt = m_parentTask.find(task);
- AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(
- parentIt == m_parentTask.end() ? task : parentIt->second);
+ AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
if (stackIt != m_asyncTaskStacks.end()) {
m_currentAsyncParent.push_back(stackIt->second.lock());
} else {
m_currentAsyncParent.emplace_back();
}
- auto itCreation = m_asyncTaskCreationStacks.find(task);
- if (itCreation != m_asyncTaskCreationStacks.end()) {
- m_currentAsyncCreation.push_back(itCreation->second.lock());
- // TODO(kozyatinskiy): implement it without hack.
- if (m_currentAsyncParent.back()) {
- m_currentAsyncCreation.back()->setDescription(
- m_currentAsyncParent.back()->description());
- m_currentAsyncParent.back().reset();
- }
- } else {
- m_currentAsyncCreation.emplace_back();
- }
+ m_currentExternalParent.emplace_back();
}
void V8Debugger::asyncTaskFinishedForStack(void* task) {
@@ -823,9 +882,8 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) {
DCHECK(m_currentTasks.back() == task);
m_currentTasks.pop_back();
- DCHECK(m_currentAsyncParent.size() == m_currentAsyncCreation.size());
m_currentAsyncParent.pop_back();
- m_currentAsyncCreation.pop_back();
+ m_currentExternalParent.pop_back();
if (m_recurringTasks.find(task) == m_recurringTasks.end()) {
asyncTaskCanceledForStack(task);
@@ -833,6 +891,12 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) {
}
void V8Debugger::asyncTaskCandidateForStepping(void* task) {
+ if (m_pauseOnAsyncCall) {
+ m_scheduledAsyncTask = task;
+ breakProgram(m_targetContextGroupId);
+ m_scheduledAsyncTask = nullptr;
+ return;
+ }
if (!m_stepIntoAsyncCallback) return;
DCHECK(m_targetContextGroupId);
if (currentContextGroupId() != m_targetContextGroupId) return;
@@ -844,19 +908,28 @@ void V8Debugger::asyncTaskCandidateForStepping(void* task) {
void V8Debugger::asyncTaskStartedForStepping(void* task) {
if (m_breakRequested) return;
- if (task != m_taskWithScheduledBreak) return;
- v8::debug::DebugBreak(m_isolate);
+ // TODO(kozyatinskiy): we should search task in async chain to support
+ // blackboxing.
+ if (m_taskWithScheduledBreakDebuggerId.isEmpty() &&
+ task == m_taskWithScheduledBreak) {
+ v8::debug::DebugBreak(m_isolate);
+ }
}
void V8Debugger::asyncTaskFinishedForStepping(void* task) {
- if (task != m_taskWithScheduledBreak) return;
+ if (!m_taskWithScheduledBreakDebuggerId.isEmpty() ||
+ task != m_taskWithScheduledBreak) {
+ return;
+ }
m_taskWithScheduledBreak = nullptr;
if (m_breakRequested) return;
v8::debug::CancelDebugBreak(m_isolate);
}
void V8Debugger::asyncTaskCanceledForStepping(void* task) {
- if (task != m_taskWithScheduledBreak) return;
+ if (!m_taskWithScheduledBreakDebuggerId.isEmpty() ||
+ task != m_taskWithScheduledBreak)
+ return;
m_taskWithScheduledBreak = nullptr;
}
@@ -864,10 +937,8 @@ void V8Debugger::allAsyncTasksCanceled() {
m_asyncTaskStacks.clear();
m_recurringTasks.clear();
m_currentAsyncParent.clear();
- m_currentAsyncCreation.clear();
+ m_currentExternalParent.clear();
m_currentTasks.clear();
- m_parentTask.clear();
- m_asyncTaskCreationStacks.clear();
m_framesCache.clear();
m_allAsyncStacks.clear();
@@ -918,7 +989,7 @@ void V8Debugger::collectOldAsyncStacksIfNeeded() {
--m_asyncStacksCount;
}
cleanupExpiredWeakPointers(m_asyncTaskStacks);
- cleanupExpiredWeakPointers(m_asyncTaskCreationStacks);
+ cleanupExpiredWeakPointers(m_storedStackTraces);
for (auto it = m_recurringTasks.begin(); it != m_recurringTasks.end();) {
if (m_asyncTaskStacks.find(*it) == m_asyncTaskStacks.end()) {
it = m_recurringTasks.erase(it);
@@ -926,15 +997,6 @@ void V8Debugger::collectOldAsyncStacksIfNeeded() {
++it;
}
}
- for (auto it = m_parentTask.begin(); it != m_parentTask.end();) {
- if (m_asyncTaskCreationStacks.find(it->second) ==
- m_asyncTaskCreationStacks.end() &&
- m_asyncTaskStacks.find(it->second) == m_asyncTaskStacks.end()) {
- it = m_parentTask.erase(it);
- } else {
- ++it;
- }
- }
cleanupExpiredWeakPointers(m_framesCache);
}
@@ -963,12 +1025,29 @@ void V8Debugger::setMaxAsyncTaskStacksForTest(int limit) {
m_maxAsyncCallStacks = limit;
}
+std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(int contextGroupId) {
+ auto it = m_contextGroupIdToDebuggerId.find(contextGroupId);
+ if (it != m_contextGroupIdToDebuggerId.end()) return it->second;
+ std::pair<int64_t, int64_t> debuggerId(
+ v8::debug::GetNextRandomInt64(m_isolate),
+ v8::debug::GetNextRandomInt64(m_isolate));
+ m_contextGroupIdToDebuggerId.insert(
+ it, std::make_pair(contextGroupId, debuggerId));
+ m_serializedDebuggerIdToDebuggerId.insert(
+ std::make_pair(debuggerIdToString(debuggerId), debuggerId));
+ return debuggerId;
+}
+
+std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(
+ const String16& serializedDebuggerId) {
+ auto it = m_serializedDebuggerIdToDebuggerId.find(serializedDebuggerId);
+ if (it != m_serializedDebuggerIdToDebuggerId.end()) return it->second;
+ return std::make_pair(0, 0);
+}
+
void V8Debugger::dumpAsyncTaskStacksStateForTest() {
fprintf(stdout, "Async stacks count: %d\n", m_asyncStacksCount);
fprintf(stdout, "Scheduled async tasks: %zu\n", m_asyncTaskStacks.size());
- fprintf(stdout, "Created async tasks: %zu\n",
- m_asyncTaskCreationStacks.size());
- fprintf(stdout, "Async tasks with parent: %zu\n", m_parentTask.size());
fprintf(stdout, "Recurring async tasks: %zu\n", m_recurringTasks.size());
fprintf(stdout, "\n");
}
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 945b2fd115..455bb5952d 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -6,6 +6,7 @@
#define V8_INSPECTOR_V8DEBUGGER_H_
#include <list>
+#include <unordered_map>
#include <vector>
#include "src/base/macros.h"
@@ -26,6 +27,7 @@ class V8Debugger;
class V8DebuggerAgentImpl;
class V8InspectorImpl;
class V8StackTraceImpl;
+struct V8StackTraceId;
using protocol::Response;
using ScheduleStepIntoAsyncCallback =
@@ -49,12 +51,14 @@ class V8Debugger : public v8::debug::DebugDelegate {
void breakProgramOnAssert(int targetContextGroupId);
void setPauseOnNextStatement(bool, int targetContextGroupId);
- void stepIntoStatement(int targetContextGroupId);
+ void stepIntoStatement(int targetContextGroupId, bool breakOnAsyncCall);
void stepOverStatement(int targetContextGroupId);
void stepOutOfFunction(int targetContextGroupId);
void scheduleStepIntoAsync(
std::unique_ptr<ScheduleStepIntoAsyncCallback> callback,
int targetContextGroupId);
+ void pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
+ const String16& debuggerId);
Response continueToLocation(int targetContextGroupId,
V8DebuggerScript* script,
@@ -77,7 +81,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
void setAsyncCallStackDepth(V8DebuggerAgentImpl*, int);
std::shared_ptr<AsyncStackTrace> currentAsyncParent();
- std::shared_ptr<AsyncStackTrace> currentAsyncCreation();
+ V8StackTraceId currentExternalParent();
std::shared_ptr<StackFrame> symbolize(v8::Local<v8::StackFrame> v8Frame);
@@ -97,6 +101,12 @@ class V8Debugger : public v8::debug::DebugDelegate {
void asyncTaskFinished(void* task);
void allAsyncTasksCanceled();
+ V8StackTraceId storeCurrentStackTrace(const StringView& description);
+ void externalAsyncTaskStarted(const V8StackTraceId& parent);
+ void externalAsyncTaskFinished(const V8StackTraceId& parent);
+
+ uintptr_t storeStackTrace(std::shared_ptr<AsyncStackTrace> stack);
+
void muteScriptParsedEvents();
void unmuteScriptParsedEvents();
@@ -107,6 +117,14 @@ class V8Debugger : public v8::debug::DebugDelegate {
void setMaxAsyncTaskStacksForTest(int limit);
void dumpAsyncTaskStacksStateForTest();
+ void* scheduledAsyncTask() { return m_scheduledAsyncTask; }
+
+ std::pair<int64_t, int64_t> debuggerIdFor(int contextGroupId);
+ std::pair<int64_t, int64_t> debuggerIdFor(
+ const String16& serializedDebuggerId);
+ std::shared_ptr<AsyncStackTrace> stackTraceFor(int contextGroupId,
+ const V8StackTraceId& id);
+
private:
void clearContinueToLocation();
bool shouldContinueToCurrentLocation();
@@ -131,7 +149,6 @@ class V8Debugger : public v8::debug::DebugDelegate {
v8::MaybeLocal<v8::Value> generatorScopes(v8::Local<v8::Context>,
v8::Local<v8::Value>);
- void asyncTaskCreatedForStack(void* task, void* parentTask);
void asyncTaskScheduledForStack(const String16& taskName, void* task,
bool recurring);
void asyncTaskCanceledForStack(void* task);
@@ -145,7 +162,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
// v8::debug::DebugEventListener implementation.
void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
- int parentId, bool createdByUser) override;
+ bool isBlackboxed) override;
void ScriptCompiled(v8::Local<v8::debug::Script> script, bool is_live_edited,
bool has_compile_error) override;
void BreakProgramRequested(
@@ -177,31 +194,42 @@ class V8Debugger : public v8::debug::DebugDelegate {
using AsyncTaskToStackTrace =
protocol::HashMap<void*, std::weak_ptr<AsyncStackTrace>>;
AsyncTaskToStackTrace m_asyncTaskStacks;
- AsyncTaskToStackTrace m_asyncTaskCreationStacks;
protocol::HashSet<void*> m_recurringTasks;
- protocol::HashMap<void*, void*> m_parentTask;
int m_maxAsyncCallStacks;
int m_maxAsyncCallStackDepth;
std::vector<void*> m_currentTasks;
std::vector<std::shared_ptr<AsyncStackTrace>> m_currentAsyncParent;
- std::vector<std::shared_ptr<AsyncStackTrace>> m_currentAsyncCreation;
+ std::vector<V8StackTraceId> m_currentExternalParent;
void collectOldAsyncStacksIfNeeded();
int m_asyncStacksCount = 0;
// V8Debugger owns all the async stacks, while most of the other references
// are weak, which allows to collect some stacks when there are too many.
std::list<std::shared_ptr<AsyncStackTrace>> m_allAsyncStacks;
- std::map<int, std::weak_ptr<StackFrame>> m_framesCache;
+ std::unordered_map<int, std::weak_ptr<StackFrame>> m_framesCache;
protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
void* m_taskWithScheduledBreak = nullptr;
+ String16 m_taskWithScheduledBreakDebuggerId;
std::unique_ptr<ScheduleStepIntoAsyncCallback> m_stepIntoAsyncCallback;
bool m_breakRequested = false;
v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
+ bool m_pauseOnAsyncCall = false;
+ void* m_scheduledAsyncTask = nullptr;
+
+ using StackTraceIdToStackTrace =
+ protocol::HashMap<uintptr_t, std::weak_ptr<AsyncStackTrace>>;
+ StackTraceIdToStackTrace m_storedStackTraces;
+ uintptr_t m_lastStackTraceId = 0;
+
+ protocol::HashMap<int, std::pair<int64_t, int64_t>>
+ m_contextGroupIdToDebuggerId;
+ protocol::HashMap<String16, std::pair<int64_t, int64_t>>
+ m_serializedDebuggerIdToDebuggerId;
WasmTranslation m_wasmTranslation;
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index b3e3d11f51..8af3edf7e1 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -363,17 +363,24 @@ buildSampingHeapProfileNode(const v8::AllocationProfile::Node* node) {
Response V8HeapProfilerAgentImpl::stopSampling(
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
+ Response result = getSamplingProfile(profile);
+ if (result.isSuccess()) {
+ m_isolate->GetHeapProfiler()->StopSamplingHeapProfiler();
+ m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
+ false);
+ }
+ return result;
+}
+
+Response V8HeapProfilerAgentImpl::getSamplingProfile(
+ std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- if (!profiler) return Response::Error("Cannot access v8 heap profiler");
v8::HandleScope scope(
- m_isolate); // Allocation profile contains Local handles.
+ m_isolate); // v8::AllocationProfile contains Local handles.
std::unique_ptr<v8::AllocationProfile> v8Profile(
profiler->GetAllocationProfile());
- profiler->StopSamplingHeapProfiler();
- m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
- false);
if (!v8Profile)
- return Response::Error("Cannot access v8 sampled heap profile.");
+ return Response::Error("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
*profile = protocol::HeapProfiler::SamplingHeapProfile::create()
.setHead(buildSampingHeapProfileNode(root))
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index e0e244715f..7491a80f10 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -46,6 +46,8 @@ class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
Response startSampling(Maybe<double> samplingInterval) override;
Response stopSampling(
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
+ Response getSamplingProfile(
+ std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
private:
void startTrackingHeapObjectsInternal(bool trackAllocations);
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index b970087917..ef978ceda3 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -5,6 +5,7 @@
#include "src/inspector/v8-injected-script-host.h"
#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger.h"
@@ -80,6 +81,9 @@ v8::Local<v8::Object> V8InjectedScriptHost::create(
setFunctionProperty(context, injectedScriptHost, "proxyTargetValue",
V8InjectedScriptHost::proxyTargetValueCallback,
debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "nativeAccessorDescriptor",
+ V8InjectedScriptHost::nativeAccessorDescriptorCallback,
+ debuggerExternal);
createDataProperty(context, injectedScriptHost,
toV8StringInternalized(isolate, "keys"),
v8::debug::GetBuiltin(isolate, v8::debug::kObjectKeys));
@@ -337,4 +341,37 @@ void V8InjectedScriptHost::proxyTargetValueCallback(
info.GetReturnValue().Set(target);
}
+void V8InjectedScriptHost::nativeAccessorDescriptorCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ if (info.Length() != 2 || !info[0]->IsObject() || !info[1]->IsName()) {
+ info.GetReturnValue().Set(v8::Undefined(isolate));
+ return;
+ }
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ int flags = v8::debug::GetNativeAccessorDescriptor(
+ context, v8::Local<v8::Object>::Cast(info[0]),
+ v8::Local<v8::Name>::Cast(info[1]));
+ if (flags == static_cast<int>(v8::debug::NativeAccessorType::None)) {
+ info.GetReturnValue().Set(v8::Undefined(isolate));
+ return;
+ }
+
+ bool isBuiltin =
+ flags & static_cast<int>(v8::debug::NativeAccessorType::IsBuiltin);
+ bool hasGetter =
+ flags & static_cast<int>(v8::debug::NativeAccessorType::HasGetter);
+ bool hasSetter =
+ flags & static_cast<int>(v8::debug::NativeAccessorType::HasSetter);
+ v8::Local<v8::Object> result = v8::Object::New(isolate);
+ result->SetPrototype(context, v8::Null(isolate)).ToChecked();
+ createDataProperty(context, result, toV8String(isolate, "isBuiltin"),
+ v8::Boolean::New(isolate, isBuiltin));
+ createDataProperty(context, result, toV8String(isolate, "hasGetter"),
+ v8::Boolean::New(isolate, hasGetter));
+ createDataProperty(context, result, toV8String(isolate, "hasSetter"),
+ v8::Boolean::New(isolate, hasSetter));
+ info.GetReturnValue().Set(result);
+}
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
index 76a77e5eee..491a157ea8 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -42,6 +42,8 @@ class V8InjectedScriptHost {
static void bindCallback(const v8::FunctionCallbackInfo<v8::Value>&);
static void proxyTargetValueCallback(
const v8::FunctionCallbackInfo<v8::Value>&);
+ static void nativeAccessorDescriptorCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 448808cd15..a29f07b0bf 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -287,6 +287,19 @@ std::unique_ptr<V8StackTrace> V8InspectorImpl::captureStackTrace(
return m_debugger->captureStackTrace(fullStack);
}
+V8StackTraceId V8InspectorImpl::storeCurrentStackTrace(
+ const StringView& description) {
+ return m_debugger->storeCurrentStackTrace(description);
+}
+
+void V8InspectorImpl::externalAsyncTaskStarted(const V8StackTraceId& parent) {
+ m_debugger->externalAsyncTaskStarted(parent);
+}
+
+void V8InspectorImpl::externalAsyncTaskFinished(const V8StackTraceId& parent) {
+ m_debugger->externalAsyncTaskFinished(parent);
+}
+
void V8InspectorImpl::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
if (!task) return;
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index fa9fdcf1ee..92e7b21960 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -97,6 +97,10 @@ class V8InspectorImpl : public V8Inspector {
void asyncTaskFinished(void* task) override;
void allAsyncTasksCanceled() override;
+ V8StackTraceId storeCurrentStackTrace(const StringView& description) override;
+ void externalAsyncTaskStarted(const V8StackTraceId& parent) override;
+ void externalAsyncTaskFinished(const V8StackTraceId& parent) override;
+
unsigned nextExceptionId() { return ++m_lastExceptionId; }
void enableStackCapturingIfNeeded();
void disableStackCapturingIfNeeded();
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 12ddd96b58..a8aaa1158b 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -32,43 +32,39 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
void calculateAsyncChain(V8Debugger* debugger, int contextGroupId,
std::shared_ptr<AsyncStackTrace>* asyncParent,
- std::shared_ptr<AsyncStackTrace>* asyncCreation,
- int* maxAsyncDepth) {
+ V8StackTraceId* externalParent, int* maxAsyncDepth) {
*asyncParent = debugger->currentAsyncParent();
- *asyncCreation = debugger->currentAsyncCreation();
+ *externalParent = debugger->currentExternalParent();
+ DCHECK(externalParent->IsInvalid() || !*asyncParent);
if (maxAsyncDepth) *maxAsyncDepth = debugger->maxAsyncCallChainDepth();
- DCHECK(!*asyncParent || !*asyncCreation ||
- (*asyncParent)->contextGroupId() ==
- (*asyncCreation)->contextGroupId());
// Do not accidentally append async call chain from another group. This should
// not happen if we have proper instrumentation, but let's double-check to be
// safe.
if (contextGroupId && *asyncParent &&
(*asyncParent)->contextGroupId() != contextGroupId) {
asyncParent->reset();
- asyncCreation->reset();
+ *externalParent = V8StackTraceId();
if (maxAsyncDepth) *maxAsyncDepth = 0;
return;
}
- // Only the top stack in the chain may be empty and doesn't contain creation
- // stack, so ensure that second stack is non-empty (it's the top of appended
- // chain).
- if (*asyncParent && !(*asyncCreation) && !(*asyncParent)->creation().lock() &&
- (*asyncParent)->isEmpty()) {
+ // Only the top stack in the chain may be empty, so ensure that second stack
+ // is non-empty (it's the top of appended chain).
+ if (*asyncParent && (*asyncParent)->isEmpty()) {
*asyncParent = (*asyncParent)->parent().lock();
}
}
std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
+ V8Debugger* debugger,
const std::vector<std::shared_ptr<StackFrame>>& frames,
const String16& description,
const std::shared_ptr<AsyncStackTrace>& asyncParent,
- const std::shared_ptr<AsyncStackTrace>& asyncCreation, int maxAsyncDepth) {
+ const V8StackTraceId& externalParent, int maxAsyncDepth) {
if (asyncParent && frames.empty() &&
- description == asyncParent->description() && !asyncCreation) {
- return asyncParent->buildInspectorObject(nullptr, maxAsyncDepth);
+ description == asyncParent->description()) {
+ return asyncParent->buildInspectorObject(debugger, maxAsyncDepth);
}
std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>>
@@ -81,15 +77,38 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
.setCallFrames(std::move(inspectorFrames))
.build();
if (!description.isEmpty()) stackTrace->setDescription(description);
- if (asyncParent && maxAsyncDepth > 0) {
- stackTrace->setParent(asyncParent->buildInspectorObject(asyncCreation.get(),
- maxAsyncDepth - 1));
+ if (asyncParent) {
+ if (maxAsyncDepth > 0) {
+ stackTrace->setParent(
+ asyncParent->buildInspectorObject(debugger, maxAsyncDepth - 1));
+ } else if (debugger) {
+ stackTrace->setParentId(
+ protocol::Runtime::StackTraceId::create()
+ .setId(stackTraceIdToString(
+ AsyncStackTrace::store(debugger, asyncParent)))
+ .build());
+ }
+ }
+ if (!externalParent.IsInvalid()) {
+ stackTrace->setParentId(
+ protocol::Runtime::StackTraceId::create()
+ .setId(stackTraceIdToString(externalParent.id))
+ .setDebuggerId(debuggerIdToString(externalParent.debugger_id))
+ .build());
}
return stackTrace;
}
} // namespace
+V8StackTraceId::V8StackTraceId() : id(0), debugger_id(std::make_pair(0, 0)) {}
+
+V8StackTraceId::V8StackTraceId(uintptr_t id,
+ const std::pair<int64_t, int64_t> debugger_id)
+ : id(id), debugger_id(debugger_id) {}
+
+bool V8StackTraceId::IsInvalid() const { return !id; }
+
StackFrame::StackFrame(v8::Local<v8::StackFrame> v8Frame)
: m_functionName(toProtocolString(v8Frame->GetFunctionName())),
m_scriptId(String16::fromInteger(v8Frame->GetScriptId())),
@@ -155,12 +174,13 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
int maxAsyncDepth = 0;
std::shared_ptr<AsyncStackTrace> asyncParent;
- std::shared_ptr<AsyncStackTrace> asyncCreation;
- calculateAsyncChain(debugger, contextGroupId, &asyncParent, &asyncCreation,
+ V8StackTraceId externalParent;
+ calculateAsyncChain(debugger, contextGroupId, &asyncParent, &externalParent,
&maxAsyncDepth);
- if (frames.empty() && !asyncCreation && !asyncParent) return nullptr;
+ if (frames.empty() && !asyncParent && externalParent.IsInvalid())
+ return nullptr;
return std::unique_ptr<V8StackTraceImpl>(new V8StackTraceImpl(
- std::move(frames), maxAsyncDepth, asyncParent, asyncCreation));
+ std::move(frames), maxAsyncDepth, asyncParent, externalParent));
}
// static
@@ -181,18 +201,17 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
V8StackTraceImpl::V8StackTraceImpl(
std::vector<std::shared_ptr<StackFrame>> frames, int maxAsyncDepth,
std::shared_ptr<AsyncStackTrace> asyncParent,
- std::shared_ptr<AsyncStackTrace> asyncCreation)
+ const V8StackTraceId& externalParent)
: m_frames(std::move(frames)),
m_maxAsyncDepth(maxAsyncDepth),
m_asyncParent(asyncParent),
- m_asyncCreation(asyncCreation) {}
+ m_externalParent(externalParent) {}
V8StackTraceImpl::~V8StackTraceImpl() {}
std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
- return std::unique_ptr<V8StackTrace>(
- new V8StackTraceImpl(m_frames, 0, std::shared_ptr<AsyncStackTrace>(),
- std::shared_ptr<AsyncStackTrace>()));
+ return std::unique_ptr<V8StackTrace>(new V8StackTraceImpl(
+ m_frames, 0, std::shared_ptr<AsyncStackTrace>(), V8StackTraceId()));
}
bool V8StackTraceImpl::isEmpty() const { return m_frames.empty(); }
@@ -218,14 +237,15 @@ StringView V8StackTraceImpl::topFunctionName() const {
}
std::unique_ptr<protocol::Runtime::StackTrace>
-V8StackTraceImpl::buildInspectorObjectImpl() const {
- return buildInspectorObjectCommon(m_frames, String16(), m_asyncParent.lock(),
- m_asyncCreation.lock(), m_maxAsyncDepth);
+V8StackTraceImpl::buildInspectorObjectImpl(V8Debugger* debugger) const {
+ return buildInspectorObjectCommon(debugger, m_frames, String16(),
+ m_asyncParent.lock(), m_externalParent,
+ m_maxAsyncDepth);
}
std::unique_ptr<protocol::Runtime::API::StackTrace>
V8StackTraceImpl::buildInspectorObject() const {
- return buildInspectorObjectImpl();
+ return buildInspectorObjectImpl(nullptr);
}
std::unique_ptr<StringBuffer> V8StackTraceImpl::toString() const {
@@ -307,18 +327,18 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
}
std::shared_ptr<AsyncStackTrace> asyncParent;
- std::shared_ptr<AsyncStackTrace> asyncCreation;
- calculateAsyncChain(debugger, contextGroupId, &asyncParent, &asyncCreation,
+ V8StackTraceId externalParent;
+ calculateAsyncChain(debugger, contextGroupId, &asyncParent, &externalParent,
nullptr);
- if (frames.empty() && !asyncCreation && !asyncParent) return nullptr;
+ if (frames.empty() && !asyncParent && externalParent.IsInvalid())
+ return nullptr;
// When async call chain is empty but doesn't contain useful schedule stack
- // and parent async call chain contains creationg stack but doesn't
- // synchronous we can merge them together.
- // e.g. Promise ThenableJob.
+  // but the parent async chain with the same description does, we can merge
+  // them together, e.g. for a Promise ThenableJob.
if (asyncParent && frames.empty() &&
- asyncParent->m_description == description && !asyncCreation) {
+ asyncParent->m_description == description) {
return asyncParent;
}
@@ -328,42 +348,46 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
}
return std::shared_ptr<AsyncStackTrace>(
new AsyncStackTrace(contextGroupId, description, std::move(frames),
- asyncParent, asyncCreation));
+ asyncParent, externalParent));
}
AsyncStackTrace::AsyncStackTrace(
int contextGroupId, const String16& description,
std::vector<std::shared_ptr<StackFrame>> frames,
std::shared_ptr<AsyncStackTrace> asyncParent,
- std::shared_ptr<AsyncStackTrace> asyncCreation)
+ const V8StackTraceId& externalParent)
: m_contextGroupId(contextGroupId),
+ m_id(0),
m_description(description),
m_frames(std::move(frames)),
m_asyncParent(asyncParent),
- m_asyncCreation(asyncCreation) {
+ m_externalParent(externalParent) {
DCHECK(m_contextGroupId);
}
std::unique_ptr<protocol::Runtime::StackTrace>
-AsyncStackTrace::buildInspectorObject(AsyncStackTrace* asyncCreation,
+AsyncStackTrace::buildInspectorObject(V8Debugger* debugger,
int maxAsyncDepth) const {
- return buildInspectorObjectCommon(m_frames, m_description,
- m_asyncParent.lock(),
- m_asyncCreation.lock(), maxAsyncDepth);
+ return buildInspectorObjectCommon(debugger, m_frames, m_description,
+ m_asyncParent.lock(), m_externalParent,
+ maxAsyncDepth);
}
int AsyncStackTrace::contextGroupId() const { return m_contextGroupId; }
+uintptr_t AsyncStackTrace::store(V8Debugger* debugger,
+ std::shared_ptr<AsyncStackTrace> stack) {
+ if (stack->m_id) return stack->m_id;
+ stack->m_id = debugger->storeStackTrace(stack);
+ return stack->m_id;
+}
+
const String16& AsyncStackTrace::description() const { return m_description; }
std::weak_ptr<AsyncStackTrace> AsyncStackTrace::parent() const {
return m_asyncParent;
}
-std::weak_ptr<AsyncStackTrace> AsyncStackTrace::creation() const {
- return m_asyncCreation;
-}
-
bool AsyncStackTrace::isEmpty() const { return m_frames.empty(); }
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 5ce051bd5c..b8314c8fc4 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -19,6 +19,7 @@ namespace v8_inspector {
class AsyncStackTrace;
class V8Debugger;
class WasmTranslation;
+struct V8StackTraceId;
class StackFrame {
public:
@@ -57,8 +58,8 @@ class V8StackTraceImpl : public V8StackTrace {
int maxStackSize);
~V8StackTraceImpl() override;
- std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl()
- const;
+ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl(
+ V8Debugger* debugger) const;
// V8StackTrace implementation.
// This method drops the async stack trace.
@@ -79,7 +80,7 @@ class V8StackTraceImpl : public V8StackTrace {
V8StackTraceImpl(std::vector<std::shared_ptr<StackFrame>> frames,
int maxAsyncDepth,
std::shared_ptr<AsyncStackTrace> asyncParent,
- std::shared_ptr<AsyncStackTrace> asyncCreation);
+ const V8StackTraceId& externalParent);
class StackFrameIterator {
public:
@@ -98,7 +99,7 @@ class V8StackTraceImpl : public V8StackTrace {
std::vector<std::shared_ptr<StackFrame>> m_frames;
int m_maxAsyncDepth;
std::weak_ptr<AsyncStackTrace> m_asyncParent;
- std::weak_ptr<AsyncStackTrace> m_asyncCreation;
+ V8StackTraceId m_externalParent;
DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
};
@@ -109,20 +110,17 @@ class AsyncStackTrace {
int contextGroupId,
const String16& description,
int maxStackSize);
+ static uintptr_t store(V8Debugger* debugger,
+ std::shared_ptr<AsyncStackTrace> stack);
std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObject(
- AsyncStackTrace* asyncCreation, int maxAsyncDepth) const;
+ V8Debugger* debugger, int maxAsyncDepth) const;
int contextGroupId() const;
const String16& description() const;
std::weak_ptr<AsyncStackTrace> parent() const;
- std::weak_ptr<AsyncStackTrace> creation() const;
bool isEmpty() const;
- void setDescription(const String16& description) {
- // TODO(kozyatinskiy): implement it without hack.
- m_description = description;
- }
const std::vector<std::shared_ptr<StackFrame>>& frames() const {
return m_frames;
}
@@ -131,14 +129,15 @@ class AsyncStackTrace {
AsyncStackTrace(int contextGroupId, const String16& description,
std::vector<std::shared_ptr<StackFrame>> frames,
std::shared_ptr<AsyncStackTrace> asyncParent,
- std::shared_ptr<AsyncStackTrace> asyncCreation);
+ const V8StackTraceId& externalParent);
int m_contextGroupId;
+ uintptr_t m_id;
String16 m_description;
std::vector<std::shared_ptr<StackFrame>> m_frames;
std::weak_ptr<AsyncStackTrace> m_asyncParent;
- std::weak_ptr<AsyncStackTrace> m_asyncCreation;
+ V8StackTraceId m_externalParent;
DISALLOW_COPY_AND_ASSIGN(AsyncStackTrace);
};
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index e86838a85a..f32369df36 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -76,7 +76,8 @@ protocol::Response toProtocolValue(v8::Local<v8::Context> context,
if (name->IsString()) {
v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
context, v8::Local<v8::String>::Cast(name));
- if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
+ if (hasRealNamedProperty.IsNothing() ||
+ !hasRealNamedProperty.FromJust())
continue;
}
v8::Local<v8::String> propertyName;
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 94ad2efc72..970a4ad3ad 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -29,7 +29,7 @@ void CallInterfaceDescriptorData::InitializePlatformIndependent(
param_count_ = parameter_count + extra_parameter_count;
machine_types_.reset(NewArray<MachineType>(param_count_));
for (int i = 0; i < param_count_; i++) {
- if (machine_types == NULL || i >= parameter_count) {
+ if (machine_types == nullptr || i >= parameter_count) {
machine_types_[i] = MachineType::AnyTagged();
} else {
machine_types_[i] = machine_types[i];
@@ -262,12 +262,6 @@ void StringCharCodeAtDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
-void StringCompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {LeftRegister(), RightRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void TypeConversionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ArgumentRegister()};
@@ -281,7 +275,7 @@ void TypeConversionStackParameterDescriptor::InitializePlatformSpecific(
void TypeConversionStackParameterDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformIndependent(data->register_param_count(), 1, NULL);
+ data->InitializePlatformIndependent(data->register_param_count(), 1, nullptr);
}
void MathPowTaggedDescriptor::InitializePlatformSpecific(
@@ -585,7 +579,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformIndependent(
void ApiCallbackDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kFunction, kCallData, kHolder, kApiFunctionAddress
+ // kTargetContext, kCallData, kHolder, kApiFunctionAddress
MachineType machine_types[] = {
MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::Pointer()};
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index e6cdf7fdd2..49c047333a 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -62,8 +62,6 @@ class PlatformInterfaceDescriptor;
V(StringAdd) \
V(StringCharAt) \
V(StringCharCodeAt) \
- V(StringCompare) \
- V(SubString) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
@@ -94,7 +92,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
void InitializePlatformSpecific(
int register_parameter_count, const Register* registers,
- PlatformInterfaceDescriptor* platform_descriptor = NULL);
+ PlatformInterfaceDescriptor* platform_descriptor = nullptr);
// if machine_types is null, then an array of size
// (parameter_count + extra_parameter_count) will be created with
@@ -121,11 +119,11 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
}
void RestrictAllocatableRegisters(const Register* registers, int num) {
- DCHECK(allocatable_registers_ == 0);
+ DCHECK_EQ(allocatable_registers_, 0);
for (int i = 0; i < num; ++i) {
allocatable_registers_ |= registers[i].bit();
}
- DCHECK(NumRegs(allocatable_registers_) > 0);
+ DCHECK_GT(NumRegs(allocatable_registers_), 0);
}
RegList allocatable_registers() const { return allocatable_registers_; }
@@ -163,7 +161,7 @@ class CallDescriptors {
class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
public:
- CallInterfaceDescriptor() : data_(NULL) {}
+ CallInterfaceDescriptor() : data_(nullptr) {}
virtual ~CallInterfaceDescriptor() {}
CallInterfaceDescriptor(Isolate* isolate, CallDescriptors::Key key)
@@ -210,7 +208,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
virtual void InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformIndependent(data->register_param_count(), 0, NULL);
+ data->InitializePlatformIndependent(data->register_param_count(), 0,
+ nullptr);
}
void Initialize(Isolate* isolate, CallDescriptors::Key key) {
@@ -256,7 +255,8 @@ static const int kMaxBuiltinRegisterParams = 5;
} \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
- data->InitializePlatformIndependent(kRegisterParams, kStackParams, NULL); \
+ data->InitializePlatformIndependent(kRegisterParams, kStackParams, \
+ nullptr); \
} \
name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
\
@@ -283,7 +283,7 @@ static const int kMaxBuiltinRegisterParams = 5;
protected: \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
- data->InitializePlatformIndependent(0, kParameterCount, NULL); \
+ data->InitializePlatformIndependent(0, kParameterCount, nullptr); \
} \
void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
override { \
@@ -739,22 +739,6 @@ class StringCharCodeAtDescriptor final : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
-class StringCompareDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kLeft, kRight)
- DECLARE_DESCRIPTOR(StringCompareDescriptor, CallInterfaceDescriptor)
-
- static const Register LeftRegister();
- static const Register RightRegister();
-};
-
-class SubStringDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kString, kFrom, kTo)
- DECLARE_DESCRIPTOR_WITH_STACK_ARGS(SubStringDescriptor,
- CallInterfaceDescriptor)
-};
-
class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
@@ -765,7 +749,7 @@ class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
class ApiCallbackDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFunction, kCallData, kHolder, kApiFunctionAddress)
+ DEFINE_PARAMETERS(kTargetContext, kCallData, kHolder, kApiFunctionAddress)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiCallbackDescriptor,
CallInterfaceDescriptor)
};
diff --git a/deps/v8/src/interpreter/block-coverage-builder.h b/deps/v8/src/interpreter/block-coverage-builder.h
index 150d46c49c..49b995a833 100644
--- a/deps/v8/src/interpreter/block-coverage-builder.h
+++ b/deps/v8/src/interpreter/block-coverage-builder.h
@@ -41,6 +41,19 @@ class BlockCoverageBuilder final : public ZoneObject {
return slot;
}
+ int AllocateNaryBlockCoverageSlot(NaryOperation* node, size_t index) {
+ NaryOperationSourceRanges* ranges =
+ static_cast<NaryOperationSourceRanges*>(source_range_map_->Find(node));
+ if (ranges == nullptr) return kNoCoverageArraySlot;
+
+ SourceRange range = ranges->GetRangeAtIndex(index);
+ if (range.IsEmpty()) return kNoCoverageArraySlot;
+
+ const int slot = static_cast<int>(slots_.size());
+ slots_.emplace_back(range);
+ return slot;
+ }
+
void IncrementBlockCounter(int coverage_array_slot) {
if (coverage_array_slot == kNoCoverageArraySlot) return;
builder_->IncBlockCounter(coverage_array_slot);
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 64b7a219c0..784bb14eb6 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
@@ -154,7 +155,7 @@ Runtime::FunctionId BytecodeArrayAccessor::GetRuntimeIdOperand(
int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kRuntimeId);
+ DCHECK_EQ(operand_type, OperandType::kRuntimeId);
uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
return static_cast<Runtime::FunctionId>(raw_id);
}
@@ -163,7 +164,7 @@ uint32_t BytecodeArrayAccessor::GetNativeContextIndexOperand(
int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kNativeContextIndex);
+ DCHECK_EQ(operand_type, OperandType::kNativeContextIndex);
return GetUnsignedOperand(operand_index, operand_type);
}
@@ -171,7 +172,7 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kIntrinsicId);
+ DCHECK_EQ(operand_type, OperandType::kIntrinsicId);
uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
return IntrinsicsHelper::ToRuntimeId(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
@@ -296,6 +297,7 @@ void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
while (current_->IsTheHole(isolate)) {
++table_offset_;
++index_;
+ if (table_offset_ >= table_end_) break;
current_ = accessor_->GetConstantAtIndex(table_offset_);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 42185feeca..d585e6dc33 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+
+class BytecodeArray;
+
namespace interpreter {
class BytecodeArrayAccessor;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 09789fe8b2..5be818eb2d 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -38,11 +38,11 @@ class RegisterTransferWriter final
};
BytecodeArrayBuilder::BytecodeArrayBuilder(
- Isolate* isolate, Zone* zone, int parameter_count, int locals_count,
- FunctionLiteral* literal,
+ Zone* zone, int parameter_count, int locals_count,
+ FeedbackVectorSpec* feedback_vector_spec,
SourcePositionTableBuilder::RecordingMode source_position_mode)
: zone_(zone),
- literal_(literal),
+ feedback_vector_spec_(feedback_vector_spec),
bytecode_generated_(false),
constant_array_builder_(zone),
handler_table_builder_(zone),
@@ -362,6 +362,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
case Token::Value::MOD:
OutputMod(reg, feedback_slot);
break;
+ case Token::Value::EXP:
+ OutputExp(reg, feedback_slot);
+ break;
case Token::Value::BIT_OR:
OutputBitwiseOr(reg, feedback_slot);
break;
@@ -404,6 +407,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperationSmiLiteral(
case Token::Value::MOD:
OutputModSmi(literal->value(), feedback_slot);
break;
+ case Token::Value::EXP:
+ OutputExpSmi(literal->value(), feedback_slot);
+ break;
case Token::Value::BIT_OR:
OutputBitwiseOrSmi(literal->value(), feedback_slot);
break;
@@ -493,6 +499,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
case Token::Value::GTE:
OutputTestGreaterThanOrEqual(reg, feedback_slot);
break;
+ case Token::Value::INSTANCEOF:
+ OutputTestInstanceOf(reg, feedback_slot);
+ break;
default:
UNREACHABLE();
}
@@ -505,9 +514,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
case Token::Value::EQ_STRICT:
OutputTestEqualStrictNoFeedback(reg);
break;
- case Token::Value::INSTANCEOF:
- OutputTestInstanceOf(reg);
- break;
case Token::Value::IN:
OutputTestIn(reg);
break;
@@ -549,7 +555,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareNil(Token::Value op,
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareTypeOf(
TestTypeOfFlags::LiteralFlag literal_flag) {
- DCHECK(literal_flag != TestTypeOfFlags::LiteralFlag::kOther);
+ DCHECK_NE(literal_flag, TestTypeOfFlags::LiteralFlag::kOther);
OutputTestTypeOf(TestTypeOfFlags::Encode(literal_flag));
return *this;
}
@@ -571,6 +577,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(double value) {
+ size_t entry = GetConstantPoolEntry(value);
+ OutputLdaConstant(entry);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
const AstRawString* raw_string) {
size_t entry = GetConstantPoolEntry(raw_string);
@@ -584,39 +596,22 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(const Scope* scope) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
- const AstValue* ast_value) {
- if (ast_value->IsSmi()) {
- return LoadLiteral(ast_value->AsSmi());
- } else if (ast_value->IsUndefined()) {
- return LoadUndefined();
- } else if (ast_value->IsTrue()) {
- return LoadTrue();
- } else if (ast_value->IsFalse()) {
- return LoadFalse();
- } else if (ast_value->IsNull()) {
- return LoadNull();
- } else if (ast_value->IsTheHole()) {
- return LoadTheHole();
- } else if (ast_value->IsString()) {
- return LoadLiteral(ast_value->AsString());
- } else if (ast_value->IsHeapNumber()) {
- size_t entry = GetConstantPoolEntry(ast_value);
- OutputLdaConstant(entry);
- return *this;
- } else {
- // This should be the only ast value type left.
- DCHECK(ast_value->IsSymbol());
- size_t entry;
- switch (ast_value->AsSymbol()) {
- case AstSymbol::kHomeObjectSymbol:
- entry = HomeObjectSymbolConstantPoolEntry();
- break;
- // No default case so that we get a warning if AstSymbol changes
- }
- OutputLdaConstant(entry);
- return *this;
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(AstBigInt bigint) {
+ size_t entry = GetConstantPoolEntry(bigint);
+ OutputLdaConstant(entry);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(AstSymbol symbol) {
+ size_t entry;
+ switch (symbol) {
+ case AstSymbol::kHomeObjectSymbol:
+ entry = HomeObjectSymbolConstantPoolEntry();
+ break;
+ // No default case so that we get a warning if AstSymbol changes
}
+ OutputLdaConstant(entry);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
@@ -692,14 +687,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
int feedback_slot,
TypeofMode typeof_mode) {
size_t name_index = GetConstantPoolEntry(name);
- // Ensure that typeof mode is in sync with the IC slot kind if the function
- // literal is available (not a unit test case).
- // TODO(ishell): check only in debug mode.
- if (literal_) {
- FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- CHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
- typeof_mode);
- }
+ // Ensure that typeof mode is in sync with the IC slot kind.
+ DCHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(
+ FeedbackVector::ToSlot(feedback_slot))),
+ typeof_mode);
if (typeof_mode == INSIDE_TYPEOF) {
OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
} else {
@@ -712,10 +703,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
const AstRawString* name, int feedback_slot, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
- if (language_mode == SLOPPY) {
+ if (language_mode == LanguageMode::kSloppy) {
OutputStaGlobalSloppy(name_index, feedback_slot);
} else {
- DCHECK_EQ(language_mode, STRICT);
+ DCHECK_EQ(language_mode, LanguageMode::kStrict);
OutputStaGlobalStrict(name_index, feedback_slot);
}
return *this;
@@ -841,16 +832,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CollectTypeProfile(int position) {
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
Register object, size_t name_index, int feedback_slot,
LanguageMode language_mode) {
-#if DEBUG
- // Ensure that language mode is in sync with the IC slot kind if the function
- // literal is available (not a unit test case).
- if (literal_) {
- FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- DCHECK_EQ(
- GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
- language_mode);
- }
-#endif
+ // Ensure that language mode is in sync with the IC slot kind.
+ DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(
+ FeedbackVector::ToSlot(feedback_slot))),
+ language_mode);
OutputStaNamedProperty(object, name_index, feedback_slot);
return *this;
}
@@ -865,15 +850,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
-#if DEBUG
- // Ensure that the store operation is in sync with the IC slot kind if
- // the function literal is available (not a unit test case).
- if (literal_) {
- FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
- feedback_vector_spec()->GetKind(slot));
- }
-#endif
+ // Ensure that the store operation is in sync with the IC slot kind.
+ DCHECK_EQ(
+ FeedbackSlotKind::kStoreOwnNamed,
+ feedback_vector_spec()->GetKind(FeedbackVector::ToSlot(feedback_slot)));
OutputStaNamedOwnProperty(object, name_index, feedback_slot);
return *this;
}
@@ -881,16 +861,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
-#if DEBUG
- // Ensure that language mode is in sync with the IC slot kind if the function
- // literal is available (not a unit test case).
- if (literal_) {
- FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- DCHECK_EQ(
- GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
- language_mode);
- }
-#endif
+ // Ensure that language mode is in sync with the IC slot kind.
+ DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(
+ FeedbackVector::ToSlot(feedback_slot))),
+ language_mode);
OutputStaKeyedProperty(object, key, feedback_slot);
return *this;
}
@@ -901,6 +875,20 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreHomeObjectProperty(
return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreClassFieldsInitializer(
+ Register constructor, int feedback_slot) {
+ size_t name_index = ClassFieldsSymbolConstantPoolEntry();
+ return StoreNamedProperty(constructor, name_index, feedback_slot,
+ LanguageMode::kStrict);
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadClassFieldsInitializer(
+ Register constructor, int feedback_slot) {
+ size_t name_index = ClassFieldsSymbolConstantPoolEntry();
+ OutputLdaNamedProperty(constructor, name_index, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
size_t shared_function_info_entry, int slot, int flags) {
OutputCreateClosure(shared_function_info_entry, slot, flags);
@@ -1020,6 +1008,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumber(int feedback_slot) {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumeric(int feedback_slot) {
+ OutputToNumeric(feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
// Flush the register optimizer when binding a label to ensure all
// expected registers are valid when jumping to this label.
@@ -1380,7 +1373,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ConstructWithSpread(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, RegisterList args) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+ DCHECK_LE(Bytecodes::SizeForUnsignedOperand(function_id),
+ OperandSize::kShort);
if (IntrinsicsHelper::IsSupported(function_id)) {
IntrinsicsHelper::IntrinsicId intrinsic_id =
IntrinsicsHelper::FromRuntimeId(function_id);
@@ -1407,7 +1401,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
Runtime::FunctionId function_id, RegisterList args,
RegisterList return_pair) {
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+ DCHECK_LE(Bytecodes::SizeForUnsignedOperand(function_id),
+ OperandSize::kShort);
DCHECK_EQ(2, return_pair.register_count());
OutputCallRuntimeForPair(static_cast<uint16_t>(function_id), args,
args.register_count(), return_pair);
@@ -1428,10 +1423,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
- if (language_mode == SLOPPY) {
+ if (language_mode == LanguageMode::kSloppy) {
OutputDeletePropertySloppy(object);
} else {
- DCHECK_EQ(language_mode, STRICT);
+ DCHECK_EQ(language_mode, LanguageMode::kStrict);
OutputDeletePropertyStrict(object);
}
return *this;
@@ -1442,15 +1437,18 @@ size_t BytecodeArrayBuilder::GetConstantPoolEntry(
return constant_array_builder()->Insert(raw_string);
}
-size_t BytecodeArrayBuilder::GetConstantPoolEntry(const AstValue* heap_number) {
- DCHECK(heap_number->IsHeapNumber());
- return constant_array_builder()->Insert(heap_number);
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(AstBigInt bigint) {
+ return constant_array_builder()->Insert(bigint);
}
size_t BytecodeArrayBuilder::GetConstantPoolEntry(const Scope* scope) {
return constant_array_builder()->Insert(scope);
}
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(double number) {
+ return constant_array_builder()->Insert(number);
+}
+
#define ENTRY_GETTER(NAME, ...) \
size_t BytecodeArrayBuilder::NAME##ConstantPoolEntry() { \
return constant_array_builder()->Insert##NAME(); \
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index e536b98cda..4063791a18 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -31,12 +31,11 @@ class BytecodeRegisterOptimizer;
class BytecodeJumpTable;
class Register;
-class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
- : public NON_EXPORTED_BASE(ZoneObject) {
+class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
public:
BytecodeArrayBuilder(
- Isolate* isolate, Zone* zone, int parameter_count, int locals_count,
- FunctionLiteral* literal = nullptr,
+ Zone* zone, int parameter_count, int locals_count,
+ FeedbackVectorSpec* feedback_vector_spec = nullptr,
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
@@ -71,9 +70,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
+ BytecodeArrayBuilder& LoadLiteral(double value);
BytecodeArrayBuilder& LoadLiteral(const AstRawString* raw_string);
BytecodeArrayBuilder& LoadLiteral(const Scope* scope);
- BytecodeArrayBuilder& LoadLiteral(const AstValue* ast_value);
+ BytecodeArrayBuilder& LoadLiteral(AstBigInt bigint);
+ BytecodeArrayBuilder& LoadLiteral(AstSymbol symbol);
BytecodeArrayBuilder& LoadUndefined();
BytecodeArrayBuilder& LoadNull();
BytecodeArrayBuilder& LoadTheHole();
@@ -165,6 +166,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
int feedback_slot,
LanguageMode language_mode);
+ // Store the class fields property. The initializer to be stored should
+ // be in the accumulator.
+ BytecodeArrayBuilder& StoreClassFieldsInitializer(Register constructor,
+ int feedback_slot);
+
+ // Load class fields property.
+ BytecodeArrayBuilder& LoadClassFieldsInitializer(Register constructor,
+ int feedback_slot);
+
// Lookup the variable with |name|.
BytecodeArrayBuilder& LoadLookupSlot(const AstRawString* name,
TypeofMode typeof_mode);
@@ -361,6 +371,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Converts accumulator and stores result back in accumulator.
BytecodeArrayBuilder& ToNumber(int feedback_slot);
+ BytecodeArrayBuilder& ToNumeric(int feedback_slot);
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -439,8 +450,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Gets a constant pool entry.
size_t GetConstantPoolEntry(const AstRawString* raw_string);
- size_t GetConstantPoolEntry(const AstValue* heap_number);
+ size_t GetConstantPoolEntry(AstBigInt bigint);
size_t GetConstantPoolEntry(const Scope* scope);
+ size_t GetConstantPoolEntry(double number);
#define ENTRY_GETTER(NAME, ...) size_t NAME##ConstantPoolEntry();
SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
#undef ENTRY_GETTER
@@ -458,11 +470,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
}
void SetExpressionPosition(Expression* expr) {
- if (expr->position() == kNoSourcePosition) return;
+ SetExpressionPosition(expr->position());
+ }
+
+ void SetExpressionPosition(int position) {
+ if (position == kNoSourcePosition) return;
if (!latest_source_info_.is_statement()) {
// Ensure the current expression position is overwritten with the
// latest value.
- latest_source_info_.MakeExpressionPosition(expr->position());
+ latest_source_info_.MakeExpressionPosition(position);
}
}
@@ -509,7 +525,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
friend class BytecodeNodeBuilder;
const FeedbackVectorSpec* feedback_vector_spec() const {
- return literal_->feedback_vector_spec();
+ return feedback_vector_spec_;
}
// Returns the current source position for the given |bytecode|.
@@ -566,7 +582,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
}
Zone* zone_;
- FunctionLiteral* literal_;
+ FeedbackVectorSpec* feedback_vector_spec_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_;
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index 0248dfda46..963cd077bf 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index f499887ccb..9d206e2231 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index e467e1d527..9aea3d83fa 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -26,7 +26,7 @@ BytecodeArrayWriter::BytecodeArrayWriter(
SourcePositionTableBuilder::RecordingMode source_position_mode)
: bytecodes_(zone),
unbound_jumps_(0),
- source_position_table_builder_(zone, source_position_mode),
+ source_position_table_builder_(source_position_mode),
constant_array_builder_(constant_array_builder),
last_bytecode_(Bytecode::kIllegal),
last_bytecode_offset_(0),
@@ -45,14 +45,16 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
int frame_size = register_count * kPointerSize;
Handle<FixedArray> constant_pool =
constant_array_builder()->ToFixedArray(isolate);
+ Handle<ByteArray> source_position_table =
+ source_position_table_builder()->ToSourcePositionTable(isolate);
Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
constant_pool);
bytecode_array->set_handler_table(*handler_table);
- Handle<ByteArray> source_position_table =
- source_position_table_builder()->ToSourcePositionTable(
- isolate, Handle<AbstractCode>::cast(bytecode_array));
bytecode_array->set_source_position_table(*source_position_table);
+ LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(
+ bytecode_array->GetFirstBytecodeAddress(),
+ *source_position_table));
return bytecode_array;
}
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index b2dfae1ddd..c53df10129 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -13,6 +13,7 @@
namespace v8 {
namespace internal {
+class BytecodeArray;
class SourcePositionTableBuilder;
namespace interpreter {
@@ -22,6 +23,10 @@ class BytecodeNode;
class BytecodeJumpTable;
class ConstantArrayBuilder;
+namespace bytecode_array_writer_unittest {
+class BytecodeArrayWriterUnittest;
+} // namespace bytecode_array_writer_unittest
+
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
@@ -92,7 +97,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
bool exit_seen_in_block_;
- friend class BytecodeArrayWriterUnittest;
+ friend class bytecode_array_writer_unittest::BytecodeArrayWriterUnittest;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
};
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 39f313f249..57ff5cd850 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -43,7 +43,7 @@ uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope) {
// static
TestTypeOfFlags::LiteralFlag TestTypeOfFlags::GetFlagForLiteral(
const AstStringConstants* ast_constants, Literal* literal) {
- const AstRawString* raw_literal = literal->raw_value()->AsString();
+ const AstRawString* raw_literal = literal->AsRawString();
if (raw_literal == ast_constants->number_string()) {
return LiteralFlag::kNumber;
} else if (raw_literal == ast_constants->string_string()) {
@@ -52,6 +52,8 @@ TestTypeOfFlags::LiteralFlag TestTypeOfFlags::GetFlagForLiteral(
return LiteralFlag::kSymbol;
} else if (raw_literal == ast_constants->boolean_string()) {
return LiteralFlag::kBoolean;
+ } else if (raw_literal == ast_constants->bigint_string()) {
+ return LiteralFlag::kBigInt;
} else if (raw_literal == ast_constants->undefined_string()) {
return LiteralFlag::kUndefined;
} else if (raw_literal == ast_constants->function_string()) {
@@ -78,7 +80,7 @@ TestTypeOfFlags::LiteralFlag TestTypeOfFlags::Decode(uint8_t raw_flag) {
uint8_t StoreLookupSlotFlags::Encode(LanguageMode language_mode,
LookupHoistingMode lookup_hoisting_mode) {
DCHECK_IMPLIES(lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy,
- language_mode == SLOPPY);
+ language_mode == LanguageMode::kSloppy);
return LanguageModeBit::encode(language_mode) |
LookupHoistingModeBit::encode(static_cast<bool>(lookup_hoisting_mode));
}
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index fb08420a10..0e0ae256ed 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -54,6 +54,7 @@ class CreateClosureFlags {
V(String, string) \
V(Symbol, symbol) \
V(Boolean, boolean) \
+ V(BigInt, bigint) \
V(Undefined, undefined) \
V(Function, function) \
V(Object, object) \
@@ -78,9 +79,10 @@ class TestTypeOfFlags {
class StoreLookupSlotFlags {
public:
- class LanguageModeBit : public BitField8<bool, 0, 1> {};
+ class LanguageModeBit : public BitField8<LanguageMode, 0, 1> {};
class LookupHoistingModeBit
: public BitField8<bool, LanguageModeBit::kNext, 1> {};
+ STATIC_ASSERT(LanguageModeSize <= LanguageModeBit::kNumValues);
static uint8_t Encode(LanguageMode language_mode,
LookupHoistingMode lookup_hoisting_mode);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index ac5367b7e5..45f0d1eca9 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-generator.h"
+#include "src/api.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
@@ -18,6 +19,7 @@
#include "src/interpreter/control-flow-builders.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
@@ -494,6 +496,31 @@ class BytecodeGenerator::ControlScopeForTryFinally final
DeferredCommands* commands_;
};
+// Allocate and fetch the coverage indices tracking NaryLogical Expressions.
+class BytecodeGenerator::NaryCodeCoverageSlots {
+ public:
+ NaryCodeCoverageSlots(BytecodeGenerator* generator, NaryOperation* expr)
+ : generator_(generator) {
+ if (generator_->block_coverage_builder_ == nullptr) return;
+ for (size_t i = 0; i < expr->subsequent_length(); i++) {
+ coverage_slots_.push_back(
+ generator_->AllocateNaryBlockCoverageSlotIfEnabled(expr, i));
+ }
+ }
+
+ int GetSlotFor(size_t subsequent_expr_index) const {
+ if (generator_->block_coverage_builder_ == nullptr) {
+ return BlockCoverageBuilder::kNoCoverageArraySlot;
+ }
+ DCHECK(coverage_slots_.size() > subsequent_expr_index);
+ return coverage_slots_[subsequent_expr_index];
+ }
+
+ private:
+ BytecodeGenerator* generator_;
+ std::vector<int> coverage_slots_;
+};
+
void BytecodeGenerator::ControlScope::PerformCommand(Command command,
Statement* statement,
int source_position) {
@@ -562,7 +589,7 @@ class BytecodeGenerator::ExpressionResultScope {
// Specify expression always returns a Boolean result value.
void SetResultIsBoolean() {
- DCHECK(type_hint_ == TypeHint::kAny);
+ DCHECK_EQ(type_hint_, TypeHint::kAny);
type_hint_ = TypeHint::kBoolean;
}
@@ -679,19 +706,19 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
}
Handle<FixedArray> AllocateDeclarations(CompilationInfo* info,
- Handle<Script> script) {
+ Handle<Script> script,
+ Isolate* isolate) {
DCHECK(has_constant_pool_entry_);
int array_index = 0;
- Handle<FixedArray> data = info->isolate()->factory()->NewFixedArray(
+ Handle<FixedArray> data = isolate->factory()->NewFixedArray(
static_cast<int>(declarations_.size() * 4), TENURED);
for (const Declaration& declaration : declarations_) {
FunctionLiteral* func = declaration.func;
Handle<Object> initial_value;
if (func == nullptr) {
- initial_value = info->isolate()->factory()->undefined_value();
+ initial_value = isolate->factory()->undefined_value();
} else {
- initial_value =
- Compiler::GetSharedFunctionInfo(func, script, info->isolate());
+ initial_value = Compiler::GetSharedFunctionInfo(func, script, isolate);
}
// Return a null handle if any initial values can't be created. Caller
@@ -702,7 +729,7 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
Object* undefined_or_literal_slot;
if (declaration.literal_slot.IsInvalid()) {
- undefined_or_literal_slot = info->isolate()->heap()->undefined_value();
+ undefined_or_literal_slot = isolate->heap()->undefined_value();
} else {
undefined_or_literal_slot =
Smi::FromInt(declaration.literal_slot.ToInt());
@@ -770,24 +797,64 @@ class BytecodeGenerator::CurrentScope final {
Scope* outer_scope_;
};
-BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
+class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
+ public:
+ typedef std::pair<TypeofMode, void*> Key;
+
+ explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}
+
+ void Put(TypeofMode typeof_mode, Variable* variable, FeedbackSlot slot) {
+ Key key = std::make_pair(typeof_mode, variable);
+ auto entry = std::make_pair(key, slot);
+ map_.insert(entry);
+ }
+ void Put(AstNode* node, FeedbackSlot slot) {
+ Key key = std::make_pair(NOT_INSIDE_TYPEOF, node);
+ auto entry = std::make_pair(key, slot);
+ map_.insert(entry);
+ }
+
+ FeedbackSlot Get(TypeofMode typeof_mode, Variable* variable) const {
+ Key key = std::make_pair(typeof_mode, variable);
+ auto iter = map_.find(key);
+ if (iter != map_.end()) {
+ return iter->second;
+ }
+ return FeedbackSlot();
+ }
+ FeedbackSlot Get(AstNode* node) const {
+ Key key = std::make_pair(NOT_INSIDE_TYPEOF, node);
+ auto iter = map_.find(key);
+ if (iter != map_.end()) {
+ return iter->second;
+ }
+ return FeedbackSlot();
+ }
+
+ private:
+ ZoneMap<Key, FeedbackSlot> map_;
+};
+
+BytecodeGenerator::BytecodeGenerator(
+ CompilationInfo* info, const AstStringConstants* ast_string_constants)
: zone_(info->zone()),
- builder_(new (zone()) BytecodeArrayBuilder(
- info->isolate(), info->zone(), info->num_parameters_including_this(),
- info->scope()->num_stack_slots(), info->literal(),
- info->SourcePositionRecordingMode())),
+ builder_(zone(), info->num_parameters_including_this(),
+ info->scope()->num_stack_slots(), info->feedback_vector_spec(),
+ info->SourcePositionRecordingMode()),
info_(info),
- ast_string_constants_(info->isolate()->ast_string_constants()),
+ ast_string_constants_(ast_string_constants),
closure_scope_(info->scope()),
current_scope_(info->scope()),
- globals_builder_(new (zone()) GlobalDeclarationsBuilder(info->zone())),
+ feedback_slot_cache_(new (zone()) FeedbackSlotCache(zone())),
+ globals_builder_(new (zone()) GlobalDeclarationsBuilder(zone())),
block_coverage_builder_(nullptr),
- global_declarations_(0, info->zone()),
- function_literals_(0, info->zone()),
- native_function_literals_(0, info->zone()),
- object_literals_(0, info->zone()),
- array_literals_(0, info->zone()),
- template_objects_(0, info->zone()),
+ global_declarations_(0, zone()),
+ function_literals_(0, zone()),
+ native_function_literals_(0, zone()),
+ object_literals_(0, zone()),
+ array_literals_(0, zone()),
+ class_literals_(0, zone()),
+ template_objects_(0, zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
@@ -833,7 +900,7 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
// Build global declaration pair arrays.
for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
Handle<FixedArray> declarations =
- globals_builder->AllocateDeclarations(info(), script);
+ globals_builder->AllocateDeclarations(info(), script, isolate);
if (declarations.is_null()) return SetStackOverflow();
builder()->SetDeferredConstantPoolEntry(
globals_builder->constant_pool_entry(), declarations);
@@ -852,10 +919,18 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
for (std::pair<NativeFunctionLiteral*, size_t> literal :
native_function_literals_) {
NativeFunctionLiteral* expr = literal.first;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+
+ // Compute the function template for the native function.
+ v8::Local<v8::FunctionTemplate> info =
+ expr->extension()->GetNativeFunctionTemplate(
+ v8_isolate, Utils::ToLocal(expr->name()));
+ DCHECK(!info.IsEmpty());
+
Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfoForNative(expr->extension(),
- expr->name());
- if (shared_info.is_null()) return SetStackOverflow();
+ FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
+ isolate, Utils::OpenHandle(*info), expr->name());
+ DCHECK(!shared_info.is_null());
builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
}
@@ -881,6 +956,14 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
}
+ // Build class literal boilerplates.
+ for (std::pair<ClassLiteral*, size_t> literal : class_literals_) {
+ ClassLiteral* class_literal = literal.first;
+ Handle<ClassBoilerplate> class_boilerplate =
+ ClassBoilerplate::BuildClassBoilerplate(isolate, class_literal);
+ builder()->SetDeferredConstantPoolEntry(literal.second, class_boilerplate);
+ }
+
// Build template literals.
for (std::pair<GetTemplateObject*, size_t> literal : template_objects_) {
GetTemplateObject* get_template_object = literal.first;
@@ -933,7 +1016,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
Variable* rest_parameter = closure_scope()->rest_parameter();
VisitRestArgumentsArray(rest_parameter);
- // Build assignment to {.this_function} variable if it is used.
+ // Build assignment to the function name or {.this_function}
+ // variables if used.
+ VisitThisFunctionVariable(closure_scope()->function_var());
VisitThisFunctionVariable(closure_scope()->this_function_var());
// Build assignment to {new.target} variable if it is used.
@@ -949,7 +1034,8 @@ void BytecodeGenerator::GenerateBytecodeBody() {
if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
// Emit type profile call.
- if (info()->literal()->feedback_vector_spec()->HasTypeProfileSlot()) {
+ if (info()->collect_type_profile()) {
+ feedback_spec()->AddTypeProfileSlot();
int num_parameters = closure_scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Register parameter(builder()->Parameter(i));
@@ -967,6 +1053,13 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Perform a stack-check before the body.
builder()->StackCheck(info()->literal()->start_position());
+ // The derived constructor case is handled in VisitCallSuper.
+ if (IsBaseConstructor(function_kind()) &&
+ info()->literal()->requires_instance_fields_initializer()) {
+ BuildInstanceFieldInitialization(Register::function_closure(),
+ builder()->Receiver());
+ }
+
// Visit statements in the function body.
VisitStatements(info()->literal()->body());
@@ -1107,7 +1200,8 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
- FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
+ FeedbackSlot slot =
+ GetCachedLoadGlobalICSlot(NOT_INSIDE_TYPEOF, variable);
globals_builder()->AddUndefinedDeclaration(variable->raw_name(), slot);
break;
}
@@ -1145,8 +1239,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::MODULE:
if (variable->IsExport() && variable->binding_needs_init()) {
builder()->LoadTheHole();
- BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
}
// Nothing to do for imports.
break;
@@ -1158,17 +1251,17 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK(variable->mode() == LET || variable->mode() == VAR);
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
- FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddFunctionDeclaration(
- variable->raw_name(), slot, decl->fun()->LiteralFeedbackSlot(),
- decl->fun());
+ FeedbackSlot slot =
+ GetCachedLoadGlobalICSlot(NOT_INSIDE_TYPEOF, variable);
+ FeedbackSlot literal_slot = GetCachedCreateClosureSlot(decl->fun());
+ globals_builder()->AddFunctionDeclaration(variable->raw_name(), slot,
+ literal_slot, decl->fun());
break;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
- BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
break;
}
case VariableLocation::CONTEXT: {
@@ -1192,8 +1285,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(variable->mode(), LET);
DCHECK(variable->IsExport());
VisitForAccumulatorValue(decl->fun());
- BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
break;
}
}
@@ -1212,8 +1304,7 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
.CallRuntime(Runtime::kGetModuleNamespace, module_request);
Variable* var = closure_scope()->LookupLocal(entry->local_name);
DCHECK_NOT_NULL(var);
- BuildVariableAssignment(var, Token::INIT, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
}
}
@@ -1344,6 +1435,9 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value in a register until a case matches.
Register tag = VisitForRegisterValue(stmt->tag());
+ FeedbackSlot slot = clauses->length() > 0
+ ? feedback_spec()->AddCompareICSlot()
+ : FeedbackSlot::Invalid();
// Iterate over all cases and create nodes for label comparison.
for (int i = 0; i < clauses->length(); i++) {
@@ -1357,9 +1451,8 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform label comparison as if via '===' with tag.
VisitForAccumulatorValue(clause->label());
- builder()->CompareOperation(
- Token::Value::EQ_STRICT, tag,
- feedback_index(clause->CompareOperationFeedbackSlot()));
+ builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
+ feedback_index(slot));
switch_builder.Case(ToBooleanMode::kAlreadyBoolean, i);
}
@@ -1457,8 +1550,7 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
loop_builder.JumpToHeader(loop_depth_);
}
-void BytecodeGenerator::VisitForInAssignment(Expression* expr,
- FeedbackSlot slot) {
+void BytecodeGenerator::VisitForInAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
// Evaluate assignment starting with the value to be stored in the
@@ -1468,7 +1560,7 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ BuildVariableAssignment(proxy->var(), Token::ASSIGN,
proxy->hole_check_mode());
break;
}
@@ -1480,8 +1572,10 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
const AstRawString* name =
property->key()->AsLiteral()->AsRawPropertyName();
builder()->LoadAccumulatorWithRegister(value);
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
+ builder()->LoadAccumulatorWithRegister(value);
break;
}
case KEYED_PROPERTY: {
@@ -1491,8 +1585,10 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
Register object = VisitForRegisterValue(property->obj());
Register key = VisitForRegisterValue(property->key());
builder()->LoadAccumulatorWithRegister(value);
+ FeedbackSlot slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
builder()->StoreKeyedProperty(object, key, feedback_index(slot),
language_mode());
+ builder()->LoadAccumulatorWithRegister(value);
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -1532,7 +1628,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
BytecodeLabel subject_null_label, subject_undefined_label;
- FeedbackSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = feedback_spec()->AddForInSlot();
// Prepare the state for executing ForIn.
builder()->SetExpressionAsStatementPosition(stmt->subject());
@@ -1563,7 +1659,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->ForInNext(receiver, index, triple.Truncate(2),
feedback_index(slot));
loop_builder.ContinueIfUndefined();
- VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
+ VisitForInAssignment(stmt->each());
VisitIterationBody(stmt, &loop_builder);
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
@@ -1597,7 +1693,8 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
HandlerTable::CatchPrediction outer_catch_prediction = catch_prediction();
set_catch_prediction(stmt->GetCatchPrediction(outer_catch_prediction));
- TryCatchBuilder try_control_builder(builder(), catch_prediction());
+ TryCatchBuilder try_control_builder(builder(), block_coverage_builder_, stmt,
+ catch_prediction());
// Preserve the context in a dedicated register, so that it can be restored
// when the handler is entered by the stack-unwinding machinery.
@@ -1628,17 +1725,15 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
builder()->LoadAccumulatorWithRegister(context);
// Evaluate the catch-block.
- BuildIncrementBlockCoverageCounterIfEnabled(stmt, SourceRangeKind::kCatch);
VisitInScope(stmt->catch_block(), stmt->scope());
try_control_builder.EndCatch();
- BuildIncrementBlockCoverageCounterIfEnabled(stmt,
- SourceRangeKind::kContinuation);
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// We can't know whether the finally block will override ("catch") an
// exception thrown in the try block, so we just adopt the outer prediction.
- TryFinallyBuilder try_control_builder(builder(), catch_prediction());
+ TryFinallyBuilder try_control_builder(builder(), block_coverage_builder_,
+ stmt, catch_prediction());
// We keep a record of all paths that enter the finally-block to be able to
// dispatch to the correct continuation point after the statements in the
@@ -1689,7 +1784,6 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
message);
// Evaluate the finally-block.
- BuildIncrementBlockCoverageCounterIfEnabled(stmt, SourceRangeKind::kFinally);
Visit(stmt->finally_block());
try_control_builder.EndFinally();
@@ -1698,8 +1792,6 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Dynamic dispatch after the finally-block.
commands.ApplyDeferredCommands();
- BuildIncrementBlockCoverageCounterIfEnabled(stmt,
- SourceRangeKind::kContinuation);
}
void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
@@ -1708,50 +1800,133 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- DCHECK_EQ(expr->scope()->outer_scope(), current_scope());
+ DCHECK(expr->scope()->outer_scope() == current_scope());
uint8_t flags = CreateClosureFlags::Encode(
expr->pretenure(), closure_scope()->is_function_scope());
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
- int slot_index = feedback_index(expr->LiteralFeedbackSlot());
- builder()->CreateClosure(entry, slot_index, flags);
+ FeedbackSlot slot = GetCachedCreateClosureSlot(expr);
+ builder()->CreateClosure(entry, feedback_index(slot), flags);
function_literals_.push_back(std::make_pair(expr, entry));
}
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
+ size_t class_boilerplate_entry =
+ builder()->AllocateDeferredConstantPoolEntry();
+ class_literals_.push_back(std::make_pair(expr, class_boilerplate_entry));
+
VisitDeclarations(expr->scope()->declarations());
- Register constructor = VisitForRegisterValue(expr->constructor());
+ Register class_constructor = register_allocator()->NewRegister();
+
{
RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+
+ Register class_boilerplate = register_allocator()->GrowRegisterList(&args);
+ Register class_constructor_in_args =
+ register_allocator()->GrowRegisterList(&args);
+ Register super_class = register_allocator()->GrowRegisterList(&args);
+ DCHECK_EQ(ClassBoilerplate::kFirstDynamicArgumentIndex,
+ args.register_count());
+
VisitForAccumulatorValueOrTheHole(expr->extends());
+ builder()->StoreAccumulatorInRegister(super_class);
+
+ VisitFunctionLiteral(expr->constructor());
builder()
- ->StoreAccumulatorInRegister(args[0])
- .MoveRegister(constructor, args[1])
- .LoadLiteral(Smi::FromInt(expr->start_position()))
- .StoreAccumulatorInRegister(args[2])
- .LoadLiteral(Smi::FromInt(expr->end_position()))
- .StoreAccumulatorInRegister(args[3])
- .CallRuntime(Runtime::kDefineClass, args);
+ ->StoreAccumulatorInRegister(class_constructor)
+ .MoveRegister(class_constructor, class_constructor_in_args)
+ .LoadConstantPoolEntry(class_boilerplate_entry)
+ .StoreAccumulatorInRegister(class_boilerplate);
+
+ // Create computed names and method values nodes to store into the literal.
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ClassLiteral::Property* property = expr->properties()->at(i);
+ if (property->is_computed_name()) {
+ Register key = register_allocator()->GrowRegisterList(&args);
+
+ BuildLoadPropertyKey(property, key);
+ if (property->is_static()) {
+ // The static prototype property is read only. We handle the non
+ // computed property name case in the parser. Since this is the only
+ // case where we need to check for an own read only property we
+ // special case this so we do not need to do this for every property.
+ BytecodeLabel done;
+ builder()
+ ->LoadLiteral(ast_string_constants()->prototype_string())
+ .CompareOperation(Token::Value::EQ_STRICT, key)
+ .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &done)
+ .CallRuntime(Runtime::kThrowStaticPrototypeError)
+ .Bind(&done);
+ }
+
+ if (property->kind() == ClassLiteral::Property::FIELD) {
+ // Initialize field's name variable with the computed name.
+ DCHECK_NOT_NULL(property->computed_name_var());
+ builder()->LoadAccumulatorWithRegister(key);
+ BuildVariableAssignment(property->computed_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ }
+ }
+ if (property->kind() == ClassLiteral::Property::FIELD) {
+ // We don't compute field's value here, but instead do it in the
+ // initializer function.
+ continue;
+ }
+ Register value = register_allocator()->GrowRegisterList(&args);
+ VisitForRegisterValue(property->value(), value);
+ }
+
+ builder()->CallRuntime(Runtime::kDefineClass, args);
}
Register prototype = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(prototype);
- if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
- // Prototype is already in the accumulator.
- builder()->StoreHomeObjectProperty(
- constructor, feedback_index(expr->HomeObjectSlot()), language_mode());
- }
-
- VisitClassLiteralProperties(expr, constructor, prototype);
- BuildClassLiteralNameProperty(expr, constructor);
- builder()->CallRuntime(Runtime::kToFastProperties, constructor);
// Assign to class variable.
if (expr->class_variable() != nullptr) {
DCHECK(expr->class_variable()->IsStackLocal() ||
expr->class_variable()->IsContextSlot());
+ builder()->LoadAccumulatorWithRegister(class_constructor);
BuildVariableAssignment(expr->class_variable(), Token::INIT,
- FeedbackSlot::Invalid(), HoleCheckMode::kElided);
+ HoleCheckMode::kElided);
+ }
+
+ if (expr->instance_fields_initializer_function() != nullptr) {
+ Register initializer =
+ VisitForRegisterValue(expr->instance_fields_initializer_function());
+
+ if (FunctionLiteral::NeedsHomeObject(
+ expr->instance_fields_initializer_function())) {
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
+ builder()->LoadAccumulatorWithRegister(prototype).StoreHomeObjectProperty(
+ initializer, feedback_index(slot), language_mode());
+ }
+
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
+ builder()
+ ->LoadAccumulatorWithRegister(initializer)
+ .StoreClassFieldsInitializer(class_constructor, feedback_index(slot))
+ .LoadAccumulatorWithRegister(class_constructor);
+ }
+
+ if (expr->static_fields_initializer() != nullptr) {
+ RegisterList args = register_allocator()->NewRegisterList(1);
+ Register initializer =
+ VisitForRegisterValue(expr->static_fields_initializer());
+
+ if (FunctionLiteral::NeedsHomeObject(expr->static_fields_initializer())) {
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
+ builder()
+ ->LoadAccumulatorWithRegister(class_constructor)
+ .StoreHomeObjectProperty(initializer, feedback_index(slot),
+ language_mode());
+ }
+
+ builder()
+ ->MoveRegister(class_constructor, args[0])
+ .CallProperty(initializer, args,
+ feedback_index(feedback_spec()->AddCallICSlot()));
}
+ builder()->LoadAccumulatorWithRegister(class_constructor);
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
@@ -1766,101 +1941,59 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
}
}
-void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
- Register constructor,
- Register prototype) {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- Register receiver = args[0], key = args[1], value = args[2], attr = args[3];
-
- bool attr_assigned = false;
- Register old_receiver = Register::invalid_value();
-
- // Create nodes to store method values into the literal.
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
-
- // Set-up receiver.
- Register new_receiver = property->is_static() ? constructor : prototype;
- if (new_receiver != old_receiver) {
- builder()->MoveRegister(new_receiver, receiver);
- old_receiver = new_receiver;
- }
-
- BuildLoadPropertyKey(property, key);
- if (property->is_static() && property->is_computed_name()) {
- // The static prototype property is read only. We handle the non computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read only property we special case this so we
- // do not need to do this for every property.
- BytecodeLabel done;
- builder()
- ->LoadLiteral(ast_string_constants()->prototype_string())
- .CompareOperation(Token::Value::EQ_STRICT, key)
- .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &done)
- .CallRuntime(Runtime::kThrowStaticPrototypeError)
- .Bind(&done);
+void BytecodeGenerator::VisitInitializeClassFieldsStatement(
+ InitializeClassFieldsStatement* expr) {
+ RegisterList args = register_allocator()->NewRegisterList(3);
+ Register constructor = args[0], key = args[1], value = args[2];
+ builder()->MoveRegister(builder()->Receiver(), constructor);
+
+ for (int i = 0; i < expr->fields()->length(); i++) {
+ ClassLiteral::Property* property = expr->fields()->at(i);
+
+ if (property->is_computed_name()) {
+ Variable* var = property->computed_name_var();
+ DCHECK_NOT_NULL(var);
+ // The computed name is already evaluated and stored in a
+ // variable at class definition time.
+ BuildVariableLoad(var, HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(key);
+ } else {
+ BuildLoadPropertyKey(property, key);
}
VisitForRegisterValue(property->value(), value);
- VisitSetHomeObject(value, receiver, property);
-
- if (!attr_assigned) {
- builder()
- ->LoadLiteral(Smi::FromInt(DONT_ENUM))
- .StoreAccumulatorInRegister(attr);
- attr_assigned = true;
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD: {
- DataPropertyInLiteralFlags flags = DataPropertyInLiteralFlag::kDontEnum;
- if (property->NeedsSetFunctionName()) {
- flags |= DataPropertyInLiteralFlag::kSetFunctionName;
- }
+ VisitSetHomeObject(value, constructor, property);
- FeedbackSlot slot = property->GetStoreDataPropertySlot();
- DCHECK(!slot.IsInvalid());
-
- builder()
- ->LoadAccumulatorWithRegister(value)
- .StoreDataPropertyInLiteral(receiver, key, flags,
- feedback_index(slot));
- break;
- }
- case ClassLiteral::Property::GETTER: {
- builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked, args);
- break;
- }
- case ClassLiteral::Property::SETTER: {
- builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked, args);
- break;
- }
- case ClassLiteral::Property::FIELD: {
- UNREACHABLE();
- break;
- }
- }
+ builder()->CallRuntime(Runtime::kCreateDataProperty, args);
}
}
-void BytecodeGenerator::BuildClassLiteralNameProperty(ClassLiteral* expr,
- Register literal) {
- if (!expr->has_name_static_property() &&
- expr->constructor()->has_shared_name()) {
- Runtime::FunctionId runtime_id =
- expr->has_static_computed_names()
- ? Runtime::kInstallClassNameAccessorWithCheck
- : Runtime::kInstallClassNameAccessor;
- builder()->CallRuntime(runtime_id, literal);
- }
+void BytecodeGenerator::BuildInstanceFieldInitialization(Register constructor,
+ Register instance) {
+ RegisterList args = register_allocator()->NewRegisterList(1);
+ Register initializer = register_allocator()->NewRegister();
+
+ FeedbackSlot slot = feedback_spec()->AddLoadICSlot();
+ BytecodeLabel done;
+
+ builder()
+ ->LoadClassFieldsInitializer(constructor, feedback_index(slot))
+ // TODO(gsathya): This jump can be elided for the base
+ // constructor and derived constructor. This is only required
+ // when called from an arrow function.
+ .JumpIfUndefined(&done)
+ .StoreAccumulatorInRegister(initializer)
+ .MoveRegister(instance, args[0])
+ .CallProperty(initializer, args,
+ feedback_index(feedback_spec()->AddCallICSlot()))
+ .Bind(&done);
}
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
- int slot_index = feedback_index(expr->LiteralFeedbackSlot());
- builder()->CreateClosure(entry, slot_index, NOT_TENURED);
+ FeedbackSlot slot = feedback_spec()->AddCreateClosureSlot();
+ builder()->CreateClosure(entry, feedback_index(slot), NOT_TENURED);
native_function_literals_.push_back(std::make_pair(expr, entry));
}
@@ -1895,19 +2028,44 @@ void BytecodeGenerator::VisitConditional(Conditional* expr) {
}
void BytecodeGenerator::VisitLiteral(Literal* expr) {
- if (!execution_result()->IsEffect()) {
- const AstValue* raw_value = expr->raw_value();
- builder()->LoadLiteral(raw_value);
- if (raw_value->IsTrue() || raw_value->IsFalse()) {
+ if (execution_result()->IsEffect()) return;
+ switch (expr->type()) {
+ case Literal::kSmi:
+ builder()->LoadLiteral(expr->AsSmiLiteral());
+ break;
+ case Literal::kHeapNumber:
+ builder()->LoadLiteral(expr->AsNumber());
+ break;
+ case Literal::kUndefined:
+ builder()->LoadUndefined();
+ break;
+ case Literal::kBoolean:
+ builder()->LoadBoolean(expr->ToBooleanIsTrue());
execution_result()->SetResultIsBoolean();
- }
+ break;
+ case Literal::kNull:
+ builder()->LoadNull();
+ break;
+ case Literal::kTheHole:
+ builder()->LoadTheHole();
+ break;
+ case Literal::kString:
+ builder()->LoadLiteral(expr->AsRawString());
+ break;
+ case Literal::kSymbol:
+ builder()->LoadLiteral(expr->AsSymbol());
+ break;
+ case Literal::kBigInt:
+ builder()->LoadLiteral(expr->AsBigInt());
+ break;
}
}
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Materialize a regular expression literal.
builder()->CreateRegExpLiteral(
- expr->raw_pattern(), feedback_index(expr->literal_slot()), expr->flags());
+ expr->raw_pattern(), feedback_index(feedback_spec()->AddLiteralSlot()),
+ expr->flags());
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
@@ -1919,7 +2077,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
return;
}
- int literal_index = feedback_index(expr->literal_slot());
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
expr->ComputeFlags(), expr->IsFastCloningSupported());
@@ -1963,18 +2121,17 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(property->value());
+ FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
if (FunctionLiteral::NeedsHomeObject(property->value())) {
RegisterAllocationScope register_scope(this);
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
builder()->StoreNamedOwnProperty(
- literal, key->AsRawPropertyName(),
- feedback_index(property->GetSlot(0)));
- VisitSetHomeObject(value, literal, property, 1);
+ literal, key->AsRawPropertyName(), feedback_index(slot));
+ VisitSetHomeObject(value, literal, property);
} else {
builder()->StoreNamedOwnProperty(
- literal, key->AsRawPropertyName(),
- feedback_index(property->GetSlot(0)));
+ literal, key->AsRawPropertyName(), feedback_index(slot));
}
} else {
VisitForEffect(property->value());
@@ -1987,7 +2144,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForRegisterValue(property->value(), args[2]);
if (property->emit_store()) {
builder()
- ->LoadLiteral(Smi::FromInt(SLOPPY))
+ ->LoadLiteral(Smi::FromEnum(LanguageMode::kSloppy))
.StoreAccumulatorInRegister(args[3])
.CallRuntime(Runtime::kSetProperty, args);
Register value = args[2];
@@ -2076,9 +2233,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
}
- FeedbackSlot slot = property->GetStoreDataPropertySlot();
- DCHECK(!slot.IsInvalid());
-
+ FeedbackSlot slot =
+ feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
builder()
->LoadAccumulatorWithRegister(value)
.StoreDataPropertyInLiteral(literal, key, data_property_flags,
@@ -2120,7 +2276,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
- int literal_index = feedback_index(expr->literal_slot());
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
if (expr->is_empty()) {
// Empty array literal fast-path.
DCHECK(expr->IsFastCloningSupported());
@@ -2136,9 +2292,13 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Register index, literal;
+ // We'll reuse the same literal slot for all of the non-constant
+ // subexpressions that use a keyed store IC.
+
// Evaluate all the non-constant subexpressions and store them into the
// newly cloned array.
bool literal_in_accumulator = true;
+ FeedbackSlot slot;
for (int array_index = 0; array_index < expr->values()->length();
array_index++) {
Expression* subexpr = expr->values()->at(array_index);
@@ -2151,8 +2311,10 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
+ if (slot.IsInvalid()) {
+ slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
+ }
- FeedbackSlot slot = expr->LiteralFeedbackSlot();
builder()
->LoadLiteral(Smi::FromInt(array_index))
.StoreAccumulatorInRegister(index);
@@ -2169,11 +2331,10 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
- BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
- proxy->hole_check_mode());
+ BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
}
-void BytecodeGenerator::BuildVariableLoad(Variable* variable, FeedbackSlot slot,
+void BytecodeGenerator::BuildVariableLoad(Variable* variable,
HoleCheckMode hole_check_mode,
TypeofMode typeof_mode) {
switch (variable->location()) {
@@ -2211,6 +2372,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable, FeedbackSlot slot,
if (variable->raw_name() == ast_string_constants()->undefined_string()) {
builder()->LoadUndefined();
} else {
+ FeedbackSlot slot = GetCachedLoadGlobalICSlot(typeof_mode, variable);
builder()->LoadGlobal(variable->raw_name(), feedback_index(slot),
typeof_mode);
}
@@ -2254,7 +2416,8 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable, FeedbackSlot slot,
}
case DYNAMIC_GLOBAL: {
int depth =
- closure_scope()->ContextChainLengthUntilOutermostSloppyEval();
+ current_scope()->ContextChainLengthUntilOutermostSloppyEval();
+ FeedbackSlot slot = GetCachedLoadGlobalICSlot(typeof_mode, variable);
builder()->LoadLookupGlobalSlot(variable->raw_name(), typeof_mode,
feedback_index(slot), depth);
break;
@@ -2276,10 +2439,9 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable, FeedbackSlot slot,
}
void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
- Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
- TypeofMode typeof_mode) {
+ Variable* variable, HoleCheckMode hole_check_mode, TypeofMode typeof_mode) {
ValueResultScope accumulator_result(this);
- BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
+ BuildVariableLoad(variable, hole_check_mode, typeof_mode);
}
void BytecodeGenerator::BuildReturn(int source_position) {
@@ -2290,7 +2452,7 @@ void BytecodeGenerator::BuildReturn(int source_position) {
builder()->StoreAccumulatorInRegister(result).CallRuntime(
Runtime::kTraceExit, result);
}
- if (info()->literal()->feedback_vector_spec()->HasTypeProfileSlot()) {
+ if (info()->collect_type_profile()) {
builder()->CollectTypeProfile(info()->literal()->return_position());
}
builder()->SetReturnPosition(source_position, info()->literal());
@@ -2317,8 +2479,7 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
Variable* var_promise = closure_scope()->promise_var();
DCHECK_NOT_NULL(var_promise);
- BuildVariableLoad(var_promise, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableLoad(var_promise, HoleCheckMode::kElided);
builder()
->StoreAccumulatorInRegister(promise)
.CallJSRuntime(Context::PROMISE_RESOLVE_INDEX, args)
@@ -2355,8 +2516,8 @@ void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
}
void BytecodeGenerator::BuildVariableAssignment(
- Variable* variable, Token::Value op, FeedbackSlot slot,
- HoleCheckMode hole_check_mode, LookupHoistingMode lookup_hoisting_mode) {
+ Variable* variable, Token::Value op, HoleCheckMode hole_check_mode,
+ LookupHoistingMode lookup_hoisting_mode) {
VariableMode mode = variable->mode();
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
@@ -2393,6 +2554,9 @@ void BytecodeGenerator::BuildVariableAssignment(
break;
}
case VariableLocation::UNALLOCATED: {
+ // TODO(ishell): consider using FeedbackSlotCache for variables here.
+ FeedbackSlot slot =
+ feedback_spec()->AddStoreGlobalICSlot(language_mode());
builder()->StoreGlobal(variable->raw_name(), feedback_index(slot),
language_mode());
break;
@@ -2518,19 +2682,18 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
- BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
- proxy->hole_check_mode());
+ BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
- FeedbackSlot slot = property->PropertyFeedbackSlot();
+ FeedbackSlot slot = feedback_spec()->AddLoadICSlot();
builder()->LoadNamedProperty(object, name, feedback_index(slot));
break;
}
case KEYED_PROPERTY: {
// Key is already in accumulator at this point due to evaluating the
// LHS above.
- FeedbackSlot slot = property->PropertyFeedbackSlot();
+ FeedbackSlot slot = feedback_spec()->AddKeyedLoadICSlot();
builder()->LoadKeyedProperty(object, feedback_index(slot));
break;
}
@@ -2546,7 +2709,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
}
BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
- FeedbackSlot slot = binop->BinaryOperationFeedbackSlot();
+ FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
if (expr->value()->IsSmiLiteral()) {
builder()->BinaryOperationSmiLiteral(
binop->op(), expr->value()->AsLiteral()->AsSmiLiteral(),
@@ -2563,25 +2726,44 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
builder()->SetExpressionPosition(expr);
- FeedbackSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
// TODO(oth): The BuildVariableAssignment() call is hard to reason about.
// Is the value in the accumulator safe? Yes, but scary.
VariableProxy* proxy = expr->target()->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), expr->op(), slot,
+ BuildVariableAssignment(proxy->var(), expr->op(),
proxy->hole_check_mode(),
expr->lookup_hoisting_mode());
break;
}
- case NAMED_PROPERTY:
+ case NAMED_PROPERTY: {
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
break;
- case KEYED_PROPERTY:
+ }
+ case KEYED_PROPERTY: {
+ FeedbackSlot slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
builder()->StoreKeyedProperty(object, key, feedback_index(slot),
language_mode());
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
break;
+ }
case NAMED_SUPER_PROPERTY: {
builder()
->StoreAccumulatorInRegister(super_property_args[3])
@@ -2786,11 +2968,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
Register iterator = iterator_and_input[0];
- BuildGetIterator(expr->expression(), iterator_type,
- expr->load_iterable_iterator_slot(),
- expr->call_iterable_iterator_slot(),
- expr->load_iterable_async_iterator_slot(),
- expr->call_iterable_async_iterator_slot());
+ BuildGetIterator(expr->expression(), iterator_type);
builder()->StoreAccumulatorInRegister(iterator);
Register input = iterator_and_input[1];
builder()->LoadUndefined().StoreAccumulatorInRegister(input);
@@ -2825,13 +3003,15 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
RegisterAllocationScope register_scope(this);
// output = iterator.next(input);
Register iterator_next = register_allocator()->NewRegister();
+ FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
builder()
- ->LoadNamedProperty(
- iterator, ast_string_constants()->next_string(),
- feedback_index(expr->load_iterator_next_slot()))
+ ->LoadNamedProperty(iterator,
+ ast_string_constants()->next_string(),
+ feedback_index(load_slot))
.StoreAccumulatorInRegister(iterator_next)
.CallProperty(iterator_next, iterator_and_input,
- feedback_index(expr->call_iterator_next_slot()))
+ feedback_index(call_slot))
.Jump(after_switch.New());
}
@@ -2842,15 +3022,17 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
BytecodeLabels return_input(zone());
// Trigger return from within the inner iterator.
Register iterator_return = register_allocator()->NewRegister();
+ FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
builder()
- ->LoadNamedProperty(
- iterator, ast_string_constants()->return_string(),
- feedback_index(expr->load_iterator_return_slot()))
+ ->LoadNamedProperty(iterator,
+ ast_string_constants()->return_string(),
+ feedback_index(load_slot))
.JumpIfUndefined(return_input.New())
.JumpIfNull(return_input.New())
.StoreAccumulatorInRegister(iterator_return)
.CallProperty(iterator_return, iterator_and_input,
- feedback_index(expr->call_iterator_return_slot1()))
+ feedback_index(call_slot))
.Jump(after_switch.New());
return_input.Bind(builder());
@@ -2873,16 +3055,18 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// If the inner iterator has a throw method, use it to trigger an
// exception inside.
Register iterator_throw = register_allocator()->NewRegister();
+ FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
builder()
- ->LoadNamedProperty(
- iterator, ast_string_constants()->throw_string(),
- feedback_index(expr->load_iterator_throw_slot()))
+ ->LoadNamedProperty(iterator,
+ ast_string_constants()->throw_string(),
+ feedback_index(load_slot))
.JumpIfUndefined(iterator_throw_is_undefined.New())
.JumpIfNull(iterator_throw_is_undefined.New())
.StoreAccumulatorInRegister(iterator_throw);
builder()
->CallProperty(iterator_throw, iterator_and_input,
- feedback_index(expr->call_iterator_throw_slot()))
+ feedback_index(call_slot))
.Jump(after_switch.New());
}
@@ -2893,17 +3077,18 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
Register iterator_return = register_allocator()->NewRegister();
// If iterator.throw does not exist, try to use iterator.return to
// inform the iterator that it should stop.
+ FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
builder()
- ->LoadNamedProperty(
- iterator, ast_string_constants()->return_string(),
- feedback_index(expr->load_iterator_return_slot()))
+ ->LoadNamedProperty(iterator,
+ ast_string_constants()->return_string(),
+ feedback_index(load_slot))
.StoreAccumulatorInRegister(iterator_return);
builder()
->JumpIfUndefined(throw_throw_method_missing.New())
.JumpIfNull(throw_throw_method_missing.New())
- .CallProperty(
- iterator_return, RegisterList(iterator),
- feedback_index(expr->call_iterator_return_slot2()));
+ .CallProperty(iterator_return, RegisterList(iterator),
+ feedback_index(call_slot));
if (iterator_type == IteratorType::kAsync) {
// For async generators, await the result of the .return() call.
@@ -2939,7 +3124,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// Break once output.done is true.
builder()->LoadNamedProperty(
output, ast_string_constants()->done_string(),
- feedback_index(expr->load_output_done_slot()));
+ feedback_index(feedback_spec()->AddLoadICSlot()));
loop.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
@@ -2948,13 +3133,13 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
builder()->LoadAccumulatorWithRegister(output);
} else {
RegisterAllocationScope register_scope(this);
- DCHECK(iterator_type == IteratorType::kAsync);
+ DCHECK_EQ(iterator_type, IteratorType::kAsync);
// If generatorKind is async, perform AsyncGeneratorYield(output.value),
// which will await `output.value` before resolving the current
// AsyncGeneratorRequest's promise.
builder()->LoadNamedProperty(
output, ast_string_constants()->value_string(),
- feedback_index(expr->load_output_value_slot()));
+ feedback_index(feedback_spec()->AddLoadICSlot()));
RegisterList args = register_allocator()->NewRegisterList(3);
builder()
@@ -2983,7 +3168,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
Register output_value = register_allocator()->NewRegister();
builder()
->LoadNamedProperty(output, ast_string_constants()->value_string(),
- feedback_index(expr->load_output_value_slot()))
+ feedback_index(feedback_spec()->AddLoadICSlot()))
.StoreAccumulatorInRegister(output_value)
.LoadLiteral(Smi::FromInt(JSGeneratorObject::kReturn))
.CompareOperation(Token::EQ_STRICT, resume_mode)
@@ -3038,8 +3223,7 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
// AsyncFunction Await builtins require a 3rd parameter to hold the outer
// promise.
Variable* var_promise = closure_scope()->promise_var();
- BuildVariableLoadForAccumulatorValue(var_promise, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableLoadForAccumulatorValue(var_promise, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(args[2]);
}
@@ -3088,7 +3272,6 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
LhsKind property_kind = Property::GetAssignType(property);
- FeedbackSlot slot = property->PropertyFeedbackSlot();
switch (property_kind) {
case VARIABLE:
UNREACHABLE();
@@ -3096,13 +3279,14 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
builder()->SetExpressionPosition(property);
builder()->LoadNamedProperty(
obj, property->key()->AsLiteral()->AsRawPropertyName(),
- feedback_index(slot));
+ feedback_index(feedback_spec()->AddLoadICSlot()));
break;
}
case KEYED_PROPERTY: {
VisitForAccumulatorValue(property->key());
builder()->SetExpressionPosition(property);
- builder()->LoadKeyedProperty(obj, feedback_index(slot));
+ builder()->LoadKeyedProperty(
+ obj, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
break;
}
case NAMED_SUPER_PROPERTY:
@@ -3225,7 +3409,6 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// Load callee as a global variable.
VariableProxy* proxy = callee_expr->AsVariableProxy();
BuildVariableLoadForAccumulatorValue(proxy->var(),
- proxy->VariableFeedbackSlot(),
proxy->hole_check_mode());
builder()->StoreAccumulatorInRegister(callee);
break;
@@ -3302,7 +3485,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
->MoveRegister(callee, runtime_call_args[0])
.MoveRegister(first_arg, runtime_call_args[1])
.MoveRegister(Register::function_closure(), runtime_call_args[2])
- .LoadLiteral(Smi::FromInt(language_mode()))
+ .LoadLiteral(Smi::FromEnum(language_mode()))
.StoreAccumulatorInRegister(runtime_call_args[3])
.LoadLiteral(Smi::FromInt(current_scope()->start_position()))
.StoreAccumulatorInRegister(runtime_call_args[4])
@@ -3317,7 +3500,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->SetExpressionPosition(expr);
- int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ int feedback_slot_index = feedback_index(feedback_spec()->AddCallICSlot());
if (is_spread_call) {
DCHECK(!implicit_undefined_receiver);
@@ -3338,9 +3521,11 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
SuperCallReference* super = expr->expression()->AsSuperCallReference();
// Prepare the constructor to the super call.
- VisitForAccumulatorValue(super->this_function_var());
+ Register this_function = VisitForRegisterValue(super->this_function_var());
Register constructor = register_allocator()->NewRegister();
- builder()->GetSuperConstructor(constructor);
+ builder()
+ ->LoadAccumulatorWithRegister(this_function)
+ .GetSuperConstructor(constructor);
ZoneList<Expression*>* args = expr->arguments();
RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
@@ -3350,9 +3535,10 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
VisitForAccumulatorValue(super->new_target_var());
builder()->SetExpressionPosition(expr);
+ int feedback_slot_index = feedback_index(feedback_spec()->AddCallICSlot());
+
// When a super call contains a spread, a CallSuper AST node is only created
// if there is exactly one spread, and it is the last argument.
- int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
if (expr->only_last_arg_is_spread()) {
builder()->ConstructWithSpread(constructor, args_regs, feedback_slot_index);
} else {
@@ -3365,6 +3551,24 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// and come up with a better way.
builder()->Construct(constructor, args_regs, feedback_slot_index);
}
+
+ // The derived constructor has the correct bit set always, so we
+ // don't emit code to load and call the initializer if not
+ // required.
+ //
+ // For the arrow function or eval case, we always emit code to load
+ // and call the initializer.
+ //
+ // TODO(gsathya): In the future, we could tag nested arrow functions
+ // or eval with the correct bit so that we do the load conditionally
+ // if required.
+ if (info()->literal()->requires_instance_fields_initializer() ||
+ !IsDerivedConstructor(info()->literal()->kind())) {
+ Register instance = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(instance);
+ BuildInstanceFieldInitialization(this_function, instance);
+ builder()->LoadAccumulatorWithRegister(instance);
+ }
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
@@ -3377,7 +3581,7 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
builder()->SetExpressionPosition(expr);
builder()->LoadAccumulatorWithRegister(constructor);
- int const feedback_slot_index = feedback_index(expr->CallNewFeedbackSlot());
+ int feedback_slot_index = feedback_index(feedback_spec()->AddCallICSlot());
if (expr->only_last_arg_is_spread()) {
builder()->ConstructWithSpread(constructor, args, feedback_slot_index);
} else {
@@ -3409,9 +3613,8 @@ void BytecodeGenerator::VisitForTypeOfValue(Expression* expr) {
// Typeof does not throw a reference error on global variables, hence we
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->AsVariableProxy();
- BuildVariableLoadForAccumulatorValue(
- proxy->var(), proxy->VariableFeedbackSlot(), proxy->hole_check_mode(),
- INSIDE_TYPEOF);
+ BuildVariableLoadForAccumulatorValue(proxy->var(), proxy->hole_check_mode(),
+ INSIDE_TYPEOF);
} else {
VisitForAccumulatorValue(expr);
}
@@ -3460,7 +3663,7 @@ void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForAccumulatorValue(expr->expression());
builder()->SetExpressionPosition(expr);
builder()->UnaryOperation(
- expr->op(), feedback_index(expr->UnaryOperationFeedbackSlot()));
+ expr->op(), feedback_index(feedback_spec()->AddBinaryOpICSlot()));
break;
default:
UNREACHABLE();
@@ -3538,26 +3741,24 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
BuildVariableLoadForAccumulatorValue(proxy->var(),
- proxy->VariableFeedbackSlot(),
proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
- FeedbackSlot slot = property->PropertyFeedbackSlot();
object = VisitForRegisterValue(property->obj());
name = property->key()->AsLiteral()->AsRawPropertyName();
- builder()->LoadNamedProperty(object, name, feedback_index(slot));
+ builder()->LoadNamedProperty(
+ object, name, feedback_index(feedback_spec()->AddLoadICSlot()));
break;
}
case KEYED_PROPERTY: {
- FeedbackSlot slot = property->PropertyFeedbackSlot();
object = VisitForRegisterValue(property->obj());
// Use visit for accumulator here since we need the key in the accumulator
// for the LoadKeyedProperty.
key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
- object, feedback_index(slot));
+ object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -3587,14 +3788,14 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Save result for postfix expressions.
- FeedbackSlot count_slot = expr->CountBinaryOpFeedbackSlot();
+ FeedbackSlot count_slot = feedback_spec()->AddBinaryOpICSlot();
if (is_postfix) {
old_value = register_allocator()->NewRegister();
// Convert old value into a number before saving it.
// TODO(ignition): Think about adding proper PostInc/PostDec bytecodes
- // instead of this ToNumber + Inc/Dec dance.
+ // instead of this ToNumeric + Inc/Dec dance.
builder()
- ->ToNumber(feedback_index(count_slot))
+ ->ToNumeric(feedback_index(count_slot))
.StoreAccumulatorInRegister(old_value);
}
@@ -3603,22 +3804,39 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value.
builder()->SetExpressionPosition(expr);
- FeedbackSlot feedback_slot = expr->CountSlot();
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), expr->op(), feedback_slot,
+ BuildVariableAssignment(proxy->var(), expr->op(),
proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
- builder()->StoreNamedProperty(object, name, feedback_index(feedback_slot),
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
break;
}
case KEYED_PROPERTY: {
- builder()->StoreKeyedProperty(object, key, feedback_index(feedback_slot),
+ FeedbackSlot slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
+ builder()->StoreKeyedProperty(object, key, feedback_index(slot),
language_mode());
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -3658,6 +3876,23 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
}
}
+void BytecodeGenerator::VisitNaryOperation(NaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::COMMA:
+ VisitNaryCommaExpression(expr);
+ break;
+ case Token::OR:
+ VisitNaryLogicalOrExpression(expr);
+ break;
+ case Token::AND:
+ VisitNaryLogicalAndExpression(expr);
+ break;
+ default:
+ VisitNaryArithmeticExpression(expr);
+ break;
+ }
+}
+
void BytecodeGenerator::BuildLiteralCompareNil(Token::Value op, NilValue nil) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
@@ -3706,10 +3941,13 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
builder()->SetExpressionPosition(expr);
- FeedbackSlot slot = expr->CompareOperationFeedbackSlot();
- if (slot.IsInvalid()) {
+ if (expr->op() == Token::IN) {
builder()->CompareOperation(expr->op(), lhs);
+ } else if (expr->op() == Token::INSTANCEOF) {
+ FeedbackSlot slot = feedback_spec()->AddInstanceOfSlot();
+ builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
} else {
+ FeedbackSlot slot = feedback_spec()->AddCompareICSlot();
builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
}
}
@@ -3718,7 +3956,7 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
- FeedbackSlot slot = expr->BinaryOperationFeedbackSlot();
+ FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
Expression* subexpr;
Smi* literal;
if (expr->IsSmiLiteralOperation(&subexpr, &literal)) {
@@ -3734,6 +3972,29 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
}
}
+void BytecodeGenerator::VisitNaryArithmeticExpression(NaryOperation* expr) {
+ // TODO(leszeks): Add support for lhs smi in commutative ops.
+ VisitForAccumulatorValue(expr->first());
+
+ for (size_t i = 0; i < expr->subsequent_length(); ++i) {
+ RegisterAllocationScope register_scope(this);
+ if (expr->subsequent(i)->IsSmiLiteral()) {
+ builder()->SetExpressionPosition(expr->subsequent_op_position(i));
+ builder()->BinaryOperationSmiLiteral(
+ expr->op(), expr->subsequent(i)->AsLiteral()->AsSmiLiteral(),
+ feedback_index(feedback_spec()->AddBinaryOpICSlot()));
+ } else {
+ Register lhs = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(lhs);
+ VisitForAccumulatorValue(expr->subsequent(i));
+ builder()->SetExpressionPosition(expr->subsequent_op_position(i));
+ builder()->BinaryOperation(
+ expr->op(), lhs,
+ feedback_index(feedback_spec()->AddBinaryOpICSlot()));
+ }
+ }
+}
+
void BytecodeGenerator::VisitSpread(Spread* expr) { Visit(expr->expression()); }
void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
@@ -3749,11 +4010,7 @@ void BytecodeGenerator::VisitImportCallExpression(ImportCallExpression* expr) {
}
void BytecodeGenerator::BuildGetIterator(Expression* iterable,
- IteratorType hint,
- FeedbackSlot load_slot,
- FeedbackSlot call_slot,
- FeedbackSlot async_load_slot,
- FeedbackSlot async_call_slot) {
+ IteratorType hint) {
RegisterList args = register_allocator()->NewRegisterList(1);
Register method = register_allocator()->NewRegister();
Register obj = args[0];
@@ -3763,7 +4020,7 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
if (hint == IteratorType::kAsync) {
// Set method to GetMethod(obj, @@asyncIterator)
builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
- obj, feedback_index(async_load_slot));
+ obj, feedback_index(feedback_spec()->AddLoadICSlot()));
BytecodeLabel async_iterator_undefined, async_iterator_null, done;
// TODO(ignition): Add a single opcode for JumpIfNullOrUndefined
@@ -3772,7 +4029,7 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
// Let iterator be Call(method, obj)
builder()->StoreAccumulatorInRegister(method).CallProperty(
- method, args, feedback_index(async_call_slot));
+ method, args, feedback_index(feedback_spec()->AddCallICSlot()));
// If Type(iterator) is not Object, throw a TypeError exception.
builder()->JumpIfJSReceiver(&done);
@@ -3783,11 +4040,13 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
// If method is undefined,
// Let syncMethod be GetMethod(obj, @@iterator)
builder()
- ->LoadIteratorProperty(obj, feedback_index(load_slot))
+ ->LoadIteratorProperty(obj,
+ feedback_index(feedback_spec()->AddLoadICSlot()))
.StoreAccumulatorInRegister(method);
// Let syncIterator be Call(syncMethod, obj)
- builder()->CallProperty(method, args, feedback_index(call_slot));
+ builder()->CallProperty(method, args,
+ feedback_index(feedback_spec()->AddCallICSlot()));
// Return CreateAsyncFromSyncIterator(syncIterator)
// alias `method` register as it's no longer used
@@ -3800,11 +4059,13 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
// Let method be GetMethod(obj, @@iterator).
builder()
->StoreAccumulatorInRegister(obj)
- .LoadIteratorProperty(obj, feedback_index(load_slot))
+ .LoadIteratorProperty(obj,
+ feedback_index(feedback_spec()->AddLoadICSlot()))
.StoreAccumulatorInRegister(method);
// Let iterator be Call(method, obj).
- builder()->CallProperty(method, args, feedback_index(call_slot));
+ builder()->CallProperty(method, args,
+ feedback_index(feedback_spec()->AddCallICSlot()));
// If Type(iterator) is not Object, throw a TypeError exception.
BytecodeLabel no_type_error;
@@ -3816,11 +4077,7 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
builder()->SetExpressionPosition(expr);
- BuildGetIterator(expr->iterable(), expr->hint(),
- expr->IteratorPropertyFeedbackSlot(),
- expr->IteratorCallFeedbackSlot(),
- expr->AsyncIteratorPropertyFeedbackSlot(),
- expr->AsyncIteratorCallFeedbackSlot());
+ BuildGetIterator(expr->iterable(), expr->hint());
}
void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
@@ -3849,57 +4106,165 @@ void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
Visit(binop->right());
}
-void BytecodeGenerator::BuildLogicalTest(Token::Value token, Expression* left,
- Expression* right) {
+void BytecodeGenerator::VisitNaryCommaExpression(NaryOperation* expr) {
+ DCHECK_GT(expr->subsequent_length(), 0);
+
+ VisitForEffect(expr->first());
+ for (size_t i = 0; i < expr->subsequent_length() - 1; ++i) {
+ VisitForEffect(expr->subsequent(i));
+ }
+ Visit(expr->subsequent(expr->subsequent_length() - 1));
+}
+
+void BytecodeGenerator::VisitLogicalTestSubExpression(
+ Token::Value token, Expression* expr, BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels, int coverage_slot) {
+ DCHECK(token == Token::OR || token == Token::AND);
+
+ BytecodeLabels test_next(zone());
+ if (token == Token::OR) {
+ VisitForTest(expr, then_labels, &test_next, TestFallthrough::kElse);
+ } else {
+ DCHECK_EQ(Token::AND, token);
+ VisitForTest(expr, &test_next, else_labels, TestFallthrough::kThen);
+ }
+ test_next.Bind(builder());
+
+ BuildIncrementBlockCoverageCounterIfEnabled(coverage_slot);
+}
+
+void BytecodeGenerator::VisitLogicalTest(Token::Value token, Expression* left,
+ Expression* right,
+ int right_coverage_slot) {
DCHECK(token == Token::OR || token == Token::AND);
TestResultScope* test_result = execution_result()->AsTest();
BytecodeLabels* then_labels = test_result->then_labels();
BytecodeLabels* else_labels = test_result->else_labels();
TestFallthrough fallthrough = test_result->fallthrough();
- {
- // Visit the left side using current TestResultScope.
- BytecodeLabels test_right(zone());
- if (token == Token::OR) {
- test_result->set_fallthrough(TestFallthrough::kElse);
- test_result->set_else_labels(&test_right);
- } else {
- DCHECK_EQ(Token::AND, token);
- test_result->set_fallthrough(TestFallthrough::kThen);
- test_result->set_then_labels(&test_right);
- }
- VisitInSameTestExecutionScope(left);
- test_right.Bind(builder());
- }
- // Visit the right side in a new TestResultScope.
+
+ VisitLogicalTestSubExpression(token, left, then_labels, else_labels,
+ right_coverage_slot);
+ // The last test has the same then, else and fallthrough as the parent test.
VisitForTest(right, then_labels, else_labels, fallthrough);
}
+void BytecodeGenerator::VisitNaryLogicalTest(
+ Token::Value token, NaryOperation* expr,
+ const NaryCodeCoverageSlots* coverage_slots) {
+ DCHECK(token == Token::OR || token == Token::AND);
+ DCHECK_GT(expr->subsequent_length(), 0);
+
+ TestResultScope* test_result = execution_result()->AsTest();
+ BytecodeLabels* then_labels = test_result->then_labels();
+ BytecodeLabels* else_labels = test_result->else_labels();
+ TestFallthrough fallthrough = test_result->fallthrough();
+
+ VisitLogicalTestSubExpression(token, expr->first(), then_labels, else_labels,
+ coverage_slots->GetSlotFor(0));
+ for (size_t i = 0; i < expr->subsequent_length() - 1; ++i) {
+ VisitLogicalTestSubExpression(token, expr->subsequent(i), then_labels,
+ else_labels,
+ coverage_slots->GetSlotFor(i + 1));
+ }
+ // The last test has the same then, else and fallthrough as the parent test.
+ VisitForTest(expr->subsequent(expr->subsequent_length() - 1), then_labels,
+ else_labels, fallthrough);
+}
+
+bool BytecodeGenerator::VisitLogicalOrSubExpression(Expression* expr,
+ BytecodeLabels* end_labels,
+ int coverage_slot) {
+ if (expr->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(expr);
+ end_labels->Bind(builder());
+ return true;
+ } else if (!expr->ToBooleanIsFalse()) {
+ TypeHint type_hint = VisitForAccumulatorValue(expr);
+ builder()->JumpIfTrue(ToBooleanModeFromTypeHint(type_hint),
+ end_labels->New());
+ }
+
+ BuildIncrementBlockCoverageCounterIfEnabled(coverage_slot);
+
+ return false;
+}
+
+bool BytecodeGenerator::VisitLogicalAndSubExpression(Expression* expr,
+ BytecodeLabels* end_labels,
+ int coverage_slot) {
+ if (expr->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(expr);
+ end_labels->Bind(builder());
+ return true;
+ } else if (!expr->ToBooleanIsTrue()) {
+ TypeHint type_hint = VisitForAccumulatorValue(expr);
+ builder()->JumpIfFalse(ToBooleanModeFromTypeHint(type_hint),
+ end_labels->New());
+ }
+
+ BuildIncrementBlockCoverageCounterIfEnabled(coverage_slot);
+
+ return false;
+}
+
void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
+ int right_coverage_slot =
+ AllocateBlockCoverageSlotIfEnabled(binop, SourceRangeKind::kRight);
+
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
if (left->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
} else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
+ BuildIncrementBlockCoverageCounterIfEnabled(right_coverage_slot);
builder()->Jump(test_result->NewElseLabel());
} else {
- BuildLogicalTest(Token::OR, left, right);
+ VisitLogicalTest(Token::OR, left, right, right_coverage_slot);
}
test_result->SetResultConsumedByTest();
} else {
- if (left->ToBooleanIsTrue()) {
- VisitForAccumulatorValue(left);
- } else if (left->ToBooleanIsFalse()) {
- VisitForAccumulatorValue(right);
+ BytecodeLabels end_labels(zone());
+ if (VisitLogicalOrSubExpression(left, &end_labels, right_coverage_slot)) {
+ return;
+ }
+ VisitForAccumulatorValue(right);
+ end_labels.Bind(builder());
+ }
+}
+
+void BytecodeGenerator::VisitNaryLogicalOrExpression(NaryOperation* expr) {
+ Expression* first = expr->first();
+ DCHECK_GT(expr->subsequent_length(), 0);
+
+ NaryCodeCoverageSlots coverage_slots(this, expr);
+
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+ if (first->ToBooleanIsTrue()) {
+ builder()->Jump(test_result->NewThenLabel());
} else {
- BytecodeLabel end_label;
- TypeHint type_hint = VisitForAccumulatorValue(left);
- builder()->JumpIfTrue(ToBooleanModeFromTypeHint(type_hint), &end_label);
- VisitForAccumulatorValue(right);
- builder()->Bind(&end_label);
+ VisitNaryLogicalTest(Token::OR, expr, &coverage_slots);
+ }
+ test_result->SetResultConsumedByTest();
+ } else {
+ BytecodeLabels end_labels(zone());
+ if (VisitLogicalOrSubExpression(first, &end_labels,
+ coverage_slots.GetSlotFor(0))) {
+ return;
+ }
+ for (size_t i = 0; i < expr->subsequent_length() - 1; ++i) {
+ if (VisitLogicalOrSubExpression(expr->subsequent(i), &end_labels,
+ coverage_slots.GetSlotFor(i + 1))) {
+ return;
+ }
}
+ // We have to visit the last value even if it's true, because we need its
+ // actual value.
+ VisitForAccumulatorValue(expr->subsequent(expr->subsequent_length() - 1));
+ end_labels.Bind(builder());
}
}
@@ -3907,28 +4272,60 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
+ int right_coverage_slot =
+ AllocateBlockCoverageSlotIfEnabled(binop, SourceRangeKind::kRight);
+
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
if (left->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
} else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
+ BuildIncrementBlockCoverageCounterIfEnabled(right_coverage_slot);
builder()->Jump(test_result->NewThenLabel());
} else {
- BuildLogicalTest(Token::AND, left, right);
+ VisitLogicalTest(Token::AND, left, right, right_coverage_slot);
}
test_result->SetResultConsumedByTest();
} else {
- if (left->ToBooleanIsFalse()) {
- VisitForAccumulatorValue(left);
- } else if (left->ToBooleanIsTrue()) {
- VisitForAccumulatorValue(right);
+ BytecodeLabels end_labels(zone());
+ if (VisitLogicalAndSubExpression(left, &end_labels, right_coverage_slot)) {
+ return;
+ }
+ VisitForAccumulatorValue(right);
+ end_labels.Bind(builder());
+ }
+}
+
+void BytecodeGenerator::VisitNaryLogicalAndExpression(NaryOperation* expr) {
+ Expression* first = expr->first();
+ DCHECK_GT(expr->subsequent_length(), 0);
+
+ NaryCodeCoverageSlots coverage_slots(this, expr);
+
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+ if (first->ToBooleanIsFalse()) {
+ builder()->Jump(test_result->NewElseLabel());
} else {
- BytecodeLabel end_label;
- TypeHint type_hint = VisitForAccumulatorValue(left);
- builder()->JumpIfFalse(ToBooleanModeFromTypeHint(type_hint), &end_label);
- VisitForAccumulatorValue(right);
- builder()->Bind(&end_label);
+ VisitNaryLogicalTest(Token::AND, expr, &coverage_slots);
+ }
+ test_result->SetResultConsumedByTest();
+ } else {
+ BytecodeLabels end_labels(zone());
+ if (VisitLogicalAndSubExpression(first, &end_labels,
+ coverage_slots.GetSlotFor(0))) {
+ return;
}
+ for (size_t i = 0; i < expr->subsequent_length() - 1; ++i) {
+ if (VisitLogicalAndSubExpression(expr->subsequent(i), &end_labels,
+ coverage_slots.GetSlotFor(i + 1))) {
+ return;
+ }
+ }
+ // We have to visit the last value even if it's false, because we need its
+ // actual value.
+ VisitForAccumulatorValue(expr->subsequent(expr->subsequent_length() - 1));
+ end_labels.Bind(builder());
}
}
@@ -4054,11 +4451,10 @@ void BytecodeGenerator::VisitObjectLiteralAccessor(
}
void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
- LiteralProperty* property,
- int slot_number) {
+ LiteralProperty* property) {
Expression* expr = property->value();
if (FunctionLiteral::NeedsHomeObject(expr)) {
- FeedbackSlot slot = property->GetSlot(slot_number);
+ FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
builder()
->LoadAccumulatorWithRegister(home_object)
.StoreHomeObjectProperty(value, feedback_index(slot), language_mode());
@@ -4077,8 +4473,7 @@ void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
? CreateArgumentsType::kUnmappedArguments
: CreateArgumentsType::kMappedArguments;
builder()->CreateArguments(type);
- BuildVariableAssignment(variable, Token::ASSIGN, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(variable, Token::ASSIGN, HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
@@ -4088,8 +4483,7 @@ void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
// variable.
builder()->CreateArguments(CreateArgumentsType::kRestParameter);
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- BuildVariableAssignment(rest, Token::ASSIGN, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(rest, Token::ASSIGN, HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
@@ -4097,8 +4491,7 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
// Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
- BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
@@ -4119,8 +4512,7 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// Store the new target we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(incoming_new_target_or_generator_);
- BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
- HoleCheckMode::kElided);
+ BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
}
void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
@@ -4142,7 +4534,7 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
GetRegisterForLocalVariable(generator_object_var).index());
} else {
BuildVariableAssignment(generator_object_var, Token::INIT,
- FeedbackSlot::Invalid(), HoleCheckMode::kElided);
+ HoleCheckMode::kElided);
}
}
@@ -4196,6 +4588,14 @@ int BytecodeGenerator::AllocateBlockCoverageSlotIfEnabled(
: block_coverage_builder_->AllocateBlockCoverageSlot(node, kind);
}
+int BytecodeGenerator::AllocateNaryBlockCoverageSlotIfEnabled(
+ NaryOperation* node, size_t index) {
+ return (block_coverage_builder_ == nullptr)
+ ? BlockCoverageBuilder::kNoCoverageArraySlot
+ : block_coverage_builder_->AllocateNaryBlockCoverageSlot(node,
+ index);
+}
+
void BytecodeGenerator::BuildIncrementBlockCoverageCounterIfEnabled(
AstNode* node, SourceRangeKind kind) {
if (block_coverage_builder_ == nullptr) return;
@@ -4351,11 +4751,37 @@ Register BytecodeGenerator::generator_object() const {
return incoming_new_target_or_generator_;
}
+FeedbackVectorSpec* BytecodeGenerator::feedback_spec() {
+ return info()->feedback_vector_spec();
+}
+
int BytecodeGenerator::feedback_index(FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
return FeedbackVector::GetIndex(slot);
}
+FeedbackSlot BytecodeGenerator::GetCachedLoadGlobalICSlot(
+ TypeofMode typeof_mode, Variable* variable) {
+ FeedbackSlot slot = feedback_slot_cache()->Get(typeof_mode, variable);
+ if (!slot.IsInvalid()) {
+ return slot;
+ }
+ slot = feedback_spec()->AddLoadGlobalICSlot(typeof_mode);
+ feedback_slot_cache()->Put(typeof_mode, variable, slot);
+ return slot;
+}
+
+FeedbackSlot BytecodeGenerator::GetCachedCreateClosureSlot(
+ FunctionLiteral* literal) {
+ FeedbackSlot slot = feedback_slot_cache()->Get(literal);
+ if (!slot.IsInvalid()) {
+ return slot;
+ }
+ slot = feedback_spec()->AddCreateClosureSlot();
+ feedback_slot_cache()->Put(literal, slot);
+ return slot;
+}
+
Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() {
return is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy;
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 15bcdddbec..9b7b572db3 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -28,7 +28,8 @@ class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
- explicit BytecodeGenerator(CompilationInfo* info);
+ explicit BytecodeGenerator(CompilationInfo* info,
+ const AstStringConstants* ast_string_constants);
void GenerateBytecode(uintptr_t stack_limit);
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
@@ -53,7 +54,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class CurrentScope;
class ExpressionResultScope;
class EffectResultScope;
+ class FeedbackSlotCache;
class GlobalDeclarationsBuilder;
+ class NaryCodeCoverageSlots;
class RegisterAllocationScope;
class TestResultScope;
class ValueResultScope;
@@ -74,6 +77,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitLogicalOrExpression(BinaryOperation* binop);
void VisitLogicalAndExpression(BinaryOperation* binop);
+ // Dispatched from VisitNaryOperation.
+ void VisitNaryArithmeticExpression(NaryOperation* expr);
+ void VisitNaryCommaExpression(NaryOperation* expr);
+ void VisitNaryLogicalOrExpression(NaryOperation* expr);
+ void VisitNaryLogicalAndExpression(NaryOperation* expr);
+
// Dispatched from VisitUnaryOperation.
void VisitVoid(UnaryOperation* expr);
void VisitTypeOf(UnaryOperation* expr);
@@ -108,15 +117,13 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitPropertyLoadForRegister(Register obj, Property* expr,
Register destination);
- void BuildVariableLoad(Variable* variable, FeedbackSlot slot,
- HoleCheckMode hole_check_mode,
+ void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void BuildVariableLoadForAccumulatorValue(
- Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
+ Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void BuildVariableAssignment(
- Variable* variable, Token::Value op, FeedbackSlot slot,
- HoleCheckMode hole_check_mode,
+ Variable* variable, Token::Value op, HoleCheckMode hole_check_mode,
LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
void BuildLiteralCompareNil(Token::Value compare_op, NilValue nil);
void BuildReturn(int source_position = kNoSourcePosition);
@@ -142,36 +149,47 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildAwait(int suspend_id);
- void BuildGetIterator(Expression* iterable, IteratorType hint,
- FeedbackSlot load_slot, FeedbackSlot call_slot,
- FeedbackSlot async_load_slot,
- FeedbackSlot async_call_slot);
+ void BuildGetIterator(Expression* iterable, IteratorType hint);
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void VisitClassLiteralProperties(ClassLiteral* expr, Register constructor,
- Register prototype);
- void BuildClassLiteralNameProperty(ClassLiteral* expr, Register constructor);
void BuildClassLiteral(ClassLiteral* expr);
void VisitNewTargetVariable(Variable* variable);
void VisitThisFunctionVariable(Variable* variable);
+ void BuildInstanceFieldInitialization(Register constructor,
+ Register instance);
void BuildGeneratorObjectVariableInitialization();
void VisitBlockDeclarationsAndStatements(Block* stmt);
void VisitFunctionClosureForContext();
void VisitSetHomeObject(Register value, Register home_object,
- LiteralProperty* property, int slot_number = 0);
+ LiteralProperty* property);
void VisitObjectLiteralAccessor(Register home_object,
ObjectLiteralProperty* property,
Register value_out);
- void VisitForInAssignment(Expression* expr, FeedbackSlot slot);
+ void VisitForInAssignment(Expression* expr);
void VisitModuleNamespaceImports();
- // Builds a logical OR/AND within a test context by rewiring the jumps based
+ // Visit a logical OR/AND within a test context, rewiring the jumps based
// on the expression values.
- void BuildLogicalTest(Token::Value token, Expression* left,
- Expression* right);
+ void VisitLogicalTest(Token::Value token, Expression* left, Expression* right,
+ int right_coverage_slot);
+ void VisitNaryLogicalTest(Token::Value token, NaryOperation* expr,
+ const NaryCodeCoverageSlots* coverage_slots);
+ // Visit a (non-RHS) test for a logical op, which falls through if the test
+ // fails or jumps to the appropriate labels if it succeeds.
+ void VisitLogicalTestSubExpression(Token::Value token, Expression* expr,
+ BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels,
+ int coverage_slot);
+
+ // Helpers for binary and nary logical op value expressions.
+ bool VisitLogicalOrSubExpression(Expression* expr, BytecodeLabels* end_labels,
+ int coverage_slot);
+ bool VisitLogicalAndSubExpression(Expression* expr,
+ BytecodeLabels* end_labels,
+ int coverage_slot);
// Visit the header/body of a loop iteration.
void VisitIterationHeader(IterationStatement* stmt,
@@ -188,6 +206,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildLoadPropertyKey(LiteralProperty* property, Register out_reg);
int AllocateBlockCoverageSlotIfEnabled(AstNode* node, SourceRangeKind kind);
+ int AllocateNaryBlockCoverageSlotIfEnabled(NaryOperation* node, size_t index);
+
void BuildIncrementBlockCoverageCounterIfEnabled(AstNode* node,
SourceRangeKind kind);
void BuildIncrementBlockCoverageCounterIfEnabled(int coverage_array_slot);
@@ -216,6 +236,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
inline Runtime::FunctionId StoreToSuperRuntimeId();
inline Runtime::FunctionId StoreKeyedToSuperRuntimeId();
+ // Returns a cached slot, or create and cache a new slot if one doesn't
+ // already exists.
+ FeedbackSlot GetCachedLoadGlobalICSlot(TypeofMode typeof_mode,
+ Variable* variable);
+ FeedbackSlot GetCachedCreateClosureSlot(FunctionLiteral* literal);
+
static constexpr ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint) {
return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean
: ToBooleanMode::kConvertToBoolean;
@@ -223,7 +249,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
inline Register generator_object() const;
- inline BytecodeArrayBuilder* builder() const { return builder_; }
+ inline BytecodeArrayBuilder* builder() { return &builder_; }
inline Zone* zone() const { return zone_; }
inline DeclarationScope* closure_scope() const { return closure_scope_; }
inline CompilationInfo* info() const { return info_; }
@@ -246,7 +272,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
execution_result_ = execution_result;
}
ExpressionResultScope* execution_result() const { return execution_result_; }
- BytecodeRegisterAllocator* register_allocator() const {
+ BytecodeRegisterAllocator* register_allocator() {
return builder()->register_allocator();
}
@@ -256,7 +282,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
}
inline LanguageMode language_mode() const;
inline FunctionKind function_kind() const;
- int feedback_index(FeedbackSlot slot) const;
+ inline FeedbackVectorSpec* feedback_spec();
+ inline int feedback_index(FeedbackSlot slot) const;
+
+ inline FeedbackSlotCache* feedback_slot_cache() {
+ return feedback_slot_cache_;
+ }
inline HandlerTable::CatchPrediction catch_prediction() const {
return catch_prediction_;
@@ -266,12 +297,14 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
}
Zone* zone_;
- BytecodeArrayBuilder* builder_;
+ BytecodeArrayBuilder builder_;
CompilationInfo* info_;
const AstStringConstants* ast_string_constants_;
DeclarationScope* closure_scope_;
Scope* current_scope_;
+ FeedbackSlotCache* feedback_slot_cache_;
+
GlobalDeclarationsBuilder* globals_builder_;
BlockCoverageBuilder* block_coverage_builder_;
ZoneVector<GlobalDeclarationsBuilder*> global_declarations_;
@@ -280,6 +313,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
native_function_literals_;
ZoneVector<std::pair<ObjectLiteral*, size_t>> object_literals_;
ZoneVector<std::pair<ArrayLiteral*, size_t>> array_literals_;
+ ZoneVector<std::pair<ClassLiteral*, size_t>> class_literals_;
ZoneVector<std::pair<GetTemplateObject*, size_t>> template_objects_;
ControlScope* execution_control_;
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 9195a72467..04d1e35821 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -134,11 +134,17 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandType& operand_type);
-class BytecodeOperands {
+class BytecodeOperands : public AllStatic {
public:
- // The total number of bytecodes used.
+ // The total number of bytecode operand types used.
static const int kOperandTypeCount = static_cast<int>(OperandType::kLast) + 1;
+// The total number of bytecode operand scales used.
+#define OPERAND_SCALE_COUNT(...) +1
+ static const int kOperandScaleCount =
+ 0 OPERAND_SCALE_LIST(OPERAND_SCALE_COUNT);
+#undef OPERAND_SCALE_COUNT
+
// Returns true if |accumulator_use| reads the accumulator.
static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kRead ||
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index af41e92365..94dc930920 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -232,7 +232,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// Calculate offset so register index values can be mapped into
// a vector of register metadata.
// There is at least one parameter, which is the JS receiver.
- DCHECK(parameter_count != 0);
+ DCHECK_NE(parameter_count, 0);
register_info_table_offset_ =
-Register::FromParameterIndex(0, parameter_count).index();
@@ -298,7 +298,7 @@ void BytecodeRegisterOptimizer::Flush() {
}
} else {
// Equivalernce class containing only unallocated registers.
- DCHECK(reg_info->GetAllocatedEquivalent() == nullptr);
+ DCHECK_NULL(reg_info->GetAllocatedEquivalent());
reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), false);
}
}
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index e7bcd50611..2d3fc2c96e 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -8,6 +8,7 @@
#include <cstdint>
#include <iosfwd>
#include <string>
+#include <vector>
#include "src/globals.h"
#include "src/interpreter/bytecode-operands.h"
@@ -93,11 +94,11 @@ namespace interpreter {
OperandType::kUImm) \
\
/* Propery stores (StoreIC) operations */ \
- V(StaNamedProperty, AccumulatorUse::kRead, OperandType::kReg, \
+ V(StaNamedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaNamedOwnProperty, AccumulatorUse::kRead, OperandType::kReg, \
+ V(StaNamedOwnProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaKeyedProperty, AccumulatorUse::kRead, OperandType::kReg, \
+ V(StaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kFlag8, OperandType::kIdx) \
@@ -109,6 +110,7 @@ namespace interpreter {
V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
V(Div, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Exp, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg, \
@@ -128,6 +130,7 @@ namespace interpreter {
V(MulSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
V(DivSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
V(ModSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
+ V(ExpSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
V(BitwiseOrSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
OperandType::kIdx) \
V(BitwiseXorSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
@@ -208,7 +211,8 @@ namespace interpreter {
V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
V(TestEqualStrictNoFeedback, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(TestUndetectable, AccumulatorUse::kReadWrite) \
V(TestNull, AccumulatorUse::kReadWrite) \
@@ -218,6 +222,7 @@ namespace interpreter {
/* Cast operators */ \
V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
V(ToNumber, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(ToNumeric, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
\
/* Literals */ \
@@ -323,19 +328,20 @@ namespace interpreter {
\
/* Debug Breakpoints - one for each possible size of unscaled bytecodes */ \
/* and one for each operand widening prefix bytecode */ \
- V(DebugBreak0, AccumulatorUse::kRead) \
- V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg) \
- V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
- V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+ V(DebugBreak0, AccumulatorUse::kReadWrite) \
+ V(DebugBreak1, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(DebugBreak2, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kReg) \
- V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+ V(DebugBreak3, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kReg, OperandType::kReg) \
- V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId, \
+ V(DebugBreak4, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kReg, OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak5, AccumulatorUse::kReadWrite, OperandType::kRuntimeId, \
OperandType::kReg, OperandType::kReg) \
- V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId, \
+ V(DebugBreak6, AccumulatorUse::kReadWrite, OperandType::kRuntimeId, \
OperandType::kReg, OperandType::kReg, OperandType::kReg) \
- V(DebugBreakWide, AccumulatorUse::kRead) \
- V(DebugBreakExtraWide, AccumulatorUse::kRead) \
+ V(DebugBreakWide, AccumulatorUse::kReadWrite) \
+ V(DebugBreakExtraWide, AccumulatorUse::kReadWrite) \
\
/* Block Coverage */ \
V(IncBlockCounter, AccumulatorUse::kNone, OperandType::kIdx) \
@@ -441,7 +447,7 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE
};
-class V8_EXPORT_PRIVATE Bytecodes final {
+class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
public:
// The maximum number of operands a bytecode may have.
static const int kMaxOperands = 5;
@@ -464,7 +470,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns bytecode for |value|.
static Bytecode FromByte(uint8_t value) {
Bytecode bytecode = static_cast<Bytecode>(value);
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return bytecode;
}
@@ -503,7 +509,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns how accumulator is used by |bytecode|.
static AccumulatorUse GetAccumulatorUse(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return kAccumulatorUse[static_cast<size_t>(bytecode)];
}
@@ -672,6 +678,12 @@ class V8_EXPORT_PRIVATE Bytecodes final {
bytecode == Bytecode::kDebugBreakWide;
}
+ // Returns true if the bytecode can be lazily deserialized.
+ static constexpr bool IsLazy(Bytecode bytecode) {
+ // Currently, all handlers are deserialized lazily.
+ return true;
+ }
+
// Returns the number of values which |bytecode| returns.
static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0;
@@ -679,7 +691,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns the number of operands expected by |bytecode|.
static int NumberOfOperands(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return kOperandCount[static_cast<size_t>(bytecode)];
}
@@ -694,20 +706,20 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns a pointer to an array of operand types terminated in
// OperandType::kNone.
static const OperandType* GetOperandTypes(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return kOperandTypes[static_cast<size_t>(bytecode)];
}
static bool OperandIsScalableSignedByte(Bytecode bytecode,
int operand_index) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return kOperandTypeInfos[static_cast<size_t>(bytecode)][operand_index] ==
OperandTypeInfo::kScalableSignedByte;
}
static bool OperandIsScalableUnsignedByte(Bytecode bytecode,
int operand_index) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return kOperandTypeInfos[static_cast<size_t>(bytecode)][operand_index] ==
OperandTypeInfo::kScalableUnsignedByte;
}
@@ -730,7 +742,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns the operand sizes of |bytecode| with scale |operand_scale|.
static const OperandSize* GetOperandSizes(Bytecode bytecode,
OperandScale operand_scale) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
DCHECK_GE(operand_scale, OperandScale::kSingle);
DCHECK_LE(operand_scale, OperandScale::kLast);
STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
@@ -747,7 +759,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns the size of the bytecode including its operands for the
// given |operand_scale|.
static int Size(Bytecode bytecode, OperandScale operand_scale) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
OperandScale::kLast == OperandScale::kQuadruple);
int scale_index = static_cast<int>(operand_scale) >> 1;
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index ca2351fcd6..70b8bc5c1a 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/constant-array-builder.h"
+#include <cmath>
#include <functional>
#include <set>
@@ -65,17 +66,50 @@ const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
#if DEBUG
void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
Isolate* isolate) const {
- std::set<Object*> elements;
+ std::set<Smi*> smis;
+ std::set<double> heap_numbers;
+ std::set<const AstRawString*> strings;
+ std::set<const char*> bigints;
+ std::set<const Scope*> scopes;
+ std::set<Object*> deferred_objects;
for (const Entry& entry : constants_) {
- // TODO(leszeks): Ignore jump tables because they have to be contiguous,
- // so they can contain duplicates.
- if (entry.IsJumpTableEntry()) continue;
-
- Handle<Object> handle = entry.ToHandle(isolate);
-
- if (elements.find(*handle) != elements.end()) {
+ bool duplicate = false;
+ switch (entry.tag_) {
+ case Entry::Tag::kSmi:
+ duplicate = !smis.insert(entry.smi_).second;
+ break;
+ case Entry::Tag::kHeapNumber:
+ duplicate = !heap_numbers.insert(entry.heap_number_).second;
+ break;
+ case Entry::Tag::kRawString:
+ duplicate = !strings.insert(entry.raw_string_).second;
+ break;
+ case Entry::Tag::kBigInt:
+ duplicate = !bigints.insert(entry.bigint_.c_str()).second;
+ break;
+ case Entry::Tag::kScope:
+ duplicate = !scopes.insert(entry.scope_).second;
+ break;
+ case Entry::Tag::kHandle:
+ duplicate = !deferred_objects.insert(*entry.handle_).second;
+ break;
+ case Entry::Tag::kDeferred:
+ UNREACHABLE(); // Should be kHandle at this point.
+ case Entry::Tag::kJumpTableSmi:
+ case Entry::Tag::kUninitializedJumpTableSmi:
+ // TODO(leszeks): Ignore jump tables because they have to be contiguous,
+ // so they can contain duplicates.
+ break;
+#define CASE_TAG(NAME, ...) case Entry::Tag::k##NAME:
+ SINGLETON_CONSTANT_ENTRY_TYPES(CASE_TAG)
+#undef CASE_TAG
+ // Singletons are non-duplicated by definition.
+ break;
+ }
+ if (duplicate) {
std::ostringstream os;
- os << "Duplicate constant found: " << Brief(*handle) << std::endl;
+ os << "Duplicate constant found: " << Brief(*entry.ToHandle(isolate))
+ << std::endl;
// Print all the entries in the slice to help debug duplicates.
size_t i = start_index();
for (const Entry& prev_entry : constants_) {
@@ -83,7 +117,6 @@ void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
}
FATAL(os.str().c_str());
}
- elements.insert(*handle);
}
}
#endif
@@ -99,6 +132,7 @@ ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone)
ZoneAllocationPolicy(zone)),
smi_map_(zone),
smi_pairs_(zone),
+ heap_number_map_(zone),
#define INIT_SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) LOWER_NAME##_(-1),
SINGLETON_CONSTANT_ENTRY_TYPES(INIT_SINGLETON_ENTRY_FIELD)
#undef INIT_SINGLETON_ENTRY_FIELD
@@ -153,14 +187,14 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
base::bits::IsPowerOfTwo(static_cast<uint32_t>(array_index)));
#if DEBUG
// Different slices might contain the same element due to reservations, but
- // all elements within a slice should be unique. If this DCHECK fails, then
- // the AST nodes are not being internalized within a CanonicalHandleScope.
+ // all elements within a slice should be unique.
slice->CheckAllElementsAreUnique(isolate);
#endif
// Copy objects from slice into array.
for (size_t i = 0; i < slice->size(); ++i) {
- fixed_array->set(array_index++,
- *slice->At(slice->start_index() + i).ToHandle(isolate));
+ Handle<Object> value =
+ slice->At(slice->start_index() + i).ToHandle(isolate);
+ fixed_array->set(array_index++, *value);
}
// Leave holes where reservations led to unused slots.
size_t padding = slice->capacity() - slice->size();
@@ -181,6 +215,17 @@ size_t ConstantArrayBuilder::Insert(Smi* smi) {
return entry->second;
}
+size_t ConstantArrayBuilder::Insert(double number) {
+ if (std::isnan(number)) return InsertNaN();
+ auto entry = heap_number_map_.find(number);
+ if (entry == heap_number_map_.end()) {
+ index_t index = static_cast<index_t>(AllocateIndex(Entry(number)));
+ heap_number_map_[number] = index;
+ return index;
+ }
+ return entry->second;
+}
+
size_t ConstantArrayBuilder::Insert(const AstRawString* raw_string) {
return constants_map_
.LookupOrInsert(reinterpret_cast<intptr_t>(raw_string),
@@ -190,17 +235,11 @@ size_t ConstantArrayBuilder::Insert(const AstRawString* raw_string) {
->value;
}
-size_t ConstantArrayBuilder::Insert(const AstValue* heap_number) {
- // This method only accepts heap numbers. Other types of ast value should
- // either be passed through as raw values (in the case of strings), use the
- // singleton Insert methods (in the case of symbols), or skip the constant
- // pool entirely and use bytecodes with immediate values (Smis, booleans,
- // undefined, etc.).
- DCHECK(heap_number->IsHeapNumber());
+size_t ConstantArrayBuilder::Insert(AstBigInt bigint) {
return constants_map_
- .LookupOrInsert(reinterpret_cast<intptr_t>(heap_number),
- static_cast<uint32_t>(base::hash_value(heap_number)),
- [&]() { return AllocateIndex(Entry(heap_number)); },
+ .LookupOrInsert(reinterpret_cast<intptr_t>(bigint.c_str()),
+ static_cast<uint32_t>(base::hash_value(bigint.c_str())),
+ [&]() { return AllocateIndex(Entry(bigint)); },
ZoneAllocationPolicy(zone_))
->value;
}
@@ -340,8 +379,11 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
case Tag::kRawString:
return raw_string_->string();
case Tag::kHeapNumber:
- DCHECK(heap_number_->IsHeapNumber());
- return heap_number_->value();
+ return isolate->factory()->NewNumber(heap_number_, TENURED);
+ case Tag::kBigInt:
+ // This should never fail: the parser will never create a BigInt
+ // literal that cannot be allocated.
+ return BigIntLiteral(isolate, bigint_.c_str()).ToHandleChecked();
case Tag::kScope:
return scope_->scope_info();
#define ENTRY_LOOKUP(Name, name) \
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index a50aa3519c..549b2edefc 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#include "src/ast/ast-value-factory.h"
#include "src/globals.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
@@ -21,10 +22,12 @@ namespace interpreter {
// Constant array entries that represent singletons.
#define SINGLETON_CONSTANT_ENTRY_TYPES(V) \
+ V(NaN, nan_value) \
V(IteratorSymbol, iterator_symbol) \
V(AsyncIteratorSymbol, async_iterator_symbol) \
V(HomeObjectSymbol, home_object_symbol) \
- V(EmptyFixedArray, empty_fixed_array)
+ V(EmptyFixedArray, empty_fixed_array) \
+ V(ClassFieldsSymbol, class_fields_symbol)
// A helper class for constructing constant arrays for the
// interpreter. Each instance of this class is intended to be used to
@@ -58,8 +61,9 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
// Insert an object into the constants array if it is not already present.
// Returns the array index associated with the object.
size_t Insert(Smi* smi);
+ size_t Insert(double number);
size_t Insert(const AstRawString* raw_string);
- size_t Insert(const AstValue* heap_number);
+ size_t Insert(AstBigInt bigint);
size_t Insert(const Scope* scope);
#define INSERT_ENTRY(NAME, ...) size_t Insert##NAME();
SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
@@ -97,16 +101,19 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
private:
typedef uint32_t index_t;
+ struct ConstantArraySlice;
+
class Entry {
private:
enum class Tag : uint8_t;
public:
explicit Entry(Smi* smi) : smi_(smi), tag_(Tag::kSmi) {}
+ explicit Entry(double heap_number)
+ : heap_number_(heap_number), tag_(Tag::kHeapNumber) {}
explicit Entry(const AstRawString* raw_string)
: raw_string_(raw_string), tag_(Tag::kRawString) {}
- explicit Entry(const AstValue* heap_number)
- : heap_number_(heap_number), tag_(Tag::kHeapNumber) {}
+ explicit Entry(AstBigInt bigint) : bigint_(bigint), tag_(Tag::kBigInt) {}
explicit Entry(const Scope* scope) : scope_(scope), tag_(Tag::kScope) {}
#define CONSTRUCT_ENTRY(NAME, LOWER_NAME) \
@@ -128,13 +135,13 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
}
void SetDeferred(Handle<Object> handle) {
- DCHECK(tag_ == Tag::kDeferred);
+ DCHECK_EQ(tag_, Tag::kDeferred);
tag_ = Tag::kHandle;
handle_ = handle;
}
void SetJumpTableSmi(Smi* smi) {
- DCHECK(tag_ == Tag::kUninitializedJumpTableSmi);
+ DCHECK_EQ(tag_, Tag::kUninitializedJumpTableSmi);
tag_ = Tag::kJumpTableSmi;
smi_ = smi;
}
@@ -147,8 +154,9 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
union {
Handle<Object> handle_;
Smi* smi_;
+ double heap_number_;
const AstRawString* raw_string_;
- const AstValue* heap_number_;
+ AstBigInt bigint_;
const Scope* scope_;
};
@@ -158,6 +166,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
kSmi,
kRawString,
kHeapNumber,
+ kBigInt,
kScope,
kUninitializedJumpTableSmi,
kJumpTableSmi,
@@ -165,6 +174,11 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_TAG)
#undef ENTRY_TAG
} tag_;
+
+#if DEBUG
+ // Required by CheckAllElementsAreUnique().
+ friend struct ConstantArraySlice;
+#endif
};
index_t AllocateIndex(Entry constant_entry);
@@ -212,6 +226,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
constants_map_;
ZoneMap<Smi*, index_t> smi_map_;
ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+ ZoneMap<double, index_t> heap_number_map_;
#define SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) int LOWER_NAME##_;
SINGLETON_CONSTANT_ENTRY_TYPES(SINGLETON_ENTRY_FIELD)
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index baf4ae55e3..ea316f286f 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -117,6 +117,12 @@ void SwitchBuilder::SetCaseTarget(int index, CaseClause* clause) {
}
}
+TryCatchBuilder::~TryCatchBuilder() {
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(
+ statement_, SourceRangeKind::kContinuation);
+ }
+}
void TryCatchBuilder::BeginTry(Register context) {
builder()->MarkTryBegin(handler_id_, context);
@@ -128,11 +134,21 @@ void TryCatchBuilder::EndTry() {
builder()->Jump(&exit_);
builder()->Bind(&handler_);
builder()->MarkHandler(handler_id_, catch_prediction_);
-}
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(statement_,
+ SourceRangeKind::kCatch);
+ }
+}
void TryCatchBuilder::EndCatch() { builder()->Bind(&exit_); }
+TryFinallyBuilder::~TryFinallyBuilder() {
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(
+ statement_, SourceRangeKind::kContinuation);
+ }
+}
void TryFinallyBuilder::BeginTry(Register context) {
builder()->MarkTryBegin(handler_id_, context);
@@ -154,7 +170,14 @@ void TryFinallyBuilder::BeginHandler() {
builder()->MarkHandler(handler_id_, catch_prediction_);
}
-void TryFinallyBuilder::BeginFinally() { finalization_sites_.Bind(builder()); }
+void TryFinallyBuilder::BeginFinally() {
+ finalization_sites_.Bind(builder());
+
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(statement_,
+ SourceRangeKind::kFinally);
+ }
+}
void TryFinallyBuilder::EndFinally() {
// Nothing to be done here.
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 0e9852f757..4a81b1f205 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -188,10 +188,16 @@ class V8_EXPORT_PRIVATE SwitchBuilder final
class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
public:
TryCatchBuilder(BytecodeArrayBuilder* builder,
+ BlockCoverageBuilder* block_coverage_builder,
+ TryCatchStatement* statement,
HandlerTable::CatchPrediction catch_prediction)
: ControlFlowBuilder(builder),
handler_id_(builder->NewHandlerEntry()),
- catch_prediction_(catch_prediction) {}
+ catch_prediction_(catch_prediction),
+ block_coverage_builder_(block_coverage_builder),
+ statement_(statement) {}
+
+ ~TryCatchBuilder();
void BeginTry(Register context);
void EndTry();
@@ -202,6 +208,9 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
HandlerTable::CatchPrediction catch_prediction_;
BytecodeLabel handler_;
BytecodeLabel exit_;
+
+ BlockCoverageBuilder* block_coverage_builder_;
+ TryCatchStatement* statement_;
};
@@ -209,11 +218,17 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
public:
TryFinallyBuilder(BytecodeArrayBuilder* builder,
+ BlockCoverageBuilder* block_coverage_builder,
+ TryFinallyStatement* statement,
HandlerTable::CatchPrediction catch_prediction)
: ControlFlowBuilder(builder),
handler_id_(builder->NewHandlerEntry()),
catch_prediction_(catch_prediction),
- finalization_sites_(builder->zone()) {}
+ finalization_sites_(builder->zone()),
+ block_coverage_builder_(block_coverage_builder),
+ statement_(statement) {}
+
+ ~TryFinallyBuilder();
void BeginTry(Register context);
void LeaveTry();
@@ -229,6 +244,9 @@ class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
// Unbound labels that identify jumps to the finally block in the code.
BytecodeLabels finalization_sites_;
+
+ BlockCoverageBuilder* block_coverage_builder_;
+ TryFinallyStatement* statement_;
};
class V8_EXPORT_PRIVATE ConditionalControlFlowBuilder final
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 2db780b979..e4cc104b76 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -323,7 +323,7 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
// Read the most signicant bytecode into bytes[0] and then in order
// down to least significant in bytes[count - 1].
- DCHECK(count <= kMaxCount);
+ DCHECK_LE(count, kMaxCount);
compiler::Node* bytes[kMaxCount];
for (int i = 0; i < count; i++) {
MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
@@ -484,8 +484,8 @@ Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
}
Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
- DCHECK(OperandType::kIdx ==
- Bytecodes::GetOperandType(bytecode_, operand_index));
+ DCHECK_EQ(OperandType::kIdx,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return BytecodeUnsignedOperand(operand_index, operand_size);
@@ -509,8 +509,8 @@ Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
}
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
- DCHECK(OperandType::kRuntimeId ==
- Bytecodes::GetOperandType(bytecode_, operand_index));
+ DCHECK_EQ(OperandType::kRuntimeId,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
DCHECK_EQ(operand_size, OperandSize::kShort);
@@ -519,8 +519,8 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
int operand_index) {
- DCHECK(OperandType::kNativeContextIndex ==
- Bytecodes::GetOperandType(bytecode_, operand_index));
+ DCHECK_EQ(OperandType::kNativeContextIndex,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeUint32ToWord(
@@ -528,8 +528,8 @@ Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
}
Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
- DCHECK(OperandType::kIntrinsicId ==
- Bytecodes::GetOperandType(bytecode_, operand_index));
+ DCHECK_EQ(OperandType::kIntrinsicId,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
DCHECK_EQ(operand_size, OperandSize::kByte);
@@ -564,7 +564,7 @@ void InterpreterAssembler::CallPrologue() {
}
if (FLAG_debug_code && !disable_stack_check_across_call_) {
- DCHECK(stack_pointer_before_call_ == nullptr);
+ DCHECK_NULL(stack_pointer_before_call_);
stack_pointer_before_call_ = LoadStackPointer();
}
bytecode_array_valid_ = false;
@@ -581,80 +581,95 @@ void InterpreterAssembler::CallEpilogue() {
}
}
-Node* InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
- Node* slot_id) {
+void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
+ Node* slot_id) {
Comment("increment call count");
Node* call_count =
LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize);
Node* new_count = SmiAdd(call_count, SmiConstant(1));
// Count is Smi, so we don't need a write barrier.
- return StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
- SKIP_WRITE_BARRIER, kPointerSize);
+ StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
+ SKIP_WRITE_BARRIER, kPointerSize);
}
-void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
- Node* feedback_vector,
- Node* slot_id) {
+void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
+ Node* feedback_vector,
+ Node* slot_id) {
Label extra_checks(this, Label::kDeferred), done(this);
- // Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
-
// Check if we have monomorphic {target} feedback already.
Node* feedback_element = LoadFeedbackVectorSlot(feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
- Branch(WordEqual(target, feedback_value), &done, &extra_checks);
+ Comment("check if monomorphic");
+ Node* is_monomorphic = WordEqual(target, feedback_value);
+ GotoIf(is_monomorphic, &done);
+
+ // Check if it is a megamorphic {target}.
+ Comment("check if megamorphic");
+ Node* is_megamorphic =
+ WordEqual(feedback_element,
+ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ Branch(is_megamorphic, &done, &extra_checks);
BIND(&extra_checks);
{
- Label check_initialized(this), initialize(this), mark_megamorphic(this);
-
- // Check if it is a megamorphic {target}.
- Comment("check if megamorphic");
- Node* is_megamorphic =
- WordEqual(feedback_element,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
- GotoIf(is_megamorphic, &done);
+ Label initialize(this), mark_megamorphic(this);
Comment("check if weak cell");
- Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
- LoadRoot(Heap::kWeakCellMapRootIndex));
- GotoIfNot(is_weak_cell, &check_initialized);
+ Node* is_uninitialized = WordEqual(
+ feedback_element,
+ HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
+ GotoIf(is_uninitialized, &initialize);
+ CSA_ASSERT(this, IsWeakCell(feedback_element));
// If the weak cell is cleared, we have a new chance to become monomorphic.
Comment("check if weak cell is cleared");
Node* is_smi = TaggedIsSmi(feedback_value);
Branch(is_smi, &initialize, &mark_megamorphic);
- BIND(&check_initialized);
- {
- // Check if it is uninitialized.
- Comment("check if uninitialized");
- Node* is_uninitialized = WordEqual(
- feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
- Branch(is_uninitialized, &initialize, &mark_megamorphic);
- }
-
BIND(&initialize);
{
- // Check if {target} is a JSFunction in the current native
- // context.
+ // Check if {target} is a JSFunction in the current native context.
Comment("check if function in same native context");
GotoIf(TaggedIsSmi(target), &mark_megamorphic);
- // TODO(bmeurer): Add support for arbitrary callables here, and
- // check via GetFunctionRealm (see src/objects.cc).
- GotoIfNot(IsJSFunction(target), &mark_megamorphic);
- Node* target_context =
- LoadObjectField(target, JSFunction::kContextOffset);
- Node* target_native_context = LoadNativeContext(target_context);
- GotoIfNot(WordEqual(LoadNativeContext(context), target_native_context),
- &mark_megamorphic);
-
- CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id), target);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ // Check if the {target} is a JSFunction or JSBoundFunction
+ // in the current native context.
+ VARIABLE(var_current, MachineRepresentation::kTagged, target);
+ Label loop(this, &var_current), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Label if_boundfunction(this), if_function(this);
+ Node* current = var_current.value();
+ CSA_ASSERT(this, TaggedIsNotSmi(current));
+ Node* current_instance_type = LoadInstanceType(current);
+ GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
+ &if_boundfunction);
+ Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
+ &if_function, &mark_megamorphic);
+
+ BIND(&if_function);
+ {
+ // Check that the JSFunction {current} is in the current native
+ // context.
+ Node* current_context =
+ LoadObjectField(current, JSFunction::kContextOffset);
+ Node* current_native_context = LoadNativeContext(current_context);
+ Branch(WordEqual(LoadNativeContext(context), current_native_context),
+ &done_loop, &mark_megamorphic);
+ }
+
+ BIND(&if_boundfunction);
+ {
+ // Continue with the [[BoundTargetFunction]] of {target}.
+ var_current.Bind(LoadObjectField(
+ current, JSBoundFunction::kBoundTargetFunctionOffset));
+ Goto(&loop);
+ }
+ }
+ BIND(&done_loop);
+ CreateWeakCellInFeedbackVector(feedback_vector, slot_id, target);
+ ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
Goto(&done);
}
@@ -668,10 +683,8 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Call:TransitionMegamorphic");
Goto(&done);
}
}
@@ -679,6 +692,16 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
BIND(&done);
}
+void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
+ Node* feedback_vector,
+ Node* slot_id) {
+ // Increment the call count.
+ IncrementCallCount(feedback_vector, slot_id);
+
+ // Collect the callable {target} feedback.
+ CollectCallableFeedback(target, context, feedback_vector, slot_id);
+}
+
void InterpreterAssembler::CallJSAndDispatch(
Node* function, Node* context, Node* first_arg, Node* arg_count,
ConvertReceiverMode receiver_mode) {
@@ -819,24 +842,50 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
BIND(&initialize);
{
- // Check if {new_target} is a JSFunction in the current native context.
- Label create_allocation_site(this), create_weak_cell(this);
Comment("check if function in same native context");
GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
- // TODO(bmeurer): Add support for arbitrary constructors here, and
- // check via GetFunctionRealm (see src/objects.cc).
- GotoIfNot(IsJSFunction(new_target), &mark_megamorphic);
- Node* new_target_context =
- LoadObjectField(new_target, JSFunction::kContextOffset);
- Node* new_target_native_context = LoadNativeContext(new_target_context);
- GotoIfNot(
- WordEqual(LoadNativeContext(context), new_target_native_context),
- &mark_megamorphic);
+ // Check if the {new_target} is a JSFunction or JSBoundFunction
+ // in the current native context.
+ VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
+ Label loop(this, &var_current), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Label if_boundfunction(this), if_function(this);
+ Node* current = var_current.value();
+ CSA_ASSERT(this, TaggedIsNotSmi(current));
+ Node* current_instance_type = LoadInstanceType(current);
+ GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
+ &if_boundfunction);
+ Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
+ &if_function, &mark_megamorphic);
+
+ BIND(&if_function);
+ {
+ // Check that the JSFunction {current} is in the current native
+ // context.
+ Node* current_context =
+ LoadObjectField(current, JSFunction::kContextOffset);
+ Node* current_native_context = LoadNativeContext(current_context);
+ Branch(WordEqual(LoadNativeContext(context), current_native_context),
+ &done_loop, &mark_megamorphic);
+ }
+
+ BIND(&if_boundfunction);
+ {
+ // Continue with the [[BoundTargetFunction]] of {current}.
+ var_current.Bind(LoadObjectField(
+ current, JSBoundFunction::kBoundTargetFunctionOffset));
+ Goto(&loop);
+ }
+ }
+ BIND(&done_loop);
// Create an AllocationSite if {target} and {new_target} refer
// to the current native context's Array constructor.
+ Label create_allocation_site(this), create_weak_cell(this);
GotoIfNot(WordEqual(target, new_target), &create_weak_cell);
- Node* array_function = LoadContextElement(new_target_native_context,
+ Node* array_function = LoadContextElement(LoadNativeContext(context),
Context::ARRAY_FUNCTION_INDEX);
Branch(WordEqual(target, array_function), &create_allocation_site,
&create_weak_cell);
@@ -845,21 +894,16 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
{
var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
SmiTag(slot_id)));
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Construct:CreateAllocationSite");
Goto(&construct_array);
}
BIND(&create_weak_cell);
{
- CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
- new_target);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ CreateWeakCellInFeedbackVector(feedback_vector, slot_id, new_target);
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Construct:CreateWeakCell");
Goto(&construct);
}
}
@@ -874,10 +918,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Construct:TransitionMegamorphic");
Goto(&construct);
}
}
@@ -964,25 +1006,47 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
BIND(&initialize);
{
- // Check if {new_target} is a JSFunction in the current native
- // context.
Comment("check if function in same native context");
GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
- // TODO(bmeurer): Add support for arbitrary constructors here, and
- // check via GetFunctionRealm (see src/objects.cc).
- GotoIfNot(IsJSFunction(new_target), &mark_megamorphic);
- Node* target_context =
- LoadObjectField(new_target, JSFunction::kContextOffset);
- Node* target_native_context = LoadNativeContext(target_context);
- GotoIfNot(WordEqual(LoadNativeContext(context), target_native_context),
- &mark_megamorphic);
-
- CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
- new_target);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ // Check if the {new_target} is a JSFunction or JSBoundFunction
+ // in the current native context.
+ VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
+ Label loop(this, &var_current), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Label if_boundfunction(this), if_function(this);
+ Node* current = var_current.value();
+ CSA_ASSERT(this, TaggedIsNotSmi(current));
+ Node* current_instance_type = LoadInstanceType(current);
+ GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
+ &if_boundfunction);
+ Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
+ &if_function, &mark_megamorphic);
+
+ BIND(&if_function);
+ {
+ // Check that the JSFunction {current} is in the current native
+ // context.
+ Node* current_context =
+ LoadObjectField(current, JSFunction::kContextOffset);
+ Node* current_native_context = LoadNativeContext(current_context);
+ Branch(WordEqual(LoadNativeContext(context), current_native_context),
+ &done_loop, &mark_megamorphic);
+ }
+
+ BIND(&if_boundfunction);
+ {
+ // Continue with the [[BoundTargetFunction]] of {current}.
+ var_current.Bind(LoadObjectField(
+ current, JSBoundFunction::kBoundTargetFunctionOffset));
+ Goto(&loop);
+ }
+ }
+ BIND(&done_loop);
+ CreateWeakCellInFeedbackVector(feedback_vector, slot_id, new_target);
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "ConstructWithSpread:Initialize");
Goto(&construct);
}
@@ -996,10 +1060,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
- // Reset profiler ticks.
- StoreObjectFieldNoWriteBarrier(feedback_vector,
- FeedbackVector::kProfilerTicksOffset,
- SmiConstant(0));
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "ConstructWithSpread:TransitionMegamorphic");
Goto(&construct);
}
}
@@ -1257,92 +1319,6 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
-Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
- Node* context, Node* value, Variable* var_type_feedback) {
- // We might need to loop once due to ToNumber conversion.
- Variable var_value(this, MachineRepresentation::kTagged),
- var_result(this, MachineRepresentation::kWord32);
- Variable* loop_vars[] = {&var_value, var_type_feedback};
- Label loop(this, 2, loop_vars), done_loop(this, &var_result);
- var_value.Bind(value);
- var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {value}.
- value = var_value.value();
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- BIND(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToWord32(value));
- var_type_feedback->Bind(
- SmiOr(var_type_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kSignedSmall)));
- Goto(&done_loop);
- }
-
- BIND(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Node* value_map = LoadMap(value);
- Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
- &if_valueisnotheapnumber);
-
- BIND(&if_valueisheapnumber);
- {
- // Truncate the floating point value.
- var_result.Bind(TruncateHeapNumberValueToWord32(value));
- var_type_feedback->Bind(
- SmiOr(var_type_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kNumber)));
- Goto(&done_loop);
- }
-
- BIND(&if_valueisnotheapnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_type_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kNone)));
-
- Label if_valueisoddball(this),
- if_valueisnotoddball(this, Label::kDeferred);
- Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
- Int32Constant(ODDBALL_TYPE));
- Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
-
- BIND(&if_valueisoddball);
- {
- // Convert Oddball to a Number and perform checks again.
- var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback->Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&loop);
- }
-
- BIND(&if_valueisnotoddball);
- {
- // Convert the {value} to a Number first.
- var_value.Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, value));
- var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&loop);
- }
- }
- }
- }
- BIND(&done_loop);
- return var_result.value();
-}
-
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// TODO(rmcilroy): Investigate whether it is worth supporting self
// optimization of primitive functions like FullCodegen.
@@ -1368,14 +1344,6 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
UpdateInterruptBudget(profiling_weight, true);
}
-Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
- Node* sp = LoadStackPointer();
- Node* stack_limit = Load(
- MachineType::Pointer(),
- ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
- return UintPtrLessThan(sp, stack_limit);
-}
-
Node* InterpreterAssembler::LoadOSRNestingLevel() {
return LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kOSRNestingLevelOffset,
@@ -1549,6 +1517,77 @@ int InterpreterAssembler::CurrentBytecodeSize() const {
return Bytecodes::Size(bytecode_, operand_scale_);
}
+void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
+ Node* object = GetAccumulator();
+ Node* context = GetContext();
+
+ Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
+ if_objectisother(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(object), &if_objectissmi);
+ Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);
+
+ BIND(&if_objectissmi);
+ {
+ var_result.Bind(object);
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ Goto(&if_done);
+ }
+
+ BIND(&if_objectisheapnumber);
+ {
+ var_result.Bind(object);
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ Goto(&if_done);
+ }
+
+ BIND(&if_objectisother);
+ {
+ auto builtin = Builtins::kNonNumberToNumber;
+ if (mode == Object::Conversion::kToNumeric) {
+ builtin = Builtins::kNonNumberToNumeric;
+ // Special case for collecting BigInt feedback.
+ Label not_bigint(this);
+ GotoIfNot(IsBigInt(object), &not_bigint);
+ {
+ var_result.Bind(object);
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ Goto(&if_done);
+ }
+ BIND(&not_bigint);
+ }
+
+ // Convert {object} by calling out to the appropriate builtin.
+ var_result.Bind(CallBuiltin(builtin, context, object));
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+
+ // Record the type feedback collected for {object}.
+ Node* slot_index = BytecodeOperandIdx(0);
+ Node* feedback_vector = LoadFeedbackVector();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+
+ SetAccumulator(var_result.value());
+ Dispatch();
+}
+
+void InterpreterAssembler::DeserializeLazyAndDispatch() {
+ Node* context = GetContext();
+ Node* bytecode_offset = BytecodeOffset();
+ Node* bytecode = LoadBytecode(bytecode_offset);
+
+ Node* target_handler =
+ CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
+ SmiTag(bytecode), SmiConstant(operand_scale()));
+
+ DispatchToBytecodeHandler(target_handler, bytecode_offset);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 312fa3198d..2b38508441 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -119,14 +119,21 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Increment the call count for a CALL_IC or construct call.
// The call count is located at feedback_vector[slot_id + 1].
- compiler::Node* IncrementCallCount(compiler::Node* feedback_vector,
- compiler::Node* slot_id);
+ void IncrementCallCount(compiler::Node* feedback_vector,
+ compiler::Node* slot_id);
+
+ // Collect the callable |target| feedback for either a CALL_IC or
+ // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
+ void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
+ compiler::Node* feedback_vector,
+ compiler::Node* slot_id);
// Collect CALL_IC feedback for |target| function in the
- // |feedback_vector| at |slot_id|.
+ // |feedback_vector| at |slot_id|, and the call counts in
+ // the |feedback_vector| at |slot_id+1|.
void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ compiler::Node* feedback_vector,
+ compiler::Node* slot_id);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|, possibly
@@ -201,9 +208,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
- // Returns true if the stack guard check triggers an interrupt.
- compiler::Node* StackCheckTriggeredInterrupt();
-
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
@@ -221,12 +225,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
- // Truncate tagged |value| to word32 and store the type feedback in
- // |var_type_feedback|.
- compiler::Node* TruncateTaggedToWord32WithFeedback(
- compiler::Node* context, compiler::Node* value,
- Variable* var_type_feedback);
-
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
@@ -245,6 +243,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
Bytecode bytecode() const { return bytecode_; }
static bool TargetSupportsUnalignedAccess();
+ void ToNumberOrNumeric(Object::Conversion mode);
+
+ // Lazily deserializes the current bytecode's handler and tail-calls into it.
+ void DeserializeLazyAndDispatch();
+
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 6b2e2d8190..1665aff29b 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -12,7 +12,6 @@
#include "src/code-events.h"
#include "src/code-factory.h"
#include "src/debug/debug.h"
-#include "src/factory.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/binary-op-assembler.h"
#include "src/interpreter/bytecode-flags.h"
@@ -277,7 +276,8 @@ class InterpreterStoreGlobalAssembler : public InterpreterAssembler {
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
IGNITION_HANDLER(StaGlobalSloppy, InterpreterStoreGlobalAssembler) {
- Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(isolate(), SLOPPY);
+ Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(
+ isolate(), LanguageMode::kSloppy);
StaGlobal(ic);
}
@@ -286,7 +286,8 @@ IGNITION_HANDLER(StaGlobalSloppy, InterpreterStoreGlobalAssembler) {
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedBackVector slot <slot> in strict mode.
IGNITION_HANDLER(StaGlobalStrict, InterpreterStoreGlobalAssembler) {
- Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(isolate(), STRICT);
+ Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(
+ isolate(), LanguageMode::kStrict);
StaGlobal(ic);
}
@@ -310,8 +311,14 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
- // Same as LdaContextSlot, should never be called.
- UNREACHABLE();
+ Node* reg_index = BytecodeOperandReg(0);
+ Node* context = LoadRegister(reg_index);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* depth = BytecodeOperandUImm(2);
+ Node* slot_context = GetContextAtDepth(context, depth);
+ Node* result = LoadContextElement(slot_context, slot_index);
+ SetAccumulator(result);
+ Dispatch();
}
// LdaCurrentContextSlot <slot_index>
@@ -329,8 +336,11 @@ IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
//
// Load the object in |slot_index| of the current context into the accumulator.
IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
- // Same as LdaCurrentContextSlot, should never be called.
- UNREACHABLE();
+ Node* slot_index = BytecodeOperandIdx(0);
+ Node* slot_context = GetContext();
+ Node* result = LoadContextElement(slot_context, slot_index);
+ SetAccumulator(result);
+ Dispatch();
}
// StaContextSlot <context> <slot_index> <depth>
@@ -511,8 +521,8 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
Variable var_result(this, MachineRepresentation::kTagged);
Label sloppy(this), strict(this), end(this);
- DCHECK_EQ(0, SLOPPY);
- DCHECK_EQ(1, STRICT);
+ DCHECK_EQ(0, LanguageMode::kSloppy);
+ DCHECK_EQ(1, LanguageMode::kStrict);
DCHECK_EQ(0, static_cast<int>(LookupHoistingMode::kNormal));
DCHECK_EQ(1, static_cast<int>(LookupHoistingMode::kLegacySloppy));
Branch(IsSetWord32<StoreLookupSlotFlags::LanguageModeBit>(bytecode_flags),
@@ -630,8 +640,12 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
Node* smi_slot = SmiTag(raw_slot);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- CallStub(ic.descriptor(), code_target, context, object, name, value,
- smi_slot, feedback_vector);
+ Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
+ value, smi_slot, feedback_vector);
+ // It doesn't really matter what we write to the accumulator here, since we
+ // restore to the correct value on the outside. Storing the result means we
+ // don't need to keep unnecessary state alive across the callstub.
+ SetAccumulator(result);
Dispatch();
}
};
@@ -672,8 +686,12 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Node* smi_slot = SmiTag(raw_slot);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot,
- feedback_vector);
+ Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
+ value, smi_slot, feedback_vector);
+ // It doesn't really matter what we write to the accumulator here, since we
+ // restore to the correct value on the outside. Storing the result means we
+ // don't need to keep unnecessary state alive across the callstub.
+ SetAccumulator(result);
Dispatch();
}
@@ -892,6 +910,13 @@ IGNITION_HANDLER(Mod, InterpreterBinaryOpAssembler) {
BinaryOpWithFeedback(&BinaryOpAssembler::Generate_ModulusWithFeedback);
}
+// Exp <src>
+//
+// Exponentiate register <src> (base) with accumulator (exponent).
+IGNITION_HANDLER(Exp, InterpreterBinaryOpAssembler) {
+ BinaryOpWithFeedback(&BinaryOpAssembler::Generate_ExponentiateWithFeedback);
+}
+
// AddSmi <imm>
//
// Adds an immediate value <imm> to the value in the accumulator.
@@ -927,6 +952,14 @@ IGNITION_HANDLER(ModSmi, InterpreterBinaryOpAssembler) {
BinaryOpSmiWithFeedback(&BinaryOpAssembler::Generate_ModulusWithFeedback);
}
+// ExpSmi <imm>
+//
+// Exponentiate accumulator (base) with immediate value <imm> (exponent).
+IGNITION_HANDLER(ExpSmi, InterpreterBinaryOpAssembler) {
+ BinaryOpSmiWithFeedback(
+ &BinaryOpAssembler::Generate_ExponentiateWithFeedback);
+}
+
class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
public:
InterpreterBitwiseBinaryOpAssembler(CodeAssemblerState* state,
@@ -934,74 +967,86 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- void BitwiseBinaryOpWithFeedback(Token::Value bitwise_op) {
+ void BitwiseBinaryOpWithFeedback(Operation bitwise_op) {
Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
- Node* rhs = GetAccumulator();
+ Node* left = LoadRegister(reg_index);
+ Node* right = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned),
- var_rhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value = TruncateTaggedToWord32WithFeedback(
- context, lhs, &var_lhs_type_feedback);
- Node* rhs_value = TruncateTaggedToWord32WithFeedback(
- context, rhs, &var_rhs_type_feedback);
- Node* result = nullptr;
-
- switch (bitwise_op) {
- case Token::BIT_OR: {
- Node* value = Word32Or(lhs_value, rhs_value);
- result = ChangeInt32ToTagged(value);
- } break;
- case Token::BIT_AND: {
- Node* value = Word32And(lhs_value, rhs_value);
- result = ChangeInt32ToTagged(value);
- } break;
- case Token::BIT_XOR: {
- Node* value = Word32Xor(lhs_value, rhs_value);
- result = ChangeInt32ToTagged(value);
- } break;
- case Token::SHL: {
- Node* value =
- Word32Shl(lhs_value, Word32And(rhs_value, Int32Constant(0x1f)));
- result = ChangeInt32ToTagged(value);
- } break;
- case Token::SHR: {
- Node* value =
- Word32Shr(lhs_value, Word32And(rhs_value, Int32Constant(0x1f)));
- result = ChangeUint32ToTagged(value);
- } break;
- case Token::SAR: {
- Node* value =
- Word32Sar(lhs_value, Word32And(rhs_value, Int32Constant(0x1f)));
- result = ChangeInt32ToTagged(value);
- } break;
- default:
- UNREACHABLE();
- }
-
+ VARIABLE(var_left_feedback, MachineRepresentation::kTaggedSigned);
+ VARIABLE(var_right_feedback, MachineRepresentation::kTaggedSigned);
+ VARIABLE(var_left_word32, MachineRepresentation::kWord32);
+ VARIABLE(var_right_word32, MachineRepresentation::kWord32);
+ VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left);
+ VARIABLE(var_right_bigint, MachineRepresentation::kTagged);
+ Label if_left_number(this), do_number_op(this);
+ Label if_left_bigint(this), do_bigint_op(this);
+
+ TaggedToWord32OrBigIntWithFeedback(context, left, &if_left_number,
+ &var_left_word32, &if_left_bigint,
+ &var_left_bigint, &var_left_feedback);
+ BIND(&if_left_number);
+ TaggedToWord32OrBigIntWithFeedback(context, right, &do_number_op,
+ &var_right_word32, &do_bigint_op,
+ &var_right_bigint, &var_right_feedback);
+ BIND(&do_number_op);
+ Node* result = BitwiseOp(var_left_word32.value(), var_right_word32.value(),
+ bitwise_op);
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
-
- if (FLAG_debug_code) {
- Label ok(this);
- GotoIf(TaggedIsSmi(result), &ok);
- Node* result_map = LoadMap(result);
- AbortIfWordNotEqual(result_map, HeapNumberMapConstant(),
- kExpectedHeapNumber);
- Goto(&ok);
- BIND(&ok);
- }
-
Node* input_feedback =
- SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+ SmiOr(var_left_feedback.value(), var_right_feedback.value());
UpdateFeedback(SmiOr(result_type, input_feedback), feedback_vector,
slot_index);
SetAccumulator(result);
Dispatch();
+
+ // BigInt cases.
+ BIND(&if_left_bigint);
+ TaggedToNumericWithFeedback(context, right, &do_bigint_op,
+ &var_right_bigint, &var_right_feedback);
+
+ BIND(&do_bigint_op);
+ SetAccumulator(
+ CallRuntime(Runtime::kBigIntBinaryOp, context, var_left_bigint.value(),
+ var_right_bigint.value(), SmiConstant(bitwise_op)));
+ UpdateFeedback(SmiOr(var_left_feedback.value(), var_right_feedback.value()),
+ feedback_vector, slot_index);
+ Dispatch();
+ }
+
+ void BitwiseBinaryOpWithSmi(Operation bitwise_op) {
+ Node* left = GetAccumulator();
+ Node* right = BytecodeOperandImmSmi(0);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ VARIABLE(var_left_feedback, MachineRepresentation::kTaggedSigned);
+ VARIABLE(var_left_word32, MachineRepresentation::kWord32);
+ VARIABLE(var_left_bigint, MachineRepresentation::kTagged);
+ Label do_smi_op(this), if_bigint_mix(this);
+
+ TaggedToWord32OrBigIntWithFeedback(context, left, &do_smi_op,
+ &var_left_word32, &if_bigint_mix,
+ &var_left_bigint, &var_left_feedback);
+ BIND(&do_smi_op);
+ Node* result =
+ BitwiseOp(var_left_word32.value(), SmiToWord32(right), bitwise_op);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+
+ BIND(&if_bigint_mix);
+ UpdateFeedback(var_left_feedback.value(), feedback_vector, slot_index);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
};
@@ -1009,21 +1054,21 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
//
// BitwiseOr register <src> to accumulator.
IGNITION_HANDLER(BitwiseOr, InterpreterBitwiseBinaryOpAssembler) {
- BitwiseBinaryOpWithFeedback(Token::BIT_OR);
+ BitwiseBinaryOpWithFeedback(Operation::kBitwiseOr);
}
// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
IGNITION_HANDLER(BitwiseXor, InterpreterBitwiseBinaryOpAssembler) {
- BitwiseBinaryOpWithFeedback(Token::BIT_XOR);
+ BitwiseBinaryOpWithFeedback(Operation::kBitwiseXor);
}
// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
IGNITION_HANDLER(BitwiseAnd, InterpreterBitwiseBinaryOpAssembler) {
- BitwiseBinaryOpWithFeedback(Token::BIT_AND);
+ BitwiseBinaryOpWithFeedback(Operation::kBitwiseAnd);
}
// ShiftLeft <src>
@@ -1033,7 +1078,7 @@ IGNITION_HANDLER(BitwiseAnd, InterpreterBitwiseBinaryOpAssembler) {
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
IGNITION_HANDLER(ShiftLeft, InterpreterBitwiseBinaryOpAssembler) {
- BitwiseBinaryOpWithFeedback(Token::SHL);
+ BitwiseBinaryOpWithFeedback(Operation::kShiftLeft);
}
// ShiftRight <src>
@@ -1043,7 +1088,7 @@ IGNITION_HANDLER(ShiftLeft, InterpreterBitwiseBinaryOpAssembler) {
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
IGNITION_HANDLER(ShiftRight, InterpreterBitwiseBinaryOpAssembler) {
- BitwiseBinaryOpWithFeedback(Token::SAR);
+ BitwiseBinaryOpWithFeedback(Operation::kShiftRight);
}
// ShiftRightLogical <src>
@@ -1053,82 +1098,28 @@ IGNITION_HANDLER(ShiftRight, InterpreterBitwiseBinaryOpAssembler) {
// uint32 before the operation 5 lsb bits from the accumulator are used as
// count i.e. <src> << (accumulator & 0x1F).
IGNITION_HANDLER(ShiftRightLogical, InterpreterBitwiseBinaryOpAssembler) {
- BitwiseBinaryOpWithFeedback(Token::SHR);
+ BitwiseBinaryOpWithFeedback(Operation::kShiftRightLogical);
}
// BitwiseOrSmi <imm>
//
// BitwiseOrSmi accumulator with <imm>.
-IGNITION_HANDLER(BitwiseOrSmi, InterpreterAssembler) {
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
-
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value =
- TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
- Node* rhs_value = SmiToWord32(right);
- Node* value = Word32Or(lhs_value, rhs_value);
- Node* result = ChangeInt32ToTagged(value);
- Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
- SetAccumulator(result);
- Dispatch();
+IGNITION_HANDLER(BitwiseOrSmi, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithSmi(Operation::kBitwiseOr);
}
// BitwiseXorSmi <imm>
//
// BitwiseXorSmi accumulator with <imm>.
-IGNITION_HANDLER(BitwiseXorSmi, InterpreterAssembler) {
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
-
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value =
- TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
- Node* rhs_value = SmiToWord32(right);
- Node* value = Word32Xor(lhs_value, rhs_value);
- Node* result = ChangeInt32ToTagged(value);
- Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
- SetAccumulator(result);
- Dispatch();
+IGNITION_HANDLER(BitwiseXorSmi, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithSmi(Operation::kBitwiseXor);
}
// BitwiseAndSmi <imm>
//
// BitwiseAndSmi accumulator with <imm>.
-IGNITION_HANDLER(BitwiseAndSmi, InterpreterAssembler) {
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
-
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value =
- TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
- Node* rhs_value = SmiToWord32(right);
- Node* value = Word32And(lhs_value, rhs_value);
- Node* result = ChangeInt32ToTagged(value);
- Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
- SetAccumulator(result);
- Dispatch();
+IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithSmi(Operation::kBitwiseAnd);
}
// BitwiseNot <feedback_slot>
@@ -1140,18 +1131,32 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* truncated_value =
- TruncateTaggedToWord32WithFeedback(context, operand, &var_type_feedback);
- Node* value = Word32Not(truncated_value);
- Node* result = ChangeInt32ToTagged(value);
+ VARIABLE(var_word32, MachineRepresentation::kWord32);
+ VARIABLE(var_feedback, MachineRepresentation::kTaggedSigned);
+ VARIABLE(var_bigint, MachineRepresentation::kTagged);
+ Label if_number(this), if_bigint(this);
+ TaggedToWord32OrBigIntWithFeedback(context, operand, &if_number, &var_word32,
+ &if_bigint, &var_bigint, &var_feedback);
+
+ // Number case.
+ BIND(&if_number);
+ Node* result = ChangeInt32ToTagged(Signed(Word32Not(var_word32.value())));
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_type_feedback.value()), feedback_vector,
+ UpdateFeedback(SmiOr(result_type, var_feedback.value()), feedback_vector,
slot_index);
SetAccumulator(result);
Dispatch();
+
+ // BigInt case.
+ BIND(&if_bigint);
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt), feedback_vector,
+ slot_index);
+ SetAccumulator(CallRuntime(Runtime::kBigIntUnaryOp, context,
+ var_bigint.value(),
+ SmiConstant(Operation::kBitwiseNot)));
+ Dispatch();
}
// ShiftLeftSmi <imm>
@@ -1159,158 +1164,183 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
// Left shifts accumulator by the count specified in <imm>.
// The accumulator is converted to an int32 before the operation. The 5
// lsb bits from <imm> are used as count i.e. <src> << (<imm> & 0x1F).
-IGNITION_HANDLER(ShiftLeftSmi, InterpreterAssembler) {
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
-
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value =
- TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
- Node* rhs_value = SmiToWord32(right);
- Node* shift_count = Word32And(rhs_value, Int32Constant(0x1f));
- Node* value = Word32Shl(lhs_value, shift_count);
- Node* result = ChangeInt32ToTagged(value);
- Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
- SetAccumulator(result);
- Dispatch();
+IGNITION_HANDLER(ShiftLeftSmi, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithSmi(Operation::kShiftLeft);
}
// ShiftRightSmi <imm>
//
// Right shifts accumulator by the count specified in <imm>. Result is sign
// extended. The accumulator is converted to an int32 before the operation. The
-// 5 lsb bits from <imm> are used as count i.e. <src> << (<imm> & 0x1F).
-IGNITION_HANDLER(ShiftRightSmi, InterpreterAssembler) {
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
-
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value =
- TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
- Node* rhs_value = SmiToWord32(right);
- Node* shift_count = Word32And(rhs_value, Int32Constant(0x1f));
- Node* value = Word32Sar(lhs_value, shift_count);
- Node* result = ChangeInt32ToTagged(value);
- Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
- SetAccumulator(result);
- Dispatch();
+// 5 lsb bits from <imm> are used as count i.e. <src> >> (<imm> & 0x1F).
+IGNITION_HANDLER(ShiftRightSmi, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithSmi(Operation::kShiftRight);
}
// ShiftRightLogicalSmi <imm>
//
// Right shifts accumulator by the count specified in <imm>. Result is zero
// extended. The accumulator is converted to an int32 before the operation. The
-// 5 lsb bits from <imm> are used as count i.e. <src> << (<imm> & 0x1F).
-IGNITION_HANDLER(ShiftRightLogicalSmi, InterpreterAssembler) {
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
-
- Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Node* lhs_value =
- TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
- Node* rhs_value = SmiToWord32(right);
- Node* shift_count = Word32And(rhs_value, Int32Constant(0x1f));
- Node* value = Word32Shr(lhs_value, shift_count);
- Node* result = ChangeUint32ToTagged(value);
- Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
- SetAccumulator(result);
- Dispatch();
+// 5 lsb bits from <imm> are used as count i.e. <src> >>> (<imm> & 0x1F).
+IGNITION_HANDLER(ShiftRightLogicalSmi, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithSmi(Operation::kShiftRightLogical);
}
-// Negate <feedback_slot>
-//
-// Perform arithmetic negation on the accumulator.
-IGNITION_HANDLER(Negate, InterpreterAssembler) {
- Node* operand = GetAccumulator();
+class UnaryNumericOpAssembler : public InterpreterAssembler {
+ public:
+ UnaryNumericOpAssembler(CodeAssemblerState* state, Bytecode bytecode,
+ OperandScale operand_scale)
+ : InterpreterAssembler(state, bytecode, operand_scale) {}
- Label end(this);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ virtual ~UnaryNumericOpAssembler() {}
- Label if_smi(this), if_heapnumber(this), if_notnumber(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(operand), &if_smi);
- Branch(IsHeapNumber(operand), &if_heapnumber, &if_notnumber);
+ // Must return a tagged value.
+ virtual Node* SmiOp(Node* smi_value, Variable* var_feedback,
+ Label* do_float_op, Variable* var_float) = 0;
+ // Must return a Float64 value.
+ virtual Node* FloatOp(Node* float_value) = 0;
+ // Must return a tagged value.
+ virtual Node* BigIntOp(Node* bigint_value) = 0;
- BIND(&if_smi);
- {
- Label if_zero(this), if_min_smi(this);
+ void UnaryOpWithFeedback() {
+ VARIABLE(var_value, MachineRepresentation::kTagged, GetAccumulator());
+ Node* slot_index = BytecodeOperandIdx(0);
+ Node* feedback_vector = LoadFeedbackVector();
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ VARIABLE(var_float_value, MachineRepresentation::kFloat64);
+ VARIABLE(var_feedback, MachineRepresentation::kTaggedSigned,
+ SmiConstant(BinaryOperationFeedback::kNone));
+ Variable* loop_vars[] = {&var_value, &var_feedback};
+ Label start(this, arraysize(loop_vars), loop_vars), end(this);
+ Label do_float_op(this, &var_float_value);
+ Goto(&start);
+ // We might have to try again after ToNumeric conversion.
+ BIND(&start);
+ {
+ Label if_smi(this), if_heapnumber(this), if_bigint(this);
+ Label if_oddball(this), if_other(this);
+ Node* value = var_value.value();
+ GotoIf(TaggedIsSmi(value), &if_smi);
+ Node* map = LoadMap(value);
+ GotoIf(IsHeapNumberMap(map), &if_heapnumber);
+ Node* instance_type = LoadMapInstanceType(map);
+ GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
+ Branch(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &if_oddball,
+ &if_other);
+
+ BIND(&if_smi);
+ {
+ var_result.Bind(
+ SmiOp(value, &var_feedback, &do_float_op, &var_float_value));
+ Goto(&end);
+ }
+
+ BIND(&if_heapnumber);
+ {
+ var_float_value.Bind(LoadHeapNumberValue(value));
+ Goto(&do_float_op);
+ }
+
+ BIND(&if_bigint);
+ {
+ var_result.Bind(BigIntOp(value));
+ CombineFeedback(&var_feedback,
+ SmiConstant(BinaryOperationFeedback::kBigInt));
+ Goto(&end);
+ }
+
+ BIND(&if_oddball);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
+ var_feedback.Bind(
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ Goto(&start);
+ }
+
+ BIND(&if_other);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
+ var_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ var_value.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), value));
+ Goto(&start);
+ }
+ }
+
+ BIND(&do_float_op);
+ {
+ CombineFeedback(&var_feedback,
+ SmiConstant(BinaryOperationFeedback::kNumber));
+ var_result.Bind(
+ AllocateHeapNumberWithValue(FloatOp(var_float_value.value())));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ UpdateFeedback(var_feedback.value(), feedback_vector, slot_index);
+ SetAccumulator(var_result.value());
+ Dispatch();
+ }
+};
+
+class NegateAssemblerImpl : public UnaryNumericOpAssembler {
+ public:
+ explicit NegateAssemblerImpl(CodeAssemblerState* state, Bytecode bytecode,
+ OperandScale operand_scale)
+ : UnaryNumericOpAssembler(state, bytecode, operand_scale) {}
+
+ Node* SmiOp(Node* smi_value, Variable* var_feedback, Label* do_float_op,
+ Variable* var_float) override {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_zero(this), if_min_smi(this), end(this);
// Return -0 if operand is 0.
- GotoIf(SmiEqual(operand, SmiConstant(0)), &if_zero);
+ GotoIf(SmiEqual(smi_value, SmiConstant(0)), &if_zero);
- // Special-case the minimum smi to avoid overflow.
- GotoIf(SmiEqual(operand, SmiConstant(Smi::kMinValue)), &if_min_smi);
+ // Special-case the minimum Smi to avoid overflow.
+ GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
// Else simply subtract operand from 0.
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(SmiSub(SmiConstant(0), operand));
+ CombineFeedback(var_feedback,
+ SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(SmiSub(SmiConstant(0), smi_value));
Goto(&end);
BIND(&if_zero);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ CombineFeedback(var_feedback,
+ SmiConstant(BinaryOperationFeedback::kNumber));
var_result.Bind(MinusZeroConstant());
Goto(&end);
BIND(&if_min_smi);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- var_result.Bind(AllocateHeapNumberWithValue(
- Float64Constant(-static_cast<double>(Smi::kMinValue))));
- Goto(&end);
- }
+ var_float->Bind(SmiToFloat64(smi_value));
+ Goto(do_float_op);
- BIND(&if_heapnumber);
- {
- Node* result = Float64Neg(LoadHeapNumberValue(operand));
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- var_result.Bind(AllocateHeapNumberWithValue(result));
- Goto(&end);
+ BIND(&end);
+ return var_result.value();
}
- BIND(&if_notnumber);
- {
- Node* instance_type = LoadInstanceType(operand);
- Node* is_oddball = Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
-
- var_type_feedback.Bind(
- SelectSmiConstant(is_oddball, BinaryOperationFeedback::kNumberOrOddball,
- BinaryOperationFeedback::kAny));
+ Node* FloatOp(Node* float_value) override { return Float64Neg(float_value); }
- Node* context = GetContext();
- Node* result =
- CallBuiltin(Builtins::kMultiply, context, operand, SmiConstant(-1));
- var_result.Bind(result);
- Goto(&end);
+ Node* BigIntOp(Node* bigint_value) override {
+ return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
+ SmiConstant(Operation::kNegate));
}
+};
- BIND(&end);
-
- Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
-
- SetAccumulator(var_result.value());
- Dispatch();
-}
+// Negate <feedback_slot>
+//
+// Perform arithmetic negation on the accumulator.
+IGNITION_HANDLER(Negate, NegateAssemblerImpl) { UnaryOpWithFeedback(); }
// ToName <dst>
//
@@ -1327,49 +1357,14 @@ IGNITION_HANDLER(ToName, InterpreterAssembler) {
//
// Convert the object referenced by the accumulator to a number.
IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
- Node* object = GetAccumulator();
- Node* context = GetContext();
-
- // Convert the {object} to a Number and collect feedback for the {object}.
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Variable var_result(this, MachineRepresentation::kTagged);
- Label if_done(this), if_objectissmi(this), if_objectisnumber(this),
- if_objectisother(this, Label::kDeferred);
-
- GotoIf(TaggedIsSmi(object), &if_objectissmi);
- Branch(IsHeapNumber(object), &if_objectisnumber, &if_objectisother);
-
- BIND(&if_objectissmi);
- {
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
- Goto(&if_done);
- }
-
- BIND(&if_objectisnumber);
- {
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Goto(&if_done);
- }
-
- BIND(&if_objectisother);
- {
- // Convert the {object} to a Number.
- var_result.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, object));
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&if_done);
- }
-
- BIND(&if_done);
-
- // Record the type feedback collected for {object}.
- Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ ToNumberOrNumeric(Object::Conversion::kToNumber);
+}
- SetAccumulator(var_result.value());
- Dispatch();
+// ToNumeric <slot>
+//
+// Convert the object referenced by the accumulator to a numeric.
+IGNITION_HANDLER(ToNumeric, InterpreterAssembler) {
+ ToNumberOrNumeric(Object::Conversion::kToNumeric);
}
// ToObject <dst>
@@ -1385,251 +1380,77 @@ IGNITION_HANDLER(ToObject, InterpreterAssembler) {
Dispatch();
}
-// Inc
-//
-// Increments value in the accumulator by one.
-IGNITION_HANDLER(Inc, InterpreterAssembler) {
- Node* value = GetAccumulator();
- Node* context = GetContext();
- Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
-
- // Shared entry for floating point increment.
- Label do_finc(this), end(this);
- Variable var_finc_value(this, MachineRepresentation::kFloat64);
-
- // We might need to try again due to ToNumber conversion.
- Variable value_var(this, MachineRepresentation::kTagged);
- Variable result_var(this, MachineRepresentation::kTagged);
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Variable* loop_vars[] = {&value_var, &var_type_feedback};
- Label start(this, 2, loop_vars);
- value_var.Bind(value);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone));
- Goto(&start);
- BIND(&start);
- {
- value = value_var.value();
-
- Label if_issmi(this), if_isnotsmi(this);
- Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
- BIND(&if_issmi);
+class IncDecAssembler : public UnaryNumericOpAssembler {
+ public:
+ explicit IncDecAssembler(CodeAssemblerState* state, Bytecode bytecode,
+ OperandScale operand_scale)
+ : UnaryNumericOpAssembler(state, bytecode, operand_scale) {}
+
+ Operation op() {
+ DCHECK(op_ == Operation::kIncrement || op_ == Operation::kDecrement);
+ return op_;
+ }
+
+ Node* SmiOp(Node* smi_value, Variable* var_feedback, Label* do_float_op,
+ Variable* var_float) override {
+ // Try fast Smi operation first.
+ Node* value = BitcastTaggedToWord(smi_value);
+ Node* one = BitcastTaggedToWord(SmiConstant(1));
+ Node* pair = op() == Operation::kIncrement
+ ? IntPtrAddWithOverflow(value, one)
+ : IntPtrSubWithOverflow(value, one);
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi operation overflowed.
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ BIND(&if_overflow);
{
- // Try fast Smi addition first.
- Node* one = SmiConstant(1);
- Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
- BitcastTaggedToWord(one));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi addition overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
-
- BIND(&if_notoverflow);
- var_type_feedback.Bind(
- SmiOr(var_type_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
-
- BIND(&if_overflow);
- {
- var_finc_value.Bind(SmiToFloat64(value));
- Goto(&do_finc);
- }
+ var_float->Bind(SmiToFloat64(smi_value));
+ Goto(do_float_op);
}
- BIND(&if_isnotsmi);
- {
- // Check if the value is a HeapNumber.
- Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred);
- Node* value_map = LoadMap(value);
- Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber);
+ BIND(&if_notoverflow);
+ CombineFeedback(var_feedback,
+ SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ return BitcastWordToTaggedSigned(Projection(0, pair));
+ }
- BIND(&if_valueisnumber);
- {
- // Load the HeapNumber value.
- var_finc_value.Bind(LoadHeapNumberValue(value));
- Goto(&do_finc);
- }
+ Node* FloatOp(Node* float_value) override {
+ return op() == Operation::kIncrement
+ ? Float64Add(float_value, Float64Constant(1.0))
+ : Float64Sub(float_value, Float64Constant(1.0));
+ }
- BIND(&if_valuenotnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_type_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kNone)));
+ Node* BigIntOp(Node* bigint_value) override {
+ return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
+ SmiConstant(op()));
+ }
- Label if_valueisoddball(this), if_valuenotoddball(this);
- Node* instance_type = LoadMapInstanceType(value_map);
- Node* is_oddball =
- Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
- BIND(&if_valueisoddball);
- {
- // Convert Oddball to Number and check again.
- value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&start);
- }
-
- BIND(&if_valuenotoddball);
- {
- // Convert to a Number first and try again.
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- value_var.Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, value));
- Goto(&start);
- }
- }
- }
+ void IncWithFeedback() {
+ op_ = Operation::kIncrement;
+ UnaryOpWithFeedback();
}
- BIND(&do_finc);
- {
- Node* finc_value = var_finc_value.value();
- Node* one = Float64Constant(1.0);
- Node* finc_result = Float64Add(finc_value, one);
- var_type_feedback.Bind(
- SmiOr(var_type_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(AllocateHeapNumberWithValue(finc_result));
- Goto(&end);
+ void DecWithFeedback() {
+ op_ = Operation::kDecrement;
+ UnaryOpWithFeedback();
}
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ private:
+ Operation op_ = Operation::kEqual; // Dummy initialization.
+};
- SetAccumulator(result_var.value());
- Dispatch();
-}
+// Inc
+//
+// Increments value in the accumulator by one.
+IGNITION_HANDLER(Inc, IncDecAssembler) { IncWithFeedback(); }
// Dec
//
// Decrements value in the accumulator by one.
-IGNITION_HANDLER(Dec, InterpreterAssembler) {
- Node* value = GetAccumulator();
- Node* context = GetContext();
- Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
-
- // Shared entry for floating point decrement.
- Label do_fdec(this), end(this);
- Variable var_fdec_value(this, MachineRepresentation::kFloat64);
-
- // We might need to try again due to ToNumber conversion.
- Variable value_var(this, MachineRepresentation::kTagged);
- Variable result_var(this, MachineRepresentation::kTagged);
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Variable* loop_vars[] = {&value_var, &var_type_feedback};
- Label start(this, 2, loop_vars);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone));
- value_var.Bind(value);
- Goto(&start);
- BIND(&start);
- {
- value = value_var.value();
-
- Label if_issmi(this), if_isnotsmi(this);
- Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
- BIND(&if_issmi);
- {
- // Try fast Smi subtraction first.
- Node* one = SmiConstant(1);
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(value),
- BitcastTaggedToWord(one));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
-
- BIND(&if_notoverflow);
- var_type_feedback.Bind(
- SmiOr(var_type_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
-
- BIND(&if_overflow);
- {
- var_fdec_value.Bind(SmiToFloat64(value));
- Goto(&do_fdec);
- }
- }
-
- BIND(&if_isnotsmi);
- {
- // Check if the value is a HeapNumber.
- Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred);
- Node* value_map = LoadMap(value);
- Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber);
-
- BIND(&if_valueisnumber);
- {
- // Load the HeapNumber value.
- var_fdec_value.Bind(LoadHeapNumberValue(value));
- Goto(&do_fdec);
- }
-
- BIND(&if_valuenotnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_type_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kNone)));
-
- Label if_valueisoddball(this), if_valuenotoddball(this);
- Node* instance_type = LoadMapInstanceType(value_map);
- Node* is_oddball =
- Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
- BIND(&if_valueisoddball);
- {
- // Convert Oddball to Number and check again.
- value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&start);
- }
-
- BIND(&if_valuenotoddball);
- {
- // Convert to a Number first and try again.
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- value_var.Bind(
- CallBuiltin(Builtins::kNonNumberToNumber, context, value));
- Goto(&start);
- }
- }
- }
- }
-
- BIND(&do_fdec);
- {
- Node* fdec_value = var_fdec_value.value();
- Node* one = Float64Constant(1.0);
- Node* fdec_result = Float64Sub(fdec_value, one);
- var_type_feedback.Bind(
- SmiOr(var_type_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(AllocateHeapNumberWithValue(fdec_result));
- Goto(&end);
- }
-
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
-
- SetAccumulator(result_var.value());
- Dispatch();
-}
+IGNITION_HANDLER(Dec, IncDecAssembler) { DecWithFeedback(); }
// LogicalNot
//
@@ -1640,17 +1461,15 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
Node* value = GetAccumulator();
Variable result(this, MachineRepresentation::kTagged);
Label if_true(this), if_false(this), end(this);
- Node* true_value = BooleanConstant(true);
- Node* false_value = BooleanConstant(false);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
{
- result.Bind(false_value);
+ result.Bind(FalseConstant());
Goto(&end);
}
BIND(&if_false);
{
- result.Bind(true_value);
+ result.Bind(TrueConstant());
Goto(&end);
}
BIND(&end);
@@ -1666,8 +1485,8 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
Node* value = GetAccumulator();
Variable result(this, MachineRepresentation::kTagged);
Label if_true(this), if_false(this), end(this);
- Node* true_value = BooleanConstant(true);
- Node* false_value = BooleanConstant(false);
+ Node* true_value = TrueConstant();
+ Node* false_value = FalseConstant();
Branch(WordEqual(value, true_value), &if_true, &if_false);
BIND(&if_true);
{
@@ -1676,10 +1495,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
}
BIND(&if_false);
{
- if (FLAG_debug_code) {
- AbortIfWordNotEqual(value, false_value,
- BailoutReason::kExpectedBooleanValue);
- }
+ CSA_ASSERT(this, WordEqual(value, false_value));
result.Bind(true_value);
Goto(&end);
}
@@ -1709,7 +1525,7 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
Node* key = GetAccumulator();
Node* context = GetContext();
Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
- SmiConstant(STRICT));
+ SmiConstant(Smi::FromEnum(LanguageMode::kStrict)));
SetAccumulator(result);
Dispatch();
}
@@ -1724,7 +1540,7 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
Node* key = GetAccumulator();
Node* context = GetContext();
Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
- SmiConstant(SLOPPY));
+ SmiConstant(Smi::FromEnum(LanguageMode::kSloppy)));
SetAccumulator(result);
Dispatch();
}
@@ -2016,7 +1832,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- void CompareOpWithFeedback(Token::Value compare_op) {
+ void CompareOpWithFeedback(Operation compare_op) {
Node* reg_index = BytecodeOperandReg(0);
Node* lhs = LoadRegister(reg_index);
Node* rhs = GetAccumulator();
@@ -2025,27 +1841,18 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
Variable var_type_feedback(this, MachineRepresentation::kTagged);
Node* result;
switch (compare_op) {
- case Token::EQ:
+ case Operation::kEqual:
result = Equal(lhs, rhs, context, &var_type_feedback);
break;
- case Token::EQ_STRICT:
+ case Operation::kStrictEqual:
result = StrictEqual(lhs, rhs, &var_type_feedback);
break;
- case Token::LT:
- result = RelationalComparison(CodeStubAssembler::kLessThan, lhs, rhs,
- context, &var_type_feedback);
- break;
- case Token::GT:
- result = RelationalComparison(CodeStubAssembler::kGreaterThan, lhs, rhs,
- context, &var_type_feedback);
- break;
- case Token::LTE:
- result = RelationalComparison(CodeStubAssembler::kLessThanOrEqual, lhs,
- rhs, context, &var_type_feedback);
- break;
- case Token::GTE:
- result = RelationalComparison(CodeStubAssembler::kGreaterThanOrEqual,
- lhs, rhs, context, &var_type_feedback);
+ case Operation::kLessThan:
+ case Operation::kGreaterThan:
+ case Operation::kLessThanOrEqual:
+ case Operation::kGreaterThanOrEqual:
+ result = RelationalComparison(compare_op, lhs, rhs, context,
+ &var_type_feedback);
break;
default:
UNREACHABLE();
@@ -2063,28 +1870,28 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
//
// Test if the value in the <src> register equals the accumulator.
IGNITION_HANDLER(TestEqual, InterpreterCompareOpAssembler) {
- CompareOpWithFeedback(Token::Value::EQ);
+ CompareOpWithFeedback(Operation::kEqual);
}
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
IGNITION_HANDLER(TestEqualStrict, InterpreterCompareOpAssembler) {
- CompareOpWithFeedback(Token::Value::EQ_STRICT);
+ CompareOpWithFeedback(Operation::kStrictEqual);
}
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
IGNITION_HANDLER(TestLessThan, InterpreterCompareOpAssembler) {
- CompareOpWithFeedback(Token::Value::LT);
+ CompareOpWithFeedback(Operation::kLessThan);
}
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
IGNITION_HANDLER(TestGreaterThan, InterpreterCompareOpAssembler) {
- CompareOpWithFeedback(Token::Value::GT);
+ CompareOpWithFeedback(Operation::kGreaterThan);
}
// TestLessThanOrEqual <src>
@@ -2092,7 +1899,7 @@ IGNITION_HANDLER(TestGreaterThan, InterpreterCompareOpAssembler) {
// Test if the value in the <src> register is less than or equal to the
// accumulator.
IGNITION_HANDLER(TestLessThanOrEqual, InterpreterCompareOpAssembler) {
- CompareOpWithFeedback(Token::Value::LTE);
+ CompareOpWithFeedback(Operation::kLessThanOrEqual);
}
// TestGreaterThanOrEqual <src>
@@ -2100,7 +1907,7 @@ IGNITION_HANDLER(TestLessThanOrEqual, InterpreterCompareOpAssembler) {
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
IGNITION_HANDLER(TestGreaterThanOrEqual, InterpreterCompareOpAssembler) {
- CompareOpWithFeedback(Token::Value::GTE);
+ CompareOpWithFeedback(Operation::kGreaterThanOrEqual);
}
// TestEqualStrictNoFeedback <src>
@@ -2134,16 +1941,23 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
Dispatch();
}
-// TestInstanceOf <src>
+// TestInstanceOf <src> <feedback_slot>
//
// Test if the object referenced by the <src> register is an an instance of type
// referenced by the accumulator.
IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* name = LoadRegister(reg_index);
- Node* object = GetAccumulator();
+ Node* object_reg = BytecodeOperandReg(0);
+ Node* object = LoadRegister(object_reg);
+ Node* callable = GetAccumulator();
+ Node* slot_id = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- SetAccumulator(InstanceOf(name, object, context));
+
+ // Record feedback for the {callable} in the {feedback_vector}.
+ CollectCallableFeedback(callable, context, feedback_vector, slot_id);
+
+ // Perform the actual instanceof operation.
+ SetAccumulator(InstanceOf(object, callable, context));
Dispatch();
}
@@ -2156,7 +1970,7 @@ IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
Node* object = GetAccumulator();
// If the object is an Smi then return false.
- SetAccumulator(BooleanConstant(false));
+ SetAccumulator(FalseConstant());
GotoIf(TaggedIsSmi(object), &end);
// If it is a HeapObject, load the map and check for undetectable bit.
@@ -2173,8 +1987,7 @@ IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
// Test if the value in accumulator is strictly equal to null.
IGNITION_HANDLER(TestNull, InterpreterAssembler) {
Node* object = GetAccumulator();
- Node* null_value = HeapConstant(isolate()->factory()->null_value());
- Node* result = SelectBooleanConstant(WordEqual(object, null_value));
+ Node* result = SelectBooleanConstant(WordEqual(object, NullConstant()));
SetAccumulator(result);
Dispatch();
}
@@ -2184,8 +1997,7 @@ IGNITION_HANDLER(TestNull, InterpreterAssembler) {
// Test if the value in the accumulator is strictly equal to undefined.
IGNITION_HANDLER(TestUndefined, InterpreterAssembler) {
Node* object = GetAccumulator();
- Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
- Node* result = SelectBooleanConstant(WordEqual(object, undefined_value));
+ Node* result = SelectBooleanConstant(WordEqual(object, UndefinedConstant()));
SetAccumulator(result);
Dispatch();
}
@@ -2241,8 +2053,14 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
BIND(&if_boolean);
{
Comment("IfBoolean");
- GotoIf(WordEqual(object, BooleanConstant(true)), &if_true);
- Branch(WordEqual(object, BooleanConstant(false)), &if_true, &if_false);
+ GotoIf(WordEqual(object, TrueConstant()), &if_true);
+ Branch(WordEqual(object, FalseConstant()), &if_true, &if_false);
+ }
+ BIND(&if_bigint);
+ {
+ Comment("IfBigInt");
+ GotoIf(TaggedIsSmi(object), &if_false);
+ Branch(IsBigInt(object), &if_true, &if_false);
}
BIND(&if_undefined);
{
@@ -2271,7 +2089,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
GotoIf(TaggedIsSmi(object), &if_false);
// If the object is null then return true.
- GotoIf(WordEqual(object, NullConstant()), &if_true);
+ GotoIf(IsNull(object), &if_true);
// Check if the object is a receiver type and is not undefined or callable.
Node* map = LoadMap(object);
@@ -2291,12 +2109,12 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
BIND(&if_false);
{
- SetAccumulator(BooleanConstant(false));
+ SetAccumulator(FalseConstant());
Goto(&end);
}
BIND(&if_true);
{
- SetAccumulator(BooleanConstant(true));
+ SetAccumulator(TrueConstant());
Goto(&end);
}
BIND(&end);
@@ -2329,10 +2147,9 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* relative_jump = BytecodeOperandUImmWord(0);
- Node* true_value = BooleanConstant(true);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
- JumpIfWordEqual(accumulator, true_value, relative_jump);
+ JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
}
// JumpIfTrueConstant <idx>
@@ -2344,10 +2161,9 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- Node* true_value = BooleanConstant(true);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
- JumpIfWordEqual(accumulator, true_value, relative_jump);
+ JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
}
// JumpIfFalse <imm>
@@ -2358,10 +2174,9 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* relative_jump = BytecodeOperandUImmWord(0);
- Node* false_value = BooleanConstant(false);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
- JumpIfWordEqual(accumulator, false_value, relative_jump);
+ JumpIfWordEqual(accumulator, FalseConstant(), relative_jump);
}
// JumpIfFalseConstant <idx>
@@ -2373,10 +2188,9 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- Node* false_value = BooleanConstant(false);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
- JumpIfWordEqual(accumulator, false_value, relative_jump);
+ JumpIfWordEqual(accumulator, FalseConstant(), relative_jump);
}
// JumpIfToBooleanTrue <imm>
@@ -2449,9 +2263,8 @@ IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* null_value = HeapConstant(isolate()->factory()->null_value());
Node* relative_jump = BytecodeOperandUImmWord(0);
- JumpIfWordEqual(accumulator, null_value, relative_jump);
+ JumpIfWordEqual(accumulator, NullConstant(), relative_jump);
}
// JumpIfNullConstant <idx>
@@ -2460,10 +2273,9 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* null_value = HeapConstant(isolate()->factory()->null_value());
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- JumpIfWordEqual(accumulator, null_value, relative_jump);
+ JumpIfWordEqual(accumulator, NullConstant(), relative_jump);
}
// JumpIfNotNull <imm>
@@ -2472,9 +2284,8 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
// referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* null_value = HeapConstant(isolate()->factory()->null_value());
Node* relative_jump = BytecodeOperandUImmWord(0);
- JumpIfWordNotEqual(accumulator, null_value, relative_jump);
+ JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump);
}
// JumpIfNotNullConstant <idx>
@@ -2483,10 +2294,9 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* null_value = HeapConstant(isolate()->factory()->null_value());
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- JumpIfWordNotEqual(accumulator, null_value, relative_jump);
+ JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump);
}
// JumpIfUndefined <imm>
@@ -2495,9 +2305,8 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
// referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
Node* relative_jump = BytecodeOperandUImmWord(0);
- JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+ JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump);
}
// JumpIfUndefinedConstant <idx>
@@ -2506,10 +2315,9 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+ JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump);
}
// JumpIfNotUndefined <imm>
@@ -2518,9 +2326,8 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
// referenced by the accumulator is not the undefined constant.
IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
Node* relative_jump = BytecodeOperandUImmWord(0);
- JumpIfWordNotEqual(accumulator, undefined_value, relative_jump);
+ JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump);
}
// JumpIfNotUndefinedConstant <idx>
@@ -2530,10 +2337,9 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
// constant.
IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- JumpIfWordNotEqual(accumulator, undefined_value, relative_jump);
+ JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump);
}
// JumpIfJSReceiver <imm>
@@ -2964,20 +2770,9 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
//
// Performs a stack guard check.
IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
- Label ok(this), stack_check_interrupt(this, Label::kDeferred);
-
- Node* interrupt = StackCheckTriggeredInterrupt();
- Branch(interrupt, &stack_check_interrupt, &ok);
-
- BIND(&ok);
+ Node* context = GetContext();
+ PerformStackCheck(context);
Dispatch();
-
- BIND(&stack_check_interrupt);
- {
- Node* context = GetContext();
- CallRuntime(Runtime::kStackGuard, context);
- Dispatch();
- }
}
// SetPendingMessage
@@ -3040,10 +2835,9 @@ IGNITION_HANDLER(Return, InterpreterAssembler) {
// Throws an exception if the value in the accumulator is TheHole.
IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
Label throw_error(this, Label::kDeferred);
- GotoIf(WordEqual(value, the_hole_value), &throw_error);
+ GotoIf(WordEqual(value, TheHoleConstant()), &throw_error);
Dispatch();
BIND(&throw_error);
@@ -3060,10 +2854,9 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
// Throws an exception if the value in the accumulator is TheHole.
IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
Label throw_error(this, Label::kDeferred);
- GotoIf(WordEqual(value, the_hole_value), &throw_error);
+ GotoIf(WordEqual(value, TheHoleConstant()), &throw_error);
Dispatch();
BIND(&throw_error);
@@ -3080,10 +2873,9 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
// TheHole.
IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
Label throw_error(this, Label::kDeferred);
- GotoIf(WordNotEqual(value, the_hole_value), &throw_error);
+ GotoIf(WordNotEqual(value, TheHoleConstant()), &throw_error);
Dispatch();
BIND(&throw_error);
@@ -3110,9 +2902,12 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
IGNITION_HANDLER(Name, InterpreterAssembler) { \
Node* context = GetContext(); \
Node* accumulator = GetAccumulator(); \
- Node* original_handler = \
+ Node* result_pair = \
CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
+ Node* return_value = Projection(0, result_pair); \
+ Node* original_handler = Projection(1, result_pair); \
MaybeDropFrames(context); \
+ SetAccumulator(return_value); \
DispatchToBytecodeHandler(original_handler); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
@@ -3303,12 +3098,12 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
Branch(WordEqual(index, cache_length), &if_true, &if_false);
BIND(&if_true);
{
- SetAccumulator(BooleanConstant(false));
+ SetAccumulator(FalseConstant());
Goto(&end);
}
BIND(&if_false);
{
- SetAccumulator(BooleanConstant(true));
+ SetAccumulator(TrueConstant());
Goto(&end);
}
BIND(&end);
@@ -3473,6 +3268,71 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
return code;
}
+namespace {
+
+// DeserializeLazy
+//
+// Deserialize the bytecode handler, store it in the dispatch table, and
+// finally jump there (preserving existing args).
+// We manually create a custom assembler instead of using the helper macros
+// above since no corresponding bytecode exists.
+class DeserializeLazyAssembler : public InterpreterAssembler {
+ public:
+ static const Bytecode kFakeBytecode = Bytecode::kIllegal;
+
+ explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
+ OperandScale operand_scale)
+ : InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
+
+ static void Generate(compiler::CodeAssemblerState* state,
+ OperandScale operand_scale) {
+ DeserializeLazyAssembler assembler(state, operand_scale);
+ state->SetInitialDebugInformation("DeserializeLazy", __FILE__, __LINE__);
+ assembler.GenerateImpl();
+ }
+
+ private:
+ void GenerateImpl() { DeserializeLazyAndDispatch(); }
+
+ DISALLOW_COPY_AND_ASSIGN(DeserializeLazyAssembler);
+};
+
+} // namespace
+
+Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
+ OperandScale operand_scale) {
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ const size_t return_count = 0;
+
+ std::string debug_name = std::string("DeserializeLazy");
+ if (operand_scale > OperandScale::kSingle) {
+ Bytecode prefix_bytecode =
+ Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
+ debug_name = debug_name.append(Bytecodes::ToString(prefix_bytecode));
+ }
+
+ InterpreterDispatchDescriptor descriptor(isolate);
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor,
+ Code::BYTECODE_HANDLER, debug_name.c_str(),
+ return_count);
+
+ DeserializeLazyAssembler::Generate(&state, operand_scale);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ PROFILE(isolate,
+ CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
+ AbstractCode::cast(*code), debug_name.c_str()));
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_trace_ignition_codegen) {
+ OFStream os(stdout);
+ code->Disassemble(debug_name.c_str(), os);
+ os << std::flush;
+ }
+#endif // ENABLE_DISASSEMBLER
+
+ return code;
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h
index eab411d810..3dbdcf829d 100644
--- a/deps/v8/src/interpreter/interpreter-generator.h
+++ b/deps/v8/src/interpreter/interpreter-generator.h
@@ -15,6 +15,9 @@ namespace interpreter {
extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
OperandScale operand_scale);
+extern Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
+ OperandScale operand_scale);
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 029cf12594..39cb45c96c 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -11,6 +11,8 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/objects-inl.h"
+#include "src/objects/module.h"
namespace v8 {
namespace internal {
@@ -121,7 +123,7 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
if (mode == kInstanceTypeEqual) {
return __ Word32Equal(instance_type, __ Int32Constant(type));
} else {
- DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
+ DCHECK_EQ(mode, kInstanceTypeGreaterThanOrEqual);
return __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
}
}
@@ -140,13 +142,13 @@ Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
__ BIND(&return_true);
{
- return_value.Bind(__ BooleanConstant(true));
+ return_value.Bind(__ TrueConstant());
__ Goto(&end);
}
__ BIND(&return_false);
{
- return_value.Bind(__ BooleanConstant(false));
+ return_value.Bind(__ FalseConstant());
__ Goto(&end);
}
@@ -173,13 +175,13 @@ Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count,
__ BIND(&return_true);
{
- return_value.Bind(__ BooleanConstant(true));
+ return_value.Bind(__ TrueConstant());
__ Goto(&end);
}
__ BIND(&return_false);
{
- return_value.Bind(__ BooleanConstant(false));
+ return_value.Bind(__ FalseConstant());
__ Goto(&end);
}
@@ -234,13 +236,13 @@ Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
__ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
__ BIND(&if_smi);
{
- return_value.Bind(__ BooleanConstant(true));
+ return_value.Bind(__ TrueConstant());
__ Goto(&end);
}
__ BIND(&if_not_smi);
{
- return_value.Bind(__ BooleanConstant(false));
+ return_value.Bind(__ FalseConstant());
__ Goto(&end);
}
@@ -282,11 +284,6 @@ Node* IntrinsicsGenerator::HasProperty(Node* input, Node* arg_count,
input, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::SubString(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate()));
-}
-
Node* IntrinsicsGenerator::ToString(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsStubCall(
@@ -432,6 +429,28 @@ Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count,
return __ UndefinedConstant();
}
+Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count,
+ Node* context) {
+ Node* const module_context = __ LoadModuleContext(context);
+ Node* const module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+ Node* const import_meta =
+ __ LoadObjectField(module, Module::kImportMetaOffset);
+
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+ return_value.Bind(import_meta);
+
+ InterpreterAssembler::Label end(assembler_);
+ __ GotoIfNot(__ IsTheHole(import_meta), &end);
+
+ return_value.Bind(__ CallRuntime(Runtime::kGetImportMetaObject, context));
+ __ Goto(&end);
+
+ __ BIND(&end);
+ return return_value.value();
+}
+
Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsBuiltinCall(input, context,
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 2ffba6c0cd..b9137c8559 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -22,6 +22,7 @@ namespace interpreter {
V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \
V(GeneratorClose, generator_close, 1) \
+ V(GetImportMetaObject, get_import_meta_object, 0) \
V(Call, call, -1) \
V(ClassOf, class_of, 1) \
V(CreateIterResultObject, create_iter_result_object, 2) \
@@ -36,7 +37,6 @@ namespace interpreter {
V(IsJSWeakSet, is_js_weak_set, 1) \
V(IsSmi, is_smi, 1) \
V(IsTypedArray, is_typed_array, 1) \
- V(SubString, sub_string, 3) \
V(ToString, to_string, 1) \
V(ToLength, to_length, 1) \
V(ToInteger, to_integer, 1) \
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 420f17adc6..1f359f1a0f 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -9,7 +9,6 @@
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/counters-inl.h"
@@ -20,6 +19,7 @@
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
+#include "src/snapshot/snapshot.h"
#include "src/visitors.h"
namespace v8 {
@@ -29,43 +29,19 @@ namespace interpreter {
class InterpreterCompilationJob final : public CompilationJob {
public:
InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
- Isolate* isolate);
+ AccountingAllocator* allocator);
protected:
- Status PrepareJobImpl() final;
+ Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
- Status FinalizeJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
private:
- class TimerScope final {
- public:
- explicit TimerScope(RuntimeCallCounter* counter)
- : runtime_stats_enabled_(FLAG_runtime_stats) {
- if (V8_UNLIKELY(runtime_stats_enabled_ && counter != nullptr)) {
- timer_.Start(counter, nullptr);
- }
- }
-
- ~TimerScope() {
- if (V8_UNLIKELY(runtime_stats_enabled_)) {
- timer_.Stop();
- }
- }
-
- private:
- RuntimeCallTimer timer_;
- bool runtime_stats_enabled_;
-
- DISALLOW_COPY_AND_ASSIGN(TimerScope);
- };
-
BytecodeGenerator* generator() { return &generator_; }
Zone zone_;
CompilationInfo compilation_info_;
BytecodeGenerator generator_;
- RuntimeCallStats* runtime_call_stats_;
- RuntimeCallCounter background_execute_counter_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
@@ -82,6 +58,31 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
}
+Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
+ Bytecode bytecode, OperandScale operand_scale) {
+ Code* code = GetBytecodeHandler(bytecode, operand_scale);
+
+ // Already deserialized? Then just return the handler.
+ if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
+
+ DCHECK(FLAG_lazy_handler_deserialization);
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing handler %s\n",
+ Bytecodes::ToString(bytecode, operand_scale).c_str());
+ }
+
+ DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
+ code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale);
+
+ DCHECK(code->IsCode());
+ DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
+ DCHECK(!isolate_->heap()->IsDeserializeLazyHandler(code));
+
+ SetBytecodeHandler(bytecode, operand_scale, code);
+
+ return code;
+}
+
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(IsDispatchTableInitialized());
@@ -91,6 +92,14 @@ Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
return Code::GetCodeFromTargetAddress(code_entry);
}
+void Interpreter::SetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale,
+ Code* handler) {
+ DCHECK(handler->kind() == Code::BYTECODE_HANDLER);
+ size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+ dispatch_table_[index] = handler->entry();
+}
+
// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale) {
@@ -124,20 +133,7 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
namespace {
void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) {
- Isolate* isolate = compilation_info->isolate();
- bool print_ast = isolate->bootstrapper()->IsActive() ? FLAG_print_builtin_ast
- : FLAG_print_ast;
- if (!print_ast) return;
-
- // Requires internalizing the AST, so make sure we are on the main thread and
- // allow handle dereference and allocations.
- // TODO(rmcilroy): Make ast-printer print ast raw strings instead of
- // internalized strings to avoid internalizing here.
- DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- AllowHandleDereference allow_deref;
- AllowHandleAllocation allow_handles;
- AllowHeapAllocation allow_gc;
- parse_info->ast_value_factory()->Internalize(isolate);
+ if (!FLAG_print_ast) return;
OFStream os(stdout);
std::unique_ptr<char[]> name = compilation_info->GetDebugName();
@@ -145,7 +141,9 @@ void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) {
<< compilation_info->GetDebugName().get() << "]" << std::endl;
#ifdef DEBUG
os << "--- AST ---" << std::endl
- << AstPrinter(isolate).PrintProgram(parse_info->literal()) << std::endl;
+ << AstPrinter(parse_info->stack_limit())
+ .PrintProgram(compilation_info->literal())
+ << std::endl;
#endif // DEBUG
}
@@ -163,31 +161,34 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
} // namespace
-InterpreterCompilationJob::InterpreterCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- Isolate* isolate)
- : CompilationJob(isolate, parse_info, &compilation_info_, "Ignition"),
- zone_(isolate->allocator(), ZONE_NAME),
- compilation_info_(&zone_, isolate, parse_info, literal),
- generator_(&compilation_info_),
- runtime_call_stats_(isolate->counters()->runtime_call_stats()),
- background_execute_counter_("CompileBackgroundIgnition") {}
-
-InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
- MaybePrintAst(parse_info(), compilation_info());
+InterpreterCompilationJob::InterpreterCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator)
+ : CompilationJob(parse_info->stack_limit(), parse_info, &compilation_info_,
+ "Ignition", State::kReadyToExecute),
+ zone_(allocator, ZONE_NAME),
+ compilation_info_(&zone_, parse_info, literal),
+ generator_(&compilation_info_, parse_info->ast_string_constants()) {}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
+ UNREACHABLE(); // Prepare should always be skipped.
return SUCCEEDED;
}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
- TimerScope runtimeTimer(
- executed_on_background_thread() ? &background_execute_counter_ : nullptr);
RuntimeCallTimerScope runtimeTimerScope(
- !executed_on_background_thread() ? runtime_call_stats_ : nullptr,
- &RuntimeCallStats::CompileIgnition);
-
+ parse_info()->runtime_call_stats(),
+ parse_info()->on_background_thread()
+ ? &RuntimeCallStats::CompileBackgroundIgnition
+ : &RuntimeCallStats::CompileIgnition);
// TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
+ // Print AST if flag is enabled. Note, if compiling on a background thread
+ // then ASTs from different functions may be intersperse when printed.
+ MaybePrintAst(parse_info(), compilation_info());
+
generator()->GenerateBytecode(stack_limit());
if (generator()->HasStackOverflow()) {
@@ -196,19 +197,16 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
return SUCCEEDED;
}
-InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
- // Add background runtime call stats.
- if (V8_UNLIKELY(FLAG_runtime_stats && executed_on_background_thread())) {
- runtime_call_stats_->CompileBackgroundIgnition.Add(
- &background_execute_counter_);
- }
-
+InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
RuntimeCallTimerScope runtimeTimerScope(
- !executed_on_background_thread() ? runtime_call_stats_ : nullptr,
+ parse_info()->runtime_call_stats(),
&RuntimeCallStats::CompileIgnitionFinalization);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileIgnitionFinalization");
Handle<BytecodeArray> bytecodes =
- generator()->FinalizeBytecode(isolate(), parse_info()->script());
+ generator()->FinalizeBytecode(isolate, parse_info()->script());
if (generator()->HasStackOverflow()) {
return FAILED;
}
@@ -224,17 +222,17 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
compilation_info()->SetBytecodeArray(bytecodes);
compilation_info()->SetCode(
- BUILTIN_CODE(compilation_info()->isolate(), InterpreterEntryTrampoline));
+ BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
return SUCCEEDED;
}
CompilationJob* Interpreter::NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
- Isolate* isolate) {
- return new InterpreterCompilationJob(parse_info, literal, isolate);
+ AccountingAllocator* allocator) {
+ return new InterpreterCompilationJob(parse_info, literal, allocator);
}
-bool Interpreter::IsDispatchTableInitialized() {
+bool Interpreter::IsDispatchTableInitialized() const {
return dispatch_table_[0] != nullptr;
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 04d6435620..7e6d013a29 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -13,20 +13,20 @@
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/interpreter/bytecodes.h"
-#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
class Isolate;
+class BuiltinDeserializerAllocator;
class Callable;
class CompilationInfo;
class CompilationJob;
class FunctionLiteral;
class ParseInfo;
-class SetupIsolateDelegate;
class RootVisitor;
+class SetupIsolateDelegate;
namespace interpreter {
@@ -40,11 +40,20 @@ class Interpreter {
// Creates a compilation job which will generate bytecode for |literal|.
static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
- Isolate* isolate);
+ AccountingAllocator* allocator);
+
+ // If the bytecode handler for |bytecode| and |operand_scale| has not yet
+ // been loaded, deserialize it. Then return the handler.
+ Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale);
- // Return bytecode handler for |bytecode|.
+ // Return bytecode handler for |bytecode| and |operand_scale|.
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
+ // Set the bytecode handler for |bytecode| and |operand_scale|.
+ void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
+ Code* handler);
+
// GC support.
void IterateDispatchTable(RootVisitor* v);
@@ -53,6 +62,8 @@ class Interpreter {
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
+ bool IsDispatchTableInitialized() const;
+
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
}
@@ -67,6 +78,7 @@ class Interpreter {
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
+ friend class v8::internal::BuiltinDeserializerAllocator;
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
@@ -74,9 +86,7 @@ class Interpreter {
static size_t GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale);
- bool IsDispatchTableInitialized();
-
- static const int kNumberOfWideVariants = 3;
+ static const int kNumberOfWideVariants = BytecodeOperands::kOperandScaleCount;
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
static const int kNumberOfBytecodes = static_cast<int>(Bytecode::kLast) + 1;
diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc
index efb0b81bf9..1bba10422c 100644
--- a/deps/v8/src/interpreter/setup-interpreter-internal.cc
+++ b/deps/v8/src/interpreter/setup-interpreter-internal.cc
@@ -56,42 +56,28 @@ void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) {
}
}
+ // Generate the DeserializeLazy handlers, one for each operand scale.
+ Heap* heap = interpreter->isolate_->heap();
+ DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler());
+ heap->SetDeserializeLazyHandler(*GenerateDeserializeLazyHandler(
+ interpreter->isolate_, OperandScale::kSingle));
+ DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_wide());
+ heap->SetDeserializeLazyHandlerWide(*GenerateDeserializeLazyHandler(
+ interpreter->isolate_, OperandScale::kDouble));
+ DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_extra_wide());
+ heap->SetDeserializeLazyHandlerExtraWide(*GenerateDeserializeLazyHandler(
+ interpreter->isolate_, OperandScale::kQuadruple));
+
// Initialization should have been successful.
DCHECK(interpreter->IsDispatchTableInitialized());
}
// static
-bool SetupInterpreter::ReuseExistingHandler(Address* dispatch_table,
- Bytecode bytecode,
- OperandScale operand_scale) {
- size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
- switch (bytecode) {
- case Bytecode::kLdaImmutableContextSlot:
- STATIC_ASSERT(static_cast<int>(Bytecode::kLdaContextSlot) <
- static_cast<int>(Bytecode::kLdaImmutableContextSlot));
- dispatch_table[index] = dispatch_table[Interpreter::GetDispatchTableIndex(
- Bytecode::kLdaContextSlot, operand_scale)];
- return true;
- case Bytecode::kLdaImmutableCurrentContextSlot:
- STATIC_ASSERT(
- static_cast<int>(Bytecode::kLdaCurrentContextSlot) <
- static_cast<int>(Bytecode::kLdaImmutableCurrentContextSlot));
- dispatch_table[index] = dispatch_table[Interpreter::GetDispatchTableIndex(
- Bytecode::kLdaCurrentContextSlot, operand_scale)];
- return true;
- default:
- return false;
- }
- return false;
-}
-
-// static
void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate,
Address* dispatch_table,
Bytecode bytecode,
OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
- if (ReuseExistingHandler(dispatch_table, bytecode, operand_scale)) return;
size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
Handle<Code> code = GenerateBytecodeHandler(isolate, bytecode, operand_scale);
diff --git a/deps/v8/src/interpreter/setup-interpreter.h b/deps/v8/src/interpreter/setup-interpreter.h
index e9adad9537..19b03f7f7f 100644
--- a/deps/v8/src/interpreter/setup-interpreter.h
+++ b/deps/v8/src/interpreter/setup-interpreter.h
@@ -19,10 +19,6 @@ class SetupInterpreter {
static void InstallBytecodeHandlers(Interpreter* interpreter);
private:
- // In the case of bytecodes that share handler implementations, copy the code
- // into the bytecode's dispatcher table entry and return true.
- static bool ReuseExistingHandler(Address* dispatch_table, Bytecode bytecode,
- OperandScale operand_scale);
// Generates handler for given |bytecode| and |operand_scale|
// and installs it into the |dispatch_table|.
static void InstallBytecodeHandler(Isolate* isolate, Address* dispatch_table,
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index a914811e5c..f51c1cd29a 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -13,7 +13,7 @@ namespace internal {
void Isolate::set_context(Context* context) {
- DCHECK(context == NULL || context->IsContext());
+ DCHECK(context == nullptr || context->IsContext());
thread_local_top_.context_ = context;
}
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 9312432763..e3ee968f79 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -21,7 +21,7 @@
#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
#include "src/cancelable-task.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
@@ -31,7 +31,6 @@
#include "src/elements.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/ic/access-compiler-data.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/interpreter.h"
@@ -48,12 +47,14 @@
#include "src/simulator.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/tracing/tracing-category-observer.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache.h"
#include "src/v8.h"
#include "src/version.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
#include "src/wasm/compilation-manager.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
@@ -88,26 +89,26 @@ void ThreadLocalTop::InitializeInternal() {
c_function_ = 0;
handler_ = 0;
#ifdef USE_SIMULATOR
- simulator_ = NULL;
+ simulator_ = nullptr;
#endif
- js_entry_sp_ = NULL;
- external_callback_scope_ = NULL;
+ js_entry_sp_ = nullptr;
+ external_callback_scope_ = nullptr;
current_vm_state_ = EXTERNAL;
- try_catch_handler_ = NULL;
- context_ = NULL;
+ try_catch_handler_ = nullptr;
+ context_ = nullptr;
thread_id_ = ThreadId::Invalid();
external_caught_exception_ = false;
- failed_access_check_callback_ = NULL;
- save_context_ = NULL;
- promise_on_stack_ = NULL;
+ failed_access_check_callback_ = nullptr;
+ save_context_ = nullptr;
+ promise_on_stack_ = nullptr;
// These members are re-initialized later after deserialization
// is complete.
- pending_exception_ = NULL;
- wasm_caught_exception_ = NULL;
+ pending_exception_ = nullptr;
+ wasm_caught_exception_ = nullptr;
rethrowing_message_ = false;
- pending_message_obj_ = NULL;
- scheduled_exception_ = NULL;
+ pending_message_obj_ = nullptr;
+ scheduled_exception_ = nullptr;
}
@@ -131,7 +132,7 @@ base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::thread_id_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
base::LazyMutex Isolate::thread_data_table_mutex_ = LAZY_MUTEX_INITIALIZER;
-Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
+Isolate::ThreadDataTable* Isolate::thread_data_table_ = nullptr;
base::Atomic32 Isolate::isolate_counter_ = 0;
#if DEBUG
base::Atomic32 Isolate::isolate_key_created_ = 0;
@@ -140,11 +141,11 @@ base::Atomic32 Isolate::isolate_key_created_ = 0;
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
- PerIsolateThreadData* per_thread = NULL;
+ PerIsolateThreadData* per_thread = nullptr;
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
per_thread = thread_data_table_->Lookup(this, thread_id);
- if (per_thread == NULL) {
+ if (per_thread == nullptr) {
per_thread = new PerIsolateThreadData(this, thread_id);
thread_data_table_->Insert(per_thread);
}
@@ -178,7 +179,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
ThreadId thread_id) {
- PerIsolateThreadData* per_thread = NULL;
+ PerIsolateThreadData* per_thread = nullptr;
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
per_thread = thread_data_table_->Lookup(this, thread_id);
@@ -189,7 +190,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
void Isolate::InitializeOncePerProcess() {
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
- CHECK(thread_data_table_ == NULL);
+ CHECK_NULL(thread_data_table_);
isolate_key_ = base::Thread::CreateThreadLocalKey();
#if DEBUG
base::Relaxed_Store(&isolate_key_created_, 1);
@@ -223,8 +224,7 @@ void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(thread->context_)));
v->VisitRootPointer(Root::kTop, &thread->scheduled_exception_);
- for (v8::TryCatch* block = thread->try_catch_handler();
- block != NULL;
+ for (v8::TryCatch* block = thread->try_catch_handler(); block != nullptr;
block = block->next_) {
v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->exception_)));
v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->message_obj_)));
@@ -242,8 +242,7 @@ void Isolate::Iterate(RootVisitor* v) {
}
void Isolate::IterateDeferredHandles(RootVisitor* visitor) {
- for (DeferredHandles* deferred = deferred_handles_head_;
- deferred != NULL;
+ for (DeferredHandles* deferred = deferred_handles_head_; deferred != nullptr;
deferred = deferred->next_) {
deferred->Iterate(visitor);
}
@@ -258,8 +257,7 @@ bool Isolate::IsDeferredHandle(Object** handle) {
// not be fully filled.
// We iterate through all the blocks to see whether the argument handle
// belongs to one of the blocks. If so, it is deferred.
- for (DeferredHandles* deferred = deferred_handles_head_;
- deferred != NULL;
+ for (DeferredHandles* deferred = deferred_handles_head_; deferred != nullptr;
deferred = deferred->next_) {
std::vector<Object**>* blocks = &deferred->blocks_;
for (size_t i = 0; i < blocks->size(); i++) {
@@ -293,7 +291,7 @@ Handle<String> Isolate::StackTraceString() {
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
Handle<String> stack_trace = accumulator.ToString(this);
- incomplete_message_ = NULL;
+ incomplete_message_ = nullptr;
stack_trace_nesting_level_ = 0;
return stack_trace;
} else if (stack_trace_nesting_level_ == 1) {
@@ -393,7 +391,6 @@ class FrameArrayBuilder {
void AppendStandardFrame(StandardFrame* frame) {
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
// A standard frame may include many summarized frames (due to inlining).
for (size_t i = frames.size(); i != 0 && !full(); i--) {
@@ -431,6 +428,10 @@ class FrameArrayBuilder {
// Handle a WASM compiled frame.
//====================================================================
const auto& summary = summ.AsWasmCompiled();
+ if (!summary.code().IsCodeObject() &&
+ summary.code().GetWasmCode()->kind() != wasm::WasmCode::Function) {
+ continue;
+ }
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
if (instance->compiled_module()->is_asm_js()) {
@@ -443,9 +444,8 @@ class FrameArrayBuilder {
}
elements_ = FrameArray::AppendWasmFrame(
- elements_, instance, summary.function_index(),
- Handle<AbstractCode>::cast(summary.code()), summary.code_offset(),
- flags);
+ elements_, instance, summary.function_index(), summary.code(),
+ summary.code_offset(), flags);
} else if (summ.IsWasmInterpreted()) {
//====================================================================
// Handle a WASM interpreted frame.
@@ -454,9 +454,9 @@ class FrameArrayBuilder {
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = FrameArray::kIsWasmInterpretedFrame;
DCHECK(!instance->compiled_module()->is_asm_js());
- elements_ = FrameArray::AppendWasmFrame(
- elements_, instance, summary.function_index(),
- Handle<AbstractCode>::null(), summary.byte_offset(), flags);
+ elements_ = FrameArray::AppendWasmFrame(elements_, instance,
+ summary.function_index(), {},
+ summary.byte_offset(), flags);
}
}
}
@@ -572,6 +572,11 @@ bool GetStackTraceLimit(Isolate* isolate, int* result) {
// Ensure that limit is not negative.
*result = Max(FastD2IChecked(stack_trace_limit->Number()), 0);
+
+ if (*result != FLAG_stack_trace_limit) {
+ isolate->CountUsage(v8::Isolate::kErrorStackTraceLimit);
+ }
+
return true;
}
@@ -628,9 +633,10 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
- RETURN_ON_EXCEPTION(
- this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
- JSReceiver);
+ RETURN_ON_EXCEPTION(this,
+ JSReceiver::SetProperty(error_object, key, stack_trace,
+ LanguageMode::kStrict),
+ JSReceiver);
}
return error_object;
}
@@ -642,9 +648,10 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
Handle<Name> key = factory()->stack_trace_symbol();
Handle<Object> stack_trace =
CaptureSimpleStackTrace(error_object, mode, caller);
- RETURN_ON_EXCEPTION(
- this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
- JSReceiver);
+ RETURN_ON_EXCEPTION(this,
+ JSReceiver::SetProperty(error_object, key, stack_trace,
+ LanguageMode::kStrict),
+ JSReceiver);
return error_object;
}
@@ -657,6 +664,35 @@ Handle<FixedArray> Isolate::GetDetailedStackTrace(
return Handle<FixedArray>();
}
+Address Isolate::GetAbstractPC(int* line, int* column) {
+ JavaScriptFrameIterator it(this);
+
+ JavaScriptFrame* frame = it.frame();
+ DCHECK(!frame->is_builtin());
+ int position = frame->position();
+
+ Object* maybe_script = frame->function()->shared()->script();
+ if (maybe_script->IsScript()) {
+ Handle<Script> script(Script::cast(maybe_script), this);
+ Script::PositionInfo info;
+ Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
+ *line = info.line + 1;
+ *column = info.column + 1;
+ } else {
+ *line = position;
+ *column = -1;
+ }
+
+ if (frame->is_interpreted()) {
+ InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
+ Address bytecode_start =
+ reinterpret_cast<Address>(iframe->GetBytecodeArray()) - kHeapObjectTag +
+ BytecodeArray::kHeaderSize;
+ return bytecode_start + iframe->GetBytecodeOffset();
+ }
+
+ return frame->pc();
+}
class CaptureStackTraceHelper {
public:
@@ -673,19 +709,19 @@ class CaptureStackTraceHelper {
int code_offset;
Handle<ByteArray> source_position_table;
Handle<Object> maybe_cache;
- Handle<UnseededNumberDictionary> cache;
+ Handle<NumberDictionary> cache;
if (!FLAG_optimize_for_size) {
code_offset = summ.code_offset();
source_position_table =
handle(summ.abstract_code()->source_position_table(), isolate_);
maybe_cache = handle(summ.abstract_code()->stack_frame_cache(), isolate_);
- if (maybe_cache->IsUnseededNumberDictionary()) {
- cache = Handle<UnseededNumberDictionary>::cast(maybe_cache);
+ if (maybe_cache->IsNumberDictionary()) {
+ cache = Handle<NumberDictionary>::cast(maybe_cache);
} else {
- cache = UnseededNumberDictionary::New(isolate_, 1);
+ cache = NumberDictionary::New(isolate_, 1);
}
int entry = cache->FindEntry(code_offset);
- if (entry != UnseededNumberDictionary::kNotFound) {
+ if (entry != NumberDictionary::kNotFound) {
Handle<StackFrameInfo> frame(
StackFrameInfo::cast(cache->ValueAt(entry)));
DCHECK(frame->function_name()->IsString());
@@ -715,8 +751,8 @@ class CaptureStackTraceHelper {
frame->set_is_constructor(summ.is_constructor());
frame->set_is_wasm(false);
if (!FLAG_optimize_for_size) {
- auto new_cache = UnseededNumberDictionary::Set(cache, code_offset, frame);
- if (*new_cache != *cache || !maybe_cache->IsUnseededNumberDictionary()) {
+ auto new_cache = NumberDictionary::Set(cache, code_offset, frame);
+ if (*new_cache != *cache || !maybe_cache->IsNumberDictionary()) {
AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
}
}
@@ -774,7 +810,6 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (size_t i = frames.size(); i != 0 && frames_seen < limit; i--) {
FrameSummary& frame = frames[i - 1];
@@ -804,7 +839,7 @@ void Isolate::PrintStack(FILE* out, PrintStackMode mode) {
accumulator.OutputToFile(out);
InitializeLoggingAndCounters();
accumulator.Log(this);
- incomplete_message_ = NULL;
+ incomplete_message_ = nullptr;
stack_trace_nesting_level_ = 0;
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
@@ -1014,7 +1049,7 @@ void Isolate::InvokeApiInterruptCallbacks() {
void ReportBootstrappingException(Handle<Object> exception,
MessageLocation* location) {
base::OS::PrintError("Exception thrown during bootstrapping\n");
- if (location == NULL || location->script().is_null()) return;
+ if (location == nullptr || location->script().is_null()) return;
// We are bootstrapping and caught an error where the location is set
// and we have a script for the location.
// In this case we could have an extension (or an internal error
@@ -1139,7 +1174,7 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
if (requires_message && !rethrowing_message) {
MessageLocation computed_location;
// If no location was specified we try to use a computed one instead.
- if (location == NULL && ComputeLocation(&computed_location)) {
+ if (location == nullptr && ComputeLocation(&computed_location)) {
location = &computed_location;
}
@@ -1195,12 +1230,15 @@ Object* Isolate::ReThrow(Object* exception) {
Object* Isolate::UnwindAndFindHandler() {
Object* exception = pending_exception();
- auto FoundHandler = [&](Context* context, Code* code, intptr_t offset,
- Address handler_sp, Address handler_fp) {
+ auto FoundHandler = [&](Context* context, Address instruction_start,
+ intptr_t handler_offset,
+ Address constant_pool_address, Address handler_sp,
+ Address handler_fp) {
// Store information to be consumed by the CEntryStub.
thread_local_top()->pending_handler_context_ = context;
- thread_local_top()->pending_handler_code_ = code;
- thread_local_top()->pending_handler_offset_ = offset;
+ thread_local_top()->pending_handler_entrypoint_ =
+ instruction_start + handler_offset;
+ thread_local_top()->pending_handler_constant_pool_ = constant_pool_address;
thread_local_top()->pending_handler_fp_ = handler_fp;
thread_local_top()->pending_handler_sp_ = handler_sp;
@@ -1233,7 +1271,8 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the handler.
Code* code = frame->LookupCode();
return FoundHandler(
- nullptr, code, Smi::ToInt(code->handler_table()->get(0)),
+ nullptr, code->instruction_start(),
+ Smi::ToInt(code->handler_table()->get(0)), code->constant_pool(),
handler->address() + StackHandlerConstants::kSize, 0);
}
@@ -1260,8 +1299,17 @@ Object* Isolate::UnwindAndFindHandler() {
trap_handler::SetThreadInWasm();
set_wasm_caught_exception(exception);
- return FoundHandler(nullptr, frame->LookupCode(), offset, return_sp,
- frame->fp());
+ if (FLAG_wasm_jit_to_native) {
+ wasm::WasmCode* wasm_code =
+ wasm_code_manager()->LookupCode(frame->pc());
+ return FoundHandler(nullptr, wasm_code->instructions().start(),
+ offset, wasm_code->constant_pool(), return_sp,
+ frame->fp());
+ } else {
+ Code* code = frame->LookupCode();
+ return FoundHandler(nullptr, code->instruction_start(), offset,
+ code->constant_pool(), return_sp, frame->fp());
+ }
}
case StackFrame::OPTIMIZED: {
@@ -1292,7 +1340,8 @@ Object* Isolate::UnwindAndFindHandler() {
set_deoptimizer_lazy_throw(true);
}
- return FoundHandler(nullptr, code, offset, return_sp, frame->fp());
+ return FoundHandler(nullptr, code->instruction_start(), offset,
+ code->constant_pool(), return_sp, frame->fp());
}
case StackFrame::STUB: {
@@ -1315,7 +1364,8 @@ Object* Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
stack_slots * kPointerSize;
- return FoundHandler(nullptr, code, offset, return_sp, frame->fp());
+ return FoundHandler(nullptr, code->instruction_start(), offset,
+ code->constant_pool(), return_sp, frame->fp());
}
case StackFrame::INTERPRETED: {
@@ -1347,7 +1397,8 @@ Object* Isolate::UnwindAndFindHandler() {
Code* code =
builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return FoundHandler(context, code, 0, return_sp, frame->fp());
+ return FoundHandler(context, code->instruction_start(), 0,
+ code->constant_pool(), return_sp, frame->fp());
}
case StackFrame::BUILTIN:
@@ -1573,7 +1624,6 @@ bool Isolate::ComputeLocation(MessageLocation* target) {
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
FrameSummary& summary = frames.back();
int pos = summary.SourcePosition();
@@ -1635,12 +1685,19 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<WasmCompiledModule> compiled_module(
WasmInstanceObject::cast(elements->WasmInstance(i))
->compiled_module());
- int func_index = elements->WasmFunctionIndex(i)->value();
+ uint32_t func_index =
+ static_cast<uint32_t>(elements->WasmFunctionIndex(i)->value());
int code_offset = elements->Offset(i)->value();
- // TODO(wasm): Clean this up (bug 5007).
- int byte_offset = code_offset < 0
- ? (-1 - code_offset)
- : elements->Code(i)->SourcePosition(code_offset);
+
+ // TODO(titzer): store a reference to the code object in FrameArray;
+ // a second lookup here could lead to inconsistency.
+ int byte_offset =
+ FLAG_wasm_jit_to_native
+ ? FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ compiled_module->GetNativeModule()->GetCode(func_index),
+ code_offset)
+ : elements->Code(i)->SourcePosition(code_offset);
+
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
@@ -1691,7 +1748,7 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
}
}
MessageLocation computed_location;
- if (location == NULL &&
+ if (location == nullptr &&
(ComputeLocationFromException(&computed_location, exception) ||
ComputeLocationFromStackTrace(&computed_location, exception) ||
ComputeLocation(&computed_location))) {
@@ -1840,7 +1897,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
// If the exception is externally caught, clear it if there are no
// JavaScript frames on the way to the C++ frame that has the
// external handler.
- DCHECK(thread_local_top()->try_catch_handler_address() != NULL);
+ DCHECK_NOT_NULL(thread_local_top()->try_catch_handler_address());
Address external_handler_address =
thread_local_top()->try_catch_handler_address();
JavaScriptFrameIterator it(this);
@@ -1872,7 +1929,7 @@ void Isolate::PushPromise(Handle<JSObject> promise) {
void Isolate::PopPromise() {
ThreadLocalTop* tltop = thread_local_top();
- if (tltop->promise_on_stack_ == NULL) return;
+ if (tltop->promise_on_stack_ == nullptr) return;
PromiseOnStack* prev = tltop->promise_on_stack_->prev();
Handle<Object> global_promise = tltop->promise_on_stack_->promise();
delete tltop->promise_on_stack_;
@@ -1976,7 +2033,7 @@ bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<Object> undefined = factory()->undefined_value();
ThreadLocalTop* tltop = thread_local_top();
- if (tltop->promise_on_stack_ == NULL) return undefined;
+ if (tltop->promise_on_stack_ == nullptr) return undefined;
// Find the top-most try-catch or try-finally handler.
CatchType prediction = PredictExceptionCatcher();
if (prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) {
@@ -2111,15 +2168,11 @@ char* Isolate::RestoreThread(char* from) {
#ifdef USE_SIMULATOR
thread_local_top()->simulator_ = Simulator::current(this);
#endif
- DCHECK(context() == NULL || context()->IsContext());
+ DCHECK(context() == nullptr || context()->IsContext());
return from + sizeof(ThreadLocalTop);
}
-
-Isolate::ThreadDataTable::ThreadDataTable()
- : list_(NULL) {
-}
-
+Isolate::ThreadDataTable::ThreadDataTable() : list_(nullptr) {}
Isolate::ThreadDataTable::~ThreadDataTable() {
// TODO(svenpanne) The assertion below would fire if an embedder does not
@@ -2177,15 +2230,16 @@ Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
Isolate::PerIsolateThreadData*
Isolate::ThreadDataTable::Lookup(Isolate* isolate,
ThreadId thread_id) {
- for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
+ for (PerIsolateThreadData* data = list_; data != nullptr;
+ data = data->next_) {
if (data->Matches(isolate, thread_id)) return data;
}
- return NULL;
+ return nullptr;
}
void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
- if (list_ != NULL) list_->prev_ = data;
+ if (list_ != nullptr) list_->prev_ = data;
data->next_ = list_;
list_ = data;
}
@@ -2193,15 +2247,15 @@ void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
if (list_ == data) list_ = data->next_;
- if (data->next_ != NULL) data->next_->prev_ = data->prev_;
- if (data->prev_ != NULL) data->prev_->next_ = data->next_;
+ if (data->next_ != nullptr) data->next_->prev_ = data->prev_;
+ if (data->prev_ != nullptr) data->prev_->next_ = data->next_;
delete data;
}
void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
PerIsolateThreadData* data = list_;
- while (data != NULL) {
+ while (data != nullptr) {
PerIsolateThreadData* next = data->next_;
if (data->isolate() == isolate) Remove(data);
data = next;
@@ -2264,27 +2318,20 @@ class VerboseAccountingAllocator : public AccountingAllocator {
}
void ZoneCreation(const Zone* zone) override {
- double time = heap_->isolate()->time_millis_since_init();
- PrintF(
- "{"
- "\"type\": \"zonecreation\", "
- "\"isolate\": \"%p\", "
- "\"time\": %f, "
- "\"ptr\": \"%p\", "
- "\"name\": \"%s\","
- "\"nesting\": %" PRIuS "}\n",
- reinterpret_cast<void*>(heap_->isolate()), time,
- reinterpret_cast<const void*>(zone), zone->name(),
- nesting_deepth_.Value());
+ PrintZoneModificationSample(zone, "zonecreation");
nesting_deepth_.Increment(1);
}
void ZoneDestruction(const Zone* zone) override {
nesting_deepth_.Decrement(1);
- double time = heap_->isolate()->time_millis_since_init();
+ PrintZoneModificationSample(zone, "zonedestruction");
+ }
+
+ private:
+ void PrintZoneModificationSample(const Zone* zone, const char* type) {
PrintF(
"{"
- "\"type\": \"zonedestruction\", "
+ "\"type\": \"%s\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
"\"ptr\": \"%p\", "
@@ -2292,12 +2339,12 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"size\": %" PRIuS
","
"\"nesting\": %" PRIuS "}\n",
- reinterpret_cast<void*>(heap_->isolate()), time,
+ type, reinterpret_cast<void*>(heap_->isolate()),
+ heap_->isolate()->time_millis_since_init(),
reinterpret_cast<const void*>(zone), zone->name(),
zone->allocation_size(), nesting_deepth_.Value());
}
- private:
void PrintMemoryJSON(size_t malloced, size_t pooled) {
// Note: Neither isolate, nor heap is locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
@@ -2320,67 +2367,73 @@ class VerboseAccountingAllocator : public AccountingAllocator {
size_t allocation_sample_bytes_, pool_sample_bytes_;
};
+#ifdef DEBUG
+base::AtomicNumber<size_t> Isolate::non_disposed_isolates_;
+#endif // DEBUG
+
Isolate::Isolate(bool enable_serializer)
: embedder_data_(),
- entry_stack_(NULL),
+ entry_stack_(nullptr),
stack_trace_nesting_level_(0),
- incomplete_message_(NULL),
- bootstrapper_(NULL),
- runtime_profiler_(NULL),
- compilation_cache_(NULL),
- logger_(NULL),
- load_stub_cache_(NULL),
- store_stub_cache_(NULL),
- deoptimizer_data_(NULL),
+ incomplete_message_(nullptr),
+ bootstrapper_(nullptr),
+ runtime_profiler_(nullptr),
+ compilation_cache_(nullptr),
+ logger_(nullptr),
+ load_stub_cache_(nullptr),
+ store_stub_cache_(nullptr),
+ deoptimizer_data_(nullptr),
deoptimizer_lazy_throw_(false),
- materialized_object_store_(NULL),
+ materialized_object_store_(nullptr),
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- context_slot_cache_(NULL),
- descriptor_lookup_cache_(NULL),
- handle_scope_implementer_(NULL),
- unicode_cache_(NULL),
+ context_slot_cache_(nullptr),
+ descriptor_lookup_cache_(nullptr),
+ handle_scope_implementer_(nullptr),
+ unicode_cache_(nullptr),
allocator_(FLAG_trace_gc_object_stats ? new VerboseAccountingAllocator(
&heap_, 256 * KB, 128 * KB)
: new AccountingAllocator()),
- inner_pointer_to_code_cache_(NULL),
- global_handles_(NULL),
- eternal_handles_(NULL),
- thread_manager_(NULL),
- setup_delegate_(NULL),
- regexp_stack_(NULL),
- date_cache_(NULL),
- call_descriptor_data_(NULL),
+ inner_pointer_to_code_cache_(nullptr),
+ global_handles_(nullptr),
+ eternal_handles_(nullptr),
+ thread_manager_(nullptr),
+ setup_delegate_(nullptr),
+ regexp_stack_(nullptr),
+ date_cache_(nullptr),
+ call_descriptor_data_(nullptr),
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
- random_number_generator_(NULL),
+ random_number_generator_(nullptr),
+ fuzzer_rng_(nullptr),
rail_mode_(PERFORMANCE_ANIMATION),
promise_hook_or_debug_is_active_(false),
- promise_hook_(NULL),
+ promise_hook_(nullptr),
load_start_time_ms_(0),
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
is_tail_call_elimination_enabled_(true),
is_isolate_in_background_(false),
- cpu_profiler_(NULL),
- heap_profiler_(NULL),
+ cpu_profiler_(nullptr),
+ heap_profiler_(nullptr),
code_event_dispatcher_(new CodeEventDispatcher()),
- function_entry_hook_(NULL),
- deferred_handles_head_(NULL),
- optimizing_compile_dispatcher_(NULL),
+ function_entry_hook_(nullptr),
+ deferred_handles_head_(nullptr),
+ optimizing_compile_dispatcher_(nullptr),
stress_deopt_count_(0),
+ force_slow_path_(false),
next_optimization_id_(0),
#if V8_SFI_HAS_UNIQUE_ID
next_unique_sfi_id_(0),
#endif
is_running_microtasks_(false),
- use_counter_callback_(NULL),
- basic_block_profiler_(NULL),
+ use_counter_callback_(nullptr),
+ basic_block_profiler_(nullptr),
cancelable_task_manager_(new CancelableTaskManager()),
wasm_compilation_manager_(new wasm::CompilationManager()),
- abort_on_uncaught_exception_callback_(NULL),
+ abort_on_uncaught_exception_callback_(nullptr),
total_regexp_code_generated_(0) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
@@ -2403,7 +2456,9 @@ Isolate::Isolate(bool enable_serializer)
#ifdef DEBUG
// heap_histograms_ initializes itself.
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
-#endif
+
+ non_disposed_isolates_.Increment(1);
+#endif // DEBUG
handle_scope_data_.Initialize();
@@ -2432,10 +2487,10 @@ void Isolate::TearDown() {
// direct pointer. We don't use Enter/Exit here to avoid
// initializing the thread data.
PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- DCHECK(base::Relaxed_Load(&isolate_key_created_) == 1);
+ DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
Isolate* saved_isolate =
reinterpret_cast<Isolate*>(base::Thread::GetThreadLocal(isolate_key_));
- SetIsolateThreadLocals(this, NULL);
+ SetIsolateThreadLocals(this, nullptr);
Deinit();
@@ -2444,6 +2499,10 @@ void Isolate::TearDown() {
thread_data_table_->RemoveAllThreads(this);
}
+#ifdef DEBUG
+ non_disposed_isolates_.Decrement(1);
+#endif // DEBUG
+
delete this;
// Restore the previous current isolate.
@@ -2453,15 +2512,15 @@ void Isolate::TearDown() {
void Isolate::GlobalTearDown() {
delete thread_data_table_;
- thread_data_table_ = NULL;
+ thread_data_table_ = nullptr;
}
void Isolate::ClearSerializerData() {
delete external_reference_table_;
- external_reference_table_ = NULL;
+ external_reference_table_ = nullptr;
delete external_reference_map_;
- external_reference_map_ = NULL;
+ external_reference_map_ = nullptr;
}
@@ -2473,7 +2532,7 @@ void Isolate::Deinit() {
if (concurrent_recompilation_enabled()) {
optimizing_compile_dispatcher_->Stop();
delete optimizing_compile_dispatcher_;
- optimizing_compile_dispatcher_ = NULL;
+ optimizing_compile_dispatcher_ = nullptr;
}
wasm_compilation_manager_->TearDown();
@@ -2501,20 +2560,20 @@ void Isolate::Deinit() {
ReleaseManagedObjects();
delete deoptimizer_data_;
- deoptimizer_data_ = NULL;
+ deoptimizer_data_ = nullptr;
builtins_.TearDown();
bootstrapper_->TearDown();
- if (runtime_profiler_ != NULL) {
+ if (runtime_profiler_ != nullptr) {
delete runtime_profiler_;
- runtime_profiler_ = NULL;
+ runtime_profiler_ = nullptr;
}
delete basic_block_profiler_;
- basic_block_profiler_ = NULL;
+ basic_block_profiler_ = nullptr;
delete heap_profiler_;
- heap_profiler_ = NULL;
+ heap_profiler_ = nullptr;
compiler_dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kBlock);
delete compiler_dispatcher_;
@@ -2526,18 +2585,18 @@ void Isolate::Deinit() {
logger_->TearDown();
delete interpreter_;
- interpreter_ = NULL;
+ interpreter_ = nullptr;
delete ast_string_constants_;
ast_string_constants_ = nullptr;
delete cpu_profiler_;
- cpu_profiler_ = NULL;
+ cpu_profiler_ = nullptr;
code_event_dispatcher_.reset();
delete root_index_map_;
- root_index_map_ = NULL;
+ root_index_map_ = nullptr;
ClearSerializerData();
}
@@ -2554,71 +2613,71 @@ Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
// The entry stack must be empty when we get here.
- DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
+ DCHECK(entry_stack_ == nullptr || entry_stack_->previous_item == nullptr);
delete entry_stack_;
- entry_stack_ = NULL;
+ entry_stack_ = nullptr;
delete unicode_cache_;
- unicode_cache_ = NULL;
+ unicode_cache_ = nullptr;
delete date_cache_;
- date_cache_ = NULL;
+ date_cache_ = nullptr;
delete[] call_descriptor_data_;
- call_descriptor_data_ = NULL;
-
- delete access_compiler_data_;
- access_compiler_data_ = NULL;
+ call_descriptor_data_ = nullptr;
delete regexp_stack_;
- regexp_stack_ = NULL;
+ regexp_stack_ = nullptr;
delete descriptor_lookup_cache_;
- descriptor_lookup_cache_ = NULL;
+ descriptor_lookup_cache_ = nullptr;
delete context_slot_cache_;
- context_slot_cache_ = NULL;
+ context_slot_cache_ = nullptr;
delete load_stub_cache_;
- load_stub_cache_ = NULL;
+ load_stub_cache_ = nullptr;
delete store_stub_cache_;
- store_stub_cache_ = NULL;
+ store_stub_cache_ = nullptr;
delete materialized_object_store_;
- materialized_object_store_ = NULL;
+ materialized_object_store_ = nullptr;
delete logger_;
- logger_ = NULL;
+ logger_ = nullptr;
delete handle_scope_implementer_;
- handle_scope_implementer_ = NULL;
+ handle_scope_implementer_ = nullptr;
delete code_tracer();
- set_code_tracer(NULL);
+ set_code_tracer(nullptr);
delete compilation_cache_;
- compilation_cache_ = NULL;
+ compilation_cache_ = nullptr;
delete bootstrapper_;
- bootstrapper_ = NULL;
+ bootstrapper_ = nullptr;
delete inner_pointer_to_code_cache_;
- inner_pointer_to_code_cache_ = NULL;
+ inner_pointer_to_code_cache_ = nullptr;
delete thread_manager_;
- thread_manager_ = NULL;
+ thread_manager_ = nullptr;
delete global_handles_;
- global_handles_ = NULL;
+ global_handles_ = nullptr;
delete eternal_handles_;
- eternal_handles_ = NULL;
+ eternal_handles_ = nullptr;
delete string_stream_debug_object_cache_;
- string_stream_debug_object_cache_ = NULL;
+ string_stream_debug_object_cache_ = nullptr;
delete random_number_generator_;
- random_number_generator_ = NULL;
+ random_number_generator_ = nullptr;
+
+ delete fuzzer_rng_;
+ fuzzer_rng_ = nullptr;
delete debug_;
- debug_ = NULL;
+ debug_ = nullptr;
delete cancelable_task_manager_;
cancelable_task_manager_ = nullptr;
@@ -2680,7 +2739,7 @@ bool Isolate::InitializeCounters() {
}
void Isolate::InitializeLoggingAndCounters() {
- if (logger_ == NULL) {
+ if (logger_ == nullptr) {
logger_ = new Logger(this);
}
InitializeCounters();
@@ -2702,16 +2761,19 @@ void PrintBuiltinSizes(Isolate* isolate) {
bool Isolate::Init(StartupDeserializer* des) {
TRACE_ISOLATE(init);
+ time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
+
stress_deopt_count_ = FLAG_deopt_every_n_times;
+ force_slow_path_ = FLAG_force_slow_path;
has_fatal_error_ = false;
- if (function_entry_hook() != NULL) {
+ if (function_entry_hook() != nullptr) {
// When function entry hooking is in effect, we have to create the code
// stubs from scratch to get entry hooks, rather than loading the previously
// generated stubs from disk.
// If this assert fires, the initialization path has regressed.
- DCHECK(des == NULL);
+ DCHECK_NULL(des);
}
// The initialization process does not handle memory exhaustion.
@@ -2743,7 +2805,6 @@ bool Isolate::Init(StartupDeserializer* des) {
date_cache_ = new DateCache();
call_descriptor_data_ =
new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
- access_compiler_data_ = new AccessCompilerData();
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
@@ -2776,15 +2837,26 @@ bool Isolate::Init(StartupDeserializer* des) {
return false;
}
+ // Setup the wasm code manager. Currently, there's one per Isolate.
+ if (!wasm_code_manager_) {
+ size_t max_code_size = kMaxWasmCodeMemory;
+ if (kRequiresCodeRange) {
+ max_code_size = std::min(max_code_size,
+ heap_.memory_allocator()->code_range()->size());
+ }
+ wasm_code_manager_.reset(new wasm::WasmCodeManager(
+ reinterpret_cast<v8::Isolate*>(this), max_code_size));
+ }
+
// Initialize the interface descriptors ahead of time.
#define INTERFACE_DESCRIPTOR(Name, ...) \
{ Name##Descriptor(this); }
INTERFACE_DESCRIPTOR_LIST(INTERFACE_DESCRIPTOR)
#undef INTERFACE_DESCRIPTOR
- deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
+ deoptimizer_data_ = new DeoptimizerData(heap());
- const bool create_heap_objects = (des == NULL);
+ const bool create_heap_objects = (des == nullptr);
if (setup_delegate_ == nullptr) {
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
}
@@ -2822,6 +2894,7 @@ bool Isolate::Init(StartupDeserializer* des) {
// If we are deserializing, read the state into the now-empty heap.
{
AlwaysAllocateScope always_allocate(this);
+ CodeSpaceMemoryModificationScope modification_scope(&heap_);
if (!create_heap_objects) des->DeserializeInto(this);
load_stub_cache_->Initialize();
@@ -2864,8 +2937,6 @@ bool Isolate::Init(StartupDeserializer* des) {
OFFSET_OF(Isolate, heap_.external_memory_at_last_mark_compact_)),
Internals::kExternalMemoryAtLastMarkCompactOffset);
- time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
-
{
HandleScope scope(this);
ast_string_constants_ = new AstStringConstants(this, heap()->HashSeed());
@@ -2876,10 +2947,9 @@ bool Isolate::Init(StartupDeserializer* des) {
// cannot be serialized into the snapshot have been generated.
HandleScope scope(this);
CodeStub::GenerateFPStubs(this);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
}
- initialized_from_snapshot_ = (des != NULL);
+ initialized_from_snapshot_ = (des != nullptr);
if (!FLAG_inline_new) heap_.DisableInlineAllocation();
@@ -2888,15 +2958,15 @@ bool Isolate::Init(StartupDeserializer* des) {
void Isolate::Enter() {
- Isolate* current_isolate = NULL;
+ Isolate* current_isolate = nullptr;
PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
- if (current_data != NULL) {
+ if (current_data != nullptr) {
current_isolate = current_data->isolate_;
- DCHECK(current_isolate != NULL);
+ DCHECK_NOT_NULL(current_isolate);
if (current_isolate == this) {
DCHECK(Current() == this);
- DCHECK(entry_stack_ != NULL);
- DCHECK(entry_stack_->previous_thread_data == NULL ||
+ DCHECK_NOT_NULL(entry_stack_);
+ DCHECK(entry_stack_->previous_thread_data == nullptr ||
entry_stack_->previous_thread_data->thread_id().Equals(
ThreadId::Current()));
// Same thread re-enters the isolate, no need to re-init anything.
@@ -2906,7 +2976,7 @@ void Isolate::Enter() {
}
PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
- DCHECK(data != NULL);
+ DCHECK_NOT_NULL(data);
DCHECK(data->isolate_ == this);
EntryStackItem* item = new EntryStackItem(current_data,
@@ -2922,14 +2992,14 @@ void Isolate::Enter() {
void Isolate::Exit() {
- DCHECK(entry_stack_ != NULL);
- DCHECK(entry_stack_->previous_thread_data == NULL ||
+ DCHECK_NOT_NULL(entry_stack_);
+ DCHECK(entry_stack_->previous_thread_data == nullptr ||
entry_stack_->previous_thread_data->thread_id().Equals(
ThreadId::Current()));
if (--entry_stack_->entry_count > 0) return;
- DCHECK(CurrentPerIsolateThreadData() != NULL);
+ DCHECK_NOT_NULL(CurrentPerIsolateThreadData());
DCHECK(CurrentPerIsolateThreadData()->isolate_ == this);
// Pop the stack.
@@ -2948,7 +3018,7 @@ void Isolate::Exit() {
void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
deferred->next_ = deferred_handles_head_;
- if (deferred_handles_head_ != NULL) {
+ if (deferred_handles_head_ != nullptr) {
deferred_handles_head_->previous_ = deferred;
}
deferred_handles_head_ = deferred;
@@ -2959,7 +3029,7 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
#ifdef DEBUG
// In debug mode assert that the linked list is well-formed.
DeferredHandles* deferred_iterator = deferred;
- while (deferred_iterator->previous_ != NULL) {
+ while (deferred_iterator->previous_ != nullptr) {
deferred_iterator = deferred_iterator->previous_;
}
DCHECK(deferred_handles_head_ == deferred_iterator);
@@ -2967,10 +3037,10 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
if (deferred_handles_head_ == deferred) {
deferred_handles_head_ = deferred_handles_head_->next_;
}
- if (deferred->next_ != NULL) {
+ if (deferred->next_ != nullptr) {
deferred->next_->previous_ = deferred->previous_;
}
- if (deferred->previous_ != NULL) {
+ if (deferred->previous_ != nullptr) {
deferred->previous_->next_ = deferred->next_;
}
}
@@ -2993,22 +3063,21 @@ void Isolate::DumpAndResetStats() {
turbo_statistics_ = nullptr;
if (V8_UNLIKELY(FLAG_runtime_stats ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
- OFStream os(stdout);
- counters()->runtime_call_stats()->Print(os);
+ counters()->runtime_call_stats()->Print();
counters()->runtime_call_stats()->Reset();
}
}
CompilationStatistics* Isolate::GetTurboStatistics() {
- if (turbo_statistics() == NULL)
+ if (turbo_statistics() == nullptr)
set_turbo_statistics(new CompilationStatistics());
return turbo_statistics();
}
CodeTracer* Isolate::GetCodeTracer() {
- if (code_tracer() == NULL) set_code_tracer(new CodeTracer(id()));
+ if (code_tracer() == nullptr) set_code_tracer(new CodeTracer(id()));
return code_tracer();
}
@@ -3024,17 +3093,45 @@ bool Isolate::NeedsSourcePositionsForProfiling() const {
debug_->is_active() || logger_->is_logging();
}
-void Isolate::SetCodeCoverageList(Object* value) {
+void Isolate::SetFeedbackVectorsForProfilingTools(Object* value) {
DCHECK(value->IsUndefined(this) || value->IsArrayList());
- heap()->set_code_coverage_list(value);
+ heap()->set_feedback_vectors_for_profiling_tools(value);
}
-bool Isolate::IsArrayOrObjectPrototype(Object* object) {
+void Isolate::InitializeVectorListFromHeap() {
+ // Collect existing feedback vectors.
+ std::vector<Handle<FeedbackVector>> vectors;
+ {
+ HeapIterator heap_iterator(heap());
+ while (HeapObject* current_obj = heap_iterator.next()) {
+ if (current_obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(current_obj);
+ shared->set_has_reported_binary_coverage(false);
+ } else if (current_obj->IsFeedbackVector()) {
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* shared = vector->shared_function_info();
+ if (!shared->IsSubjectToDebugging()) continue;
+ vector->clear_invocation_count();
+ vectors.emplace_back(vector, this);
+ }
+ }
+ }
+
+ // Add collected feedback vectors to the root list lest we lose them to
+ // GC.
+ Handle<ArrayList> list =
+ ArrayList::New(this, static_cast<int>(vectors.size()));
+ for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
+ SetFeedbackVectorsForProfilingTools(*list);
+}
+
+bool Isolate::IsArrayOrObjectOrStringPrototype(Object* object) {
Object* context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
Context* current_context = Context::cast(context);
if (current_context->initial_object_prototype() == object ||
- current_context->initial_array_prototype() == object) {
+ current_context->initial_array_prototype() == object ||
+ current_context->initial_string_prototype() == object) {
return true;
}
context = current_context->next_context_link();
@@ -3055,8 +3152,8 @@ bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
return false;
}
-bool Isolate::IsFastArrayConstructorPrototypeChainIntact(Context* context) {
- PropertyCell* no_elements_cell = heap()->array_protector();
+bool Isolate::IsNoElementsProtectorIntact(Context* context) {
+ PropertyCell* no_elements_cell = heap()->no_elements_protector();
bool cell_reports_intact =
no_elements_cell->value()->IsSmi() &&
Smi::ToInt(no_elements_cell->value()) == kProtectorValid;
@@ -3070,8 +3167,11 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact(Context* context) {
native_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
JSObject* initial_object_proto = JSObject::cast(
native_context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX));
+ JSObject* initial_string_proto = JSObject::cast(
+ native_context->get(Context::INITIAL_STRING_PROTOTYPE_INDEX));
- if (root_array_map == NULL || initial_array_proto == initial_object_proto) {
+ if (root_array_map == nullptr ||
+ initial_array_proto == initial_object_proto) {
// We are in the bootstrapping process, and the entire check sequence
// shouldn't be performed.
return cell_reports_intact;
@@ -3090,25 +3190,41 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact(Context* context) {
return cell_reports_intact;
}
- // Check that the object prototype hasn't been altered WRT empty elements.
+ // Check that the Object.prototype hasn't been altered WRT empty elements.
+ elements = initial_object_proto->elements();
+ if (elements != heap()->empty_fixed_array() &&
+ elements != heap()->empty_slow_element_dictionary()) {
+ DCHECK_EQ(false, cell_reports_intact);
+ return cell_reports_intact;
+ }
+
+ // Check that the Array.prototype has the Object.prototype as its
+ // [[Prototype]] and that the Object.prototype has a null [[Prototype]].
PrototypeIterator iter(this, initial_array_proto);
if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) {
DCHECK_EQ(false, cell_reports_intact);
DCHECK(!has_pending_exception());
return cell_reports_intact;
}
+ iter.Advance();
+ if (!iter.IsAtEnd()) {
+ DCHECK_EQ(false, cell_reports_intact);
DCHECK(!has_pending_exception());
+ return cell_reports_intact;
+ }
DCHECK(!has_pending_exception());
- elements = initial_object_proto->elements();
+ // Check that the String.prototype hasn't been altered WRT empty elements.
+ elements = initial_string_proto->elements();
if (elements != heap()->empty_fixed_array() &&
elements != heap()->empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
- iter.Advance();
- if (!iter.IsAtEnd()) {
+ // Check that the String.prototype has the Object.prototype
+ // as its [[Prototype]] still.
+ if (initial_string_proto->map()->prototype() != initial_object_proto) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
@@ -3117,8 +3233,8 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact(Context* context) {
return cell_reports_intact;
}
-bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
- return Isolate::IsFastArrayConstructorPrototypeChainIntact(context());
+bool Isolate::IsNoElementsProtectorIntact() {
+ return Isolate::IsNoElementsProtectorIntact(context());
}
bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
@@ -3128,7 +3244,7 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
#ifdef DEBUG
Map* root_array_map =
raw_native_context()->GetInitialJSArrayMap(GetInitialFastElementsKind());
- if (root_array_map == NULL) {
+ if (root_array_map == nullptr) {
// Ignore the value of is_concat_spreadable during bootstrap.
return !is_is_concat_spreadable_set;
}
@@ -3153,13 +3269,13 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver) {
return !receiver->HasProxyInPrototype(this);
}
-void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
+void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->map()->is_prototype_map()) return;
- if (!IsFastArrayConstructorPrototypeChainIntact()) return;
- if (!IsArrayOrObjectPrototype(*object)) return;
+ if (!IsNoElementsProtectorIntact()) return;
+ if (!IsArrayOrObjectOrStringPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
- factory()->array_protector(),
+ factory()->no_elements_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
}
@@ -3223,17 +3339,24 @@ CallInterfaceDescriptorData* Isolate::call_descriptor_data(int index) {
return &call_descriptor_data_[index];
}
-
-base::RandomNumberGenerator* Isolate::random_number_generator() {
- if (random_number_generator_ == NULL) {
- if (FLAG_random_seed != 0) {
- random_number_generator_ =
- new base::RandomNumberGenerator(FLAG_random_seed);
+static base::RandomNumberGenerator* ensure_rng_exists(
+ base::RandomNumberGenerator** rng, int seed) {
+ if (*rng == nullptr) {
+ if (seed != 0) {
+ *rng = new base::RandomNumberGenerator(seed);
} else {
- random_number_generator_ = new base::RandomNumberGenerator();
+ *rng = new base::RandomNumberGenerator();
}
}
- return random_number_generator_;
+ return *rng;
+}
+
+base::RandomNumberGenerator* Isolate::random_number_generator() {
+ return ensure_rng_exists(&random_number_generator_, FLAG_random_seed);
+}
+
+base::RandomNumberGenerator* Isolate::fuzzer_rng() {
+ return ensure_rng_exists(&fuzzer_rng_, FLAG_fuzzer_random_seed);
}
int Isolate::GenerateIdentityHash(uint32_t mask) {
@@ -3246,7 +3369,7 @@ int Isolate::GenerateIdentityHash(uint32_t mask) {
}
Code* Isolate::FindCodeObject(Address a) {
- return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
+ return heap()->GcSafeFindCodeForInnerPointer(a);
}
@@ -3415,6 +3538,27 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
host_import_module_dynamically_callback_ = callback;
}
+Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
+ Handle<Module> module) {
+ Handle<Object> host_meta(module->import_meta(), this);
+ if (host_meta->IsTheHole(this)) {
+ host_meta = factory()->NewJSObjectWithNullProto();
+ if (host_initialize_import_meta_object_callback_ != nullptr) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(native_context());
+ host_initialize_import_meta_object_callback_(
+ api_context, Utils::ToLocal(module),
+ v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(host_meta)));
+ }
+ module->set_import_meta(*host_meta);
+ }
+ return Handle<JSObject>::cast(host_meta);
+}
+
+void Isolate::SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallback callback) {
+ host_initialize_import_meta_object_callback_ = callback;
+}
+
void Isolate::SetPromiseHook(PromiseHook hook) {
promise_hook_ = hook;
DebugStateUpdated();
@@ -3436,7 +3580,7 @@ void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
Handle<Object> value,
v8::PromiseRejectEvent event) {
DCHECK_EQ(v8::Promise::kRejected, promise->status());
- if (promise_reject_callback_ == NULL) return;
+ if (promise_reject_callback_ == nullptr) return;
Handle<FixedArray> stack_trace;
if (event == v8::kPromiseRejectWithNoHandler && value->IsJSObject()) {
stack_trace = GetDetailedStackTrace(Handle<JSObject>::cast(value));
@@ -3633,7 +3777,7 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
BasicBlockProfiler* Isolate::GetOrCreateBasicBlockProfiler() {
- if (basic_block_profiler_ == NULL) {
+ if (basic_block_profiler_ == nullptr) {
basic_block_profiler_ = new BasicBlockProfiler();
}
return basic_block_profiler_;
@@ -3641,7 +3785,7 @@ BasicBlockProfiler* Isolate::GetOrCreateBasicBlockProfiler() {
std::string Isolate::GetTurboCfgFileName() {
- if (FLAG_trace_turbo_cfg_file == NULL) {
+ if (FLAG_trace_turbo_cfg_file == nullptr) {
std::ostringstream os;
os << "turbo-" << base::OS::GetCurrentProcessId() << "-" << id() << ".cfg";
return os.str();
@@ -3740,6 +3884,10 @@ void Isolate::PrintWithTimestamp(const char* format, ...) {
va_end(arguments);
}
+wasm::WasmCodeManager* Isolate::wasm_code_manager() {
+ return wasm_code_manager_.get();
+}
+
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
@@ -3753,7 +3901,7 @@ bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
SaveContext::SaveContext(Isolate* isolate)
: isolate_(isolate), prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
+ if (isolate->context() != nullptr) {
context_ = Handle<Context>(isolate->context());
}
isolate->set_save_context(this);
@@ -3762,7 +3910,7 @@ SaveContext::SaveContext(Isolate* isolate)
}
SaveContext::~SaveContext() {
- isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_context(context_.is_null() ? nullptr : *context_);
isolate_->set_save_context(prev_);
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 44a5250808..9e3de53675 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -23,6 +23,7 @@
#include "src/handles.h"
#include "src/heap/heap.h"
#include "src/messages.h"
+#include "src/objects/code.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone.h"
@@ -109,6 +110,7 @@ class Interpreter;
namespace wasm {
class CompilationManager;
+class WasmCodeManager;
}
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
@@ -297,14 +299,14 @@ class ThreadLocalTop BASE_EMBEDDED {
// Initialize the thread data.
void Initialize();
- // Get the top C++ try catch handler or NULL if none are registered.
+ // Get the top C++ try catch handler or nullptr if none are registered.
//
// This method is not guaranteed to return an address that can be
// used for comparison with addresses into the JS stack. If such an
// address is needed, use try_catch_handler_address.
FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)
- // Get the address of the top C++ try catch handler or NULL if
+ // Get the address of the top C++ try catch handler or nullptr if
// none are registered.
//
// This method always returns an address that can be compared to
@@ -333,8 +335,8 @@ class ThreadLocalTop BASE_EMBEDDED {
// Communication channel between Isolate::FindHandler and the CEntryStub.
Context* pending_handler_context_;
- Code* pending_handler_code_;
- intptr_t pending_handler_offset_;
+ Address pending_handler_entrypoint_;
+ Address pending_handler_constant_pool_;
Address pending_handler_fp_;
Address pending_handler_sp_;
@@ -380,10 +382,10 @@ class ThreadLocalTop BASE_EMBEDDED {
#if USE_SIMULATOR
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(bool, simulator_initialized, false) \
- V(base::CustomMatcherHashMap*, simulator_i_cache, NULL) \
- V(Redirection*, simulator_redirection, NULL)
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr) \
+ V(Redirection*, simulator_redirection, nullptr)
#else
#define ISOLATE_INIT_SIMULATOR_LIST(V)
@@ -419,6 +421,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(OOMErrorCallback, oom_behavior, nullptr) \
V(LogEventCallback, event_logger, nullptr) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr) \
@@ -478,12 +481,13 @@ class Isolate {
: isolate_(isolate),
thread_id_(thread_id),
stack_limit_(0),
- thread_state_(NULL),
+ thread_state_(nullptr),
#if USE_SIMULATOR
- simulator_(NULL),
+ simulator_(nullptr),
#endif
- next_(NULL),
- prev_(NULL) { }
+ next_(nullptr),
+ prev_(nullptr) {
+ }
~PerIsolateThreadData();
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
@@ -521,8 +525,8 @@ class Isolate {
static void InitializeOncePerProcess();
- // Returns the PerIsolateThreadData for the current thread (or NULL if one is
- // not currently set).
+ // Returns the PerIsolateThreadData for the current thread (or nullptr if one
+ // is not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
@@ -530,7 +534,7 @@ class Isolate {
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
- DCHECK(base::Relaxed_Load(&isolate_key_created_) == 1);
+ DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
Isolate* isolate = reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
DCHECK_NOT_NULL(isolate);
@@ -548,7 +552,7 @@ class Isolate {
bool Init(StartupDeserializer* des);
// True if at least one thread Enter'ed this isolate.
- bool IsInUse() { return entry_stack_ != NULL; }
+ bool IsInUse() { return entry_stack_ != nullptr; }
// Destroys the non-default isolates.
// Sets default isolate into "has_been_disposed" state rather then destroying,
@@ -617,8 +621,8 @@ class Isolate {
inline bool has_pending_exception();
THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
- THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code)
- THREAD_LOCAL_TOP_ADDRESS(intptr_t, pending_handler_offset)
+ THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
+ THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
@@ -722,7 +726,8 @@ class Isolate {
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator,
PrintStackMode mode = kPrintStackVerbose);
- void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
+ V8_EXPORT_PRIVATE void PrintStack(FILE* out,
+ PrintStackMode mode = kPrintStackVerbose);
Handle<String> StackTraceString();
// Stores a stack trace in a stack-allocated temporary buffer which will
// end up in the minidump for debugging purposes.
@@ -748,6 +753,8 @@ class Isolate {
Handle<Object> caller);
Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);
+ Address GetAbstractPC(int* line, int* column);
+
// Returns if the given context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
@@ -758,12 +765,12 @@ class Isolate {
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
- Object* Throw(Object* exception, MessageLocation* location = NULL);
+ Object* Throw(Object* exception, MessageLocation* location = nullptr);
Object* ThrowIllegalOperation();
template <typename T>
MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
- MessageLocation* location = NULL) {
+ MessageLocation* location = nullptr) {
Throw(*exception, location);
return MaybeHandle<T>();
}
@@ -899,6 +906,7 @@ class Isolate {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
+ V8_EXPORT_PRIVATE wasm::WasmCodeManager* wasm_code_manager();
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
@@ -980,6 +988,10 @@ class Isolate {
HeapProfiler* heap_profiler() const { return heap_profiler_; }
#ifdef DEBUG
+ static size_t non_disposed_isolates() {
+ return non_disposed_isolates_.Value();
+ }
+
HistogramInfo* heap_histograms() { return heap_histograms_; }
JSObject::SpillInformation* js_spill_information() {
@@ -996,20 +1008,17 @@ class Isolate {
THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
void SetData(uint32_t slot, void* data) {
- DCHECK(slot < Internals::kNumIsolateDataSlots);
+ DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
embedder_data_[slot] = data;
}
void* GetData(uint32_t slot) {
- DCHECK(slot < Internals::kNumIsolateDataSlots);
+ DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
return embedder_data_[slot];
}
bool serializer_enabled() const { return serializer_enabled_; }
- void set_serializer_enabled_for_test(bool serializer_enabled) {
- serializer_enabled_ = serializer_enabled;
- }
bool snapshot_available() const {
- return snapshot_blob_ != NULL && snapshot_blob_->raw_size != 0;
+ return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
}
bool IsDead() { return has_fatal_error_; }
@@ -1049,7 +1058,14 @@ class Isolate {
return type_profile_mode() == debug::TypeProfile::kCollect;
}
- void SetCodeCoverageList(Object* value);
+ // Collect feedback vectors with data for code coverage or type profile.
+ // Reset the list, when both code coverage and type profile are not
+ // needed anymore. This keeps many feedback vectors alive, but code
+ // coverage or type profile are used for debugging only and increase in
+ // memory usage is expected.
+ void SetFeedbackVectorsForProfilingTools(Object* value);
+
+ void InitializeVectorListFromHeap();
double time_millis_since_init() {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
@@ -1074,8 +1090,8 @@ class Isolate {
// The version with an explicit context parameter can be used when
// Isolate::context is not set up, e.g. when calling directly into C++ from
// CSA.
- bool IsFastArrayConstructorPrototypeChainIntact(Context* context);
- bool IsFastArrayConstructorPrototypeChainIntact();
+ bool IsNoElementsProtectorIntact(Context* context);
+ bool IsNoElementsProtectorIntact();
inline bool IsArraySpeciesLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
@@ -1093,15 +1109,15 @@ class Isolate {
// notifications occur if the set is on the elements of the array or
// object prototype. Also ensure that changes to prototype chain between
// Array and Object fire notifications.
- void UpdateArrayProtectorOnSetElement(Handle<JSObject> object);
- void UpdateArrayProtectorOnSetLength(Handle<JSObject> object) {
- UpdateArrayProtectorOnSetElement(object);
+ void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
+ void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
+ UpdateNoElementsProtectorOnSetElement(object);
}
- void UpdateArrayProtectorOnSetPrototype(Handle<JSObject> object) {
- UpdateArrayProtectorOnSetElement(object);
+ void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
+ UpdateNoElementsProtectorOnSetElement(object);
}
- void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
- UpdateArrayProtectorOnSetElement(object);
+ void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
+ UpdateNoElementsProtectorOnSetElement(object);
}
void InvalidateArrayConstructorProtector();
void InvalidateArraySpeciesProtector();
@@ -1116,8 +1132,6 @@ class Isolate {
V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
int index);
- AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
-
void IterateDeferredHandles(RootVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1128,9 +1142,9 @@ class Isolate {
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
- DCHECK(optimizing_compile_dispatcher_ == NULL ||
+ DCHECK(optimizing_compile_dispatcher_ == nullptr ||
FLAG_concurrent_recompilation);
- return optimizing_compile_dispatcher_ != NULL;
+ return optimizing_compile_dispatcher_ != nullptr;
}
OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
@@ -1151,8 +1165,14 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
+ bool force_slow_path() { return force_slow_path_; }
+
+ bool* force_slow_path_address() { return &force_slow_path_; }
+
V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
+ V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();
+
// Generates a random number that is non-zero when masked
// with the provided mask.
int GenerateIdentityHash(uint32_t mask);
@@ -1262,6 +1282,11 @@ class Isolate {
MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
Handle<Script> referrer, Handle<Object> specifier);
+ void SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallback callback);
+ Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
+ Handle<Module> module);
+
void SetRAILMode(RAILMode rail_mode);
RAILMode rail_mode() { return rail_mode_.Value(); }
@@ -1337,7 +1362,7 @@ class Isolate {
protected:
explicit Isolate(bool enable_serializer);
- bool IsArrayOrObjectPrototype(Object* object);
+ bool IsArrayOrObjectOrStringPrototype(Object* object);
private:
friend struct GlobalState;
@@ -1500,12 +1525,14 @@ class Isolate {
std::vector<int> regexp_indices_;
DateCache* date_cache_;
CallInterfaceDescriptorData* call_descriptor_data_;
- AccessCompilerData* access_compiler_data_;
base::RandomNumberGenerator* random_number_generator_;
+ base::RandomNumberGenerator* fuzzer_rng_;
base::AtomicValue<RAILMode> rail_mode_;
bool promise_hook_or_debug_is_active_;
PromiseHook promise_hook_;
HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
+ HostInitializeImportMetaObjectCallback
+ host_initialize_import_meta_object_callback_;
base::Mutex rail_mutex_;
double load_start_time_ms_;
@@ -1529,6 +1556,8 @@ class Isolate {
double time_millis_at_init_;
#ifdef DEBUG
+ static base::AtomicNumber<size_t> non_disposed_isolates_;
+
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
@@ -1576,6 +1605,8 @@ class Isolate {
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
+ bool force_slow_path_;
+
int next_optimization_id_;
#if V8_SFI_HAS_UNIQUE_ID
@@ -1623,6 +1654,8 @@ class Isolate {
size_t elements_deletion_counter_ = 0;
+ std::unique_ptr<wasm::WasmCodeManager> wasm_code_manager_;
+
// The top entry of the v8::Context::BackupIncumbentScope stack.
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
nullptr;
@@ -1792,15 +1825,13 @@ class PostponeInterruptsScope BASE_EMBEDDED {
class CodeTracer final : public Malloced {
public:
- explicit CodeTracer(int isolate_id)
- : file_(NULL),
- scope_depth_(0) {
+ explicit CodeTracer(int isolate_id) : file_(nullptr), scope_depth_(0) {
if (!ShouldRedirect()) {
file_ = stdout;
return;
}
- if (FLAG_redirect_code_traces_to == NULL) {
+ if (FLAG_redirect_code_traces_to == nullptr) {
SNPrintF(filename_,
"code-%d-%d.asm",
base::OS::GetCurrentProcessId(),
@@ -1828,7 +1859,7 @@ class CodeTracer final : public Malloced {
return;
}
- if (file_ == NULL) {
+ if (file_ == nullptr) {
file_ = base::OS::FOpen(filename_.start(), "ab");
}
@@ -1842,7 +1873,7 @@ class CodeTracer final : public Malloced {
if (--scope_depth_ == 0) {
fclose(file_);
- file_ = NULL;
+ file_ = nullptr;
}
}
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 7a009030c4..80fd250d22 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -29,6 +29,13 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
+macro IS_PROXY(arg)
+(%_IsJSProxy(arg))
+endmacro
+
+macro INVERT_NEG_ZERO(arg)
+((arg) + 0)
+endmacro
function ArraySpeciesCreate(array, length) {
length = INVERT_NEG_ZERO(length);
@@ -383,8 +390,6 @@ function InnerArrayJoin(separator, array, length) {
DEFINE_METHOD(
GlobalArray.prototype,
join(separator) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
-
var array = TO_OBJECT(this);
var length = TO_LENGTH(array.length);
@@ -396,8 +401,6 @@ DEFINE_METHOD(
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPopFallback() {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
-
var array = TO_OBJECT(this);
var n = TO_LENGTH(array.length);
if (n == 0) {
@@ -416,8 +419,6 @@ function ArrayPopFallback() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPushFallback() {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push");
-
var array = TO_OBJECT(this);
var n = TO_LENGTH(array.length);
var m = arguments.length;
@@ -520,8 +521,6 @@ function GenericArrayReverse(array, len) {
DEFINE_METHOD(
GlobalArray.prototype,
reverse() {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
-
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
var isArray = IS_ARRAY(array);
@@ -540,8 +539,6 @@ DEFINE_METHOD(
function ArrayShiftFallback() {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
-
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
@@ -567,8 +564,6 @@ function ArrayShiftFallback() {
function ArrayUnshiftFallback(arg1) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
-
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
var num_arguments = arguments.length;
@@ -591,8 +586,6 @@ function ArrayUnshiftFallback(arg1) { // length == 1
function ArraySliceFallback(start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
-
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
var start_i = TO_INTEGER(start);
@@ -664,8 +657,6 @@ function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
function ArraySpliceFallback(start, delete_count) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.splice");
-
var num_arguments = arguments.length;
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
@@ -1003,8 +994,6 @@ function InnerArraySort(array, length, comparefn) {
DEFINE_METHOD(
GlobalArray.prototype,
sort(comparefn) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
-
if (!IS_UNDEFINED(comparefn) && !IS_CALLABLE(comparefn)) {
throw %make_type_error(kBadSortComparisonFunction, comparefn);
}
@@ -1018,9 +1007,7 @@ DEFINE_METHOD(
DEFINE_METHOD_LEN(
GlobalArray.prototype,
lastIndexOf(element, index) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
-
- var array = this;
+ var array = TO_OBJECT(this);
var length = TO_LENGTH(this.length);
if (length == 0) return -1;
@@ -1079,8 +1066,6 @@ DEFINE_METHOD_LEN(
DEFINE_METHOD_LEN(
GlobalArray.prototype,
copyWithin(target, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
-
var array = TO_OBJECT(this);
var length = TO_LENGTH(array.length);
@@ -1153,8 +1138,6 @@ function InnerArrayFind(predicate, thisArg, array, length) {
DEFINE_METHOD_LEN(
GlobalArray.prototype,
find(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
-
var array = TO_OBJECT(this);
var length = TO_INTEGER(array.length);
@@ -1184,8 +1167,6 @@ function InnerArrayFindIndex(predicate, thisArg, array, length) {
DEFINE_METHOD_LEN(
GlobalArray.prototype,
findIndex(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
-
var array = TO_OBJECT(this);
var length = TO_INTEGER(array.length);
@@ -1199,8 +1180,6 @@ DEFINE_METHOD_LEN(
DEFINE_METHOD_LEN(
GlobalArray.prototype,
fill(value, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
-
var array = TO_OBJECT(this);
var length = TO_LENGTH(array.length);
@@ -1375,8 +1354,6 @@ utils.Export(function(to) {
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
to.ArrayValues = ArrayValues;
- to.InnerArrayFind = InnerArrayFind;
- to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayJoin = InnerArrayJoin;
to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index bc702dada1..53fbe1f947 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -46,6 +46,24 @@ utils.Import(function(from) {
// Utilities for definitions
+macro IS_OBJECT(arg)
+(typeof(arg) === 'object')
+endmacro
+
+macro NUMBER_IS_NAN(arg)
+(%IS_VAR(arg) !== arg)
+endmacro
+
+macro NUMBER_IS_FINITE(arg)
+(%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)))
+endmacro
+
+// To avoid ES2015 Function name inference.
+
+macro ANONYMOUS_FUNCTION(fn)
+(0, (fn))
+endmacro
+
/**
* Adds bound method to the prototype of the given object.
*/
@@ -272,7 +290,7 @@ function supportedLocalesOf(service, locales, options) {
// Provide defaults if matcher was not specified.
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
} else {
options = TO_OBJECT(options);
}
@@ -483,12 +501,12 @@ function parseExtension(extension) {
// Assume ['', 'u', ...] input, but don't throw.
if (extensionSplit.length <= 2 ||
(extensionSplit[0] !== '' && extensionSplit[1] !== 'u')) {
- return {};
+ return {__proto__: null};
}
// Key is {2}alphanum, value is {3,8}alphanum.
// Some keys may not have explicit values (booleans).
- var extensionMap = {};
+ var extensionMap = {__proto__: null};
var key = UNDEFINED;
var value = UNDEFINED;
for (var i = 2; i < extensionSplit.length; ++i) {
@@ -510,7 +528,7 @@ function parseExtension(extension) {
}
} else {
// There is a value that's too long, or that doesn't have a key.
- return {};
+ return {__proto__: null};
}
}
if (!IS_UNDEFINED(key) && !(key in extensionMap)) {
@@ -915,12 +933,12 @@ DEFINE_METHOD(
*/
function CreateCollator(locales, options) {
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
}
var getOption = getGetOption(options, 'collator');
- var internalOptions = {};
+ var internalOptions = {__proto__: null};
defineWEProperty(internalOptions, 'usage', getOption(
'usage', 'string', ['sort', 'search'], 'sort'));
@@ -988,7 +1006,7 @@ function CreateCollator(locales, options) {
// problems. If malicious user decides to redefine Object.prototype.locale
// we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
// %object_define_properties will either succeed defining or throw an error.
- var resolved = %object_define_properties({}, {
+ var resolved = %object_define_properties({__proto__: null}, {
caseFirst: {writable: true},
collation: {value: internalOptions.collation, writable: true},
ignorePunctuation: {writable: true},
@@ -1083,21 +1101,21 @@ function PluralRulesConstructor() {
var options = arguments[1];
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
}
var getOption = getGetOption(options, 'pluralrules');
var locale = resolveLocale('pluralrules', locales, options);
- var internalOptions = {};
+ var internalOptions = {__proto__: null};
defineWEProperty(internalOptions, 'type', getOption(
'type', 'string', ['cardinal', 'ordinal'], 'cardinal'));
SetNumberFormatDigitOptions(internalOptions, options, 0, 3);
var requestedLocale = locale.locale;
- var resolved = %object_define_properties({}, {
+ var resolved = %object_define_properties({__proto__: null}, {
type: {value: internalOptions.type, writable: true},
locale: {writable: true},
maximumFractionDigits: {writable: true},
@@ -1244,14 +1262,14 @@ function SetNumberFormatDigitOptions(internalOptions, options,
*/
function CreateNumberFormat(locales, options) {
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
}
var getOption = getGetOption(options, 'numberformat');
var locale = resolveLocale('numberformat', locales, options);
- var internalOptions = {};
+ var internalOptions = {__proto__: null};
defineWEProperty(internalOptions, 'style', getOption(
'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
@@ -1301,7 +1319,7 @@ function CreateNumberFormat(locales, options) {
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = %object_define_properties({}, {
+ var resolved = %object_define_properties({__proto__: null}, {
currency: {writable: true},
currencyDisplay: {writable: true},
locale: {writable: true},
@@ -1486,7 +1504,7 @@ function fromLDMLString(ldmlString) {
// First remove '' quoted text, so we lose 'Uhr' strings.
ldmlString = %RegExpInternalReplace(GetQuotedStringRE(), ldmlString, '');
- var options = {};
+ var options = {__proto__: null};
var match = %regexp_internal_match(/E{3,5}/, ldmlString);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
@@ -1562,7 +1580,7 @@ function appendToDateTimeObject(options, option, match, pairs) {
*/
function toDateTimeOptions(options, required, defaults) {
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
} else {
options = TO_OBJECT(options);
}
@@ -1622,7 +1640,7 @@ function toDateTimeOptions(options, required, defaults) {
*/
function CreateDateTimeFormat(locales, options) {
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
}
var locale = resolveLocale('dateformat', locales, options);
@@ -1646,7 +1664,7 @@ function CreateDateTimeFormat(locales, options) {
// ICU prefers options to be passed using -u- extension key/values, so
// we need to build that.
- var internalOptions = {};
+ var internalOptions = {__proto__: null};
var extensionMap = parseExtension(locale.extension);
/**
@@ -1662,7 +1680,7 @@ function CreateDateTimeFormat(locales, options) {
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = %object_define_properties({}, {
+ var resolved = %object_define_properties({__proto__: null}, {
calendar: {writable: true},
day: {writable: true},
era: {writable: true},
@@ -1786,15 +1804,13 @@ function formatDate(formatter, dateValue) {
dateMs = TO_NUMBER(dateValue);
}
- if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
-
- return %InternalDateFormat(formatter, new GlobalDate(dateMs));
+ return %InternalDateFormat(formatter, dateMs);
}
DEFINE_METHOD(
GlobalIntlDateTimeFormat.prototype,
formatToParts(dateValue) {
- CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
+ REQUIRE_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
if (!IS_OBJECT(this)) {
throw %make_type_error(kCalledOnNonObject, this);
}
@@ -1810,9 +1826,7 @@ DEFINE_METHOD(
dateMs = TO_NUMBER(dateValue);
}
- if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
-
- return %InternalDateFormatToParts(this, new GlobalDate(dateMs));
+ return %InternalDateFormatToParts(this, dateMs);
}
);
@@ -1869,18 +1883,18 @@ function canonicalizeTimeZoneID(tzID) {
*/
function CreateBreakIterator(locales, options) {
if (IS_UNDEFINED(options)) {
- options = {};
+ options = {__proto__: null};
}
var getOption = getGetOption(options, 'breakiterator');
- var internalOptions = {};
+ var internalOptions = {__proto__: null};
defineWEProperty(internalOptions, 'type', getOption(
'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
var locale = resolveLocale('breakiterator', locales, options);
- var resolved = %object_define_properties({}, {
+ var resolved = %object_define_properties({__proto__: null}, {
requestedLocale: {value: locale.locale, writable: true},
type: {value: internalOptions.type, writable: true},
locale: {writable: true}
@@ -2093,17 +2107,16 @@ DEFINE_METHOD(
}
);
-var StringPrototypeMethods = {};
DEFINE_METHODS_LEN(
GlobalString.prototype,
{
toLocaleLowerCase(locales) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
+ REQUIRE_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
return LocaleConvertCase(TO_STRING(this), locales, false);
}
toLocaleUpperCase(locales) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
+ REQUIRE_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
return LocaleConvertCase(TO_STRING(this), locales, true);
}
},
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index c14d253192..75f01e38c9 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -45,26 +45,12 @@ define kMaxUint32 = 4294967295;
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
macro IS_ARRAY(arg) = (%_IsArray(arg));
-macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
-macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_DATE(arg) = (%IsDate(arg));
-macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_FUNCTION(arg) = (%IsFunction(arg));
-macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
-macro IS_MAP(arg) = (%_IsJSMap(arg));
-macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_OBJECT(arg) = (typeof(arg) === 'object');
-macro IS_PROXY(arg) = (%_IsJSProxy(arg));
-macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
-macro IS_SET(arg) = (%_IsJSSet(arg));
-macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
-macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
-macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
macro IS_UNDEFINED(arg) = (arg === (void 0));
macro IS_WEAKMAP(arg) = (%_IsJSWeakMap(arg));
macro IS_WEAKSET(arg) = (%_IsJSWeakSet(arg));
@@ -75,29 +61,20 @@ macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
# Macro for ES queries of the type: "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
-# Macro for ES6 CheckObjectCoercible
-# Will throw a TypeError of the form "[functionName] called on null or undefined".
-macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);
+# Macro for ES RequireObjectCoercible
+# https://tc39.github.io/ecma262/#sec-requireobjectcoercible
+# Throws a TypeError of the form "[functionName] called on null or undefined".
+macro REQUIRE_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
-macro NUMBER_IS_NAN(arg) = (%IS_VAR(arg) !== arg);
-macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
-macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
-# Private names.
-macro GET_PRIVATE(obj, sym) = (obj[sym]);
-macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
-
-# To avoid ES2015 Function name inference.
-macro ANONYMOUS_FUNCTION(fn) = (0, (fn));
-
macro DEFINE_METHODS_LEN(obj, class_def, len) = %DefineMethodsInternal(obj, class class_def, len);
macro DEFINE_METHOD_LEN(obj, method_def, len) = %DefineMethodsInternal(obj, class { method_def }, len);
macro DEFINE_METHODS(obj, class_def) = DEFINE_METHODS_LEN(obj, class_def, -1);
@@ -106,9 +83,3 @@ macro DEFINE_METHOD(obj, method_def) = DEFINE_METHOD_LEN(obj, method_def, -1);
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
-
-# Must match PropertyFilter in property-details.h
-define PROPERTY_FILTER_NONE = 0;
-
-# Check whether debug is active.
-define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
deleted file mode 100644
index b066f3b3d6..0000000000
--- a/deps/v8/src/js/string.js
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalString = global.String;
-var matchSymbol = utils.ImportNow("match_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-
-//-------------------------------------------------------------------
-
-// Set up the non-enumerable functions on the String prototype object.
-DEFINE_METHODS(
- GlobalString.prototype,
- {
- /* ES#sec-string.prototype.match */
- match(pattern) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
-
- if (!IS_NULL_OR_UNDEFINED(pattern)) {
- var matcher = pattern[matchSymbol];
- if (!IS_UNDEFINED(matcher)) {
- return %_Call(matcher, pattern, this);
- }
- }
-
- var subject = TO_STRING(this);
-
- // Equivalent to RegExpCreate (ES#sec-regexpcreate)
- var regexp = %RegExpCreate(pattern);
- return regexp[matchSymbol](subject);
- }
-
- /* ES#sec-string.prototype.search */
- search(pattern) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
-
- if (!IS_NULL_OR_UNDEFINED(pattern)) {
- var searcher = pattern[searchSymbol];
- if (!IS_UNDEFINED(searcher)) {
- return %_Call(searcher, pattern, this);
- }
- }
-
- var subject = TO_STRING(this);
-
- // Equivalent to RegExpCreate (ES#sec-regexpcreate)
- var regexp = %RegExpCreate(pattern);
- return %_Call(regexp[searchSymbol], regexp, subject);
- }
- }
-);
-
-function StringPad(thisString, maxLength, fillString) {
- maxLength = TO_LENGTH(maxLength);
- var stringLength = thisString.length;
-
- if (maxLength <= stringLength) return "";
-
- if (IS_UNDEFINED(fillString)) {
- fillString = " ";
- } else {
- fillString = TO_STRING(fillString);
- if (fillString === "") {
- // If filler is the empty String, return S.
- return "";
- }
- }
-
- var fillLength = maxLength - stringLength;
- var repetitions = (fillLength / fillString.length) | 0;
- var remainingChars = (fillLength - fillString.length * repetitions) | 0;
-
- var filler = "";
- while (true) {
- if (repetitions & 1) filler += fillString;
- repetitions >>= 1;
- if (repetitions === 0) break;
- fillString += fillString;
- }
-
- if (remainingChars) {
- filler += %_SubString(fillString, 0, remainingChars);
- }
-
- return filler;
-}
-
-DEFINE_METHODS_LEN(
- GlobalString.prototype,
- {
- /* ES#sec-string.prototype.padstart */
- /* String.prototype.padStart(maxLength [, fillString]) */
- padStart(maxLength, fillString) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart");
- var thisString = TO_STRING(this);
-
- return StringPad(thisString, maxLength, fillString) + thisString;
- }
-
- /* ES#sec-string.prototype.padend */
- /* String.prototype.padEnd(maxLength [, fillString]) */
- padEnd(maxLength, fillString) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd");
- var thisString = TO_STRING(this);
-
- return thisString + StringPad(thisString, maxLength, fillString);
- }
- },
- 1 /* Set functions length */
-);
-
-// -------------------------------------------------------------------
-// String methods related to templates
-
-// Set up the non-enumerable functions on the String object.
-DEFINE_METHOD(
- GlobalString,
-
- /* ES#sec-string.raw */
- raw(callSite) {
- var numberOfSubstitutions = arguments.length;
- var cooked = TO_OBJECT(callSite);
- var raw = TO_OBJECT(cooked.raw);
- var literalSegments = TO_LENGTH(raw.length);
- if (literalSegments <= 0) return "";
-
- var result = TO_STRING(raw[0]);
-
- for (var i = 1; i < literalSegments; ++i) {
- if (i < numberOfSubstitutions) {
- result += TO_STRING(arguments[i]);
- }
- result += TO_STRING(raw[i]);
- }
-
- return result;
- }
-);
-
-// -------------------------------------------------------------------
-
-})
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index e361f2f58d..7fa638fa89 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -19,8 +19,6 @@ var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
var GlobalObject = global.Object;
-var InnerArrayFind;
-var InnerArrayFindIndex;
var InnerArrayJoin;
var InnerArraySort;
var InnerArrayToLocaleString;
@@ -49,13 +47,23 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
+macro IS_ARRAYBUFFER(arg)
+(%_ClassOf(arg) === 'ArrayBuffer')
+endmacro
+
+macro IS_SHAREDARRAYBUFFER(arg)
+(%_ClassOf(arg) === 'SharedArrayBuffer')
+endmacro
+
+macro IS_TYPEDARRAY(arg)
+(%_IsTypedArray(arg))
+endmacro
+
var GlobalTypedArray = %object_get_prototype_of(GlobalUint8Array);
utils.Import(function(from) {
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
- InnerArrayFind = from.InnerArrayFind;
- InnerArrayFindIndex = from.InnerArrayFindIndex;
InnerArrayJoin = from.InnerArrayJoin;
InnerArraySort = from.InnerArraySort;
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
@@ -285,35 +293,6 @@ DEFINE_METHOD_LEN(
1 /* Set function length. */
);
-
-// ES6 draft 07-15-13, section 22.2.3.10
-DEFINE_METHOD_LEN(
- GlobalTypedArray.prototype,
- find(predicate, thisArg) {
- ValidateTypedArray(this, "%TypedArray%.prototype.find");
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFind(predicate, thisArg, this, length);
- },
- 1 /* Set function length. */
-);
-
-
-// ES6 draft 07-15-13, section 22.2.3.11
-DEFINE_METHOD_LEN(
- GlobalTypedArray.prototype,
- findIndex(predicate, thisArg) {
- ValidateTypedArray(this, "%TypedArray%.prototype.findIndex");
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFindIndex(predicate, thisArg, this, length);
- },
- 1 /* Set function length. */
-);
-
-
// ES6 draft 05-18-15, section 22.2.3.25
DEFINE_METHOD(
GlobalTypedArray.prototype,
@@ -441,18 +420,6 @@ function TypedArrayConstructor() {
macro SETUP_TYPED_ARRAY(NAME, ELEMENT_SIZE)
%SetCode(GlobalNAME, NAMEConstructor);
- %FunctionSetPrototype(GlobalNAME, new GlobalObject());
- %InternalSetPrototype(GlobalNAME, GlobalTypedArray);
- %InternalSetPrototype(GlobalNAME.prototype, GlobalTypedArray.prototype);
-
- %AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
- READ_ONLY | DONT_ENUM | DONT_DELETE);
-
- %AddNamedProperty(GlobalNAME.prototype,
- "constructor", global.NAME, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype,
- "BYTES_PER_ELEMENT", ELEMENT_SIZE,
- READ_ONLY | DONT_ENUM | DONT_DELETE);
endmacro
TYPED_ARRAYS(SETUP_TYPED_ARRAY)
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 24eb6dc1bf..26dada3759 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -22,7 +22,7 @@ DEFINE_METHOD(
GlobalObject.prototype,
// ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
toLocaleString() {
- CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
+ REQUIRE_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
return this.toString();
}
);
diff --git a/deps/v8/src/js/weak-collection.js b/deps/v8/src/js/weak-collection.js
deleted file mode 100644
index 3d75ab4138..0000000000
--- a/deps/v8/src/js/weak-collection.js
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalWeakMap = global.WeakMap;
-var GlobalWeakSet = global.WeakSet;
-var MathRandom = global.Math.random;
-
-// -------------------------------------------------------------------
-// Harmony WeakMap
-
-function WeakMapConstructor(iterable) {
- if (IS_UNDEFINED(new.target)) {
- throw %make_type_error(kConstructorNotFunction, "WeakMap");
- }
-
- %WeakCollectionInitialize(this);
-
- if (!IS_NULL_OR_UNDEFINED(iterable)) {
- var adder = this.set;
- if (!IS_CALLABLE(adder)) {
- throw %make_type_error(kPropertyNotFunction, adder, 'set', this);
- }
- for (var nextItem of iterable) {
- if (!IS_RECEIVER(nextItem)) {
- throw %make_type_error(kIteratorValueNotAnObject, nextItem);
- }
- %_Call(adder, this, nextItem[0], nextItem[1]);
- }
- }
-}
-
-
-// Set up the non-enumerable functions on the WeakMap prototype object.
-DEFINE_METHODS(
- GlobalWeakMap.prototype,
- {
- set(key, value) {
- if (!IS_WEAKMAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakMap.prototype.set', this);
- }
- if (!IS_RECEIVER(key)) throw %make_type_error(kInvalidWeakMapKey);
- return %WeakCollectionSet(this, key, value, %GenericHash(key));
- }
-
- delete(key) {
- if (!IS_WEAKMAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakMap.prototype.delete', this);
- }
- if (!IS_RECEIVER(key)) return false;
- var hash = %GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return false;
- return %WeakCollectionDelete(this, key, hash);
- }
- }
-);
-
-// -------------------------------------------------------------------
-
-%SetCode(GlobalWeakMap, WeakMapConstructor);
-%FunctionSetLength(GlobalWeakMap, 0);
-
-// -------------------------------------------------------------------
-// Harmony WeakSet
-
-function WeakSetConstructor(iterable) {
- if (IS_UNDEFINED(new.target)) {
- throw %make_type_error(kConstructorNotFunction, "WeakSet");
- }
-
- %WeakCollectionInitialize(this);
-
- if (!IS_NULL_OR_UNDEFINED(iterable)) {
- var adder = this.add;
- if (!IS_CALLABLE(adder)) {
- throw %make_type_error(kPropertyNotFunction, adder, 'add', this);
- }
- for (var value of iterable) {
- %_Call(adder, this, value);
- }
- }
-}
-
-
-// Set up the non-enumerable functions on the WeakSet prototype object.
-DEFINE_METHODS(
- GlobalWeakSet.prototype,
- {
- add(value) {
- if (!IS_WEAKSET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakSet.prototype.add', this);
- }
- if (!IS_RECEIVER(value)) throw %make_type_error(kInvalidWeakSetValue);
- return %WeakCollectionSet(this, value, true, %GenericHash(value));
- }
-
- delete(value) {
- if (!IS_WEAKSET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakSet.prototype.delete', this);
- }
- if (!IS_RECEIVER(value)) return false;
- var hash = %GetExistingHash(value);
- if (IS_UNDEFINED(hash)) return false;
- return %WeakCollectionDelete(this, value, hash);
- }
- }
-);
-
-// -------------------------------------------------------------------
-
-%SetCode(GlobalWeakSet, WeakSetConstructor);
-%FunctionSetLength(GlobalWeakSet, 0);
-
-})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 32e4187c8e..13f65705a3 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -11,11 +11,11 @@
#include "src/field-type.h"
#include "src/messages.h"
#include "src/objects-inl.h"
-#include "src/parsing/token.h"
#include "src/property-descriptor.h"
#include "src/string-hasher.h"
#include "src/transitions.h"
#include "src/unicode-cache.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -87,7 +87,8 @@ bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
isolate_, result, InternalizeJsonProperty(holder, name), false);
Maybe<bool> change_result = Nothing<bool>();
if (result->IsUndefined(isolate_)) {
- change_result = JSReceiver::DeletePropertyOrElement(holder, name, SLOPPY);
+ change_result = JSReceiver::DeletePropertyOrElement(holder, name,
+ LanguageMode::kSloppy);
} else {
PropertyDescriptor desc;
desc.set_value(result);
@@ -95,7 +96,7 @@ bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
desc.set_enumerable(true);
desc.set_writable(true);
change_result = JSReceiver::DefineOwnProperty(isolate_, holder, name, &desc,
- Object::DONT_THROW);
+ kDontThrow);
}
MAYBE_RETURN(change_result, false);
return true;
@@ -332,7 +333,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
factory()->NewJSObject(object_constructor(), pretenure_);
Handle<Map> map(json_object->map());
int descriptor = 0;
- ZoneList<Handle<Object> > properties(8, zone());
+ ZoneVector<Handle<Object>> properties(zone());
DCHECK_EQ(c0_, '{');
bool transitioning = true;
@@ -411,7 +412,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
->NowContains(value));
- properties.Add(value, zone());
+ properties.push_back(value);
map = target;
descriptor++;
continue;
@@ -479,13 +480,13 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
template <bool seq_one_byte>
void JsonParser<seq_one_byte>::CommitStateToJsonObject(
Handle<JSObject> json_object, Handle<Map> map,
- ZoneList<Handle<Object> >* properties) {
+ ZoneVector<Handle<Object>>* properties) {
JSObject::AllocateStorageForMap(json_object, map);
DCHECK(!json_object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
DescriptorArray* descriptors = json_object->map()->instance_descriptors();
- int length = properties->length();
+ int length = static_cast<int>(properties->size());
for (int i = 0; i < length; i++) {
Handle<Object> value = (*properties)[i];
// Initializing store.
@@ -537,7 +538,7 @@ class ElementKindLattice {
template <bool seq_one_byte>
Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
HandleScope scope(isolate());
- ZoneList<Handle<Object> > elements(4, zone());
+ ZoneVector<Handle<Object>> elements(zone());
DCHECK_EQ(c0_, '[');
ElementKindLattice lattice;
@@ -547,7 +548,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
do {
Handle<Object> element = ParseJsonValue();
if (element.is_null()) return ReportUnexpectedCharacter();
- elements.Add(element, zone());
+ elements.push_back(element);
lattice.Update(element);
} while (MatchSkipWhiteSpace(','));
if (c0_ != ']') {
@@ -560,20 +561,21 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
Handle<Object> json_array;
const ElementsKind kind = lattice.GetElementsKind();
+ int elements_size = static_cast<int>(elements.size());
switch (kind) {
case PACKED_ELEMENTS:
case PACKED_SMI_ELEMENTS: {
Handle<FixedArray> elems =
- factory()->NewFixedArray(elements.length(), pretenure_);
- for (int i = 0; i < elements.length(); i++) elems->set(i, *elements[i]);
+ factory()->NewFixedArray(elements_size, pretenure_);
+ for (int i = 0; i < elements_size; i++) elems->set(i, *elements[i]);
json_array = factory()->NewJSArrayWithElements(elems, kind, pretenure_);
break;
}
case PACKED_DOUBLE_ELEMENTS: {
Handle<FixedDoubleArray> elems = Handle<FixedDoubleArray>::cast(
- factory()->NewFixedDoubleArray(elements.length(), pretenure_));
- for (int i = 0; i < elements.length(); i++) {
+ factory()->NewFixedDoubleArray(elements_size, pretenure_));
+ for (int i = 0; i < elements_size; i++) {
elems->set(i, elements[i]->Number());
}
json_array = factory()->NewJSArrayWithElements(elems, kind, pretenure_);
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 2d08fefda9..cab094591f 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -39,6 +39,7 @@ class JsonParser BASE_EMBEDDED {
MUST_USE_RESULT static MaybeHandle<Object> Parse(Isolate* isolate,
Handle<String> source,
Handle<Object> reviver) {
+ PostponeInterruptsScope no_debug_breaks(isolate, StackGuard::DEBUGBREAK);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
JsonParser(isolate, source).ParseJson(), Object);
@@ -144,7 +145,7 @@ class JsonParser BASE_EMBEDDED {
Zone* zone() { return &zone_; }
void CommitStateToJsonObject(Handle<JSObject> json_object, Handle<Map> map,
- ZoneList<Handle<Object> >* properties);
+ ZoneVector<Handle<Object>>* properties);
Handle<String> source_;
int source_length_;
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index bec628deca..c2b53a85bd 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -94,6 +94,7 @@ MaybeHandle<Object> JsonStringifier::Stringify(Handle<Object> object,
if (!gap->IsUndefined(isolate_) && !InitializeGap(gap)) {
return MaybeHandle<Object>();
}
+ PostponeInterruptsScope no_debug_breaks(isolate_, StackGuard::DEBUGBREAK);
Result result = SerializeObject(object);
if (result == UNCHANGED) return factory()->undefined_value();
if (result == SUCCESS) return builder_.Finish();
@@ -187,11 +188,22 @@ bool JsonStringifier::InitializeGap(Handle<Object> gap) {
MaybeHandle<Object> JsonStringifier::ApplyToJsonFunction(Handle<Object> object,
Handle<Object> key) {
HandleScope scope(isolate_);
- LookupIterator it(object, tojson_string_,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+
+ Handle<Object> object_for_lookup = object;
+ if (object->IsBigInt()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, object_for_lookup,
+ Object::ToObject(isolate_, object), Object);
+ }
+ DCHECK(object_for_lookup->IsJSReceiver());
+
+ // Retrieve toJSON function.
Handle<Object> fun;
- ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
- if (!fun->IsCallable()) return object;
+ {
+ LookupIterator it(object_for_lookup, tojson_string_,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
+ if (!fun->IsCallable()) return object;
+ }
// Call toJSON function.
if (key->IsSmi()) key = factory()->NumberToString(key);
@@ -271,7 +283,7 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
return EXCEPTION;
}
- if (object->IsJSReceiver()) {
+ if (object->IsJSReceiver() || object->IsBigInt()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, object, ApplyToJsonFunction(object, key), EXCEPTION);
}
@@ -291,6 +303,10 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
case MUTABLE_HEAP_NUMBER_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
+ case BIGINT_TYPE:
+ isolate_->Throw(
+ *factory()->NewTypeError(MessageTemplate::kBigIntSerializeJSON));
+ return EXCEPTION;
case ODDBALL_TYPE:
switch (Oddball::cast(*object)->kind()) {
case Oddball::kFalse:
@@ -338,22 +354,24 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
JsonStringifier::Result JsonStringifier::SerializeJSValue(
Handle<JSValue> object) {
- String* class_name = object->class_name();
- if (class_name == isolate_->heap()->String_string()) {
+ Object* raw = object->value();
+ if (raw->IsString()) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
SerializeString(Handle<String>::cast(value));
- } else if (class_name == isolate_->heap()->Number_string()) {
+ } else if (raw->IsNumber()) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
EXCEPTION);
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
- } else if (class_name == isolate_->heap()->Boolean_string()) {
- Object* value = JSValue::cast(*object)->value();
- DCHECK(value->IsBoolean());
- builder_.AppendCString(value->IsTrue(isolate_) ? "true" : "false");
+ } else if (raw->IsBigInt()) {
+ isolate_->Throw(
+ *factory()->NewTypeError(MessageTemplate::kBigIntSerializeJSON));
+ return EXCEPTION;
+ } else if (raw->IsBoolean()) {
+ builder_.AppendCString(raw->IsTrue(isolate_) ? "true" : "false");
} else {
// ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
return SerializeJSObject(object);
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 27dd7ff8ca..9ac0079ac2 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -472,7 +472,7 @@ void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
// args are invalid after args.Call(), create a new one in every iteration.
PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
- *receiver, *object, Object::DONT_THROW);
+ *receiver, *object, kDontThrow);
Handle<Object> element = accessor->Get(result, i);
Handle<Object> attributes;
@@ -507,7 +507,7 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
IndexedOrNamed type) {
Isolate* isolate = accumulator->isolate();
PropertyCallbackArguments enum_args(isolate, interceptor->data(), *receiver,
- *object, Object::DONT_THROW);
+ *object, kDontThrow);
Handle<JSObject> result;
if (!interceptor->enumerator()->IsUndefined(isolate)) {
@@ -519,7 +519,7 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
LOG(isolate, ApiObjectAccess(log_tag, *object));
result = enum_args.Call(enum_fun);
} else {
- DCHECK(type == kNamed);
+ DCHECK_EQ(type, kNamed);
v8::GenericNamedPropertyEnumeratorCallback enum_fun =
v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
interceptor->enumerator());
@@ -726,7 +726,7 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
return Just(false);
}
// ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
- DCHECK(KeyCollectionMode::kOwnOnly == mode_);
+ DCHECK_EQ(KeyCollectionMode::kOwnOnly, mode_);
Handle<AccessCheckInfo> access_check_info;
{
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index 680754cf20..1dc8849812 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -20,10 +20,28 @@ class Label {
public:
enum Distance { kNear, kFar };
- INLINE(Label()) {
- Unuse();
- UnuseNear();
+ Label() = default;
+
+// On ARM64, the Assembler keeps track of pointers to Labels to resolve
+// branches to distant targets. Copying labels would confuse the Assembler.
+// On other platforms, allow move construction.
+#if !V8_TARGET_ARCH_ARM64
+// In debug builds, the old Label has to be cleared in order to avoid a DCHECK
+// failure in it's destructor.
+#ifdef DEBUG
+ Label(Label&& other) { *this = std::move(other); }
+ Label& operator=(Label&& other) {
+ pos_ = other.pos_;
+ near_link_pos_ = other.near_link_pos_;
+ other.Unuse();
+ other.UnuseNear();
+ return *this;
}
+#else
+ Label(Label&&) = default;
+ Label& operator=(Label&&) = default;
+#endif
+#endif
INLINE(~Label()) {
DCHECK(!is_linked());
@@ -55,10 +73,10 @@ class Label {
// pos_ < 0 bound label, pos() returns the jump target position
// pos_ == 0 unused label
// pos_ > 0 linked label, pos() returns the last reference position
- int pos_;
+ int pos_ = 0;
// Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
- int near_link_pos_;
+ int near_link_pos_ = 0;
void bind_to(int pos) {
pos_ = -pos - 1;
@@ -78,11 +96,9 @@ class Label {
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
-#if V8_TARGET_ARCH_ARM64
- // On ARM64, the Assembler keeps track of pointers to Labels to resolve
- // branches to distant targets. Copying labels would confuse the Assembler.
- DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
-#endif
+ // Disallow copy construction and assignment, but allow move construction and
+ // move assignment on selected platforms (see above).
+ DISALLOW_COPY_AND_ASSIGN(Label);
};
} // namespace internal
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index ddec009c83..c75eea6fd8 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -219,10 +219,8 @@ LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
return;
}
- int inobject_properties = map->GetInObjectProperties();
- DCHECK(inobject_properties > 0);
- header_size_ = map->instance_size() - (inobject_properties * kPointerSize);
- DCHECK(header_size_ >= 0);
+ header_size_ = map->GetInObjectPropertiesStartInWords() * kPointerSize;
+ DCHECK_GE(header_size_, 0);
all_fields_tagged_ = false;
}
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index ed3f738735..41882cedaa 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -10,8 +10,6 @@
#include "src/handles-inl.h"
#include "src/objects-inl.h"
-using v8::base::bits::CountTrailingZeros32;
-
namespace v8 {
namespace internal {
@@ -122,7 +120,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
int* out_sequence_length) {
- DCHECK(max_sequence_length > 0);
+ DCHECK_GT(max_sequence_length, 0);
if (IsFastPointerLayout()) {
*out_sequence_length = max_sequence_length;
return true;
@@ -144,7 +142,8 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
bool is_tagged = (value & layout_mask) == 0;
if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
value = value & ~(layout_mask - 1); // Clear bits we are not interested in.
- int sequence_length = CountTrailingZeros32(value) - layout_bit_index;
+ int sequence_length =
+ base::bits::CountTrailingZeros(value) - layout_bit_index;
if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
// This is a contiguous sequence till the end of current word, proceed
@@ -157,7 +156,7 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
bool cur_is_tagged = (value & 1) == 0;
if (cur_is_tagged != is_tagged) break;
if (!is_tagged) value = ~value; // Count set bits instead.
- int cur_sequence_length = CountTrailingZeros32(value);
+ int cur_sequence_length = base::bits::CountTrailingZeros(value);
sequence_length += cur_sequence_length;
if (sequence_length >= max_sequence_length) break;
if (cur_sequence_length != kBitsPerLayoutWord) break;
@@ -203,7 +202,7 @@ bool LayoutDescriptorHelper::IsTagged(
int sequence_length;
bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length,
&sequence_length);
- DCHECK(sequence_length > 0);
+ DCHECK_GT(sequence_length, 0);
if (offset_in_bytes < header_size_) {
// Object headers do not contain non-tagged fields. Check if the contiguous
// region continues after the header.
diff --git a/deps/v8/src/libplatform/default-background-task-runner.cc b/deps/v8/src/libplatform/default-background-task-runner.cc
new file mode 100644
index 0000000000..b556b6c3fe
--- /dev/null
+++ b/deps/v8/src/libplatform/default-background-task-runner.cc
@@ -0,0 +1,59 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-background-task-runner.h"
+
+#include "src/base/platform/mutex.h"
+#include "src/libplatform/worker-thread.h"
+
+namespace v8 {
+namespace platform {
+
+DefaultBackgroundTaskRunner::DefaultBackgroundTaskRunner(
+ uint32_t thread_pool_size) {
+ for (uint32_t i = 0; i < thread_pool_size; ++i) {
+ thread_pool_.push_back(base::make_unique<WorkerThread>(&queue_));
+ }
+}
+
+DefaultBackgroundTaskRunner::~DefaultBackgroundTaskRunner() {
+ // This destructor is needed because we have unique_ptr to the WorkerThreads,
+ // und the {WorkerThread} class is forward declared in the header file.
+}
+
+void DefaultBackgroundTaskRunner::Terminate() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ terminated_ = true;
+ queue_.Terminate();
+ // Clearing the thread pool lets all worker threads join.
+ thread_pool_.clear();
+}
+
+void DefaultBackgroundTaskRunner::PostTask(std::unique_ptr<Task> task) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (terminated_) return;
+ queue_.Append(std::move(task));
+}
+
+void DefaultBackgroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (terminated_) return;
+ // There is no use case for this function on a background thread at the
+ // moment, but it is still part of the interface.
+ UNIMPLEMENTED();
+}
+
+void DefaultBackgroundTaskRunner::PostIdleTask(std::unique_ptr<IdleTask> task) {
+ // There are no idle background tasks.
+ UNREACHABLE();
+}
+
+bool DefaultBackgroundTaskRunner::IdleTasksEnabled() {
+ // There are no idle background tasks.
+ return false;
+}
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/default-background-task-runner.h b/deps/v8/src/libplatform/default-background-task-runner.h
new file mode 100644
index 0000000000..ce2f7efa05
--- /dev/null
+++ b/deps/v8/src/libplatform/default-background-task-runner.h
@@ -0,0 +1,45 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_DEFAULT_BACKGROUND_TASK_RUNNER_H_
+#define V8_LIBPLATFORM_DEFAULT_BACKGROUND_TASK_RUNNER_H_
+
+#include "include/v8-platform.h"
+#include "src/libplatform/task-queue.h"
+
+namespace v8 {
+namespace platform {
+
+class Thread;
+class WorkerThread;
+
+class V8_PLATFORM_EXPORT DefaultBackgroundTaskRunner
+ : public NON_EXPORTED_BASE(TaskRunner) {
+ public:
+ DefaultBackgroundTaskRunner(uint32_t thread_pool_size);
+
+ ~DefaultBackgroundTaskRunner();
+
+ void Terminate();
+
+ // v8::TaskRunner implementation.
+ void PostTask(std::unique_ptr<Task> task) override;
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override;
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) override;
+
+ bool IdleTasksEnabled() override;
+
+ private:
+ bool terminated_ = false;
+ base::Mutex lock_;
+ TaskQueue queue_;
+ std::vector<std::unique_ptr<WorkerThread>> thread_pool_;
+};
+
+} // namespace platform
+} // namespace v8
+#endif // V8_LIBPLATFORM_DEFAULT_BACKGROUND_TASK_RUNNER_H_
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.cc b/deps/v8/src/libplatform/default-foreground-task-runner.cc
new file mode 100644
index 0000000000..c9cb5fa4d7
--- /dev/null
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.cc
@@ -0,0 +1,115 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-foreground-task-runner.h"
+
+#include "src/base/platform/mutex.h"
+#include "src/libplatform/default-platform.h"
+
+namespace v8 {
+namespace platform {
+
+DefaultForegroundTaskRunner::DefaultForegroundTaskRunner(
+ IdleTaskSupport idle_task_support, TimeFunction time_function)
+ : event_loop_control_(0),
+ idle_task_support_(idle_task_support),
+ time_function_(time_function) {}
+
+void DefaultForegroundTaskRunner::Terminate() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ terminated_ = true;
+
+ // Drain the task queues.
+ while (!task_queue_.empty()) task_queue_.pop();
+ while (!delayed_task_queue_.empty()) delayed_task_queue_.pop();
+ while (!idle_task_queue_.empty()) idle_task_queue_.pop();
+}
+
+void DefaultForegroundTaskRunner::PostTaskLocked(
+ std::unique_ptr<Task> task, const base::LockGuard<base::Mutex>& guard) {
+ if (terminated_) return;
+ task_queue_.push(std::move(task));
+ event_loop_control_.Signal();
+}
+
+void DefaultForegroundTaskRunner::PostTask(std::unique_ptr<Task> task) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ PostTaskLocked(std::move(task), guard);
+}
+
+double DefaultForegroundTaskRunner::MonotonicallyIncreasingTime() {
+ return time_function_();
+}
+
+void DefaultForegroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) {
+ DCHECK_GE(delay_in_seconds, 0.0);
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (terminated_) return;
+ double deadline = MonotonicallyIncreasingTime() + delay_in_seconds;
+ delayed_task_queue_.push(std::make_pair(deadline, std::move(task)));
+}
+
+void DefaultForegroundTaskRunner::PostIdleTask(std::unique_ptr<IdleTask> task) {
+ CHECK_EQ(IdleTaskSupport::kEnabled, idle_task_support_);
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (terminated_) return;
+ idle_task_queue_.push(std::move(task));
+}
+
+bool DefaultForegroundTaskRunner::IdleTasksEnabled() {
+ return idle_task_support_ == IdleTaskSupport::kEnabled;
+}
+
+std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ // Move delayed tasks that hit their deadline to the main queue.
+ std::unique_ptr<Task> task = PopTaskFromDelayedQueueLocked(guard);
+ while (task) {
+ PostTaskLocked(std::move(task), guard);
+ task = PopTaskFromDelayedQueueLocked(guard);
+ }
+
+ if (task_queue_.empty()) return {};
+
+ task = std::move(task_queue_.front());
+ task_queue_.pop();
+
+ return task;
+}
+
+std::unique_ptr<Task>
+DefaultForegroundTaskRunner::PopTaskFromDelayedQueueLocked(
+ const base::LockGuard<base::Mutex>& guard) {
+ if (delayed_task_queue_.empty()) return {};
+
+ double now = MonotonicallyIncreasingTime();
+ const DelayedEntry& deadline_and_task = delayed_task_queue_.top();
+ if (deadline_and_task.first > now) return {};
+ // The const_cast here is necessary because there does not exist a clean way
+ // to get a unique_ptr out of the priority queue. We provide the priority
+ // queue with a custom comparison operator to make sure that the priority
+ // queue does not access the unique_ptr. Therefore it should be safe to reset
+ // the unique_ptr in the priority queue here. Note that the DelayedEntry is
+ // removed from the priority_queue immediately afterwards.
+ std::unique_ptr<Task> result =
+ std::move(const_cast<DelayedEntry&>(deadline_and_task).second);
+ delayed_task_queue_.pop();
+ return result;
+}
+
+std::unique_ptr<IdleTask> DefaultForegroundTaskRunner::PopTaskFromIdleQueue() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (idle_task_queue_.empty()) return {};
+
+ std::unique_ptr<IdleTask> task = std::move(idle_task_queue_.front());
+ idle_task_queue_.pop();
+
+ return task;
+}
+
+void DefaultForegroundTaskRunner::WaitForTask() { event_loop_control_.Wait(); }
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.h b/deps/v8/src/libplatform/default-foreground-task-runner.h
new file mode 100644
index 0000000000..7dfb487828
--- /dev/null
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.h
@@ -0,0 +1,84 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_
+#define V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_
+
+#include <queue>
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8-platform.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+
+namespace v8 {
+namespace platform {
+
+class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
+ : public NON_EXPORTED_BASE(TaskRunner) {
+ public:
+ using TimeFunction = double (*)();
+
+ DefaultForegroundTaskRunner(IdleTaskSupport idle_task_support,
+ TimeFunction time_function);
+
+ void Terminate();
+
+ std::unique_ptr<Task> PopTaskFromQueue();
+
+ std::unique_ptr<IdleTask> PopTaskFromIdleQueue();
+
+ void WaitForTask();
+
+ double MonotonicallyIncreasingTime();
+
+ // v8::TaskRunner implementation.
+ void PostTask(std::unique_ptr<Task> task) override;
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override;
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) override;
+
+ bool IdleTasksEnabled() override;
+
+ private:
+ // The same as PostTask, but the lock is already held by the caller. The
+ // {guard} parameter should make sure that the caller is holding the lock.
+ void PostTaskLocked(std::unique_ptr<Task> task,
+ const base::LockGuard<base::Mutex>& guard);
+
+ // A caller of this function has to hold {lock_}. The {guard} parameter should
+ // make sure that the caller is holding the lock.
+ std::unique_ptr<Task> PopTaskFromDelayedQueueLocked(
+ const base::LockGuard<base::Mutex>& guard);
+
+ bool terminated_ = false;
+ base::Mutex lock_;
+ base::Semaphore event_loop_control_;
+ std::queue<std::unique_ptr<Task>> task_queue_;
+ IdleTaskSupport idle_task_support_;
+ std::queue<std::unique_ptr<IdleTask>> idle_task_queue_;
+
+ // Some helper constructs for the {delayed_task_queue_}.
+ using DelayedEntry = std::pair<double, std::unique_ptr<Task>>;
+ // Define a comparison operator for the delayed_task_queue_ to make sure
+ // that the unique_ptr in the DelayedEntry is not accessed in the priority
+ // queue. This is necessary because we have to reset the unique_ptr when we
+ // remove a DelayedEntry from the priority queue.
+ struct DelayedEntryCompare {
+ bool operator()(DelayedEntry& left, DelayedEntry& right) {
+ return left.first > right.first;
+ }
+ };
+ std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
+ DelayedEntryCompare>
+ delayed_task_queue_;
+
+ TimeFunction time_function_;
+};
+
+} // namespace platform
+} // namespace v8
+#endif // V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index fec3ab2365..833e37a290 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -13,7 +13,8 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
-#include "src/libplatform/worker-thread.h"
+#include "src/libplatform/default-background-task-runner.h"
+#include "src/libplatform/default-foreground-task-runner.h"
namespace v8 {
namespace platform {
@@ -29,18 +30,28 @@ void PrintStackTrace() {
} // namespace
-v8::Platform* CreateDefaultPlatform(
+std::unique_ptr<v8::Platform> NewDefaultPlatform(
int thread_pool_size, IdleTaskSupport idle_task_support,
InProcessStackDumping in_process_stack_dumping,
- v8::TracingController* tracing_controller) {
+ std::unique_ptr<v8::TracingController> tracing_controller) {
if (in_process_stack_dumping == InProcessStackDumping::kEnabled) {
v8::base::debug::EnableInProcessStackDumping();
}
- DefaultPlatform* platform =
- new DefaultPlatform(idle_task_support, tracing_controller);
+ std::unique_ptr<DefaultPlatform> platform(
+ new DefaultPlatform(idle_task_support, std::move(tracing_controller)));
platform->SetThreadPoolSize(thread_pool_size);
- platform->EnsureInitialized();
- return platform;
+ platform->EnsureBackgroundTaskRunnerInitialized();
+ return std::move(platform);
+}
+
+v8::Platform* CreateDefaultPlatform(
+ int thread_pool_size, IdleTaskSupport idle_task_support,
+ InProcessStackDumping in_process_stack_dumping,
+ v8::TracingController* tracing_controller) {
+ return NewDefaultPlatform(
+ thread_pool_size, idle_task_support, in_process_stack_dumping,
+ std::unique_ptr<v8::TracingController>(tracing_controller))
+ .release();
}
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
@@ -50,8 +61,6 @@ bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
}
void EnsureEventLoopInitialized(v8::Platform* platform, v8::Isolate* isolate) {
- return static_cast<DefaultPlatform*>(platform)->EnsureEventLoopInitialized(
- isolate);
}
void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
@@ -64,19 +73,19 @@ void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller) {
static_cast<DefaultPlatform*>(platform)->SetTracingController(
- tracing_controller);
+ std::unique_ptr<v8::TracingController>(tracing_controller));
}
const int DefaultPlatform::kMaxThreadPoolSize = 8;
-DefaultPlatform::DefaultPlatform(IdleTaskSupport idle_task_support,
- v8::TracingController* tracing_controller)
- : initialized_(false),
- thread_pool_size_(0),
- idle_task_support_(idle_task_support) {
- if (tracing_controller) {
- tracing_controller_.reset(tracing_controller);
- } else {
+DefaultPlatform::DefaultPlatform(
+ IdleTaskSupport idle_task_support,
+ std::unique_ptr<v8::TracingController> tracing_controller)
+ : thread_pool_size_(0),
+ idle_task_support_(idle_task_support),
+ tracing_controller_(std::move(tracing_controller)),
+ time_function_for_testing_(nullptr) {
+ if (!tracing_controller_) {
tracing::TracingController* controller = new tracing::TracingController();
controller->Initialize(nullptr);
tracing_controller_.reset(controller);
@@ -85,38 +94,15 @@ DefaultPlatform::DefaultPlatform(IdleTaskSupport idle_task_support,
DefaultPlatform::~DefaultPlatform() {
base::LockGuard<base::Mutex> guard(&lock_);
- queue_.Terminate();
- if (initialized_) {
- for (auto i = thread_pool_.begin(); i != thread_pool_.end(); ++i) {
- delete *i;
- }
- }
- for (auto i = main_thread_queue_.begin(); i != main_thread_queue_.end();
- ++i) {
- while (!i->second.empty()) {
- delete i->second.front();
- i->second.pop();
- }
- }
- for (auto i = main_thread_delayed_queue_.begin();
- i != main_thread_delayed_queue_.end(); ++i) {
- while (!i->second.empty()) {
- delete i->second.top().second;
- i->second.pop();
- }
- }
- for (auto& i : main_thread_idle_queue_) {
- while (!i.second.empty()) {
- delete i.second.front();
- i.second.pop();
- }
+ if (background_task_runner_) background_task_runner_->Terminate();
+ for (auto it : foreground_task_runner_map_) {
+ it.second->Terminate();
}
}
-
void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
base::LockGuard<base::Mutex> guard(&lock_);
- DCHECK(thread_pool_size >= 0);
+ DCHECK_GE(thread_pool_size, 0);
if (thread_pool_size < 1) {
thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
}
@@ -124,149 +110,117 @@ void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
}
-
-void DefaultPlatform::EnsureInitialized() {
+void DefaultPlatform::EnsureBackgroundTaskRunnerInitialized() {
base::LockGuard<base::Mutex> guard(&lock_);
- if (initialized_) return;
- initialized_ = true;
-
- for (int i = 0; i < thread_pool_size_; ++i)
- thread_pool_.push_back(new WorkerThread(&queue_));
-}
-
-
-Task* DefaultPlatform::PopTaskInMainThreadQueue(v8::Isolate* isolate) {
- auto it = main_thread_queue_.find(isolate);
- if (it == main_thread_queue_.end() || it->second.empty()) {
- return NULL;
+ if (!background_task_runner_) {
+ background_task_runner_ =
+ std::make_shared<DefaultBackgroundTaskRunner>(thread_pool_size_);
}
- Task* task = it->second.front();
- it->second.pop();
- return task;
}
+namespace {
-Task* DefaultPlatform::PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate) {
- auto it = main_thread_delayed_queue_.find(isolate);
- if (it == main_thread_delayed_queue_.end() || it->second.empty()) {
- return NULL;
- }
- double now = MonotonicallyIncreasingTime();
- std::pair<double, Task*> deadline_and_task = it->second.top();
- if (deadline_and_task.first > now) {
- return NULL;
- }
- it->second.pop();
- return deadline_and_task.second;
+double DefaultTimeFunction() {
+ return base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
-IdleTask* DefaultPlatform::PopTaskInMainThreadIdleQueue(v8::Isolate* isolate) {
- auto it = main_thread_idle_queue_.find(isolate);
- if (it == main_thread_idle_queue_.end() || it->second.empty()) {
- return nullptr;
- }
- IdleTask* task = it->second.front();
- it->second.pop();
- return task;
-}
+} // namespace
-void DefaultPlatform::EnsureEventLoopInitialized(v8::Isolate* isolate) {
+void DefaultPlatform::SetTimeFunctionForTesting(
+ DefaultPlatform::TimeFunction time_function) {
base::LockGuard<base::Mutex> guard(&lock_);
- if (event_loop_control_.count(isolate) == 0) {
- event_loop_control_.insert(std::make_pair(
- isolate, std::unique_ptr<base::Semaphore>(new base::Semaphore(0))));
- }
-}
-
-void DefaultPlatform::WaitForForegroundWork(v8::Isolate* isolate) {
- base::Semaphore* semaphore = nullptr;
- {
- base::LockGuard<base::Mutex> guard(&lock_);
- DCHECK_EQ(event_loop_control_.count(isolate), 1);
- semaphore = event_loop_control_[isolate].get();
- }
- DCHECK_NOT_NULL(semaphore);
- semaphore->Wait();
+ time_function_for_testing_ = time_function;
+ // The time function has to be right after the construction of the platform.
+ DCHECK(foreground_task_runner_map_.empty());
}
bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
MessageLoopBehavior behavior) {
- if (behavior == MessageLoopBehavior::kWaitForWork) {
- WaitForForegroundWork(isolate);
- }
- Task* task = NULL;
+ bool failed_result = behavior == MessageLoopBehavior::kWaitForWork;
+ std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
{
base::LockGuard<base::Mutex> guard(&lock_);
-
- // Move delayed tasks that hit their deadline to the main queue.
- task = PopTaskInMainThreadDelayedQueue(isolate);
- while (task != NULL) {
- ScheduleOnForegroundThread(isolate, task);
- task = PopTaskInMainThreadDelayedQueue(isolate);
+ if (foreground_task_runner_map_.find(isolate) ==
+ foreground_task_runner_map_.end()) {
+ return failed_result;
}
+ task_runner = foreground_task_runner_map_[isolate];
+ }
+ if (behavior == MessageLoopBehavior::kWaitForWork) {
+ task_runner->WaitForTask();
+ }
- task = PopTaskInMainThreadQueue(isolate);
+ std::unique_ptr<Task> task = task_runner->PopTaskFromQueue();
+ if (!task) return failed_result;
- if (task == NULL) {
- return behavior == MessageLoopBehavior::kWaitForWork;
- }
- }
task->Run();
- delete task;
return true;
}
void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
double idle_time_in_seconds) {
- DCHECK(IdleTaskSupport::kEnabled == idle_task_support_);
+ DCHECK_EQ(IdleTaskSupport::kEnabled, idle_task_support_);
+ std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (foreground_task_runner_map_.find(isolate) ==
+ foreground_task_runner_map_.end()) {
+ return;
+ }
+ task_runner = foreground_task_runner_map_[isolate];
+ }
double deadline_in_seconds =
MonotonicallyIncreasingTime() + idle_time_in_seconds;
+
while (deadline_in_seconds > MonotonicallyIncreasingTime()) {
- {
- IdleTask* task;
- {
- base::LockGuard<base::Mutex> guard(&lock_);
- task = PopTaskInMainThreadIdleQueue(isolate);
- }
- if (task == nullptr) return;
- task->Run(deadline_in_seconds);
- delete task;
- }
+ std::unique_ptr<IdleTask> task = task_runner->PopTaskFromIdleQueue();
+ if (!task) return;
+ task->Run(deadline_in_seconds);
}
}
-void DefaultPlatform::CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) {
- EnsureInitialized();
- queue_.Append(task);
+std::shared_ptr<TaskRunner> DefaultPlatform::GetForegroundTaskRunner(
+ v8::Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (foreground_task_runner_map_.find(isolate) ==
+ foreground_task_runner_map_.end()) {
+ foreground_task_runner_map_.insert(std::make_pair(
+ isolate, std::make_shared<DefaultForegroundTaskRunner>(
+ idle_task_support_, time_function_for_testing_
+ ? time_function_for_testing_
+ : DefaultTimeFunction)));
+ }
+ return foreground_task_runner_map_[isolate];
}
-void DefaultPlatform::ScheduleOnForegroundThread(v8::Isolate* isolate,
- Task* task) {
- main_thread_queue_[isolate].push(task);
- if (event_loop_control_.count(isolate) != 0) {
- event_loop_control_[isolate]->Signal();
- }
+std::shared_ptr<TaskRunner> DefaultPlatform::GetBackgroundTaskRunner(
+ v8::Isolate*) {
+ EnsureBackgroundTaskRunnerInitialized();
+ return background_task_runner_;
+}
+
+void DefaultPlatform::CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) {
+ GetBackgroundTaskRunner(nullptr)->PostTask(std::unique_ptr<Task>(task));
}
void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
- base::LockGuard<base::Mutex> guard(&lock_);
- ScheduleOnForegroundThread(isolate, task);
+ GetForegroundTaskRunner(isolate)->PostTask(std::unique_ptr<Task>(task));
}
void DefaultPlatform::CallDelayedOnForegroundThread(Isolate* isolate,
Task* task,
double delay_in_seconds) {
- base::LockGuard<base::Mutex> guard(&lock_);
- double deadline = MonotonicallyIncreasingTime() + delay_in_seconds;
- main_thread_delayed_queue_[isolate].push(std::make_pair(deadline, task));
+ GetForegroundTaskRunner(isolate)->PostDelayedTask(std::unique_ptr<Task>(task),
+ delay_in_seconds);
}
void DefaultPlatform::CallIdleOnForegroundThread(Isolate* isolate,
IdleTask* task) {
- base::LockGuard<base::Mutex> guard(&lock_);
- main_thread_idle_queue_[isolate].push(task);
+ GetForegroundTaskRunner(isolate)->PostIdleTask(
+ std::unique_ptr<IdleTask>(task));
}
bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) {
@@ -274,8 +228,8 @@ bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) {
}
double DefaultPlatform::MonotonicallyIncreasingTime() {
- return base::TimeTicks::HighResolutionNow().ToInternalValue() /
- static_cast<double>(base::Time::kMicrosecondsPerSecond);
+ if (time_function_for_testing_) return time_function_for_testing_();
+ return DefaultTimeFunction();
}
double DefaultPlatform::CurrentClockTimeMillis() {
@@ -287,9 +241,9 @@ TracingController* DefaultPlatform::GetTracingController() {
}
void DefaultPlatform::SetTracingController(
- v8::TracingController* tracing_controller) {
- DCHECK_NOT_NULL(tracing_controller);
- tracing_controller_.reset(tracing_controller);
+ std::unique_ptr<v8::TracingController> tracing_controller) {
+ DCHECK_NOT_NULL(tracing_controller.get());
+ tracing_controller_ = std::move(tracing_controller);
}
size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 281ca27e89..3280a7aa7c 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -18,37 +18,47 @@
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
-#include "src/libplatform/task-queue.h"
+#include "src/base/platform/time.h"
namespace v8 {
namespace platform {
-class TaskQueue;
class Thread;
class WorkerThread;
+class DefaultForegroundTaskRunner;
+class DefaultBackgroundTaskRunner;
class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
public:
explicit DefaultPlatform(
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
- v8::TracingController* tracing_controller = nullptr);
+ std::unique_ptr<v8::TracingController> tracing_controller = {});
+
virtual ~DefaultPlatform();
void SetThreadPoolSize(int thread_pool_size);
- void EnsureInitialized();
+ void EnsureBackgroundTaskRunnerInitialized();
bool PumpMessageLoop(
v8::Isolate* isolate,
MessageLoopBehavior behavior = MessageLoopBehavior::kDoNotWait);
- void EnsureEventLoopInitialized(v8::Isolate* isolate);
void RunIdleTasks(v8::Isolate* isolate, double idle_time_in_seconds);
- void SetTracingController(v8::TracingController* tracing_controller);
+ void SetTracingController(
+ std::unique_ptr<v8::TracingController> tracing_controller);
+
+ using TimeFunction = double (*)();
+
+ void SetTimeFunctionForTesting(TimeFunction time_function);
// v8::Platform implementation.
size_t NumberOfAvailableBackgroundThreads() override;
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override;
+ std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
+ v8::Isolate* isolate) override;
void CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) override;
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;
@@ -64,34 +74,19 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
private:
static const int kMaxThreadPoolSize;
- Task* PopTaskInMainThreadQueue(v8::Isolate* isolate);
- Task* PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate);
- IdleTask* PopTaskInMainThreadIdleQueue(v8::Isolate* isolate);
-
- void WaitForForegroundWork(v8::Isolate* isolate);
- void ScheduleOnForegroundThread(v8::Isolate* isolate, Task* task);
-
base::Mutex lock_;
- bool initialized_;
int thread_pool_size_;
IdleTaskSupport idle_task_support_;
- std::vector<WorkerThread*> thread_pool_;
- TaskQueue queue_;
- std::map<v8::Isolate*, std::queue<Task*>> main_thread_queue_;
- std::map<v8::Isolate*, std::queue<IdleTask*>> main_thread_idle_queue_;
- std::map<v8::Isolate*, std::unique_ptr<base::Semaphore>> event_loop_control_;
-
- typedef std::pair<double, Task*> DelayedEntry;
- std::map<v8::Isolate*,
- std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
- std::greater<DelayedEntry> > >
- main_thread_delayed_queue_;
+ std::shared_ptr<DefaultBackgroundTaskRunner> background_task_runner_;
+ std::map<v8::Isolate*, std::shared_ptr<DefaultForegroundTaskRunner>>
+ foreground_task_runner_map_;
+
std::unique_ptr<TracingController> tracing_controller_;
+ TimeFunction time_function_for_testing_;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
-
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/task-queue.cc b/deps/v8/src/libplatform/task-queue.cc
index ada13d9fe9..19d668c095 100644
--- a/deps/v8/src/libplatform/task-queue.cc
+++ b/deps/v8/src/libplatform/task-queue.cc
@@ -4,6 +4,7 @@
#include "src/libplatform/task-queue.h"
+#include "include/v8-platform.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
@@ -20,27 +21,25 @@ TaskQueue::~TaskQueue() {
DCHECK(task_queue_.empty());
}
-
-void TaskQueue::Append(Task* task) {
+void TaskQueue::Append(std::unique_ptr<Task> task) {
base::LockGuard<base::Mutex> guard(&lock_);
DCHECK(!terminated_);
- task_queue_.push(task);
+ task_queue_.push(std::move(task));
process_queue_semaphore_.Signal();
}
-
-Task* TaskQueue::GetNext() {
+std::unique_ptr<Task> TaskQueue::GetNext() {
for (;;) {
{
base::LockGuard<base::Mutex> guard(&lock_);
if (!task_queue_.empty()) {
- Task* result = task_queue_.front();
+ std::unique_ptr<Task> result = std::move(task_queue_.front());
task_queue_.pop();
return result;
}
if (terminated_) {
process_queue_semaphore_.Signal();
- return NULL;
+ return nullptr;
}
}
process_queue_semaphore_.Wait();
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
index 441b5b2d7f..f8c76498f2 100644
--- a/deps/v8/src/libplatform/task-queue.h
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -25,11 +25,11 @@ class V8_PLATFORM_EXPORT TaskQueue {
~TaskQueue();
// Appends a task to the queue. The queue takes ownership of |task|.
- void Append(Task* task);
+ void Append(std::unique_ptr<Task> task);
// Returns the next task to process. Blocks if no task is available. Returns
- // NULL if the queue is terminated.
- Task* GetNext();
+ // nullptr if the queue is terminated.
+ std::unique_ptr<Task> GetNext();
// Terminate the queue.
void Terminate();
@@ -41,7 +41,7 @@ class V8_PLATFORM_EXPORT TaskQueue {
base::Semaphore process_queue_semaphore_;
base::Mutex lock_;
- std::queue<Task*> task_queue_;
+ std::queue<std::unique_ptr<Task>> task_queue_;
bool terminated_;
DISALLOW_COPY_AND_ASSIGN(TaskQueue);
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.cc b/deps/v8/src/libplatform/tracing/trace-buffer.cc
index 354f0459f6..c7142ea520 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.cc
@@ -41,9 +41,9 @@ TraceObject* TraceBufferRingBuffer::GetEventByHandle(uint64_t handle) {
size_t chunk_index, event_index;
uint32_t chunk_seq;
ExtractHandle(handle, &chunk_index, &chunk_seq, &event_index);
- if (chunk_index >= chunks_.size()) return NULL;
+ if (chunk_index >= chunks_.size()) return nullptr;
auto& chunk = chunks_[chunk_index];
- if (!chunk || chunk->seq() != chunk_seq) return NULL;
+ if (!chunk || chunk->seq() != chunk_seq) return nullptr;
return chunk->GetEventAt(event_index);
}
diff --git a/deps/v8/src/libplatform/tracing/trace-config.cc b/deps/v8/src/libplatform/tracing/trace-config.cc
index ff90eff71c..04ac3f03e7 100644
--- a/deps/v8/src/libplatform/tracing/trace-config.cc
+++ b/deps/v8/src/libplatform/tracing/trace-config.cc
@@ -33,7 +33,7 @@ bool TraceConfig::IsCategoryGroupEnabled(const char* category_group) const {
}
void TraceConfig::AddIncludedCategory(const char* included_category) {
- DCHECK(included_category != NULL && strlen(included_category) > 0);
+ DCHECK(included_category != nullptr && strlen(included_category) > 0);
included_categories_.push_back(included_category);
}
diff --git a/deps/v8/src/libplatform/tracing/trace-object.cc b/deps/v8/src/libplatform/tracing/trace-object.cc
index bc05f797fa..6d1a6d62b5 100644
--- a/deps/v8/src/libplatform/tracing/trace-object.cc
+++ b/deps/v8/src/libplatform/tracing/trace-object.cc
@@ -13,8 +13,8 @@ namespace v8 {
namespace platform {
namespace tracing {
-// We perform checks for NULL strings since it is possible that a string arg
-// value is NULL.
+// We perform checks for nullptr strings since it is possible that a string arg
+// value is nullptr.
V8_INLINE static size_t GetAllocLength(const char* str) {
return str ? strlen(str) + 1 : 0;
}
@@ -37,7 +37,7 @@ void TraceObject::Initialize(
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags, int64_t timestamp, int64_t cpu_timestamp) {
+ unsigned int flags) {
pid_ = base::OS::GetCurrentProcessId();
tid_ = base::OS::GetCurrentThreadId();
phase_ = phase;
@@ -47,8 +47,8 @@ void TraceObject::Initialize(
id_ = id;
bind_id_ = bind_id;
flags_ = flags;
- ts_ = timestamp;
- tts_ = cpu_timestamp;
+ ts_ = base::TimeTicks::HighResolutionNow().ToInternalValue();
+ tts_ = base::ThreadTicks::Now().ToInternalValue();
duration_ = 0;
cpu_duration_ = 0;
@@ -103,9 +103,9 @@ void TraceObject::Initialize(
TraceObject::~TraceObject() { delete[] parameter_copy_storage_; }
-void TraceObject::UpdateDuration(int64_t timestamp, int64_t cpu_timestamp) {
- duration_ = timestamp - ts_;
- cpu_duration_ = cpu_timestamp - tts_;
+void TraceObject::UpdateDuration() {
+ duration_ = base::TimeTicks::HighResolutionNow().ToInternalValue() - ts_;
+ cpu_duration_ = base::ThreadTicks::Now().ToInternalValue() - tts_;
}
void TraceObject::InitializeForTesting(
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.cc b/deps/v8/src/libplatform/tracing/trace-writer.cc
index 7445087c56..36a8783499 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-writer.cc
@@ -102,7 +102,7 @@ void JSONTraceWriter::AppendArgValue(uint8_t type,
case TRACE_VALUE_TYPE_STRING:
case TRACE_VALUE_TYPE_COPY_STRING:
if (value.as_string == nullptr) {
- stream_ << "\"NULL\"";
+ stream_ << "\"nullptr\"";
} else {
WriteJSONStringToStream(value.as_string, stream_);
}
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index c024f18291..0c44e22734 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -9,7 +9,6 @@
#include "src/base/atomicops.h"
#include "src/base/platform/mutex.h"
-#include "src/base/platform/time.h"
namespace v8 {
namespace platform {
@@ -49,14 +48,6 @@ void TracingController::Initialize(TraceBuffer* trace_buffer) {
mutex_.reset(new base::Mutex());
}
-int64_t TracingController::CurrentTimestampMicroseconds() {
- return base::TimeTicks::HighResolutionNow().ToInternalValue();
-}
-
-int64_t TracingController::CurrentCpuTimestampMicroseconds() {
- return base::ThreadTicks::Now().ToInternalValue();
-}
-
uint64_t TracingController::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
@@ -67,28 +58,9 @@ uint64_t TracingController::AddTraceEvent(
uint64_t handle;
TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
if (trace_object) {
- trace_object->Initialize(
- phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags,
- CurrentTimestampMicroseconds(), CurrentCpuTimestampMicroseconds());
- }
- return handle;
-}
-
-uint64_t TracingController::AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_enabled_flag, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags, int64_t timestamp) {
- uint64_t handle;
- TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
- if (trace_object) {
trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
bind_id, num_args, arg_names, arg_types,
- arg_values, arg_convertables, flags, timestamp,
- CurrentCpuTimestampMicroseconds());
+ arg_values, arg_convertables, flags);
}
return handle;
}
@@ -97,8 +69,7 @@ void TracingController::UpdateTraceEventDuration(
const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {
TraceObject* trace_object = trace_buffer_->GetEventByHandle(handle);
if (!trace_object) return;
- trace_object->UpdateDuration(CurrentTimestampMicroseconds(),
- CurrentCpuTimestampMicroseconds());
+ trace_object->UpdateDuration();
}
const uint8_t* TracingController::GetCategoryGroupEnabled(
@@ -199,7 +170,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
}
}
- unsigned char* category_group_enabled = NULL;
+ unsigned char* category_group_enabled = nullptr;
size_t category_index = base::Acquire_Load(&g_category_index);
for (size_t i = 0; i < category_index; ++i) {
if (strcmp(g_category_groups[i], category_group) == 0) {
diff --git a/deps/v8/src/libplatform/worker-thread.cc b/deps/v8/src/libplatform/worker-thread.cc
index a8e714a896..c4fdd19110 100644
--- a/deps/v8/src/libplatform/worker-thread.cc
+++ b/deps/v8/src/libplatform/worker-thread.cc
@@ -22,9 +22,8 @@ WorkerThread::~WorkerThread() {
void WorkerThread::Run() {
- while (Task* task = queue_->GetNext()) {
+ while (std::unique_ptr<Task> task = queue_->GetNext()) {
task->Run();
- delete task;
}
}
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 7519720beb..75161fc7d6 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -227,7 +227,7 @@ class SamplerManager {
base::HashMap::Entry* entry =
sampler_map_.LookupOrInsert(ThreadKey(thread_id),
ThreadHash(thread_id));
- DCHECK(entry != nullptr);
+ DCHECK_NOT_NULL(entry);
if (entry->value == nullptr) {
SamplerList* samplers = new SamplerList();
samplers->push_back(sampler);
@@ -256,7 +256,7 @@ class SamplerManager {
void* thread_key = ThreadKey(thread_id);
uint32_t thread_hash = ThreadHash(thread_id);
base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash);
- DCHECK(entry != nullptr);
+ DCHECK_NOT_NULL(entry);
SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
++iter) {
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/locked-queue-inl.h
index eb18f649ae..31e8bd2fd8 100644
--- a/deps/v8/src/locked-queue-inl.h
+++ b/deps/v8/src/locked-queue-inl.h
@@ -22,7 +22,7 @@ struct LockedQueue<Record>::Node : Malloced {
template <typename Record>
inline LockedQueue<Record>::LockedQueue() {
head_ = new Node();
- CHECK(head_ != nullptr);
+ CHECK_NOT_NULL(head_);
tail_ = head_;
}
@@ -43,7 +43,7 @@ inline LockedQueue<Record>::~LockedQueue() {
template <typename Record>
inline void LockedQueue<Record>::Enqueue(const Record& record) {
Node* n = new Node();
- CHECK(n != nullptr);
+ CHECK_NOT_NULL(n);
n->value = record;
{
base::LockGuard<base::Mutex> guard(&tail_mutex_);
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 82d24a111e..b2eb44796d 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -18,15 +18,26 @@ namespace internal {
const char* const Log::kLogToTemporaryFile = "&";
const char* const Log::kLogToConsole = "-";
-Log::Log(Logger* logger)
- : is_stopped_(false),
- output_handle_(NULL),
- message_buffer_(NULL),
- logger_(logger) {}
-
-void Log::Initialize(const char* log_file_name) {
- message_buffer_ = NewArray<char>(kMessageBufferSize);
+// static
+FILE* Log::CreateOutputHandle(const char* file_name) {
+ // If we're logging anything, we need to open the log file.
+ if (!Log::InitLogAtStart()) {
+ return nullptr;
+ } else if (strcmp(file_name, kLogToConsole) == 0) {
+ return stdout;
+ } else if (strcmp(file_name, kLogToTemporaryFile) == 0) {
+ return base::OS::OpenTemporaryFile();
+ } else {
+ return base::OS::FOpen(file_name, base::OS::LogFileOpenMode);
+ }
+}
+Log::Log(Logger* logger, const char* file_name)
+ : is_stopped_(false),
+ output_handle_(Log::CreateOutputHandle(file_name)),
+ os_(output_handle_ == nullptr ? stdout : output_handle_),
+ format_buffer_(NewArray<char>(kMessageBufferSize)),
+ logger_(logger) {
// --log-all enables all the log flags.
if (FLAG_log_all) {
FLAG_log_api = true;
@@ -35,245 +46,198 @@ void Log::Initialize(const char* log_file_name) {
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_internal_timer_events = true;
+ FLAG_log_function_events = true;
}
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
- // If we're logging anything, we need to open the log file.
- if (Log::InitLogAtStart()) {
- if (strcmp(log_file_name, kLogToConsole) == 0) {
- OpenStdout();
- } else if (strcmp(log_file_name, kLogToTemporaryFile) == 0) {
- OpenTemporaryFile();
- } else {
- OpenFile(log_file_name);
- }
-
- if (output_handle_ != nullptr) {
- Log::MessageBuilder msg(this);
- if (strlen(Version::GetEmbedder()) == 0) {
- msg.Append("v8-version,%d,%d,%d,%d,%d", Version::GetMajor(),
- Version::GetMinor(), Version::GetBuild(),
- Version::GetPatch(), Version::IsCandidate());
- } else {
- msg.Append("v8-version,%d,%d,%d,%d,%s,%d", Version::GetMajor(),
- Version::GetMinor(), Version::GetBuild(),
- Version::GetPatch(), Version::GetEmbedder(),
- Version::IsCandidate());
- }
- msg.WriteToLogFile();
- }
+ if (output_handle_ == nullptr) return;
+ Log::MessageBuilder msg(this);
+ LogSeparator kNext = LogSeparator::kSeparator;
+ msg << "v8-version" << kNext << Version::GetMajor() << kNext
+ << Version::GetMinor() << kNext << Version::GetBuild() << kNext
+ << Version::GetPatch();
+ if (strlen(Version::GetEmbedder()) != 0) {
+ msg << kNext << Version::GetEmbedder();
}
-}
-
-
-void Log::OpenStdout() {
- DCHECK(!IsEnabled());
- output_handle_ = stdout;
-}
-
-
-void Log::OpenTemporaryFile() {
- DCHECK(!IsEnabled());
- output_handle_ = base::OS::OpenTemporaryFile();
-}
-
-
-void Log::OpenFile(const char* name) {
- DCHECK(!IsEnabled());
- output_handle_ = base::OS::FOpen(name, base::OS::LogFileOpenMode);
+ msg << kNext << Version::IsCandidate();
+ msg.WriteToLogFile();
}
FILE* Log::Close() {
- FILE* result = NULL;
- if (output_handle_ != NULL) {
+ FILE* result = nullptr;
+ if (output_handle_ != nullptr) {
if (strcmp(FLAG_logfile, kLogToTemporaryFile) != 0) {
fclose(output_handle_);
} else {
result = output_handle_;
}
}
- output_handle_ = NULL;
+ output_handle_ = nullptr;
- DeleteArray(message_buffer_);
- message_buffer_ = NULL;
+ DeleteArray(format_buffer_);
+ format_buffer_ = nullptr;
is_stopped_ = false;
return result;
}
-
Log::MessageBuilder::MessageBuilder(Log* log)
- : log_(log),
- lock_guard_(&log_->mutex_),
- pos_(0) {
- DCHECK(log_->message_buffer_ != NULL);
+ : log_(log), lock_guard_(&log_->mutex_) {
+ DCHECK_NOT_NULL(log_->format_buffer_);
}
void Log::MessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
va_list args;
va_start(args, format);
AppendVA(format, args);
va_end(args);
- DCHECK(pos_ <= Log::kMessageBufferSize);
}
void Log::MessageBuilder::AppendVA(const char* format, va_list args) {
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- int result = v8::internal::VSNPrintF(buf, format, args);
-
- // Result is -1 if output was truncated.
- if (result >= 0) {
- pos_ += result;
- } else {
- pos_ = Log::kMessageBufferSize;
- }
- DCHECK(pos_ <= Log::kMessageBufferSize);
+ Vector<char> buf(log_->format_buffer_, Log::kMessageBufferSize);
+ int length = v8::internal::VSNPrintF(buf, format, args);
+ // {length} is -1 if output was truncated.
+ if (length == -1) length = Log::kMessageBufferSize;
+ DCHECK_LE(length, Log::kMessageBufferSize);
+ AppendStringPart(log_->format_buffer_, length);
}
-
-void Log::MessageBuilder::Append(const char c) {
- if (pos_ < Log::kMessageBufferSize) {
- log_->message_buffer_[pos_++] = c;
- }
- DCHECK(pos_ <= Log::kMessageBufferSize);
-}
-
-
-void Log::MessageBuilder::AppendDoubleQuotedString(const char* string) {
- Append('"');
- for (const char* p = string; *p != '\0'; p++) {
- if (*p == '"') {
- Append('\\');
- }
- Append(*p);
+void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
+ DCHECK(symbol);
+ OFStream& os = log_->os_;
+ os << "symbol(";
+ if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
+ os << "\"";
+ AppendDetailed(String::cast(symbol->name()), false);
+ os << "\" ";
}
- Append('"');
+ os << "hash " << std::hex << symbol->Hash() << std::dec << ")";
}
-
-void Log::MessageBuilder::Append(String* str) {
+void Log::MessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+ if (str == nullptr) return;
DisallowHeapAllocation no_gc; // Ensure string stay valid.
- int length = str->length();
- for (int i = 0; i < length; i++) {
- Append(static_cast<char>(str->Get(i)));
+ OFStream& os = log_->os_;
+ int limit = str->length();
+ if (limit > 0x1000) limit = 0x1000;
+ if (show_impl_info) {
+ os << (str->IsOneByteRepresentation() ? 'a' : '2');
+ if (StringShape(str).IsExternal()) os << 'e';
+ if (StringShape(str).IsInternalized()) os << '#';
+ os << ':' << str->length() << ':';
}
+ AppendStringPart(str, limit);
}
-void Log::MessageBuilder::AppendAddress(Address addr) {
- Append("0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(addr));
+void Log::MessageBuilder::AppendString(String* str) {
+ if (str == nullptr) return;
+ int len = str->length();
+ AppendStringPart(str, len);
}
-void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
- DCHECK(symbol);
- Append("symbol(");
- if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
- Append("\"");
- AppendDetailed(String::cast(symbol->name()), false);
- Append("\" ");
+void Log::MessageBuilder::AppendString(const char* string) {
+ if (string == nullptr) return;
+ for (const char* p = string; *p != '\0'; p++) {
+ this->AppendCharacter(*p);
}
- Append("hash %x)", symbol->Hash());
}
-
-void Log::MessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
- if (str == NULL) return;
+void Log::MessageBuilder::AppendStringPart(String* str, int len) {
+ DCHECK_LE(len, str->length());
DisallowHeapAllocation no_gc; // Ensure string stay valid.
- int len = str->length();
- if (len > 0x1000)
- len = 0x1000;
- if (show_impl_info) {
- Append(str->IsOneByteRepresentation() ? 'a' : '2');
- if (StringShape(str).IsExternal())
- Append('e');
- if (StringShape(str).IsInternalized())
- Append('#');
- Append(":%i:", str->length());
- }
+ // TODO(cbruni): unify escaping.
for (int i = 0; i < len; i++) {
uc32 c = str->Get(i);
- if (c > 0xff) {
- Append("\\u%04x", c);
- } else if (c < 32 || c > 126) {
- Append("\\x%02x", c);
- } else if (c == ',') {
- Append("\\,");
- } else if (c == '\\') {
- Append("\\\\");
- } else if (c == '\"') {
- Append("\"\"");
+ if (c <= 0xff) {
+ AppendCharacter(static_cast<char>(c));
} else {
- Append("%lc", c);
+ // Escape any non-ascii range characters.
+ Append("\\u%04x", c);
}
}
}
-void Log::MessageBuilder::AppendUnbufferedHeapString(String* str) {
- if (str == NULL) return;
- DisallowHeapAllocation no_gc; // Ensure string stay valid.
- ScopedVector<char> buffer(16);
- int len = str->length();
- for (int i = 0; i < len; i++) {
- uc32 c = str->Get(i);
- if (c >= 32 && c <= 126) {
- if (c == '\"') {
- AppendUnbufferedCString("\"\"");
- } else if (c == '\\') {
- AppendUnbufferedCString("\\\\");
- } else {
- AppendUnbufferedChar(c);
- }
- } else if (c > 0xff) {
- int length = v8::internal::SNPrintF(buffer, "\\u%04x", c);
- DCHECK_EQ(6, length);
- log_->WriteToFile(buffer.start(), length);
+void Log::MessageBuilder::AppendStringPart(const char* str, size_t len) {
+ for (size_t i = 0; i < len; i++) {
+ DCHECK_NE(str[i], '\0');
+ this->AppendCharacter(str[i]);
+ }
+}
+
+void Log::MessageBuilder::AppendCharacter(char c) {
+ OFStream& os = log_->os_;
+  // A log entry (separated by commas) cannot contain commas or line-breaks.
+ if (c >= 32 && c <= 126) {
+ if (c == ',') {
+ // Escape commas (log field separator) directly.
+ os << "\x2c";
} else {
- DCHECK(c <= 0xffff);
- int length = v8::internal::SNPrintF(buffer, "\\x%02x", c);
- DCHECK_EQ(4, length);
- log_->WriteToFile(buffer.start(), length);
+ // Directly append any printable ascii character.
+ os << c;
}
+ } else if (c == '\n') {
+ os << "\\n";
+ } else {
+ // Escape any non-printable characters.
+ Append("\\x%02x", c);
}
}
-void Log::MessageBuilder::AppendUnbufferedChar(char c) {
- log_->WriteToFile(&c, 1);
+void Log::MessageBuilder::WriteToLogFile() { log_->os_ << std::endl; }
+
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<const char*>(
+ const char* string) {
+ this->AppendString(string);
+ return *this;
}
-void Log::MessageBuilder::AppendUnbufferedCString(const char* str) {
- log_->WriteToFile(str, static_cast<int>(strlen(str)));
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<void*>(void* pointer) {
+ OFStream& os = log_->os_;
+ // Manually format the pointer since on Windows we do not consistently
+ // get a "0x" prefix.
+ os << "0x" << std::hex << reinterpret_cast<intptr_t>(pointer) << std::dec;
+ return *this;
}
-void Log::MessageBuilder::AppendStringPart(const char* str, int len) {
- if (pos_ + len > Log::kMessageBufferSize) {
- len = Log::kMessageBufferSize - pos_;
- DCHECK(len >= 0);
- if (len == 0) return;
- }
- Vector<char> buf(log_->message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- StrNCpy(buf, str, len);
- pos_ += len;
- DCHECK(pos_ <= Log::kMessageBufferSize);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<char>(char c) {
+ this->AppendCharacter(c);
+ return *this;
+}
+
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<String*>(String* string) {
+ this->AppendString(string);
+ return *this;
+}
+
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol*>(Symbol* symbol) {
+ this->AppendSymbolName(symbol);
+ return *this;
}
-void Log::MessageBuilder::WriteToLogFile() {
- DCHECK(pos_ <= Log::kMessageBufferSize);
- // Assert that we do not already have a new line at the end.
- DCHECK(pos_ == 0 || log_->message_buffer_[pos_ - 1] != '\n');
- if (pos_ == Log::kMessageBufferSize) pos_--;
- log_->message_buffer_[pos_++] = '\n';
- const int written = log_->WriteToFile(log_->message_buffer_, pos_);
- if (written != pos_) {
- log_->stop();
- log_->logger_->LogFailure();
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Name*>(Name* name) {
+ if (name->IsString()) {
+ this->AppendString(String::cast(name));
+ } else {
+ this->AppendSymbolName(Symbol::cast(name));
}
+ return *this;
+}
+
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<LogSeparator>(
+ LogSeparator separator) {
+ log_->os_ << ',';
+ return *this;
}
} // namespace internal
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 991e9c252b..99ed03f34a 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -13,18 +13,19 @@
#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
class Logger;
+enum class LogSeparator { kSeparator };
+
// Functions and data for performing output of log messages.
class Log {
public:
- // Performs process-wide initialization.
- void Initialize(const char* log_file_name);
-
+ Log(Logger* log, const char* log_file_name);
// Disables logging, but preserves acquired resources.
void stop() { is_stopped_ = true; }
@@ -32,7 +33,8 @@ class Log {
return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
FLAG_perf_basic_prof || FLAG_perf_prof || FLAG_log_source_code ||
- FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic;
+ FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic ||
+ FLAG_log_function_events;
}
// Frees all resources acquired in Initialize and Open... functions.
@@ -41,9 +43,7 @@ class Log {
FILE* Close();
// Returns whether logging is enabled.
- bool IsEnabled() {
- return !is_stopped_ && output_handle_ != NULL;
- }
+ bool IsEnabled() { return !is_stopped_ && output_handle_ != nullptr; }
// Size of buffer used for formatting log messages.
static const int kMessageBufferSize = 2048;
@@ -68,59 +68,44 @@ class Log {
// Append string data to the log message.
void PRINTF_FORMAT(2, 0) AppendVA(const char* format, va_list args);
- // Append a character to the log message.
- void Append(const char c);
-
- // Append double quoted string to the log message.
- void AppendDoubleQuotedString(const char* string);
-
- // Append a heap string.
- void Append(String* str);
-
- // Appends an address.
- void AppendAddress(Address addr);
-
void AppendSymbolName(Symbol* symbol);
void AppendDetailed(String* str, bool show_impl_info);
- // Append a portion of a string.
- void AppendStringPart(const char* str, int len);
+ // Append and escape a full string.
+ void AppendString(String* source);
+ void AppendString(const char* string);
+
+  // Append and escape a portion of a string.
+ void AppendStringPart(String* source, int len);
+ void AppendStringPart(const char* str, size_t len);
+
+ void AppendCharacter(const char character);
- // Helpers for appending char, C-string and heap string without
- // buffering. This is useful for entries that can exceed the 2kB
- // limit.
- void AppendUnbufferedChar(char c);
- void AppendUnbufferedCString(const char* str);
- void AppendUnbufferedHeapString(String* source);
+ // Delegate insertion to the underlying {log_}.
+  // All appended strings are escaped to maintain one-line log entries.
+ template <typename T>
+ MessageBuilder& operator<<(T value) {
+ log_->os_ << value;
+ return *this;
+ }
- // Write the log message to the log file currently opened.
+  // Finish the current log line and flush it to the log file.
void WriteToLogFile();
private:
Log* log_;
base::LockGuard<base::Mutex> lock_guard_;
- int pos_;
};
private:
- explicit Log(Logger* logger);
-
- // Opens stdout for logging.
- void OpenStdout();
-
- // Opens file for logging.
- void OpenFile(const char* name);
-
- // Opens a temporary file for logging.
- void OpenTemporaryFile();
+ static FILE* CreateOutputHandle(const char* file_name);
// Implementation of writing to a log file.
int WriteToFile(const char* msg, int length) {
DCHECK_NOT_NULL(output_handle_);
- size_t rv = fwrite(msg, 1, length, output_handle_);
- DCHECK_EQ(length, rv);
- USE(rv);
+ os_.write(msg, length);
+ DCHECK(!os_.bad());
return length;
}
@@ -130,6 +115,7 @@ class Log {
// When logging is active output_handle_ is used to store a pointer to log
// destination. mutex_ should be acquired before using output_handle_.
FILE* output_handle_;
+ OFStream os_;
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
@@ -137,13 +123,29 @@ class Log {
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
- char* message_buffer_;
+ char* format_buffer_;
Logger* logger_;
friend class Logger;
};
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<LogSeparator>(
+ LogSeparator separator);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<void*>(void* pointer);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<const char*>(
+ const char* string);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<char>(char c);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<String*>(String* string);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol*>(Symbol* symbol);
+template <>
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Name*>(Name* name);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 006acf71b9..b529df7bbe 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -20,7 +20,6 @@
#include "src/interpreter/interpreter.h"
#include "src/libsampler/sampler.h"
#include "src/log-inl.h"
-#include "src/log-utils.h"
#include "src/macro-assembler.h"
#include "src/perf-jit.h"
#include "src/profiler/profiler-listener.h"
@@ -32,6 +31,9 @@
#include "src/unicode-inl.h"
#include "src/vm-state-inl.h"
+#include "src/utils.h"
+#include "src/version.h"
+
namespace v8 {
namespace internal {
@@ -85,7 +87,7 @@ class CodeEventLogger::NameBuffer {
}
void AppendString(String* str) {
- if (str == NULL) return;
+ if (str == nullptr) return;
int uc16_length = Min(str->length(), kUtf16BufferSize);
String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
int previous = unibrow::Utf16::kNoPreviousCharacter;
@@ -159,14 +161,14 @@ void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, const char* comment) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(comment);
- LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+ LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
}
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, Name* name) {
name_buffer_->Init(tag);
name_buffer_->AppendName(name);
- LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+ LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
}
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
@@ -198,18 +200,11 @@ void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, int args_count) {
- name_buffer_->Init(tag);
- name_buffer_->AppendInt(args_count);
- LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
-}
-
void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
String* source) {
name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
name_buffer_->AppendString(source);
- LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
+ LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
}
@@ -238,8 +233,7 @@ const char PerfBasicLogger::kFilenameFormatString[] = "/tmp/perf-%d.map";
// Extra space for the PID in the filename
const int PerfBasicLogger::kFilenameBufferPadding = 16;
-PerfBasicLogger::PerfBasicLogger()
- : perf_output_handle_(NULL) {
+PerfBasicLogger::PerfBasicLogger() : perf_output_handle_(nullptr) {
// Open the perf JIT dump file.
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
ScopedVector<char> perf_dump_name(bufferSize);
@@ -251,13 +245,13 @@ PerfBasicLogger::PerfBasicLogger()
perf_output_handle_ =
base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
CHECK_NOT_NULL(perf_output_handle_);
- setvbuf(perf_output_handle_, NULL, _IOLBF, 0);
+ setvbuf(perf_output_handle_, nullptr, _IOLBF, 0);
}
PerfBasicLogger::~PerfBasicLogger() {
fclose(perf_output_handle_);
- perf_output_handle_ = NULL;
+ perf_output_handle_ = nullptr;
}
void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
@@ -336,8 +330,7 @@ class LowLevelLogger : public CodeEventLogger {
const char LowLevelLogger::kLogExt[] = ".ll";
-LowLevelLogger::LowLevelLogger(const char* name)
- : ll_output_handle_(NULL) {
+LowLevelLogger::LowLevelLogger(const char* name) : ll_output_handle_(nullptr) {
// Open the low-level log file.
size_t len = strlen(name);
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
@@ -345,7 +338,7 @@ LowLevelLogger::LowLevelLogger(const char* name)
MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
ll_output_handle_ =
base::OS::FOpen(ll_name.start(), base::OS::LogFileOpenMode);
- setvbuf(ll_output_handle_, NULL, _IOLBF, 0);
+ setvbuf(ll_output_handle_, nullptr, _IOLBF, 0);
LogCodeInfo();
}
@@ -353,7 +346,7 @@ LowLevelLogger::LowLevelLogger(const char* name)
LowLevelLogger::~LowLevelLogger() {
fclose(ll_output_handle_);
- ll_output_handle_ = NULL;
+ ll_output_handle_ = nullptr;
}
@@ -427,7 +420,7 @@ class JitLogger : public CodeEventLogger {
JitCodeEvent::PositionType position_type);
void* StartCodePosInfoEvent();
- void EndCodePosInfoEvent(AbstractCode* code, void* jit_handler_data);
+ void EndCodePosInfoEvent(Address start_address, void* jit_handler_data);
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
@@ -503,12 +496,12 @@ void* JitLogger::StartCodePosInfoEvent() {
return event.user_data;
}
-void JitLogger::EndCodePosInfoEvent(AbstractCode* code,
+void JitLogger::EndCodePosInfoEvent(Address start_address,
void* jit_handler_data) {
JitCodeEvent event;
memset(&event, 0, sizeof(event));
event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
- event.code_start = code->instruction_start();
+ event.code_start = start_address;
event.user_data = jit_handler_data;
code_event_handler_(&event);
@@ -569,8 +562,8 @@ class Profiler: public base::Thread {
virtual void Run();
// Pause and Resume TickSample data collection.
- void pause() { paused_ = true; }
- void resume() { paused_ = false; }
+ void Pause() { paused_ = true; }
+ void Resume() { paused_ = false; }
private:
// Waits for a signal and removes profiling data.
@@ -595,7 +588,7 @@ class Profiler: public base::Thread {
int head_; // Index to the buffer head.
base::Atomic32 tail_; // Index to the buffer tail.
bool overflow_; // Tell whether a buffer overflow has occurred.
- // Sempahore used for buffer synchronization.
+ // Semaphore used for buffer synchronization.
base::Semaphore buffer_semaphore_;
// Tells whether profiler is engaged, that is, processing thread is stated.
@@ -626,7 +619,7 @@ class Ticker: public sampler::Sampler {
}
void SetProfiler(Profiler* profiler) {
- DCHECK(profiler_ == nullptr);
+ DCHECK_NULL(profiler_);
profiler_ = profiler;
IncreaseProfilingDepth();
if (!IsActive()) Start();
@@ -653,9 +646,8 @@ class Ticker: public sampler::Sampler {
SamplingThread* sampling_thread_;
};
-
//
-// Profiler implementation.
+// Profiler implementation when invoking with --prof.
//
Profiler::Profiler(Isolate* isolate)
: base::Thread(Options("v8:Profiler")),
@@ -676,10 +668,9 @@ void Profiler::Engage() {
std::vector<base::OS::SharedLibraryAddress> addresses =
base::OS::GetSharedLibraryAddresses();
- for (size_t i = 0; i < addresses.size(); ++i) {
- LOG(isolate_,
- SharedLibraryEvent(addresses[i].library_path, addresses[i].start,
- addresses[i].end, addresses[i].aslr_slide));
+ for (const auto& address : addresses) {
+ LOG(isolate_, SharedLibraryEvent(address.library_path, address.start,
+ address.end, address.aslr_slide));
}
// Start thread processing the profiler buffer.
@@ -706,7 +697,7 @@ void Profiler::Disengage() {
base::Relaxed_Store(&running_, 0);
v8::TickSample sample;
// Reset 'paused_' flag, otherwise semaphore may not be signalled.
- resume();
+ Resume();
Insert(&sample);
Join();
@@ -730,15 +721,15 @@ void Profiler::Run() {
Logger::Logger(Isolate* isolate)
: isolate_(isolate),
- ticker_(NULL),
- profiler_(NULL),
- log_events_(NULL),
+ ticker_(nullptr),
+ profiler_(nullptr),
+ log_events_(nullptr),
is_logging_(false),
- log_(new Log(this)),
- perf_basic_logger_(NULL),
- perf_jit_logger_(NULL),
- ll_logger_(NULL),
- jit_logger_(NULL),
+ log_(nullptr),
+ perf_basic_logger_(nullptr),
+ perf_jit_logger_(nullptr),
+ ll_logger_(nullptr),
+ jit_logger_(nullptr),
is_initialized_(false) {}
Logger::~Logger() {
@@ -758,7 +749,7 @@ void Logger::removeCodeEventListener(CodeEventListener* listener) {
void Logger::ProfilerBeginEvent() {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("profiler,\"begin\",%d", FLAG_prof_sampling_interval);
+ msg << "profiler" << kNext << "begin" << kNext << FLAG_prof_sampling_interval;
msg.WriteToLogFile();
}
@@ -771,33 +762,21 @@ void Logger::StringEvent(const char* name, const char* value) {
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,\"%s\"", name, value);
+ msg << name << kNext << value;
msg.WriteToLogFile();
}
-void Logger::IntEvent(const char* name, int value) {
- if (FLAG_log) UncheckedIntEvent(name, value);
-}
-
-
void Logger::IntPtrTEvent(const char* name, intptr_t value) {
if (FLAG_log) UncheckedIntPtrTEvent(name, value);
}
-void Logger::UncheckedIntEvent(const char* name, int value) {
- if (!log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
- msg.Append("%s,%d", name, value);
- msg.WriteToLogFile();
-}
-
-
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,%" V8PRIdPTR, name, value);
+ msg << name << kNext;
+ msg.Append("%" V8PRIdPTR, value);
msg.WriteToLogFile();
}
@@ -805,28 +784,16 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
void Logger::HandleEvent(const char* name, Object** location) {
if (!log_->IsEnabled() || !FLAG_log_handles) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,%p", name, static_cast<void*>(location));
- msg.WriteToLogFile();
-}
-
-
-// ApiEvent is private so all the calls come from the Logger class. It is the
-// caller's responsibility to ensure that log is enabled and that
-// FLAG_log_api is true.
-void Logger::ApiEvent(const char* format, ...) {
- DCHECK(log_->IsEnabled() && FLAG_log_api);
- Log::MessageBuilder msg(log_);
- va_list ap;
- va_start(ap, format);
- msg.AppendVA(format, ap);
- va_end(ap);
+ msg << name << kNext << static_cast<void*>(location);
msg.WriteToLogFile();
}
void Logger::ApiSecurityCheck() {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,check-security");
+ Log::MessageBuilder msg(log_);
+ msg << "api" << kNext << "check-security";
+ msg.WriteToLogFile();
}
void Logger::SharedLibraryEvent(const std::string& library_path,
@@ -834,9 +801,9 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
intptr_t aslr_slide) {
if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
Log::MessageBuilder msg(log_);
- msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR
- ",%" V8PRIdPTR,
- library_path.c_str(), start, end, aslr_slide);
+ msg << "shared-library" << kNext << library_path.c_str() << kNext
+ << reinterpret_cast<void*>(start) << kNext << reinterpret_cast<void*>(end)
+ << kNext << aslr_slide;
msg.WriteToLogFile();
}
@@ -845,11 +812,9 @@ void Logger::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
if (!log_->IsEnabled()) return;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
Log::MessageBuilder msg(log_);
- int since_epoch = timer_.IsStarted()
- ? static_cast<int>(timer_.Elapsed().InMicroseconds())
- : -1;
- msg.Append("code-deopt,%d,%d,", since_epoch, code->CodeSize());
- msg.AppendAddress(code->instruction_start());
+ msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext
+ << code->CodeSize() << kNext
+ << reinterpret_cast<void*>(code->instruction_start());
// Deoptimization position.
std::ostringstream deopt_location;
@@ -862,21 +827,20 @@ void Logger::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
} else {
deopt_location << "<unknown>";
}
- msg.Append(",%d,%d,", inlining_id, script_offset);
+ msg << kNext << inlining_id << kNext << script_offset << kNext;
switch (kind) {
case kLazy:
- msg.Append("\"lazy\",");
+ msg << "lazy" << kNext;
break;
case kSoft:
- msg.Append("\"soft\",");
+ msg << "soft" << kNext;
break;
case kEager:
- msg.Append("\"eager\",");
+ msg << "eager" << kNext;
break;
}
- msg.AppendDoubleQuotedString(deopt_location.str().c_str());
- msg.Append(",");
- msg.AppendDoubleQuotedString(DeoptimizeReasonToString(info.deopt_reason));
+ msg << deopt_location.str().c_str() << kNext
+ << DeoptimizeReasonToString(info.deopt_reason);
msg.WriteToLogFile();
}
@@ -885,32 +849,39 @@ void Logger::CurrentTimeEvent() {
if (!log_->IsEnabled()) return;
DCHECK(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
- msg.Append("current-time,%d", since_epoch);
+ msg << "current-time" << kNext << timer_.Elapsed().InMicroseconds();
msg.WriteToLogFile();
}
void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
- DCHECK(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
- const char* format = (se == START) ? "timer-event-start,\"%s\",%ld"
- : "timer-event-end,\"%s\",%ld";
- msg.Append(format, name, since_epoch);
+ switch (se) {
+ case START:
+ msg << "timer-event-start";
+ break;
+ case END:
+ msg << "timer-event-end";
+ break;
+ case STAMP:
+ msg << "timer-event";
+ }
+ msg << kNext << name << kNext << timer_.Elapsed().InMicroseconds();
msg.WriteToLogFile();
}
-
+// static
void Logger::EnterExternal(Isolate* isolate) {
+ DCHECK(FLAG_log_internal_timer_events);
LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
DCHECK(isolate->current_vm_state() == JS);
isolate->set_current_vm_state(EXTERNAL);
}
-
+// static
void Logger::LeaveExternal(Isolate* isolate) {
+ DCHECK(FLAG_log_internal_timer_events);
LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
DCHECK(isolate->current_vm_state() == EXTERNAL);
isolate->set_current_vm_state(JS);
@@ -923,64 +894,48 @@ void Logger::LeaveExternal(Isolate* isolate) {
TIMER_EVENTS_LIST(V)
#undef V
-void Logger::ApiNamedPropertyAccess(const char* tag,
- JSObject* holder,
- Object* name) {
- DCHECK(name->IsName());
+void Logger::ApiNamedPropertyAccess(const char* tag, JSObject* holder,
+ Object* property_name) {
+ DCHECK(property_name->IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = holder->class_name();
- std::unique_ptr<char[]> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- if (name->IsString()) {
- std::unique_ptr<char[]> property_name =
- String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",\"%s\"", tag, class_name.get(),
- property_name.get());
- } else {
- Symbol* symbol = Symbol::cast(name);
- uint32_t hash = symbol->Hash();
- if (symbol->name()->IsUndefined(symbol->GetIsolate())) {
- ApiEvent("api,%s,\"%s\",symbol(hash %x)", tag, class_name.get(), hash);
- } else {
- std::unique_ptr<char[]> str =
- String::cast(symbol->name())
- ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)", tag, class_name.get(),
- str.get(), hash);
- }
- }
+ Log::MessageBuilder msg(log_);
+ msg << "api" << kNext << tag << kNext << holder->class_name() << kNext
+ << Name::cast(property_name);
+ msg.WriteToLogFile();
}
void Logger::ApiIndexedPropertyAccess(const char* tag,
JSObject* holder,
uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = holder->class_name();
- std::unique_ptr<char[]> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",%u", tag, class_name.get(), index);
+ Log::MessageBuilder msg(log_);
+ msg << "api" << kNext << tag << kNext << holder->class_name() << kNext
+ << index;
+ msg.WriteToLogFile();
}
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- String* class_name_obj = object->class_name();
- std::unique_ptr<char[]> class_name =
- class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\"", tag, class_name.get());
+ Log::MessageBuilder msg(log_);
+ msg << "api" << kNext << tag << kNext << object->class_name();
+ msg.WriteToLogFile();
}
void Logger::ApiEntryCall(const char* name) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,%s", name);
+ Log::MessageBuilder msg(log_);
+ msg << "api" << kNext << name;
+ msg.WriteToLogFile();
}
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
- msg.Append("new,%s,%p,%u", name, object, static_cast<unsigned int>(size));
+ msg << "new" << kNext << name << kNext << object << kNext
+ << static_cast<unsigned int>(size);
msg.WriteToLogFile();
}
@@ -988,7 +943,7 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
- msg.Append("delete,%s,%p", name, object);
+ msg << "delete" << kNext << name << kNext << object;
msg.WriteToLogFile();
}
@@ -997,30 +952,11 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
Address entry_point) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,%s,-2,",
- kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT],
- kLogEventsNames[CodeEventListener::CALLBACK_TAG]);
- int timestamp = timer_.IsStarted()
- ? static_cast<int>(timer_.Elapsed().InMicroseconds())
- : -1;
- msg.Append("%d,", timestamp);
- msg.AppendAddress(entry_point);
- if (name->IsString()) {
- std::unique_ptr<char[]> str =
- String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,\"%s%s\"", prefix, str.get());
- } else {
- Symbol* symbol = Symbol::cast(name);
- if (symbol->name()->IsUndefined(symbol->GetIsolate())) {
- msg.Append(",1,symbol(hash %x)", symbol->Hash());
- } else {
- std::unique_ptr<char[]> str =
- String::cast(symbol->name())
- ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,symbol(\"%s%s\" hash %x)", prefix, str.get(),
- symbol->Hash());
- }
- }
+ msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT] << kNext
+ << kLogEventsNames[CodeEventListener::CALLBACK_TAG] << kNext << -2
+ << kNext << timer_.Elapsed().InMicroseconds() << kNext
+ << reinterpret_cast<void*>(entry_point) << kNext << 1 << kNext << prefix
+ << name;
msg.WriteToLogFile();
}
@@ -1041,19 +977,14 @@ void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
namespace {
-void AppendCodeCreateHeader(Log::MessageBuilder* msg,
+void AppendCodeCreateHeader(Log::MessageBuilder& msg,
CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, base::ElapsedTimer* timer) {
- DCHECK(msg);
- msg->Append("%s,%s,%d,",
- kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT],
- kLogEventsNames[tag], code->kind());
- int timestamp = timer->IsStarted()
- ? static_cast<int>(timer->Elapsed().InMicroseconds())
- : -1;
- msg->Append("%d,", timestamp);
- msg->AppendAddress(code->instruction_start());
- msg->Append(",%d,", code->instruction_size());
+ msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
+ << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << code->kind()
+ << Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
+ << reinterpret_cast<void*>(code->instruction_start()) << Logger::kNext
+ << code->instruction_size() << Logger::kNext;
}
} // namespace
@@ -1063,8 +994,8 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (!is_logging_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, tag, code, &timer_);
- msg.AppendDoubleQuotedString(comment);
+ AppendCodeCreateHeader(msg, tag, code, &timer_);
+ msg << comment;
msg.WriteToLogFile();
}
@@ -1073,14 +1004,8 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (!is_logging_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, tag, code, &timer_);
- if (name->IsString()) {
- msg.Append('"');
- msg.AppendDetailed(String::cast(name), false);
- msg.Append('"');
- } else {
- msg.AppendSymbolName(Symbol::cast(name));
- }
+ AppendCodeCreateHeader(msg, tag, code, &timer_);
+ msg << name;
msg.WriteToLogFile();
}
@@ -1095,17 +1020,9 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, tag, code, &timer_);
- if (name->IsString()) {
- std::unique_ptr<char[]> str =
- String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s\"", str.get());
- } else {
- msg.AppendSymbolName(Symbol::cast(name));
- }
- msg.Append(',');
- msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(shared, code));
+ AppendCodeCreateHeader(msg, tag, code, &timer_);
+ msg << name << kNext << reinterpret_cast<void*>(shared->address()) << kNext
+ << ComputeMarker(shared, code);
msg.WriteToLogFile();
}
@@ -1119,149 +1036,116 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (!is_logging_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
- {
- Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, tag, code, &timer_);
- std::unique_ptr<char[]> name =
- shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s ", name.get());
- if (source->IsString()) {
- std::unique_ptr<char[]> sourcestr = String::cast(source)->ToCString(
- DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s", sourcestr.get());
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(msg, tag, code, &timer_);
+ msg << shared->DebugName() << " " << source << ":" << line << ":" << column
+ << kNext << reinterpret_cast<void*>(shared->address()) << kNext
+ << ComputeMarker(shared, code);
+ msg.WriteToLogFile();
+
+ if (!FLAG_log_source_code) return;
+ Object* script_object = shared->script();
+ if (!script_object->IsScript()) return;
+ // Make sure the script is written to the log file.
+ Script* script = Script::cast(script_object);
+ int script_id = script->id();
+ if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
+ return;
+ }
+
+ // This script has not been logged yet.
+ logged_source_code_.insert(script_id);
+ Object* source_object = script->source();
+ if (source_object->IsString()) {
+ String* source_code = String::cast(source_object);
+ msg << "script" << kNext << script_id << kNext;
+
+ // Log the script name.
+ if (script->name()->IsString()) {
+ msg << String::cast(script->name()) << kNext;
} else {
- msg.AppendSymbolName(Symbol::cast(source));
+ msg << "<unknown>" << kNext;
}
- msg.Append(":%d:%d\",", line, column);
- msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(shared, code));
+
+ // Log the source code.
+ msg << source_code;
msg.WriteToLogFile();
}
- if (FLAG_log_source_code) {
- Object* script_object = shared->script();
- if (script_object->IsScript()) {
- // Make sure the script is written to the log file.
- std::ostringstream os;
- Script* script = Script::cast(script_object);
- int script_id = script->id();
- if (logged_source_code_.find(script_id) == logged_source_code_.end()) {
- // This script has not been logged yet.
- logged_source_code_.insert(script_id);
- Object* source_object = script->source();
- if (source_object->IsString()) {
- Log::MessageBuilder msg(log_);
- String* source_code = String::cast(source_object);
- os << "script," << script_id << ",\"";
- msg.AppendUnbufferedCString(os.str().c_str());
-
- // Log the script name.
- if (script->name()->IsString()) {
- msg.AppendUnbufferedHeapString(String::cast(script->name()));
- msg.AppendUnbufferedCString("\",\"");
- } else {
- msg.AppendUnbufferedCString("<unknown>\",\"");
- }
-
- // Log the source code.
- msg.AppendUnbufferedHeapString(source_code);
- os.str("");
- os << "\"" << std::endl;
- msg.AppendUnbufferedCString(os.str().c_str());
- os.str("");
- }
- }
-
- // We log source code information in the form:
- //
- // code-source-info <addr>,<script>,<start>,<end>,<pos>,<inline-pos>,<fns>
- //
- // where
- // <addr> is code object address
- // <script> is script id
- // <start> is the starting position inside the script
- // <end> is the end position inside the script
- // <pos> is source position table encoded in the string,
- // it is a sequence of C<code-offset>O<script-offset>[I<inlining-id>]
- // where
- // <code-offset> is the offset within the code object
- // <script-offset> is the position within the script
- // <inlining-id> is the offset in the <inlining> table
- // <inlining> table is a sequence of strings of the form
- // F<function-id>O<script-offset>[I<inlining-id>
- // where
- // <function-id> is an index into the <fns> function table
- // <fns> is the function table encoded as a sequence of strings
- // S<shared-function-info-address>
- os << "code-source-info," << static_cast<void*>(code->instruction_start())
- << "," << script_id << "," << shared->start_position() << ","
- << shared->end_position() << ",";
-
- SourcePositionTableIterator iterator(code->source_position_table());
- bool is_first = true;
- bool hasInlined = false;
- for (; !iterator.done(); iterator.Advance()) {
- if (is_first) {
- is_first = false;
- }
- SourcePosition pos = iterator.source_position();
- os << "C" << iterator.code_offset();
- os << "O" << pos.ScriptOffset();
- if (pos.isInlined()) {
- os << "I" << pos.InliningId();
- hasInlined = true;
- }
- }
- os << ",";
- int maxInlinedId = -1;
- if (hasInlined) {
- PodArray<InliningPosition>* inlining_positions =
- DeoptimizationInputData::cast(
- Code::cast(code)->deoptimization_data())
- ->InliningPositions();
- for (int i = 0; i < inlining_positions->length(); i++) {
- InliningPosition inlining_pos = inlining_positions->get(i);
- os << "F";
- if (inlining_pos.inlined_function_id != -1) {
- os << inlining_pos.inlined_function_id;
- if (inlining_pos.inlined_function_id > maxInlinedId) {
- maxInlinedId = inlining_pos.inlined_function_id;
- }
- }
- SourcePosition pos = inlining_pos.position;
- os << "O" << pos.ScriptOffset();
- if (pos.isInlined()) {
- os << "I" << pos.InliningId();
- }
+ // We log source code information in the form:
+ //
+ // code-source-info <addr>,<script>,<start>,<end>,<pos>,<inline-pos>,<fns>
+ //
+ // where
+ // <addr> is code object address
+ // <script> is script id
+ // <start> is the starting position inside the script
+ // <end> is the end position inside the script
+ // <pos> is source position table encoded in the string,
+ // it is a sequence of C<code-offset>O<script-offset>[I<inlining-id>]
+ // where
+ // <code-offset> is the offset within the code object
+ // <script-offset> is the position within the script
+ // <inlining-id> is the offset in the <inlining> table
+ // <inlining> table is a sequence of strings of the form
+ // F<function-id>O<script-offset>[I<inlining-id>
+ // where
+ // <function-id> is an index into the <fns> function table
+ // <fns> is the function table encoded as a sequence of strings
+ // S<shared-function-info-address>
+ msg << "code-source-info" << kNext
+ << static_cast<void*>(code->instruction_start()) << kNext << script_id
+ << kNext << shared->start_position() << kNext << shared->end_position()
+ << kNext;
+
+ SourcePositionTableIterator iterator(code->source_position_table());
+ bool is_first = true;
+ bool hasInlined = false;
+ for (; !iterator.done(); iterator.Advance()) {
+ if (is_first) {
+ is_first = false;
+ }
+ SourcePosition pos = iterator.source_position();
+ msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
+ if (pos.isInlined()) {
+ msg << "I" << pos.InliningId();
+ hasInlined = true;
+ }
+ }
+ msg << kNext;
+ int maxInlinedId = -1;
+ if (hasInlined) {
+ PodArray<InliningPosition>* inlining_positions =
+ DeoptimizationData::cast(Code::cast(code)->deoptimization_data())
+ ->InliningPositions();
+ for (int i = 0; i < inlining_positions->length(); i++) {
+ InliningPosition inlining_pos = inlining_positions->get(i);
+ msg << "F";
+ if (inlining_pos.inlined_function_id != -1) {
+ msg << inlining_pos.inlined_function_id;
+ if (inlining_pos.inlined_function_id > maxInlinedId) {
+ maxInlinedId = inlining_pos.inlined_function_id;
}
}
- os << ",";
- if (hasInlined) {
- DeoptimizationInputData* deopt_data = DeoptimizationInputData::cast(
- Code::cast(code)->deoptimization_data());
-
- os << std::hex;
- for (int i = 0; i <= maxInlinedId; i++) {
- os << "S"
- << static_cast<void*>(
- deopt_data->GetInlinedFunction(i)->address());
- }
- os << std::dec;
+ SourcePosition pos = inlining_pos.position;
+ msg << "O" << pos.ScriptOffset();
+ if (pos.isInlined()) {
+ msg << "I" << pos.InliningId();
}
- os << std::endl;
- Log::MessageBuilder msg(log_);
- msg.AppendUnbufferedCString(os.str().c_str());
}
}
-}
+ msg << kNext;
+ if (hasInlined) {
+ DeoptimizationData* deopt_data =
+ DeoptimizationData::cast(Code::cast(code)->deoptimization_data());
-void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, int args_count) {
- if (!is_logging_code_events()) return;
- if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, tag, code, &timer_);
- msg.Append("\"args_count: %d\"", args_count);
+ msg << std::hex;
+ for (int i = 0; i <= maxInlinedId; i++) {
+ msg << "S"
+ << static_cast<void*>(deopt_data->GetInlinedFunction(i)->address());
+ }
+ msg << std::dec;
+ }
msg.WriteToLogFile();
}
@@ -1270,11 +1154,9 @@ void Logger::CodeDisableOptEvent(AbstractCode* code,
if (!is_logging_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT]);
- std::unique_ptr<char[]> name =
- shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s\",", name.get());
- msg.Append("\"%s\"", GetBailoutReason(shared->disable_optimization_reason()));
+ msg << kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT] << kNext
+ << shared->DebugName() << kNext
+ << GetBailoutReason(shared->disable_optimization_reason());
msg.WriteToLogFile();
}
@@ -1282,17 +1164,15 @@ void Logger::CodeDisableOptEvent(AbstractCode* code,
void Logger::CodeMovingGCEvent() {
if (!is_logging_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
- base::OS::SignalCodeMovingGC(GetRandomMmapAddr());
+ base::OS::SignalCodeMovingGC();
}
void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
if (!is_logging_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, CodeEventListener::REG_EXP_TAG, code, &timer_);
- msg.Append('"');
- msg.AppendDetailed(source, false);
- msg.Append('"');
+ AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, code, &timer_);
+ msg << source;
msg.WriteToLogFile();
}
@@ -1301,7 +1181,7 @@ void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
}
-void Logger::CodeLinePosInfoRecordEvent(AbstractCode* code,
+void Logger::CodeLinePosInfoRecordEvent(Address code_start,
ByteArray* source_position_table) {
if (jit_logger_) {
void* jit_handler_data = jit_logger_->StartCodePosInfoEvent();
@@ -1317,16 +1197,15 @@ void Logger::CodeLinePosInfoRecordEvent(AbstractCode* code,
jit_handler_data, iter.code_offset(),
iter.source_position().ScriptOffset(), JitCodeEvent::POSITION);
}
- jit_logger_->EndCodePosInfoEvent(code, jit_handler_data);
+ jit_logger_->EndCodePosInfoEvent(code_start, jit_handler_data);
}
}
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
- if (code_name == NULL) return; // Not a code object.
+ if (code_name == nullptr) return; // Not a code object.
Log::MessageBuilder msg(log_);
- msg.Append("%s,%d,",
- kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT], pos);
- msg.AppendDoubleQuotedString(code_name);
+ msg << kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT] << kNext
+ << pos << kNext << code_name;
msg.WriteToLogFile();
}
@@ -1340,10 +1219,8 @@ void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
Address from, Address to) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append(',');
- msg.AppendAddress(to);
+ msg << kLogEventsNames[event] << kNext << reinterpret_cast<void*>(from)
+ << kNext << reinterpret_cast<void*>(to);
msg.WriteToLogFile();
}
@@ -1351,11 +1228,11 @@ void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
void Logger::ResourceEvent(const char* name, const char* tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,%s,", name, tag);
+ msg << name << kNext << tag << kNext;
uint32_t sec, usec;
if (base::OS::GetUserTime(&sec, &usec) != -1) {
- msg.Append("%d,%d,", sec, usec);
+ msg << sec << kNext << usec << kNext;
}
msg.Append("%.0f", V8::GetCurrentPlatform()->CurrentClockTimeMillis());
msg.WriteToLogFile();
@@ -1368,27 +1245,62 @@ void Logger::SuspectReadEvent(Name* name, Object* obj) {
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
: isolate_->heap()->empty_string();
- msg.Append("suspect-read,");
- msg.Append(class_name);
- msg.Append(',');
- if (name->IsString()) {
- msg.Append('"');
- msg.Append(String::cast(name));
- msg.Append('"');
+ msg << "suspect-read" << kNext << class_name << kNext << name;
+ msg.WriteToLogFile();
+}
+
+namespace {
+void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
+ Script* script, int script_id, double time_delta,
+ int start_position, int end_position,
+ base::ElapsedTimer* timer) {
+ msg << "function" << Logger::kNext << reason << Logger::kNext;
+ if (script) {
+ if (script->name()->IsString()) {
+ msg << String::cast(script->name());
+ }
+ msg << Logger::kNext << script->id();
} else {
- msg.AppendSymbolName(Symbol::cast(name));
+ msg << Logger::kNext << script_id;
}
+ msg << Logger::kNext << start_position << Logger::kNext << end_position
+ << Logger::kNext << time_delta << Logger::kNext
+ << timer->Elapsed().InMicroseconds() << Logger::kNext;
+}
+} // namespace
+
+void Logger::FunctionEvent(const char* reason, Script* script, int script_id,
+ double time_delta, int start_position,
+ int end_position, String* function_name) {
+ if (!log_->IsEnabled() || !FLAG_log_function_events) return;
+ Log::MessageBuilder msg(log_);
+ AppendFunctionMessage(msg, reason, script, script_id, time_delta,
+ start_position, end_position, &timer_);
+ if (function_name) msg << function_name;
msg.WriteToLogFile();
}
+void Logger::FunctionEvent(const char* reason, Script* script, int script_id,
+ double time_delta, int start_position,
+ int end_position, const char* function_name,
+ size_t function_name_length) {
+ if (!log_->IsEnabled() || !FLAG_log_function_events) return;
+ Log::MessageBuilder msg(log_);
+ AppendFunctionMessage(msg, reason, script, script_id, time_delta,
+ start_position, end_position, &timer_);
+ if (function_name_length > 0) {
+ msg.AppendStringPart(function_name, function_name_length);
+ }
+ msg.WriteToLogFile();
+}
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
Log::MessageBuilder msg(log_);
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
- msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f", space, kind,
- V8::GetCurrentPlatform()->CurrentClockTimeMillis());
+ msg << "heap-sample-begin" << kNext << space << kNext << kind << kNext;
+ msg.Append("%.0f", V8::GetCurrentPlatform()->CurrentClockTimeMillis());
msg.WriteToLogFile();
}
@@ -1396,7 +1308,7 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
Log::MessageBuilder msg(log_);
- msg.Append("heap-sample-end,\"%s\",\"%s\"", space, kind);
+ msg << "heap-sample-end" << kNext << space << kNext << kind;
msg.WriteToLogFile();
}
@@ -1404,7 +1316,8 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
Log::MessageBuilder msg(log_);
- msg.Append("heap-sample-item,%s,%d,%d", type, number, bytes);
+ msg << "heap-sample-item" << kNext << type << kNext << number << kNext
+ << bytes;
msg.WriteToLogFile();
}
@@ -1414,8 +1327,7 @@ void Logger::RuntimeCallTimerEvent() {
RuntimeCallCounter* counter = stats->current_counter();
if (counter == nullptr) return;
Log::MessageBuilder msg(log_);
- msg.Append("active-runtime-timer,");
- msg.AppendDoubleQuotedString(counter->name());
+ msg << "active-runtime-timer" << kNext << counter->name();
msg.WriteToLogFile();
}
@@ -1426,70 +1338,104 @@ void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
RuntimeCallTimerEvent();
}
Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[CodeEventListener::TICK_EVENT]);
- msg.AppendAddress(reinterpret_cast<Address>(sample->pc));
- msg.Append(",%d", static_cast<int>(timer_.Elapsed().InMicroseconds()));
+ msg << kLogEventsNames[CodeEventListener::TICK_EVENT] << kNext
+ << reinterpret_cast<void*>(sample->pc) << kNext
+ << timer_.Elapsed().InMicroseconds();
if (sample->has_external_callback) {
- msg.Append(",1,");
- msg.AppendAddress(
- reinterpret_cast<Address>(sample->external_callback_entry));
+ msg << kNext << 1 << kNext
+ << reinterpret_cast<void*>(sample->external_callback_entry);
} else {
- msg.Append(",0,");
- msg.AppendAddress(reinterpret_cast<Address>(sample->tos));
- }
- msg.Append(",%d", static_cast<int>(sample->state));
- if (overflow) {
- msg.Append(",overflow");
+ msg << kNext << 0 << kNext << reinterpret_cast<void*>(sample->tos);
}
+ msg << kNext << static_cast<int>(sample->state);
+ if (overflow) msg << kNext << "overflow";
for (unsigned i = 0; i < sample->frames_count; ++i) {
- msg.Append(',');
- msg.AppendAddress(reinterpret_cast<Address>(sample->stack[i]));
+ msg << kNext << reinterpret_cast<void*>(sample->stack[i]);
}
msg.WriteToLogFile();
}
-void Logger::ICEvent(const char* type, bool keyed, const Address pc, int line,
- int column, Map* map, Object* key, char old_state,
- char new_state, const char* modifier,
+void Logger::ICEvent(const char* type, bool keyed, Map* map, Object* key,
+ char old_state, char new_state, const char* modifier,
const char* slow_stub_reason) {
if (!log_->IsEnabled() || !FLAG_trace_ic) return;
Log::MessageBuilder msg(log_);
- if (keyed) msg.Append("Keyed");
- msg.Append("%s,", type);
- msg.AppendAddress(pc);
- msg.Append(",%d,%d,", line, column);
- msg.Append(old_state);
- msg.Append(",");
- msg.Append(new_state);
- msg.Append(",");
- msg.AppendAddress(reinterpret_cast<Address>(map));
- msg.Append(",");
+ if (keyed) msg << "Keyed";
+ int line;
+ int column;
+ Address pc = isolate_->GetAbstractPC(&line, &column);
+ msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << line << kNext
+ << column << kNext << old_state << kNext << new_state << kNext
+ << reinterpret_cast<void*>(map) << kNext;
if (key->IsSmi()) {
- msg.Append("%d", Smi::ToInt(key));
+ msg << Smi::ToInt(key);
} else if (key->IsNumber()) {
- msg.Append("%lf", key->Number());
- } else if (key->IsString()) {
- msg.AppendDetailed(String::cast(key), false);
- } else if (key->IsSymbol()) {
- msg.AppendSymbolName(Symbol::cast(key));
+ msg << key->Number();
+ } else if (key->IsName()) {
+ msg << Name::cast(key);
}
- msg.Append(",%s,", modifier);
+ msg << kNext << modifier << kNext;
if (slow_stub_reason != nullptr) {
- msg.AppendDoubleQuotedString(slow_stub_reason);
+ msg << slow_stub_reason;
+ }
+ msg.WriteToLogFile();
+}
+
+void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
+ HeapObject* name_or_sfi) {
+ DisallowHeapAllocation no_gc;
+ if (!log_->IsEnabled() || !FLAG_trace_maps) return;
+ if (to) MapDetails(to);
+ int line = -1;
+ int column = -1;
+ Address pc = 0;
+ if (!isolate_->bootstrapper()->IsActive()) {
+ pc = isolate_->GetAbstractPC(&line, &column);
+ }
+ Log::MessageBuilder msg(log_);
+ msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
+ << kNext << reinterpret_cast<void*>(from) << kNext
+ << reinterpret_cast<void*>(to) << kNext << reinterpret_cast<void*>(pc)
+ << kNext << line << kNext << column << kNext << reason << kNext;
+
+ if (name_or_sfi) {
+ if (name_or_sfi->IsName()) {
+ msg << Name::cast(name_or_sfi);
+ } else if (name_or_sfi->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(name_or_sfi);
+ msg << sfi->DebugName();
+#if V8_SFI_HAS_UNIQUE_ID
+ msg << " " << sfi->unique_id();
+#endif // V8_SFI_HAS_UNIQUE_ID
+ }
}
msg.WriteToLogFile();
}
+void Logger::MapDetails(Map* map) {
+ if (!log_->IsEnabled() || !FLAG_trace_maps) return;
+ // Disable logging Map details during bootstrapping since we use LogMaps() to
+ // log all creating
+ if (isolate_->bootstrapper()->IsActive()) return;
+ DisallowHeapAllocation no_gc;
+ Log::MessageBuilder msg(log_);
+ msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
+ << reinterpret_cast<void*>(map) << kNext;
+ std::ostringstream buffer;
+ map->PrintMapDetails(buffer);
+ msg << buffer.str().c_str();
+ msg.WriteToLogFile();
+}
+
void Logger::StopProfiler() {
if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- profiler_->pause();
+ if (profiler_ != nullptr) {
+ profiler_->Pause();
is_logging_ = false;
removeCodeEventListener(this);
}
}
-
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
@@ -1500,10 +1446,10 @@ static void AddFunctionAndCode(SharedFunctionInfo* sfi,
AbstractCode* code_object,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects, int offset) {
- if (sfis != NULL) {
+ if (sfis != nullptr) {
sfis[offset] = Handle<SharedFunctionInfo>(sfi);
}
- if (code_objects != NULL) {
+ if (code_objects != nullptr) {
code_objects[offset] = Handle<AbstractCode>(code_object);
}
}
@@ -1517,7 +1463,8 @@ static int EnumerateCompiledFunctions(Heap* heap,
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->is_compiled() &&
@@ -1574,8 +1521,7 @@ void Logger::LogCodeObject(Object* object) {
case AbstractCode::STUB:
description =
CodeStub::MajorName(CodeStub::GetMajorKey(code_object->GetCode()));
- if (description == NULL)
- description = "A stub from the snapshot";
+ if (description == nullptr) description = "A stub from the snapshot";
tag = CodeEventListener::STUB_TAG;
break;
case AbstractCode::REGEXP:
@@ -1595,6 +1541,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A JavaScript to Wasm adapter";
tag = CodeEventListener::STUB_TAG;
break;
+ case AbstractCode::WASM_TO_WASM_FUNCTION:
+ description = "A cross-instance Wasm adapter";
+ tag = CodeEventListener::STUB_TAG;
+ break;
case AbstractCode::WASM_TO_JS_FUNCTION:
description = "A Wasm to JavaScript adapter";
tag = CodeEventListener::STUB_TAG;
@@ -1618,7 +1568,8 @@ void Logger::LogCodeObjects() {
Heap* heap = isolate_->heap();
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
if (obj->IsBytecodeArray()) LogCodeObject(obj);
}
@@ -1638,6 +1589,7 @@ void Logger::LogBytecodeHandlers() {
interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
+ if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
std::string bytecode_name =
interpreter::Bytecodes::ToString(bytecode, operand_scale);
PROFILE(isolate_, CodeCreateEvent(
@@ -1701,7 +1653,8 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
void Logger::LogCompiledFunctions() {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
- const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
+ const int compiled_funcs_count =
+ EnumerateCompiledFunctions(heap, nullptr, nullptr);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
ScopedVector<Handle<AbstractCode> > code_objects(compiled_funcs_count);
EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
@@ -1720,7 +1673,8 @@ void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
if (!ai->name()->IsName()) continue;
@@ -1742,6 +1696,16 @@ void Logger::LogAccessorCallbacks() {
}
}
+void Logger::LogMaps() {
+ Heap* heap = isolate_->heap();
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_gc;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
+ if (!obj->IsMap()) continue;
+ MapDetails(Map::cast(obj));
+ }
+}
static void AddIsolateIdIfNeeded(std::ostream& os, // NOLINT
Isolate* isolate) {
@@ -1802,7 +1766,7 @@ bool Logger::SetUp(Isolate* isolate) {
std::ostringstream log_file_name;
std::ostringstream source_log_file_name;
PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
- log_->Initialize(log_file_name.str().c_str());
+ log_ = new Log(this, log_file_name.str().c_str());
if (FLAG_perf_basic_prof) {
perf_basic_logger_ = new PerfBasicLogger();
@@ -1825,7 +1789,7 @@ bool Logger::SetUp(Isolate* isolate) {
is_logging_ = true;
}
- if (FLAG_log_internal_timer_events || FLAG_prof_cpp) timer_.Start();
+ timer_.Start();
if (FLAG_prof_cpp) {
profiler_ = new Profiler(isolate);
@@ -1848,7 +1812,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
if (jit_logger_) {
removeCodeEventListener(jit_logger_);
delete jit_logger_;
- jit_logger_ = NULL;
+ jit_logger_ = nullptr;
}
if (event_handler) {
@@ -1881,41 +1845,41 @@ sampler::Sampler* Logger::sampler() {
FILE* Logger::TearDown() {
- if (!is_initialized_) return NULL;
+ if (!is_initialized_) return nullptr;
is_initialized_ = false;
// Stop the profiler before closing the file.
- if (profiler_ != NULL) {
+ if (profiler_ != nullptr) {
profiler_->Disengage();
delete profiler_;
- profiler_ = NULL;
+ profiler_ = nullptr;
}
delete ticker_;
- ticker_ = NULL;
+ ticker_ = nullptr;
if (perf_basic_logger_) {
removeCodeEventListener(perf_basic_logger_);
delete perf_basic_logger_;
- perf_basic_logger_ = NULL;
+ perf_basic_logger_ = nullptr;
}
if (perf_jit_logger_) {
removeCodeEventListener(perf_jit_logger_);
delete perf_jit_logger_;
- perf_jit_logger_ = NULL;
+ perf_jit_logger_ = nullptr;
}
if (ll_logger_) {
removeCodeEventListener(ll_logger_);
delete ll_logger_;
- ll_logger_ = NULL;
+ ll_logger_ = nullptr;
}
if (jit_logger_) {
removeCodeEventListener(jit_logger_);
delete jit_logger_;
- jit_logger_ = NULL;
+ jit_logger_ = nullptr;
}
if (profiler_listener_.get() != nullptr) {
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 91672875ef..7efa50b8de 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -14,6 +14,7 @@
#include "src/base/platform/platform.h"
#include "src/code-events.h"
#include "src/isolate.h"
+#include "src/log-utils.h"
#include "src/objects.h"
namespace v8 {
@@ -93,7 +94,10 @@ class Ticker;
class Logger : public CodeEventListener {
public:
- enum StartEnd { START = 0, END = 1 };
+ enum StartEnd { START = 0, END = 1, STAMP = 2 };
+
+ // The separator is used to write an unescaped "," into the log.
+ static const LogSeparator kNext = LogSeparator::kSeparator;
// Acquires resources for logging if the right flags are set.
bool SetUp(Isolate* isolate);
@@ -121,7 +125,6 @@ class Logger : public CodeEventListener {
void StringEvent(const char* name, const char* value);
// Emits an event with an int value -> (name, value).
- void IntEvent(const char* name, int value);
void IntPtrTEvent(const char* name, intptr_t value);
// Emits an event with an handle value -> (name, location).
@@ -141,6 +144,14 @@ class Logger : public CodeEventListener {
// object.
void SuspectReadEvent(Name* name, Object* obj);
+ void FunctionEvent(const char* reason, Script* script, int script_id,
+ double time_delta_ms, int start_position = -1,
+ int end_position = -1, String* function_name = nullptr);
+ void FunctionEvent(const char* reason, Script* script, int script_id,
+ double time_delta_ms, int start_position, int end_position,
+ const char* function_name = nullptr,
+ size_t function_name_length = 0);
+
// ==== Events logged by --log-api. ====
void ApiSecurityCheck();
void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
@@ -169,8 +180,6 @@ class Logger : public CodeEventListener {
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
Name* source, int line, int column);
- void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, int args_count);
// Emits a code deoptimization event.
void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
@@ -179,7 +188,7 @@ class Logger : public CodeEventListener {
// Emits a code move event.
void CodeMoveEvent(AbstractCode* from, Address to);
// Emits a code line info record event.
- void CodeLinePosInfoRecordEvent(AbstractCode* code,
+ void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray* source_position_table);
void SharedFunctionInfoMoveEvent(Address from, Address to);
@@ -189,11 +198,15 @@ class Logger : public CodeEventListener {
void CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
int fp_to_sp_delta);
- void ICEvent(const char* type, bool keyed, const Address pc, int line,
- int column, Map* map, Object* key, char old_state,
- char new_state, const char* modifier,
+ void ICEvent(const char* type, bool keyed, Map* map, Object* key,
+ char old_state, char new_state, const char* modifier,
const char* slow_stub_reason);
+ void MapEvent(const char* type, Map* from, Map* to,
+ const char* reason = nullptr,
+ HeapObject* name_or_sfi = nullptr);
+ void MapDetails(Map* map);
+
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
void HeapSampleBeginEvent(const char* space, const char* kind);
@@ -213,7 +226,7 @@ class Logger : public CodeEventListener {
void CurrentTimeEvent();
- void TimerEvent(StartEnd se, const char* name);
+ V8_EXPORT_PRIVATE void TimerEvent(StartEnd se, const char* name);
static void EnterExternal(Isolate* isolate);
static void LeaveExternal(Isolate* isolate);
@@ -228,7 +241,7 @@ class Logger : public CodeEventListener {
}
bool is_logging_code_events() {
- return is_logging() || jit_logger_ != NULL;
+ return is_logging() || jit_logger_ != nullptr;
}
// Stop collection of profiling data.
@@ -245,6 +258,8 @@ class Logger : public CodeEventListener {
void LogCodeObjects();
// Used for logging bytecode handlers found in the snapshot.
void LogBytecodeHandlers();
+ // Logs all Mpas foind in the heap.
+ void LogMaps();
// Converts tag to a corresponding NATIVE_... if the script is native.
INLINE(static CodeEventListener::LogEventsAndTags ToNativeByScript(
@@ -253,6 +268,9 @@ class Logger : public CodeEventListener {
// Callback from Log, stops profiling in case of insufficient resources.
void LogFailure();
+ // Used for logging stubs found in the snapshot.
+ void LogCodeObject(Object* code_object);
+
private:
explicit Logger(Isolate* isolate);
~Logger();
@@ -269,9 +287,6 @@ class Logger : public CodeEventListener {
void MoveEventInternal(CodeEventListener::LogEventsAndTags event,
Address from, Address to);
- // Used for logging stubs found in the snapshot.
- void LogCodeObject(Object* code_object);
-
// Helper method. It resets name_buffer_ and add tag name into it.
void InitNameBuffer(CodeEventListener::LogEventsAndTags tag);
@@ -279,13 +294,10 @@ class Logger : public CodeEventListener {
void TickEvent(TickSample* sample, bool overflow);
void RuntimeCallTimerEvent();
- PRINTF_FORMAT(2, 3) void ApiEvent(const char* format, ...);
-
// Logs a StringEvent regardless of whether FLAG_log is true.
void UncheckedStringEvent(const char* name, const char* value);
- // Logs an IntEvent regardless of whether FLAG_log is true.
- void UncheckedIntEvent(const char* name, int value);
+ // Logs an IntPtrTEvent regardless of whether FLAG_log is true.
void UncheckedIntPtrTEvent(const char* name, intptr_t value);
Isolate* isolate_;
@@ -329,22 +341,25 @@ class Logger : public CodeEventListener {
friend class CpuProfiler;
};
-#define TIMER_EVENTS_LIST(V) \
- V(RecompileSynchronous, true) \
- V(RecompileConcurrent, true) \
- V(CompileIgnition, true) \
- V(CompileFullCode, true) \
- V(OptimizeCode, true) \
- V(CompileCode, true) \
- V(DeoptimizeCode, true) \
- V(Execute, true) \
+#define TIMER_EVENTS_LIST(V) \
+ V(RecompileSynchronous, true) \
+ V(RecompileConcurrent, true) \
+ V(CompileIgnition, true) \
+ V(CompileFullCode, true) \
+ V(OptimizeCode, true) \
+ V(CompileCode, true) \
+ V(CompileCodeBackground, true) \
+ V(DeoptimizeCode, true) \
+ V(Execute, true) \
V(External, true)
-#define V(TimerName, expose) \
- class TimerEvent##TimerName : public AllStatic { \
- public: \
- static const char* name(void* unused = NULL) { return "V8." #TimerName; } \
- static bool expose_to_api() { return expose; } \
+#define V(TimerName, expose) \
+ class TimerEvent##TimerName : public AllStatic { \
+ public: \
+ static const char* name(void* unused = nullptr) { \
+ return "V8." #TimerName; \
+ } \
+ static bool expose_to_api() { return expose; } \
};
TIMER_EVENTS_LIST(V)
#undef V
@@ -374,8 +389,6 @@ class CodeEventLogger : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
Name* name) override;
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- int args_count) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* name) override;
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source, int line,
diff --git a/deps/v8/src/lookup-cache-inl.h b/deps/v8/src/lookup-cache-inl.h
index 9b27268756..34435c0e8e 100644
--- a/deps/v8/src/lookup-cache-inl.h
+++ b/deps/v8/src/lookup-cache-inl.h
@@ -31,7 +31,7 @@ int DescriptorLookupCache::Lookup(Map* source, Name* name) {
}
void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
- DCHECK(result != kAbsent);
+ DCHECK_NE(result, kAbsent);
int index = Hash(source, name);
Key& key = keys_[index];
key.source = source;
diff --git a/deps/v8/src/lookup-cache.cc b/deps/v8/src/lookup-cache.cc
index b740fdbf11..2ec0230889 100644
--- a/deps/v8/src/lookup-cache.cc
+++ b/deps/v8/src/lookup-cache.cc
@@ -10,7 +10,7 @@ namespace v8 {
namespace internal {
void DescriptorLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
+ for (int index = 0; index < kLength; index++) keys_[index].source = nullptr;
}
} // namespace internal
diff --git a/deps/v8/src/lookup-cache.h b/deps/v8/src/lookup-cache.h
index bf64cc00d2..b8a59c9c1a 100644
--- a/deps/v8/src/lookup-cache.h
+++ b/deps/v8/src/lookup-cache.h
@@ -31,8 +31,8 @@ class DescriptorLookupCache {
private:
DescriptorLookupCache() {
for (int i = 0; i < kLength; ++i) {
- keys_[i].source = NULL;
- keys_[i].name = NULL;
+ keys_[i].source = nullptr;
+ keys_[i].name = nullptr;
results_[i] = kAbsent;
}
}
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 91d87ebbff..2d3cc3253e 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -212,7 +212,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
unsigned int magic = 0xbbbbbbbb;
- isolate->PushStackTraceAndDie(magic, *receiver, NULL, magic);
+ isolate->PushStackTraceAndDie(magic, *receiver, nullptr, magic);
}
return Handle<JSReceiver>::cast(root);
}
@@ -381,7 +381,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
PropertyDetails original_details =
dictionary->DetailsAt(dictionary_entry());
int enumeration_index = original_details.dictionary_index();
- DCHECK(enumeration_index > 0);
+ DCHECK_GT(enumeration_index, 0);
details = details.set_index(enumeration_index);
dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
property_details_ = details;
@@ -618,11 +618,11 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
if (IsElement()) {
// TODO(verwaest): Move code into the element accessor.
- Handle<SeededNumberDictionary> dictionary =
- JSObject::NormalizeElements(receiver);
+ isolate_->CountUsage(v8::Isolate::kIndexAccessor);
+ Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(receiver);
- dictionary = SeededNumberDictionary::Set(dictionary, index_, pair, receiver,
- details);
+ dictionary =
+ NumberDictionary::Set(dictionary, index_, pair, receiver, details);
receiver->RequireSlowElements(*dictionary);
if (receiver->HasSlowArgumentsElements()) {
@@ -682,7 +682,7 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
Handle<Object> LookupIterator::FetchValue() const {
- Object* result = NULL;
+ Object* result = nullptr;
if (IsElement()) {
Handle<JSObject> holder = GetHolder<JSObject>();
ElementsAccessor* accessor = holder->GetElementsAccessor();
@@ -778,11 +778,7 @@ FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
- Map* holder_map = holder_->map();
- int index =
- holder_map->instance_descriptors()->GetFieldIndex(descriptor_number());
- bool is_double = representation().IsDouble();
- return FieldIndex::ForPropertyIndex(holder_map, index, is_double);
+ return FieldIndex::ForDescriptor(holder_->map(), descriptor_number());
}
Handle<FieldType> LookupIterator::GetFieldType() const {
@@ -868,8 +864,8 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
JSReceiver* LookupIterator::NextHolder(Map* map) {
DisallowHeapAllocation no_gc;
- if (map->prototype() == heap()->null_value()) return NULL;
- if (!check_prototype_chain() && !map->has_hidden_prototype()) return NULL;
+ if (map->prototype() == heap()->null_value()) return nullptr;
+ if (!check_prototype_chain() && !map->has_hidden_prototype()) return nullptr;
return JSReceiver::cast(map->prototype());
}
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 701fa09f32..4502b2fdc2 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-enum class MachineRepresentation {
+enum class MachineRepresentation : uint8_t {
kNone,
kBit,
kWord8,
@@ -41,7 +41,7 @@ static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
const char* MachineReprToString(MachineRepresentation);
-enum class MachineSemantic {
+enum class MachineSemantic : uint8_t {
kNone,
kBool,
kInt32,
@@ -56,131 +56,142 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep);
class MachineType {
public:
- MachineType()
+ constexpr MachineType()
: representation_(MachineRepresentation::kNone),
semantic_(MachineSemantic::kNone) {}
- MachineType(MachineRepresentation representation, MachineSemantic semantic)
+ constexpr MachineType(MachineRepresentation representation,
+ MachineSemantic semantic)
: representation_(representation), semantic_(semantic) {}
- bool operator==(MachineType other) const {
+ constexpr bool operator==(MachineType other) const {
return representation() == other.representation() &&
semantic() == other.semantic();
}
- bool operator!=(MachineType other) const { return !(*this == other); }
-
+ constexpr bool operator!=(MachineType other) const {
+ return !(*this == other);
+ }
- MachineRepresentation representation() const { return representation_; }
- MachineSemantic semantic() const { return semantic_; }
+ constexpr MachineRepresentation representation() const {
+ return representation_;
+ }
+ constexpr MachineSemantic semantic() const { return semantic_; }
- bool IsNone() { return representation() == MachineRepresentation::kNone; }
+ constexpr bool IsNone() const {
+ return representation() == MachineRepresentation::kNone;
+ }
- bool IsSigned() {
+ constexpr bool IsSigned() const {
return semantic() == MachineSemantic::kInt32 ||
semantic() == MachineSemantic::kInt64;
}
- bool IsUnsigned() {
+ constexpr bool IsUnsigned() const {
return semantic() == MachineSemantic::kUint32 ||
semantic() == MachineSemantic::kUint64;
}
- static MachineRepresentation PointerRepresentation() {
+ constexpr bool IsTagged() const {
+ return representation() == MachineRepresentation::kTaggedPointer ||
+ representation() == MachineRepresentation::kTaggedSigned ||
+ representation() == MachineRepresentation::kTagged;
+ }
+ constexpr static MachineRepresentation PointerRepresentation() {
return (kPointerSize == 4) ? MachineRepresentation::kWord32
: MachineRepresentation::kWord64;
}
- static MachineType UintPtr() {
+ constexpr static MachineType UintPtr() {
return (kPointerSize == 4) ? Uint32() : Uint64();
}
- static MachineType IntPtr() {
+ constexpr static MachineType IntPtr() {
return (kPointerSize == 4) ? Int32() : Int64();
}
- static MachineType Int8() {
+ constexpr static MachineType Int8() {
return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
}
- static MachineType Uint8() {
+ constexpr static MachineType Uint8() {
return MachineType(MachineRepresentation::kWord8, MachineSemantic::kUint32);
}
- static MachineType Int16() {
+ constexpr static MachineType Int16() {
return MachineType(MachineRepresentation::kWord16, MachineSemantic::kInt32);
}
- static MachineType Uint16() {
+ constexpr static MachineType Uint16() {
return MachineType(MachineRepresentation::kWord16,
MachineSemantic::kUint32);
}
- static MachineType Int32() {
+ constexpr static MachineType Int32() {
return MachineType(MachineRepresentation::kWord32, MachineSemantic::kInt32);
}
- static MachineType Uint32() {
+ constexpr static MachineType Uint32() {
return MachineType(MachineRepresentation::kWord32,
MachineSemantic::kUint32);
}
- static MachineType Int64() {
+ constexpr static MachineType Int64() {
return MachineType(MachineRepresentation::kWord64, MachineSemantic::kInt64);
}
- static MachineType Uint64() {
+ constexpr static MachineType Uint64() {
return MachineType(MachineRepresentation::kWord64,
MachineSemantic::kUint64);
}
- static MachineType Float32() {
+ constexpr static MachineType Float32() {
return MachineType(MachineRepresentation::kFloat32,
MachineSemantic::kNumber);
}
- static MachineType Float64() {
+ constexpr static MachineType Float64() {
return MachineType(MachineRepresentation::kFloat64,
MachineSemantic::kNumber);
}
- static MachineType Simd128() {
+ constexpr static MachineType Simd128() {
return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
}
- static MachineType Pointer() {
+ constexpr static MachineType Pointer() {
return MachineType(PointerRepresentation(), MachineSemantic::kNone);
}
- static MachineType TaggedPointer() {
+ constexpr static MachineType TaggedPointer() {
return MachineType(MachineRepresentation::kTaggedPointer,
MachineSemantic::kAny);
}
- static MachineType TaggedSigned() {
+ constexpr static MachineType TaggedSigned() {
return MachineType(MachineRepresentation::kTaggedSigned,
MachineSemantic::kInt32);
}
- static MachineType AnyTagged() {
+ constexpr static MachineType AnyTagged() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kAny);
}
- static MachineType Bool() {
+ constexpr static MachineType Bool() {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kBool);
}
- static MachineType TaggedBool() {
+ constexpr static MachineType TaggedBool() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kBool);
}
- static MachineType None() {
+ constexpr static MachineType None() {
return MachineType(MachineRepresentation::kNone, MachineSemantic::kNone);
}
// These naked representations should eventually go away.
- static MachineType RepWord8() {
+ constexpr static MachineType RepWord8() {
return MachineType(MachineRepresentation::kWord8, MachineSemantic::kNone);
}
- static MachineType RepWord16() {
+ constexpr static MachineType RepWord16() {
return MachineType(MachineRepresentation::kWord16, MachineSemantic::kNone);
}
- static MachineType RepWord32() {
+ constexpr static MachineType RepWord32() {
return MachineType(MachineRepresentation::kWord32, MachineSemantic::kNone);
}
- static MachineType RepWord64() {
+ constexpr static MachineType RepWord64() {
return MachineType(MachineRepresentation::kWord64, MachineSemantic::kNone);
}
- static MachineType RepFloat32() {
+ constexpr static MachineType RepFloat32() {
return MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone);
}
- static MachineType RepFloat64() {
+ constexpr static MachineType RepFloat64() {
return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
}
- static MachineType RepSimd128() {
+ constexpr static MachineType RepSimd128() {
return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
}
- static MachineType RepTagged() {
+ constexpr static MachineType RepTagged() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
}
- static MachineType RepBit() {
+ constexpr static MachineType RepBit() {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
}
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index 3a9a9caf14..1b8bdfdf12 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -310,7 +310,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
Map* transition = TransitionsAccessor(target_map_)
.SearchTransition(GetKey(i), old_details.kind(),
old_details.attributes());
- if (transition == NULL) break;
+ if (transition == nullptr) break;
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
@@ -393,7 +393,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
Map* transition = TransitionsAccessor(target_map_)
.SearchTransition(GetKey(i), old_details.kind(),
old_details.attributes());
- if (transition == NULL) break;
+ if (transition == nullptr) break;
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
isolate_);
@@ -598,7 +598,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
Map* next =
TransitionsAccessor(current, &no_allocation)
.SearchTransition(name, details.kind(), details.attributes());
- if (next == NULL) break;
+ if (next == nullptr) break;
DescriptorArray* next_descriptors = next->instance_descriptors();
PropertyDetails next_details = next_descriptors->GetDetails(i);
@@ -637,14 +637,14 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
// Invalidate a transition target at |key|.
Map* maybe_transition = transitions.SearchTransition(
GetKey(split_nof), split_details.kind(), split_details.attributes());
- if (maybe_transition != NULL) {
+ if (maybe_transition != nullptr) {
maybe_transition->DeprecateTransitionTree();
}
- // If |maybe_transition| is not NULL then the transition array already
+ // If |maybe_transition| is not nullptr then the transition array already
// contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
- if (maybe_transition == NULL && !transitions.CanHaveMoreTransitions()) {
+ if (maybe_transition == nullptr && !transitions.CanHaveMoreTransitions()) {
return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
}
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index ddc5124cfc..e9d2be1843 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -12,6 +12,7 @@
#include "src/keys.h"
#include "src/objects/frame-array-inl.h"
#include "src/string-builder.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -34,7 +35,7 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
const MessageLocation* loc,
Handle<Object> message_obj) {
std::unique_ptr<char[]> str = GetLocalizedMessage(isolate, message_obj);
- if (loc == NULL) {
+ if (loc == nullptr) {
PrintF("%s\n", str.get());
} else {
HandleScope scope(isolate);
@@ -56,7 +57,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
int start = -1;
int end = -1;
Handle<Object> script_handle = factory->undefined_value();
- if (location != NULL) {
+ if (location != nullptr) {
start = location->start_pos();
end = location->end_pos();
script_handle = Script::GetWrapper(location->script());
@@ -649,9 +650,18 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
if (array->IsWasmInterpretedFrame(frame_ix)) {
- code_ = Handle<AbstractCode>::null();
+ code_ = {};
} else {
- code_ = handle(array->Code(frame_ix), isolate);
+ code_ =
+ FLAG_wasm_jit_to_native
+ ? WasmCodeWrapper(
+ wasm_instance_->compiled_module()->GetNativeModule()->GetCode(
+ wasm_func_index_))
+ : WasmCodeWrapper(handle(
+ Code::cast(
+ wasm_instance_->compiled_module()->code_table()->get(
+ wasm_func_index_)),
+ isolate));
}
offset_ = array->Offset(frame_ix)->value();
}
@@ -712,9 +722,13 @@ MaybeHandle<String> WasmStackFrame::ToString() {
}
int WasmStackFrame::GetPosition() const {
- if (IsInterpreted()) return offset_;
- // TODO(wasm): Clean this up (bug 5007).
- return (offset_ < 0) ? (-1 - offset_) : code_->SourcePosition(offset_);
+ return IsInterpreted()
+ ? offset_
+ : (code_.IsCodeObject()
+ ? Handle<AbstractCode>::cast(code_.GetCode())
+ ->SourcePosition(offset_)
+ : FrameSummary::WasmCompiledFrameSummary::
+ GetWasmSourcePosition(code_.GetWasmCode(), offset_));
}
Handle<Object> WasmStackFrame::Null() const {
@@ -761,7 +775,11 @@ Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
int AsmJsWasmStackFrame::GetPosition() const {
DCHECK_LE(0, offset_);
- int byte_offset = code_->SourcePosition(offset_);
+ int byte_offset =
+ code_.IsCodeObject()
+ ? Handle<AbstractCode>::cast(code_.GetCode())->SourcePosition(offset_)
+ : FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ code_.GetWasmCode(), offset_);
Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
isolate_);
DCHECK_LE(0, byte_offset);
@@ -957,6 +975,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
if (prepare_stack_trace->IsJSFunction() && !in_recursion) {
PrepareStackTraceScope scope(isolate);
+ isolate->CountUsage(v8::Isolate::kErrorPrepareStackTrace);
+
Handle<JSArray> sites;
ASSIGN_RETURN_ON_EXCEPTION(isolate, sites, GetStackFrames(isolate, elems),
Object);
@@ -1049,7 +1069,7 @@ const char* MessageTemplate::TemplateString(int template_index) {
#undef CASE
case kLastMessage:
default:
- return NULL;
+ return nullptr;
}
}
@@ -1060,7 +1080,7 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
Handle<String> arg2) {
Isolate* isolate = arg0->GetIsolate();
const char* template_string = TemplateString(template_index);
- if (template_string == NULL) {
+ if (template_string == nullptr) {
isolate->ThrowIllegalOperation();
return MaybeHandle<String>();
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 9237f7a231..bf0c8db355 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -13,9 +13,13 @@
#include <memory>
#include "src/handles.h"
+#include "src/wasm/wasm-code-wrapper.h"
namespace v8 {
namespace internal {
+namespace wasm {
+class WasmCode;
+}
// Forward declarations.
class AbstractCode;
@@ -161,7 +165,7 @@ class WasmStackFrame : public StackFrameBase {
Handle<WasmInstanceObject> wasm_instance_;
uint32_t wasm_func_index_;
- Handle<AbstractCode> code_; // null handle for interpreted frames.
+ WasmCodeWrapper code_; // null for interpreted frames.
int offset_;
private:
@@ -273,9 +277,15 @@ class ErrorUtils : public AllStatic {
T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
T(BadSortComparisonFunction, \
"The comparison function must be either a function or undefined") \
+ T(BigIntFromNumber, \
+ "The number % is not a safe integer and thus cannot be converted to a " \
+ "BigInt") \
+ T(BigIntFromObject, "Cannot convert % to a BigInt") \
T(BigIntMixedTypes, \
"Cannot mix BigInt and other types, use explicit conversions") \
+ T(BigIntSerializeJSON, "Do not know how to serialize a BigInt") \
T(BigIntShr, "BigInts have no unsigned right shift, use >> instead") \
+ T(BigIntToNumber, "Cannot convert a BigInt value to a number") \
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
@@ -292,6 +302,7 @@ class ErrorUtils : public AllStatic {
T(CircularStructure, "Converting circular structure to JSON") \
T(ConstructAbstractClass, "Abstract class % not directly constructable") \
T(ConstAssign, "Assignment to constant variable.") \
+ T(ConstructorClassField, "Classes may not have a field named 'constructor'") \
T(ConstructorNonCallable, \
"Class constructor % cannot be invoked without 'new'") \
T(ConstructorNotFunction, "Constructor % requires 'new'") \
@@ -318,6 +329,7 @@ class ErrorUtils : public AllStatic {
"Immutable prototype object '%' cannot have their prototype set") \
T(ImportCallNotNewExpression, "Cannot use new with import") \
T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
+ T(ImportMissingSpecifier, "import() requires a specifier") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
T(InstanceofNonobjectProto, \
"Function has non-object prototype '%' in instanceof check") \
@@ -479,7 +491,8 @@ class ErrorUtils : public AllStatic {
"small") \
T(SharedArrayBufferSpeciesThis, \
"SharedArrayBuffer subclass returned this from species constructor") \
- T(StaticPrototype, "Classes may not have static property named prototype") \
+ T(StaticPrototype, \
+ "Classes may not have a static property named 'prototype'") \
T(StrictDeleteProperty, "Cannot delete property '%' of %") \
T(StrictPoisonPill, \
"'caller', 'callee', and 'arguments' properties may not be accessed on " \
@@ -526,6 +539,7 @@ class ErrorUtils : public AllStatic {
T(InvalidDataViewLength, "Invalid DataView length %") \
T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
T(InvalidHint, "Invalid hint: %") \
+ T(InvalidIndex, "Invalid value: not (convertible to) a safe integer") \
T(InvalidLanguageTag, "Invalid language tag: %") \
T(InvalidWeakMapKey, "Invalid value used as weak map key") \
T(InvalidWeakSetValue, "Invalid value used in weak set") \
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index cbe6fcbca9..e42210ea0e 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -124,14 +124,14 @@ int RelocInfo::target_address_size() {
}
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -164,7 +164,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
int32_t imm = reinterpret_cast<int32_t>(target);
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
if (Assembler::IsJicOrJialc(instr2)) {
// Encoded internal references are lui/jic load of 32-bit absolute address.
uint32_t lui_offset_u, jic_offset_u;
@@ -217,7 +217,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -277,11 +277,11 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 926c64d4d9..b5719a3add 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -210,6 +210,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
@@ -654,7 +665,7 @@ bool Assembler::IsOri(Instr instr) {
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
- DCHECK(type < 32);
+ DCHECK_LT(type, 32);
uint32_t opcode = GetOpcodeField(instr);
uint32_t function = GetFunctionField(instr);
uint32_t rt = GetRt(instr);
@@ -857,7 +868,7 @@ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
int32_t bits = OffsetSizeInBits(instr);
int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
imm >>= 2;
const int32_t mask = (1 << bits) - 1;
@@ -894,7 +905,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
@@ -913,14 +924,14 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
}
}
-
-void Assembler::print(Label* L) {
+void Assembler::print(const Label* L) {
if (L->is_unused()) {
PrintF("unused label\n");
} else if (L->is_bound()) {
PrintF("bound label to %d\n", L->pos());
} else if (L->is_linked()) {
- Label l = *L;
+ Label l;
+ l.link_to(L->pos());
PrintF("unbound label");
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
@@ -964,7 +975,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (dist > branch_offset) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
}
CHECK((trampoline_pos - fixup_pos) <= branch_offset);
target_at_put(fixup_pos, trampoline_pos, false);
@@ -997,7 +1008,7 @@ void Assembler::next(Label* L, bool is_internal) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK(link >= 0);
+ DCHECK_GE(link, 0);
L->link_to(link);
}
}
@@ -1274,7 +1285,7 @@ void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
MSARegister wt, MSARegister ws, MSARegister wd) {
DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
- DCHECK(df < 2);
+ DCHECK_LT(df, 2);
Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
(ws.code() << kWsShift) | (wd.code() << kWdShift);
emit(instr);
@@ -1362,7 +1373,7 @@ uint32_t Assembler::jump_address(Label* L) {
}
uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
return imm;
}
@@ -1390,7 +1401,7 @@ int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
DCHECK(is_intn(offset, bits + 2));
- DCHECK((offset & 3) == 0);
+ DCHECK_EQ(offset & 3, 0);
return offset;
}
@@ -1405,7 +1416,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
int32_t imm18 = target_pos - at_offset;
- DCHECK((imm18 & 3) == 0);
+ DCHECK_EQ(imm18 & 3, 0);
int32_t imm16 = imm18 >> 2;
DCHECK(is_int16(imm16));
instr_at_put(at_offset, (imm16 & kImm16Mask));
@@ -1939,7 +1950,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(sa <= 3);
+ DCHECK_LE(sa, 3);
DCHECK(IsMipsArchVariant(kMips32r6));
Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
rd.code() << kRdShift | sa << kSaShift | LSA;
@@ -1964,7 +1975,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
- DCHECK(second_access_add_to_offset <= 7); // Must be <= 7.
+ DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
if (is_int16(src.offset()) &&
@@ -2226,7 +2237,7 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
- DCHECK((code & ~0xfffff) == 0);
+ DCHECK_EQ(code & ~0xfffff, 0);
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
@@ -2242,8 +2253,8 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
void Assembler::stop(const char* msg, uint32_t code) {
- DCHECK(code > kMaxWatchpointCode);
- DCHECK(code <= kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
#if V8_HOST_ARCH_MIPS
break_(0x54321);
#else // V8_HOST_ARCH_MIPS
@@ -3007,7 +3018,7 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister fs, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
Instr instr = COP1 | fmt | ft.code() << kFtShift |
fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
emit(instr);
@@ -3044,7 +3055,7 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
DCHECK(fmt == S || fmt == D);
- DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
emit(instr);
@@ -3065,7 +3076,7 @@ void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- DCHECK(src2 == 0.0);
+ DCHECK_EQ(src2, 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
c(cond, D, src1, f14, 0);
@@ -3619,7 +3630,7 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
return 0; // Number of instructions patched.
}
imm += pc_delta;
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
@@ -3729,14 +3740,14 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data, nullptr);
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
}
- DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
}
@@ -3767,7 +3778,7 @@ void Assembler::CheckTrampolinePool() {
}
DCHECK(!trampoline_emitted_);
- DCHECK(unbound_labels_count_ >= 0);
+ DCHECK_GE(unbound_labels_count_, 0);
if (unbound_labels_count_ > 0) {
// First we emit jump (2 instructions), then we emit trampoline pool.
{ BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3924,8 +3935,8 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
}
Register UseScratchRegisterScope::Acquire() {
- DCHECK(available_ != nullptr);
- DCHECK(*available_ != 0);
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
*available_ &= ~(1UL << index);
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index d2a9802b5e..76f3245e2c 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -54,8 +54,9 @@ namespace internal {
V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
- V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7)
+ V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7) \
+ V(v0) V(v1)
#define DOUBLE_REGISTERS(V) \
V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
@@ -252,12 +253,12 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
public:
FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
return FPURegister::from_code(code());
}
FPURegister high() const {
// Find high reg of a Doubel-reg pair, which is reg + 1.
- DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
return FPURegister::from_code(code() + 1);
}
@@ -481,14 +482,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -686,7 +688,7 @@ class Assembler : public AssemblerBase {
// sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
// marking, to avoid conflict with ssnop and ehb instructions.
void nop(unsigned int type = 0) {
- DCHECK(type < 32);
+ DCHECK_LT(type, 32);
Register nop_rt_reg = (type == 0) ? zero_reg : at;
sll(zero_reg, nop_rt_reg, type, true);
}
@@ -1993,12 +1995,8 @@ class Assembler : public AssemblerBase {
// few aliases, but mixing both does not look clean to me.
// Anyway we could surely implement this differently.
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa = 0,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
+ uint16_t sa = 0, SecondaryField func = nullptrSF);
void GenInstrRegister(Opcode opcode,
Register rs,
@@ -2007,32 +2005,20 @@ class Assembler : public AssemblerBase {
uint16_t lsb,
SecondaryField func);
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
+ FPURegister fs, FPURegister fd,
+ SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- FPURegister fr,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
+ FPURegister fs, FPURegister fd,
+ SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPURegister fs, FPURegister fd,
+ SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPUControlRegister fs, SecondaryField func = nullptrSF);
void GenInstrImmediate(
Opcode opcode, Register rs, Register rt, int32_t j,
@@ -2127,7 +2113,7 @@ class Assembler : public AssemblerBase {
}
// Labels.
- void print(Label* L);
+ void print(const Label* L);
void bind_to(Label* L, int pos);
void next(Label* L, bool is_internal);
@@ -2226,7 +2212,6 @@ class Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
- friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 29583eca1a..3485e146ea 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -8,11 +8,9 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -40,61 +38,52 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
- Register input_reg = source();
Register result_reg = destination();
- int double_offset = offset();
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += 3 * kPointerSize;
-
- Register scratch =
- GetRegisterThatIsNotOneOf(input_reg, result_reg);
- Register scratch2 =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
- Register scratch3 =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
DoubleRegister double_scratch = kLithiumScratchDouble;
+ // Account for saved regs.
+ const int kArgumentOffset = 3 * kPointerSize;
+
__ Push(scratch, scratch2, scratch3);
- if (!skip_fastpath()) {
- // Load double input.
- __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
-
- // Clear cumulative exception flags and save the FCSR.
- __ cfc1(scratch2, FCSR);
- __ ctc1(zero_reg, FCSR);
-
- // Try a conversion to a signed integer.
- __ Trunc_w_d(double_scratch, double_scratch);
- // Move the converted value into the result register.
- __ mfc1(scratch3, double_scratch);
-
- // Retrieve and restore the FCSR.
- __ cfc1(scratch, FCSR);
- __ ctc1(scratch2, FCSR);
-
- // Check for overflow and NaNs.
- __ And(
- scratch, scratch,
- kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
- | kFCSRInvalidOpFlagMask);
- // If we had no exceptions then set result_reg and we are done.
- Label error;
- __ Branch(&error, ne, scratch, Operand(zero_reg));
- __ Move(result_reg, scratch3);
- __ Branch(&done);
- __ bind(&error);
- }
+ // Load double input.
+ __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
+
+ // Clear cumulative exception flags and save the FCSR.
+ __ cfc1(scratch2, FCSR);
+ __ ctc1(zero_reg, FCSR);
+
+ // Try a conversion to a signed integer.
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ mfc1(scratch3, double_scratch);
+
+ // Retrieve and restore the FCSR.
+ __ cfc1(scratch, FCSR);
+ __ ctc1(scratch2, FCSR);
+
+ // Check for overflow and NaNs.
+ __ And(
+ scratch, scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low,
- MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
__ lw(input_high,
- MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
@@ -178,49 +167,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ mov(t9, ra);
- __ pop(ra);
- __ PushSafepointRegisters();
- __ Jump(t9);
-}
-
-
-void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ mov(t9, ra);
- __ pop(ra);
- __ PopSafepointRegisters();
- __ Jump(t9);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles()) {
- __ MultiPushFPU(kCallerSavedFPU);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = a1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- __ MultiPopFPU(kCallerSavedFPU);
- }
-
- __ MultiPop(kJSCallerSaved | ra.bit());
- __ Ret();
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == a2);
@@ -317,7 +263,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+ __ BranchF(&done, nullptr, ne, double_result, kDoubleRegZero);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
@@ -341,38 +287,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
+Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreRegistersStateStub::GenerateAheadOfTime(isolate);
- RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
-void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
-void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
- StoreBufferOverflowStub(isolate, mode).GetCode();
}
@@ -496,17 +423,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles(), argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -541,12 +466,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&zero);
// Compute the handler entry address and jump to it.
- __ li(a1, Operand(pending_handler_code_address));
- __ lw(a1, MemOperand(a1));
- __ li(a2, Operand(pending_handler_offset_address));
- __ lw(a2, MemOperand(a2));
- __ Addu(t9, a1, a2);
- __ Jump(t9, Code::kHeaderSize - kHeapObjectTag);
+ __ li(t9, Operand(pending_handler_entrypoint_address));
+ __ lw(t9, MemOperand(t9));
+ __ Jump(t9);
}
@@ -706,111 +628,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-void StringHelper::GenerateFlatOneByteStringEquals(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ lw(length, FieldMemOperand(left, String::kLengthOffset));
- __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Branch(&check_zero_length, eq, length, Operand(scratch2));
- __ bind(&strings_not_equal);
- DCHECK(is_int16(NOT_EQUAL));
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&compare_chars, ne, length, Operand(zero_reg));
- DCHECK(is_int16(EQUAL));
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
-
- // Compare characters.
- __ bind(&compare_chars);
-
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
- v0, &strings_not_equal);
-
- // Characters are equal.
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Subu(scratch3, scratch1, Operand(scratch2));
- Register length_delta = scratch3;
- __ slt(scratch4, scratch2, scratch1);
- __ Movn(scratch1, scratch2, scratch4);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- scratch4, v0, &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mov(scratch2, length_delta);
- __ mov(scratch4, zero_reg);
- __ mov(v0, zero_reg);
-
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- Label ret;
- __ Branch(&ret, eq, scratch2, Operand(scratch4));
- __ li(v0, Operand(Smi::FromInt(GREATER)));
- __ Branch(&ret, gt, scratch2, Operand(scratch4));
- __ li(v0, Operand(Smi::FromInt(LESS)));
- __ bind(&ret);
- __ Ret();
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Register scratch3,
- Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ Addu(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ Addu(left, left, Operand(scratch1));
- __ Addu(right, right, Operand(scratch1));
- __ Subu(length, zero_reg, length);
- Register index = length; // index = -length;
-
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ Addu(scratch3, left, index);
- __ lbu(scratch1, MemOperand(scratch3));
- __ Addu(scratch3, right, index);
- __ lbu(scratch2, MemOperand(scratch3));
- __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
- __ Addu(index, index, 1);
- __ Branch(&loop, ne, index, Operand(zero_reg));
-}
-
-
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Make place for arguments to fit C calling convention. Most of the callers
// of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
@@ -846,391 +663,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ lw(index, FieldMemOperand(properties, kCapacityOffset));
- __ Subu(index, index, Operand(1));
- __ And(index, index, Operand(
- Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Lsa(index, index, index, 1);
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- Register tmp = properties;
- __ Lsa(tmp, properties, index, 1);
- __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- DCHECK(tmp != entity_name);
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ Branch(done, eq, entity_name, Operand(tmp));
-
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
-
- Label good;
- __ Branch(&good, eq, entity_name, Operand(tmp));
-
- // Check if the entry name is not a unique name.
- __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ bind(&good);
-
- // Restore the properties.
- __ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- }
-
- const int spill_mask =
- (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
- a2.bit() | a1.bit() | a0.bit() | v0.bit());
-
- __ MultiPush(spill_mask);
- __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ li(a1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, eq, at, Operand(zero_reg));
- __ Branch(miss, ne, at, Operand(zero_reg));
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: NameDictionary to probe
- // a1: key
- // dictionary: NameDictionary to probe.
- // index: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = v0;
- Register dictionary = a0;
- Register key = a1;
- Register index = a2;
- Register mask = a3;
- Register hash = t0;
- Register undefined = t1;
- Register entry_key = t2;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ sra(mask, mask, kSmiTagSize);
- __ Subu(mask, mask, Operand(1));
-
- __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Addu(index, hash, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- } else {
- __ mov(index, hash);
- }
- __ srl(index, index, Name::kHashShift);
- __ And(index, mask, index);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // index *= 3.
- __ Lsa(index, index, index, 1);
-
- STATIC_ASSERT(kSmiTagSize == 1);
- __ Lsa(index, dictionary, index, 2);
- __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
-
- // Stop if found the property.
- __ Branch(&in_dictionary, eq, entry_key, Operand(key));
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbu(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ Ret(USE_DELAY_SLOT);
- __ mov(result, zero_reg);
- }
-
- __ bind(&in_dictionary);
- __ Ret(USE_DELAY_SLOT);
- __ li(result, 1);
-
- __ bind(&not_in_dictionary);
- __ Ret(USE_DELAY_SLOT);
- __ mov(result, zero_reg);
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- // Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- 2 * Assembler::kInstrSize);
-
- if (Assembler::IsBeq(first_instruction)) {
- return INCREMENTAL;
- }
-
- DCHECK(Assembler::IsBne(first_instruction));
-
- if (Assembler::IsBeq(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(Assembler::IsBne(second_instruction));
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- 4 * Assembler::kInstrSize);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch+nop instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch it
- // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
- // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
- // incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
- __ nop();
- __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
- __ nop();
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address = a0 == regs_.address() ? regs_.scratch0() : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != a0);
- __ Move(address, regs_.address());
- __ Move(a0, regs_.object());
- __ Move(a1, address);
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->push(ra);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(ra);
@@ -1238,7 +673,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
@@ -1464,7 +899,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
@@ -1546,7 +981,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
@@ -1589,10 +1024,12 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(
- MacroAssembler* masm, Register function_address,
- ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
- MemOperand return_value_operand, MemOperand* context_restore_operand) {
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int32_t stack_space_offset,
+ MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@@ -1676,10 +1113,6 @@ static void CallApiFunctionAndReturn(
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ lw(cp, *context_restore_operand);
- }
if (stack_space_offset != kInvalidStackOffset) {
// ExitFrame contains four MIPS argument slots after DirectCEntryStub call
// so this must be accounted for.
@@ -1687,7 +1120,7 @@ static void CallApiFunctionAndReturn(
} else {
__ li(s0, Operand(stack_space));
}
- __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
+ __ LeaveExitFrame(false, s0, NO_EMIT_RETURN,
stack_space_offset != kInvalidStackOffset);
// Check if the function scheduled an exception.
@@ -1717,7 +1150,6 @@ static void CallApiFunctionAndReturn(
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- a0 : callee
// -- t0 : call_data
// -- a2 : holder
// -- a1 : api_function_address
@@ -1727,21 +1159,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1)* 4] : first argument
// -- sp[argc * 4] : receiver
- // -- sp[(argc + 1)* 4] : accessor_holder
// -----------------------------------
- Register callee = a0;
Register call_data = t0;
Register holder = a2;
Register api_function_address = a1;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1751,8 +1178,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // Save context, callee and call data.
- __ Push(context, callee, call_data);
+ // call data.
+ __ Push(call_data);
Register scratch = call_data;
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
@@ -1762,38 +1189,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Push isolate and holder.
__ Push(scratch, holder);
- // Enter a new context
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 4] : new_target
- // -- sp[FCA::kArgsLength * 4] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 4] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ lw(accessor_holder,
- MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ lw(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ lbu(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch2, scratch2, Operand(1 << Map::kIsConstructor));
- __ Branch(&skip_looking_for_constructor, ne, scratch2, Operand(zero_reg));
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ lw(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee.
- __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
__ mov(scratch, sp);
@@ -1821,22 +1216,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument.
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
// TODO(adamk): Why are we clobbering this immediately?
const int32_t stack_space_offset = kInvalidStackOffset;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_offset, return_value_operand,
- &context_restore_operand);
+ stack_space_offset, return_value_operand);
}
@@ -1874,7 +1261,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
__ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
- DCHECK(Smi::kZero == nullptr);
+ DCHECK_NULL(Smi::kZero);
__ sw(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
__ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
@@ -1908,7 +1295,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kInvalidStackOffset,
- return_value_operand, NULL);
+ return_value_operand);
}
#undef __
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index f0a365bd6b..95a253c20e 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -8,215 +8,6 @@
namespace v8 {
namespace internal {
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in v0.
- static void GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4);
-
- // Compares two flat one-byte strings for equality and returns result in v0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Register scratch3,
- Label* chars_not_equal);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class StoreRegistersStateStub: public PlatformCodeStub {
- public:
- explicit StoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
-};
-
-
-class RestoreRegistersStateStub: public PlatformCodeStub {
- public:
- explicit RestoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- DCHECK(Assembler::IsBne(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- masm->MultiPushFPU(kCallerSavedFPU);
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- masm->MultiPopFPU(kCallerSavedFPU);
- }
- masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 5> {};
- class ValueBits: public BitField<int, 5, 5> {};
- class AddressBits: public BitField<int, 10, 5> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
@@ -228,53 +19,12 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 791f95f7c7..6205bcd202 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips/codegen-mips.h"
-
#if V8_TARGET_ARCH_MIPS
#include <memory>
@@ -15,26 +13,26 @@
namespace v8 {
namespace internal {
-
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
+
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
- if (buffer == nullptr) return stub;
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ if (buffer == nullptr) return nullptr;
- // This code assumes that cache lines are 32 bytes and if the cache line is
- // larger it will not work correctly.
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
+ // This code assumes that cache lines are 32 bytes and if the cache line is
+ // larger it will not work correctly.
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
@@ -546,8 +544,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -557,12 +556,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
@@ -574,116 +573,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ And(at, result, Operand(kIsIndirectStringMask));
- __ Branch(&check_sequential, eq, at, Operand(zero_reg));
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ And(at, result, Operand(kStringRepresentationMask));
- __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
- __ Branch(&thin_string, eq, at, Operand(kThinStringTag));
-
- // Handle slices.
- __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ sra(at, result, kSmiTagSize);
- __ Addu(index, index, at);
- __ jmp(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ lw(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(at, Heap::kempty_stringRootIndex);
- __ Branch(call_runtime, ne, result, Operand(at));
- // Get the first of the two strings and load its instance type.
- __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ jmp(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(at, result, Operand(kStringRepresentationMask));
- __ Branch(&external_string, ne, at, Operand(zero_reg));
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Addu(string,
- string,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ And(at, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound,
- at, Operand(zero_reg));
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(at, result, Operand(kShortExternalStringMask));
- __ Branch(call_runtime, ne, at, Operand(zero_reg));
- __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(at, result, Operand(kStringEncodingMask));
- __ Branch(&one_byte, ne, at, Operand(zero_reg));
- // Two-byte string.
- __ Lsa(at, string, index, 1);
- __ lhu(result, MemOperand(at));
- __ jmp(&done);
- __ bind(&one_byte);
- // One_byte string.
- __ Addu(at, string, index);
- __ lbu(result, MemOperand(at));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
deleted file mode 100644
index 48853de659..0000000000
--- a/deps/v8/src/mips/codegen-mips.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_H_
-#define V8_MIPS_CODEGEN_MIPS_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 46fffbbafd..1955d593ed 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -36,13 +36,11 @@ const char* Registers::names_[kNumSimuRegisters] = {
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
- {0, "zero"},
- {23, "cp"},
- {30, "s8"},
- {30, "s8_fp"},
- {kInvalidRegister, NULL}
-};
-
+ {0, "zero"},
+ {23, "cp"},
+ {30, "s8"},
+ {30, "s8_fp"},
+ {kInvalidRegister, nullptr}};
const char* Registers::Name(int reg) {
const char* result;
@@ -86,9 +84,7 @@ const char* FPURegisters::names_[kNumFPURegisters] = {
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
- {kInvalidRegister, NULL}
-};
-
+ {kInvalidRegister, nullptr}};
const char* FPURegisters::Name(int creg) {
const char* result;
@@ -128,7 +124,7 @@ const char* MSARegisters::names_[kNumMSARegisters] = {
"w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "w31"};
const MSARegisters::RegisterAlias MSARegisters::aliases_[] = {
- {kInvalidRegister, NULL}};
+ {kInvalidRegister, nullptr}};
const char* MSARegisters::Name(int creg) {
const char* result;
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 352dbb1181..565fcd9a68 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -956,7 +956,7 @@ enum SecondaryField : uint32_t {
BIT_DF_w = ((2U << 5) << 16),
BIT_DF_d = ((0U << 6) << 16),
- NULLSF = 0U
+ nullptrSF = 0U
};
enum MSAMinorOpcode : uint32_t {
@@ -1406,22 +1406,22 @@ class InstructionGetters : public T {
}
inline int RdValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int BaseValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kBaseShift + kBaseBits - 1, kBaseShift);
}
inline int SaValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int LsaSaValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift);
}
@@ -1460,7 +1460,7 @@ class InstructionGetters : public T {
}
inline int Bp2Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
}
@@ -1497,7 +1497,7 @@ class InstructionGetters : public T {
}
inline int RdFieldRaw() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->InstructionBits() & kRdFieldMask;
}
@@ -1521,37 +1521,37 @@ class InstructionGetters : public T {
case REGIMM:
return RtValue();
default:
- return NULLSF;
+ return nullptrSF;
}
}
inline int32_t ImmValue(int bits) const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(bits - 1, 0);
}
inline int32_t Imm9Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm9Shift + kImm9Bits - 1, kImm9Shift);
}
inline int32_t Imm16Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm18Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
}
inline int32_t Imm19Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
}
inline int32_t Imm21Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
@@ -1562,27 +1562,27 @@ class InstructionGetters : public T {
}
inline int32_t MsaImm8Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImm8Shift + kMsaImm8Bits - 1, kMsaImm8Shift);
}
inline int32_t MsaImm5Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImm5Shift + kMsaImm5Bits - 1, kMsaImm5Shift);
}
inline int32_t MsaImm10Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImm10Shift + kMsaImm10Bits - 1, kMsaImm10Shift);
}
inline int32_t MsaImmMI10Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImmMI10Shift + kMsaImmMI10Bits - 1, kMsaImmMI10Shift);
}
inline int32_t MsaBitDf() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
int32_t df_m = this->Bits(22, 16);
if (((df_m >> 6) & 1U) == 0) {
return 3;
@@ -1598,7 +1598,7 @@ class InstructionGetters : public T {
}
inline int32_t MsaBitMValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(16 + this->MsaBitDf() + 3, 16);
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 616224053c..078440431b 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -104,7 +103,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 21b46bbac4..420453aad0 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -648,7 +648,7 @@ void Decoder::PrintInstructionName(Instruction* instr) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'r');
+ DCHECK_EQ(format[0], 'r');
if (format[1] == 's') { // 'rs: Rs register.
int reg = instr->RsValue();
PrintRegister(reg);
@@ -669,7 +669,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'f');
+ DCHECK_EQ(format[0], 'f');
if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) {
if (format[1] == 's') { // 'fs: fs register.
int reg = instr->FsValue();
@@ -713,7 +713,7 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
// Handle all MSARegister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatMSARegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'w');
+ DCHECK_EQ(format[0], 'w');
if (format[1] == 's') {
int reg = instr->WsValue();
PrintMSARegister(reg);
@@ -1848,7 +1848,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- DCHECK(rt_reg > 0);
+ DCHECK_GT(rt_reg, 0);
if (rs_reg == 0) {
Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
@@ -1865,7 +1865,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- DCHECK(rt_reg > 0);
+ DCHECK_GT(rt_reg, 0);
if (rs_reg == 0) {
Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
diff --git a/deps/v8/src/mips/frame-constants-mips.cc b/deps/v8/src/mips/frame-constants-mips.cc
index 73072a212f..fde4306f62 100644
--- a/deps/v8/src/mips/frame-constants-mips.cc
+++ b/deps/v8/src/mips/frame-constants-mips.cc
@@ -22,6 +22,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 92e0e958b7..4d8b9966fa 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -56,9 +56,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return t0; }
const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return t1; }
-const Register StringCompareDescriptor::LeftRegister() { return a1; }
-const Register StringCompareDescriptor::RightRegister() { return a0; }
-
const Register ApiGetterDescriptor::HolderRegister() { return a0; }
const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
@@ -76,7 +73,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a2, a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
// static
@@ -85,13 +82,13 @@ const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
@@ -202,7 +199,7 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
@@ -216,7 +213,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -226,7 +223,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// a1 -- function
// a2 -- allocation site with elements kind
Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -236,7 +233,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// a1 -- function
// a2 -- allocation site with elements kind
Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -249,20 +246,20 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
@@ -279,10 +276,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a0, // callee
- t0, // call_data
- a2, // holder
- a1, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ t0, // call_data
+ a2, // holder
+ a1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -331,8 +328,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
v0, // the value to pass to the generator
- a1, // the JSGeneratorObject to resume
- a2 // the resume mode (tagged)
+ a1 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index caf6b85cc7..de5de02f09 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -10,7 +10,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
@@ -153,7 +153,7 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
if (num_unsaved > 0) {
Subu(sp, sp, Operand(num_unsaved * kPointerSize));
}
@@ -176,15 +176,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- DCHECK(cc == eq || cc == ne);
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
-}
-
-
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@@ -231,7 +222,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -242,7 +233,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -336,13 +327,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
@@ -366,39 +351,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch,
- SaveFPRegsMode fp_mode) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- li(t8, Operand(store_buffer));
- lw(scratch, MemOperand(t8));
- // Store pointer to buffer and increment buffer top.
- sw(address, MemOperand(scratch));
- Addu(scratch, scratch, kPointerSize);
- // Write back new top of buffer.
- sw(scratch, MemOperand(t8));
- // Call stub on end of buffer.
- // Check for end of buffer.
- And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
- Ret(ne, t8, Operand(zero_reg));
- push(ra);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
- CallStub(&store_buffer_overflow);
- pop(ra);
- bind(&done);
- Ret();
-}
-
-
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -1564,8 +1516,8 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(pos + size < 33);
+ DCHECK_LT(pos, 32);
+ DCHECK_LT(pos + size, 33);
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ext_(rt, rs, pos, size);
@@ -1584,9 +1536,9 @@ void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(pos + size <= 32);
- DCHECK(size != 0);
+ DCHECK_LT(pos, 32);
+ DCHECK_LE(pos + size, 32);
+ DCHECK_NE(size, 0);
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ins_(rt, rs, pos, size);
@@ -1810,7 +1762,7 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF(&simple_convert, NULL, lt, fd, scratch);
+ BranchF(&simple_convert, nullptr, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -1844,7 +1796,7 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF32(&simple_convert, NULL, lt, fd, scratch);
+ BranchF32(&simple_convert, nullptr, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -2285,7 +2237,7 @@ void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
- DCHECK(cc == 0);
+ DCHECK_EQ(cc, 0);
DCHECK(rs != t8 && rd != t8);
Label done;
Register scratch = t8;
@@ -2310,7 +2262,7 @@ void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
- DCHECK(cc == 0);
+ DCHECK_EQ(cc, 0);
DCHECK(rs != t8 && rd != t8);
Label done;
Register scratch = t8;
@@ -2377,7 +2329,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, NULL, eq, double_input, double_scratch);
+ BranchF(&done, nullptr, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
@@ -2456,7 +2408,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
Addu(sp, sp, Operand(kDoubleSize));
pop(ra);
@@ -2980,7 +2932,7 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
}
} else {
- DCHECK(offset == 0);
+ DCHECK_EQ(offset, 0);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
return BranchShortHelperR6(0, L, cond, rs, rt);
} else {
@@ -3335,7 +3287,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
}
} else {
- DCHECK(offset == 0);
+ DCHECK_EQ(offset, 0);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
} else {
@@ -3836,39 +3788,11 @@ void MacroAssembler::PopStackHandler() {
sw(a1, MemOperand(scratch));
}
-
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- Branch(&succeed, eq, scratch, Operand(zero_reg));
- }
- Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
-
- bind(&succeed);
-}
-
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- li(value, Operand(cell));
- lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
void TurboAssembler::MovFromFloatResult(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
@@ -4190,24 +4114,11 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// ---------------------------------------------------------------------------
// Support functions.
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done);
- GetObjectType(result, temp, temp2);
- Branch(&done, ne, temp2, Operand(MAP_TYPE));
- lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- Branch(&loop);
- bind(&done);
-}
-
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
@@ -4555,7 +4466,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
lw(scratch1, MemOperand(scratch2));
@@ -4567,7 +4478,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
lw(scratch1, MemOperand(scratch2));
@@ -4600,7 +4511,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -4631,7 +4542,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
// generated instructions is 10, so we use this as a maximum value.
static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
- DCHECK(abort_instructions <= kExpectedAbortInstructions);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
@@ -4751,7 +4662,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
// The stack must be align to 0 modulo 8 for stores with sdc1.
- DCHECK(kDoubleSize == frame_alignment);
+ DCHECK_EQ(kDoubleSize, frame_alignment);
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
@@ -4768,7 +4679,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve place for the return address, stack space and an optional slot
// (used by the DirectCEntryStub to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
- DCHECK(stack_space >= 0);
+ DCHECK_GE(stack_space, 0);
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
@@ -4783,9 +4694,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
sw(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context, bool do_return,
+ bool do_return,
bool argument_count_is_length) {
// Optionally restore all double registers.
if (save_doubles) {
@@ -4803,11 +4713,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- if (restore_context) {
- li(t8, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- lw(cp, MemOperand(t8));
- }
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ lw(cp, MemOperand(t8));
+
#ifdef DEBUG
li(t8,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@@ -5311,98 +5220,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, t9, Operand(mask_scratch));
- Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
- // Shift left 1 by adding.
- Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
- Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
- And(t8, t9, Operand(mask_scratch));
- Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
- jmp(&other_color);
-
- bind(&word_boundary);
- lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- And(t9, t9, Operand(1));
- Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
- li(t8, Operand(1));
- sllv(mask_reg, t8, mask_reg);
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Register load_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, mask_scratch, load_scratch);
- Branch(value_is_white, eq, t8, Operand(zero_reg));
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- lw(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- lw(dst, FieldMemOperand(dst, offset));
-}
-
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
@@ -5451,49 +5268,6 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
- flush_cache_(flush_cache) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- if (flush_cache_ == FLUSH) {
- Assembler::FlushICache(masm_.isolate(), address_, size_);
- }
-
- // Check that the code was patched as expected.
-
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::ChangeBranchCondition(Instr current_instr,
- uint32_t new_opcode) {
- current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
- masm_.emit(current_instr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 86934ee5a6..52525ad9bc 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -115,7 +115,7 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
- DCHECK(index > kCArgSlotCount);
+ DCHECK_GT(index, kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
@@ -911,18 +911,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
@@ -954,40 +942,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- void IncrementalMarkingRecordWriteHelper(Register object, Register value,
- Register address);
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -1035,7 +989,7 @@ class MacroAssembler : public TurboAssembler {
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool restore_context, bool do_return = NO_EMIT_RETURN,
+ bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
// Make sure the stack is aligned. Only emits code in debug mode.
@@ -1088,22 +1042,10 @@ class MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
void GetObjectType(Register function,
Register map,
Register type_reg);
- // Get value of the weak cell.
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the
- // given miss label is the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1204,15 +1146,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // -------------------------------------------------------------------------
- // String utilities.
-
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template<typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
@@ -1233,18 +1166,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object, Register scratch,
- Condition cond, // ne for new space, eq otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -1253,42 +1174,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
friend class StandardFrame;
};
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- enum FlushICache {
- FLUSH,
- DONT_FLUSH
- };
-
- CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache = FLUSH);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Change the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
- FlushICache flush_cache_; // Whether to flush the I cache after patching.
-};
-
template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 7b2e38cbd5..342f27666d 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -13,6 +13,7 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
+#include "src/macro-assembler.h"
#include "src/mips/constants-mips.h"
#include "src/mips/simulator-mips.h"
#include "src/ostreams.h"
@@ -181,7 +182,7 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
return false;
}
@@ -195,25 +196,25 @@ bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
- sim_->break_pc_ = NULL;
+ sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void MipsDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void MipsDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@@ -368,11 +369,11 @@ void MipsDebugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@@ -489,8 +490,8 @@ void MipsDebugger::Debug() {
PrintF("printobject <value>\n");
}
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -556,8 +557,8 @@ void MipsDebugger::Debug() {
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -614,7 +615,7 @@ void MipsDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
+ if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "flags") == 0) {
@@ -690,8 +691,8 @@ void MipsDebugger::Debug() {
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -784,8 +785,8 @@ void MipsDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
- DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@@ -832,7 +833,7 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::CustomMatcherHashMap::Entry* entry =
i_cache->LookupOrInsert(page, ICacheHash(page));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@@ -843,10 +844,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
- DCHECK(size <= CachePage::kPageSize);
+ DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
- DCHECK((start & CachePage::kLineMask) == 0);
- DCHECK((size & CachePage::kLineMask) == 0);
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -887,7 +888,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
+ if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@@ -898,7 +899,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
pc_modified_ = false;
icount_ = 0;
break_count_ = 0;
- break_pc_ = NULL;
+ break_pc_ = nullptr;
break_instr_ = 0;
// Set up architecture state.
@@ -926,7 +927,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
- last_debugger_input_ = NULL;
+ last_debugger_input_ = nullptr;
}
@@ -947,7 +948,7 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
type_(type),
- next_(NULL) {
+ next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -966,8 +967,11 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
+ return current;
+ }
}
return new Redirection(isolate, external_function, type);
}
@@ -1029,11 +1033,11 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@@ -2066,7 +2070,7 @@ void Simulator::TraceMemWr(int32_t addr, int64_t value, TraceType t) {
int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
if (addr >=0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
@@ -2096,7 +2100,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
@@ -2577,8 +2581,8 @@ bool Simulator::IsStopInstruction(Instruction* instr) {
bool Simulator::IsEnabledStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
- DCHECK(code > kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
return !(watched_stops_[code].count & kStopDisabledBit);
}
@@ -2598,7 +2602,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
@@ -3150,7 +3154,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
(posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
(negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
- DCHECK(result != 0);
+ DCHECK_NE(result, 0);
dResult = bit_cast<double>(result);
SetFPUDoubleResult(fd_reg(), dResult);
@@ -3465,7 +3469,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
(posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
(negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
- DCHECK(result != 0);
+ DCHECK_NE(result, 0);
fResult = bit_cast<float>(result);
SetFPUFloatResult(fd_reg(), fResult);
@@ -3770,7 +3774,7 @@ void Simulator::DecodeTypeRegisterCOP1() {
switch (instr_.RsFieldRaw()) {
case CFC1:
// At the moment only FCSR is supported.
- DCHECK(fs_reg() == kFCSRRegister);
+ DCHECK_EQ(fs_reg(), kFCSRRegister);
SetResult(rt_reg(), FCSR_);
break;
case MFC1:
@@ -3785,7 +3789,7 @@ void Simulator::DecodeTypeRegisterCOP1() {
break;
case CTC1: {
// At the moment only FCSR is supported.
- DCHECK(fs_reg() == kFCSRRegister);
+ DCHECK_EQ(fs_reg(), kFCSRRegister);
int32_t reg = registers_[rt_reg()];
if (IsMipsArchVariant(kMips32r6)) {
FCSR_ = reg | kFCSRNaN2008FlagMask;
@@ -3972,12 +3976,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
case MFHI: // MFHI == CLZ on R6.
if (!IsMipsArchVariant(kMips32r6)) {
- DCHECK(sa() == 0);
+ DCHECK_EQ(sa(), 0);
alu_out = get_register(HI);
} else {
// MIPS spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
- DCHECK(sa() == 1);
+ DCHECK_EQ(sa(), 1);
alu_out = base::bits::CountLeadingZeros32(rs_u());
}
SetResult(rd_reg(), static_cast<int32_t>(alu_out));
@@ -4659,17 +4663,20 @@ void Simulator::DecodeTypeMsaELM() {
int32_t alu_out;
switch (opcode) {
case CTCMSA:
- DCHECK(sa() == kMSACSRRegister);
+ DCHECK_EQ(sa(), kMSACSRRegister);
MSACSR_ = bit_cast<uint32_t>(registers_[rd_reg()]);
TraceRegWr(static_cast<int32_t>(MSACSR_));
break;
case CFCMSA:
- DCHECK(rd_reg() == kMSACSRRegister);
+ DCHECK_EQ(rd_reg(), kMSACSRRegister);
SetResult(sa(), bit_cast<int32_t>(MSACSR_));
break;
- case MOVE_V:
- UNIMPLEMENTED();
- break;
+ case MOVE_V: {
+ msa_reg_t ws;
+ get_msa_register(ws_reg(), &ws);
+ set_msa_register(wd_reg(), &ws);
+ TraceMSARegWr(&ws);
+ } break;
default:
opcode &= kMsaELMMask;
switch (opcode) {
@@ -4678,7 +4685,7 @@ void Simulator::DecodeTypeMsaELM() {
msa_reg_t ws;
switch (DecodeMsaDataFormat()) {
case MSA_BYTE: {
- DCHECK(n < kMSALanesByte);
+ DCHECK_LT(n, kMSALanesByte);
get_msa_register(instr_.WsValue(), ws.b);
alu_out = static_cast<int32_t>(ws.b[n]);
SetResult(wd_reg(),
@@ -4686,7 +4693,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_HALF: {
- DCHECK(n < kMSALanesHalf);
+ DCHECK_LT(n, kMSALanesHalf);
get_msa_register(instr_.WsValue(), ws.h);
alu_out = static_cast<int32_t>(ws.h[n]);
SetResult(wd_reg(),
@@ -4694,7 +4701,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_WORD: {
- DCHECK(n < kMSALanesWord);
+ DCHECK_LT(n, kMSALanesWord);
get_msa_register(instr_.WsValue(), ws.w);
alu_out = static_cast<int32_t>(ws.w[n]);
SetResult(wd_reg(), alu_out);
@@ -4708,7 +4715,7 @@ void Simulator::DecodeTypeMsaELM() {
msa_reg_t wd;
switch (DecodeMsaDataFormat()) {
case MSA_BYTE: {
- DCHECK(n < kMSALanesByte);
+ DCHECK_LT(n, kMSALanesByte);
int32_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.b);
wd.b[n] = rs & 0xFFu;
@@ -4717,7 +4724,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_HALF: {
- DCHECK(n < kMSALanesHalf);
+ DCHECK_LT(n, kMSALanesHalf);
int32_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.h);
wd.h[n] = rs & 0xFFFFu;
@@ -4726,7 +4733,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_WORD: {
- DCHECK(n < kMSALanesWord);
+ DCHECK_LT(n, kMSALanesWord);
int32_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.w);
wd.w[n] = rs;
@@ -4738,7 +4745,50 @@ void Simulator::DecodeTypeMsaELM() {
UNREACHABLE();
}
} break;
- case SLDI:
+ case SLDI: {
+ uint8_t v[32];
+ msa_reg_t ws;
+ msa_reg_t wd;
+ get_msa_register(ws_reg(), &ws);
+ get_msa_register(wd_reg(), &wd);
+#define SLDI_DF(s, k) \
+ for (unsigned i = 0; i < s; i++) { \
+ v[i] = ws.b[s * k + i]; \
+ v[i + s] = wd.b[s * k + i]; \
+ } \
+ for (unsigned i = 0; i < s; i++) { \
+ wd.b[s * k + i] = v[i + n]; \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ DCHECK(n < kMSALanesByte);
+ SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0)
+ break;
+ case MSA_HALF:
+ DCHECK(n < kMSALanesHalf);
+ for (int k = 0; k < 2; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k)
+ }
+ break;
+ case MSA_WORD:
+ DCHECK(n < kMSALanesWord);
+ for (int k = 0; k < 4; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k)
+ }
+ break;
+ case MSA_DWORD:
+ DCHECK(n < kMSALanesDword);
+ for (int k = 0; k < 8; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k)
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
+ } break;
+#undef SLDI_DF
case SPLATI:
case INSVE:
UNIMPLEMENTED();
@@ -4875,6 +4925,7 @@ void Simulator::DecodeTypeMsaBIT() {
default:
UNREACHABLE();
}
+#undef MSA_BIT_DF
}
void Simulator::DecodeTypeMsaMI10() {
@@ -5157,13 +5208,6 @@ T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
case DPSUB_U:
case SLD:
case SPLAT:
- case PCKEV:
- case PCKOD:
- case ILVL:
- case ILVR:
- case ILVEV:
- case ILVOD:
- case VSHF:
UNIMPLEMENTED();
break;
case SRAR: {
@@ -5175,194 +5219,703 @@ T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
int bit = wt_modulo == 0 ? 0 : (wsu >> (wt_modulo - 1)) & 1;
res = static_cast<T>((wsu >> wt_modulo) + bit);
} break;
+ default:
+ UNREACHABLE();
+ }
+ return res;
+}
+
+template <typename T_int, typename T_reg>
+void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt,
+ T_reg wd, const int i, const int num_of_lanes) {
+ T_int *ws_p, *wt_p, *wd_p;
+ ws_p = reinterpret_cast<T_int*>(ws);
+ wt_p = reinterpret_cast<T_int*>(wt);
+ wd_p = reinterpret_cast<T_int*>(wd);
+ switch (opcode) {
+ case PCKEV:
+ wd_p[i] = wt_p[2 * i];
+ wd_p[i + num_of_lanes / 2] = ws_p[2 * i];
+ break;
+ case PCKOD:
+ wd_p[i] = wt_p[2 * i + 1];
+ wd_p[i + num_of_lanes / 2] = ws_p[2 * i + 1];
+ break;
+ case ILVL:
+ wd_p[2 * i] = wt_p[i + num_of_lanes / 2];
+ wd_p[2 * i + 1] = ws_p[i + num_of_lanes / 2];
+ break;
+ case ILVR:
+ wd_p[2 * i] = wt_p[i];
+ wd_p[2 * i + 1] = ws_p[i];
+ break;
+ case ILVEV:
+ wd_p[2 * i] = wt_p[2 * i];
+ wd_p[2 * i + 1] = ws_p[2 * i];
+ break;
+ case ILVOD:
+ wd_p[2 * i] = wt_p[2 * i + 1];
+ wd_p[2 * i + 1] = ws_p[2 * i + 1];
+ break;
+ case VSHF: {
+ const int mask_not_valid = 0xc0;
+ const int mask_6_bits = 0x3f;
+ if ((wd_p[i] & mask_not_valid)) {
+ wd_p[i] = 0;
+ } else {
+ int k = (wd_p[i] & mask_6_bits) % (num_of_lanes * 2);
+ wd_p[i] = k >= num_of_lanes ? ws_p[k - num_of_lanes] : wt_p[k];
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <typename T_int, typename T_smaller_int, typename T_reg>
+void Msa3RInstrHelper_horizontal(const uint32_t opcode, T_reg ws, T_reg wt,
+ T_reg wd, const int i,
+ const int num_of_lanes) {
+ typedef typename std::make_unsigned<T_int>::type T_uint;
+ typedef typename std::make_unsigned<T_smaller_int>::type T_smaller_uint;
+ T_int* wd_p;
+ T_smaller_int *ws_p, *wt_p;
+ ws_p = reinterpret_cast<T_smaller_int*>(ws);
+ wt_p = reinterpret_cast<T_smaller_int*>(wt);
+ wd_p = reinterpret_cast<T_int*>(wd);
+ T_uint* wd_pu;
+ T_smaller_uint *ws_pu, *wt_pu;
+ ws_pu = reinterpret_cast<T_smaller_uint*>(ws);
+ wt_pu = reinterpret_cast<T_smaller_uint*>(wt);
+ wd_pu = reinterpret_cast<T_uint*>(wd);
+ switch (opcode) {
case HADD_S:
+ wd_p[i] =
+ static_cast<T_int>(ws_p[2 * i + 1]) + static_cast<T_int>(wt_p[2 * i]);
+ break;
case HADD_U:
+ wd_pu[i] = static_cast<T_uint>(ws_pu[2 * i + 1]) +
+ static_cast<T_uint>(wt_pu[2 * i]);
+ break;
case HSUB_S:
+ wd_p[i] =
+ static_cast<T_int>(ws_p[2 * i + 1]) - static_cast<T_int>(wt_p[2 * i]);
+ break;
case HSUB_U:
- UNIMPLEMENTED();
+ wd_pu[i] = static_cast<T_uint>(ws_pu[2 * i + 1]) -
+ static_cast<T_uint>(wt_pu[2 * i]);
break;
default:
UNREACHABLE();
}
- return res;
- }
-
- void Simulator::DecodeTypeMsa3R() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
- msa_reg_t ws, wd, wt;
+}
+void Simulator::DecodeTypeMsa3R() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+ msa_reg_t ws, wd, wt;
+ get_msa_register(ws_reg(), &ws);
+ get_msa_register(wt_reg(), &wt);
+ get_msa_register(wd_reg(), &wd);
+ switch (opcode) {
+ case HADD_S:
+ case HADD_U:
+ case HSUB_S:
+ case HSUB_U:
+#define HORIZONTAL_ARITHMETIC_DF(num_of_lanes, int_type, lesser_int_type) \
+ for (int i = 0; i < num_of_lanes; ++i) { \
+ Msa3RInstrHelper_horizontal<int_type, lesser_int_type>( \
+ opcode, &ws, &wt, &wd, i, num_of_lanes); \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ HORIZONTAL_ARITHMETIC_DF(kMSALanesHalf, int16_t, int8_t);
+ break;
+ case MSA_WORD:
+ HORIZONTAL_ARITHMETIC_DF(kMSALanesWord, int32_t, int16_t);
+ break;
+ case MSA_DWORD:
+ HORIZONTAL_ARITHMETIC_DF(kMSALanesDword, int64_t, int32_t);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef HORIZONTAL_ARITHMETIC_DF
+ case VSHF:
+#define VSHF_DF(num_of_lanes, int_type) \
+ for (int i = 0; i < num_of_lanes; ++i) { \
+ Msa3RInstrHelper_shuffle<int_type>(opcode, &ws, &wt, &wd, i, \
+ num_of_lanes); \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ VSHF_DF(kMSALanesByte, int8_t);
+ break;
+ case MSA_HALF:
+ VSHF_DF(kMSALanesHalf, int16_t);
+ break;
+ case MSA_WORD:
+ VSHF_DF(kMSALanesWord, int32_t);
+ break;
+ case MSA_DWORD:
+ VSHF_DF(kMSALanesDword, int64_t);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef VSHF_DF
+ break;
+ case PCKEV:
+ case PCKOD:
+ case ILVL:
+ case ILVR:
+ case ILVEV:
+ case ILVOD:
+#define INTERLEAVE_PACK_DF(num_of_lanes, int_type) \
+ for (int i = 0; i < num_of_lanes / 2; ++i) { \
+ Msa3RInstrHelper_shuffle<int_type>(opcode, &ws, &wt, &wd, i, \
+ num_of_lanes); \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ INTERLEAVE_PACK_DF(kMSALanesByte, int8_t);
+ break;
+ case MSA_HALF:
+ INTERLEAVE_PACK_DF(kMSALanesHalf, int16_t);
+ break;
+ case MSA_WORD:
+ INTERLEAVE_PACK_DF(kMSALanesWord, int32_t);
+ break;
+ case MSA_DWORD:
+ INTERLEAVE_PACK_DF(kMSALanesDword, int64_t);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef INTERLEAVE_PACK_DF
+ default:
#define MSA_3R_DF(elem, num_of_lanes) \
- get_msa_register(instr_.WdValue(), wd.elem); \
- get_msa_register(instr_.WsValue(), ws.elem); \
- get_msa_register(instr_.WtValue(), wt.elem); \
for (int i = 0; i < num_of_lanes; i++) { \
wd.elem[i] = Msa3RInstrHelper(opcode, wd.elem[i], ws.elem[i], wt.elem[i]); \
- } \
- set_msa_register(instr_.WdValue(), wd.elem); \
- TraceMSARegWr(wd.elem);
+ }
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE:
- MSA_3R_DF(b, kMSALanesByte);
- break;
- case MSA_HALF:
- MSA_3R_DF(h, kMSALanesHalf);
- break;
- case MSA_WORD:
- MSA_3R_DF(w, kMSALanesWord);
- break;
- case MSA_DWORD:
- MSA_3R_DF(d, kMSALanesDword);
- break;
- default:
- UNREACHABLE();
- }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ MSA_3R_DF(b, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ MSA_3R_DF(h, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ MSA_3R_DF(w, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ MSA_3R_DF(d, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
#undef MSA_3R_DF
+ break;
}
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
+}
- void Simulator::DecodeTypeMsa3RF() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
- switch (opcode) {
- case FCAF:
- case FCUN:
- case FCEQ:
- case FCUEQ:
- case FCLT:
- case FCULT:
- case FCLE:
- case FCULE:
- case FSAF:
- case FSUN:
- case FSEQ:
- case FSUEQ:
- case FSLT:
- case FSULT:
- case FSLE:
- case FSULE:
- case FADD:
- case FSUB:
- case FMUL:
- case FDIV:
- case FMADD:
- case FMSUB:
- case FEXP2:
- case FEXDO:
- case FTQ:
- case FMIN:
- case FMIN_A:
- case FMAX:
- case FMAX_A:
- case FCOR:
- case FCUNE:
- case FCNE:
- case MUL_Q:
- case MADD_Q:
- case MSUB_Q:
- case FSOR:
- case FSUNE:
- case FSNE:
- case MULR_Q:
- case MADDR_Q:
- case MSUBR_Q:
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
- }
+template <typename T_int, typename T_fp, typename T_reg>
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+ const T_int all_ones = static_cast<T_int>(-1);
+ const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
+ const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
+ switch (opcode) {
+ case FCUN: {
+ if (std::isnan(s_element) || std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCEQ: {
+ if (s_element != t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCUEQ: {
+ if (s_element == t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCLT: {
+ if (s_element >= t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCULT: {
+ if (s_element < t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCLE: {
+ if (s_element > t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCULE: {
+ if (s_element <= t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCOR: {
+ if (std::isnan(s_element) || std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCUNE: {
+ if (s_element != t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCNE: {
+ if (s_element == t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FADD:
+ wd = bit_cast<T_int>(s_element + t_element);
+ break;
+ case FSUB:
+ wd = bit_cast<T_int>(s_element - t_element);
+ break;
+ case FMUL:
+ wd = bit_cast<T_int>(s_element * t_element);
+ break;
+ case FDIV: {
+ if (t_element == 0) {
+ wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ wd = bit_cast<T_int>(s_element / t_element);
+ }
+ } break;
+ case FMADD:
+ wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ break;
+ case FMSUB:
+ wd = bit_cast<T_int>(
+ std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(&wd)));
+ break;
+ case FEXP2:
+ wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ break;
+ case FMIN:
+ wd = bit_cast<T_int>(std::min(s_element, t_element));
+ break;
+ case FMAX:
+ wd = bit_cast<T_int>(std::max(s_element, t_element));
+ break;
+ case FMIN_A: {
+ wd = bit_cast<T_int>(
+ std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
+ } break;
+ case FMAX_A: {
+ wd = bit_cast<T_int>(
+ std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
+ } break;
+ case FSOR:
+ case FSUNE:
+ case FSNE:
+ case FSAF:
+ case FSUN:
+ case FSEQ:
+ case FSUEQ:
+ case FSLT:
+ case FSULT:
+ case FSLE:
+ case FSULE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
}
+}
- void Simulator::DecodeTypeMsaVec() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
- msa_reg_t wd, ws, wt;
-
- get_msa_register(instr_.WsValue(), ws.w);
- get_msa_register(instr_.WtValue(), wt.w);
- if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) {
- get_msa_register(instr_.WdValue(), wd.w);
- }
+template <typename T_int, typename T_int_dbl, typename T_reg>
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+ // typedef typename std::make_unsigned<T_int>::type T_uint;
+ typedef typename std::make_unsigned<T_int_dbl>::type T_uint_dbl;
+ const T_int max_int = std::numeric_limits<T_int>::max();
+ const T_int min_int = std::numeric_limits<T_int>::min();
+ const int shift = kBitsPerByte * sizeof(T_int) - 1;
+ const T_int_dbl reg_s = ws;
+ const T_int_dbl reg_t = wt;
+ T_int_dbl product, result;
+ product = reg_s * reg_t;
+ switch (opcode) {
+ case MUL_Q: {
+ const T_int_dbl min_fix_dbl =
+ bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
+ const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
+ if (product == min_fix_dbl) {
+ product = max_fix_dbl;
+ }
+ wd = static_cast<T_int>(product >> shift);
+ } break;
+ case MADD_Q: {
+ result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ case MSUB_Q: {
+ result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ case MULR_Q: {
+ const T_int_dbl min_fix_dbl =
+ bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
+ const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
+ if (product == min_fix_dbl) {
+ wd = static_cast<T_int>(max_fix_dbl >> shift);
+ break;
+ }
+ wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ } break;
+ case MADDR_Q: {
+ result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ (1 << (shift - 1))) >>
+ shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ case MSUBR_Q: {
+ result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ (1 << (shift - 1))) >>
+ shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+}
- for (int i = 0; i < kMSALanesWord; i++) {
- switch (opcode) {
- case AND_V:
- wd.w[i] = ws.w[i] & wt.w[i];
+void Simulator::DecodeTypeMsa3RF() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ msa_reg_t wd, ws, wt;
+ if (opcode != FCAF) {
+ get_msa_register(ws_reg(), &ws);
+ get_msa_register(wt_reg(), &wt);
+ }
+ switch (opcode) {
+ case FCAF:
+ wd.d[0] = 0;
+ wd.d[1] = 0;
+ break;
+ case FEXDO:
+#define PACK_FLOAT16(sign, exp, frac) \
+ static_cast<uint16_t>(((sign) << 15) + ((exp) << 10) + (frac))
+#define FEXDO_DF(source, dst) \
+ do { \
+ element = source; \
+ aSign = element >> 31; \
+ aExp = element >> 23 & 0xFF; \
+ aFrac = element & 0x007FFFFF; \
+ if (aExp == 0xFF) { \
+ if (aFrac) { \
+ /* Input is a NaN */ \
+ dst = 0x7DFFU; \
+ break; \
+ } \
+ /* Infinity */ \
+ dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ break; \
+ } else if (aExp == 0 && aFrac == 0) { \
+ dst = PACK_FLOAT16(aSign, 0, 0); \
+ break; \
+ } else { \
+ int maxexp = 29; \
+ uint32_t mask; \
+ uint32_t increment; \
+ bool rounding_bumps_exp; \
+ aFrac |= 0x00800000; \
+ aExp -= 0x71; \
+ if (aExp < 1) { \
+ /* Will be denormal in halfprec */ \
+ mask = 0x00ffffff; \
+ if (aExp >= -11) { \
+ mask >>= 11 + aExp; \
+ } \
+ } else { \
+ /* Normal number in halfprec */ \
+ mask = 0x00001fff; \
+ } \
+ switch (MSACSR_ & 3) { \
+ case kRoundToNearest: \
+ increment = (mask + 1) >> 1; \
+ if ((aFrac & mask) == increment) { \
+ increment = aFrac & (increment << 1); \
+ } \
+ break; \
+ case kRoundToPlusInf: \
+ increment = aSign ? 0 : mask; \
+ break; \
+ case kRoundToMinusInf: \
+ increment = aSign ? mask : 0; \
+ break; \
+ case kRoundToZero: \
+ increment = 0; \
+ break; \
+ } \
+ rounding_bumps_exp = (aFrac + increment >= 0x01000000); \
+ if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \
+ dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ break; \
+ } \
+ aFrac += increment; \
+ if (rounding_bumps_exp) { \
+ aFrac >>= 1; \
+ aExp++; \
+ } \
+ if (aExp < -10) { \
+ dst = PACK_FLOAT16(aSign, 0, 0); \
+ break; \
+ } \
+ if (aExp < 0) { \
+ aFrac >>= -aExp; \
+ aExp = 0; \
+ } \
+ dst = PACK_FLOAT16(aSign, aExp, aFrac >> 13); \
+ } \
+ } while (0);
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ uint_fast32_t element;
+ uint_fast32_t aSign, aFrac;
+ int_fast32_t aExp;
+ FEXDO_DF(ws.uw[i], wd.uh[i + kMSALanesHalf / 2])
+ FEXDO_DF(wt.uw[i], wd.uh[i])
+ }
break;
- case OR_V:
- wd.w[i] = ws.w[i] | wt.w[i];
+ case MSA_WORD:
+ for (int i = 0; i < kMSALanesDword; i++) {
+ wd.w[i + kMSALanesWord / 2] = bit_cast<int32_t>(
+ static_cast<float>(bit_cast<double>(ws.d[i])));
+ wd.w[i] = bit_cast<int32_t>(
+ static_cast<float>(bit_cast<double>(wt.d[i])));
+ }
break;
- case NOR_V:
- wd.w[i] = ~(ws.w[i] | wt.w[i]);
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef PACK_FLOAT16
+#undef FEXDO_DF
+ case FTQ:
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, element, fixed_point); \
+ dst = fixed_point; \
+ }
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ float element;
+ FTQ_DF(ws.w[i], wd.h[i + kMSALanesHalf / 2], float, int16_t)
+ FTQ_DF(wt.w[i], wd.h[i], float, int16_t)
+ }
break;
- case XOR_V:
- wd.w[i] = ws.w[i] ^ wt.w[i];
+ case MSA_WORD:
+ double element;
+ for (int i = 0; i < kMSALanesDword; i++) {
+ FTQ_DF(ws.d[i], wd.w[i + kMSALanesWord / 2], double, int32_t)
+ FTQ_DF(wt.d[i], wd.w[i], double, int32_t)
+ }
break;
- case BMNZ_V:
- wd.w[i] = (wt.w[i] & ws.w[i]) | (~wt.w[i] & wd.w[i]);
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef FTQ_DF
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+ }
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+ }
+ case MADD_Q:
+ case MSUB_Q:
+ case MADDR_Q:
+ case MSUBR_Q:
+ get_msa_register(wd_reg(), &wd); // fall-through
+ case MUL_Q:
+ case MULR_Q:
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ MSA_3RF_DF2(int16_t, int32_t, kMSALanesHalf, ws.h[i], wt.h[i],
+ wd.h[i])
break;
- case BMZ_V:
- wd.w[i] = (~wt.w[i] & ws.w[i]) | (wt.w[i] & wd.w[i]);
+ case MSA_WORD:
+ MSA_3RF_DF2(int32_t, int64_t, kMSALanesWord, ws.w[i], wt.w[i],
+ wd.w[i])
break;
- case BSEL_V:
- wd.w[i] = (~wd.w[i] & ws.w[i]) | (wd.w[i] & wt.w[i]);
+ default:
+ UNREACHABLE();
+ }
+ break;
+ default:
+ if (opcode == FMADD || opcode == FMSUB) {
+ get_msa_register(wd_reg(), &wd);
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_WORD:
+ MSA_3RF_DF(int32_t, float, kMSALanesWord, ws.w[i], wt.w[i], wd.w[i])
+ break;
+ case MSA_DWORD:
+ MSA_3RF_DF(int64_t, double, kMSALanesDword, ws.d[i], wt.d[i], wd.d[i])
break;
default:
UNREACHABLE();
}
- }
- set_msa_register(instr_.WdValue(), wd.w);
- TraceMSARegWr(wd.d);
+ break;
+#undef MSA_3RF_DF
+#undef MSA_3RF_DF2
+ }
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
+}
+
+void Simulator::DecodeTypeMsaVec() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
+ msa_reg_t wd, ws, wt;
+
+ get_msa_register(instr_.WsValue(), ws.w);
+ get_msa_register(instr_.WtValue(), wt.w);
+ if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) {
+ get_msa_register(instr_.WdValue(), wd.w);
}
- void Simulator::DecodeTypeMsa2R() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
- msa_reg_t wd, ws;
+ for (int i = 0; i < kMSALanesWord; i++) {
switch (opcode) {
- case FILL:
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE: {
- int32_t rs = get_register(instr_.WsValue());
- for (int i = 0; i < kMSALanesByte; i++) {
- wd.b[i] = rs & 0xFFu;
- }
- set_msa_register(instr_.WdValue(), wd.b);
- TraceMSARegWr(wd.b);
- break;
+ case AND_V:
+ wd.w[i] = ws.w[i] & wt.w[i];
+ break;
+ case OR_V:
+ wd.w[i] = ws.w[i] | wt.w[i];
+ break;
+ case NOR_V:
+ wd.w[i] = ~(ws.w[i] | wt.w[i]);
+ break;
+ case XOR_V:
+ wd.w[i] = ws.w[i] ^ wt.w[i];
+ break;
+ case BMNZ_V:
+ wd.w[i] = (wt.w[i] & ws.w[i]) | (~wt.w[i] & wd.w[i]);
+ break;
+ case BMZ_V:
+ wd.w[i] = (~wt.w[i] & ws.w[i]) | (wt.w[i] & wd.w[i]);
+ break;
+ case BSEL_V:
+ wd.w[i] = (~wd.w[i] & ws.w[i]) | (wd.w[i] & wt.w[i]);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.d);
+}
+
+void Simulator::DecodeTypeMsa2R() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
+ msa_reg_t wd, ws;
+ switch (opcode) {
+ case FILL:
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = rs & 0xFFu;
}
- case MSA_HALF: {
- int32_t rs = get_register(instr_.WsValue());
- for (int i = 0; i < kMSALanesHalf; i++) {
- wd.h[i] = rs & 0xFFFFu;
- }
- set_msa_register(instr_.WdValue(), wd.h);
- TraceMSARegWr(wd.h);
- break;
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesHalf; i++) {
+ wd.h[i] = rs & 0xFFFFu;
}
- case MSA_WORD: {
- int32_t rs = get_register(instr_.WsValue());
- for (int i = 0; i < kMSALanesWord; i++) {
- wd.w[i] = rs;
- }
- set_msa_register(instr_.WdValue(), wd.w);
- TraceMSARegWr(wd.w);
- break;
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesWord; i++) {
+ wd.w[i] = rs;
}
- default:
- UNREACHABLE();
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
}
- break;
- case PCNT:
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PCNT:
#define PCNT_DF(elem, num_of_lanes) \
get_msa_register(instr_.WsValue(), ws.elem); \
for (int i = 0; i < num_of_lanes; i++) { \
uint64_t u64elem = static_cast<uint64_t>(ws.elem[i]); \
- wd.elem[i] = base::bits::CountPopulation64(u64elem); \
+ wd.elem[i] = base::bits::CountPopulation(u64elem); \
} \
set_msa_register(instr_.WdValue(), wd.elem); \
TraceMSARegWr(wd.elem)
@@ -5534,8 +6087,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
dst = 0;
- } else if (element > max_int || element < min_int) {
- dst = element > max_int ? max_int : min_int;
+ } else if (element >= max_int || element <= min_int) {
+ dst = element >= max_int ? max_int : min_int;
} else {
dst = static_cast<T_int>(std::trunc(element));
}
@@ -5546,8 +6099,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
dst = 0;
- } else if (element > max_int || element < 0) {
- dst = element > max_int ? max_int : 0;
+ } else if (element >= max_int || element <= 0) {
+ dst = element >= max_int ? max_int : 0;
} else {
dst = static_cast<T_uint>(std::trunc(element));
}
@@ -5656,8 +6209,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
return 0;
}
-template <typename T_int, typename T_fp, typename T_reg, typename T_i>
-T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, T_i i) {
+template <typename T_int, typename T_fp, typename T_reg>
+T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
switch (opcode) {
#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
@@ -5878,6 +6431,30 @@ void Simulator::DecodeTypeImmediate() {
}
};
+ auto BranchHelper_MSA = [this, &next_pc, imm16,
+ &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int32_t current_pc = get_pc();
+ const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte;
+ if (do_branch) {
+ if (FLAG_debug_code) {
+ int16_t bits = imm16 & 0xfc;
+ if (imm16 >= 0) {
+ CHECK_EQ(bits, 0);
+ } else {
+ CHECK_EQ(bits ^ 0xfc, 0);
+ }
+ }
+ // jump range :[pc + kInstrSize - 512 * kInstrSize,
+ // pc + kInstrSize + 511 * kInstrSize]
+ int16_t offset = static_cast<int16_t>(imm16 << (bitsIn16Int - 10)) >>
+ (bitsIn16Int - 12);
+ next_pc = current_pc + offset + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
int32_t current_pc = get_pc();
CheckForbiddenSlot(current_pc);
@@ -5920,18 +6497,66 @@ void Simulator::DecodeTypeImmediate() {
case BC1NEZ:
BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
- case BZ_V:
+ case BZ_V: {
+ msa_reg_t wt;
+ get_msa_register(wt_reg(), &wt);
+ BranchHelper_MSA(wt.d[0] == 0 && wt.d[1] == 0);
+ } break;
+#define BZ_DF(witdh, lanes) \
+ { \
+ msa_reg_t wt; \
+ get_msa_register(wt_reg(), &wt); \
+ int i; \
+ for (i = 0; i < lanes; ++i) { \
+ if (wt.witdh[i] == 0) { \
+ break; \
+ } \
+ } \
+ BranchHelper_MSA(i != lanes); \
+ }
case BZ_B:
+ BZ_DF(b, kMSALanesByte)
+ break;
case BZ_H:
+ BZ_DF(h, kMSALanesHalf)
+ break;
case BZ_W:
+ BZ_DF(w, kMSALanesWord)
+ break;
case BZ_D:
- case BNZ_V:
+ BZ_DF(d, kMSALanesDword)
+ break;
+#undef BZ_DF
+ case BNZ_V: {
+ msa_reg_t wt;
+ get_msa_register(wt_reg(), &wt);
+ BranchHelper_MSA(wt.d[0] != 0 || wt.d[1] != 0);
+ } break;
+#define BNZ_DF(witdh, lanes) \
+ { \
+ msa_reg_t wt; \
+ get_msa_register(wt_reg(), &wt); \
+ int i; \
+ for (i = 0; i < lanes; ++i) { \
+ if (wt.witdh[i] == 0) { \
+ break; \
+ } \
+ } \
+ BranchHelper_MSA(i == lanes); \
+ }
case BNZ_B:
+ BNZ_DF(b, kMSALanesByte)
+ break;
case BNZ_H:
+ BNZ_DF(h, kMSALanesHalf)
+ break;
case BNZ_W:
+ BNZ_DF(w, kMSALanesWord)
+ break;
case BNZ_D:
- UNIMPLEMENTED();
+ BNZ_DF(d, kMSALanesDword)
break;
+#undef BNZ_DF
default:
UNREACHABLE();
}
@@ -6532,7 +7157,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up arguments.
// First four arguments passed in registers.
- DCHECK(argument_count >= 4);
+ DCHECK_GE(argument_count, 4);
set_register(a0, va_arg(parameters, int32_t));
set_register(a1, va_arg(parameters, int32_t));
set_register(a2, va_arg(parameters, int32_t));
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 3f8fbfc7c8..3fce7dd688 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index dea9906e49..2cb3374f8e 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -117,14 +117,14 @@ int RelocInfo::target_address_size() {
}
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -187,8 +187,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
@@ -243,11 +242,11 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 46d4fca740..5099ec1db9 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -189,6 +189,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
@@ -624,7 +635,7 @@ bool Assembler::IsOri(Instr instr) {
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
- DCHECK(type < 32);
+ DCHECK_LT(type, 32);
uint32_t opcode = GetOpcodeField(instr);
uint32_t function = GetFunctionField(instr);
uint32_t rt = GetRt(instr);
@@ -804,7 +815,7 @@ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
int32_t bits = OffsetSizeInBits(instr);
int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
imm >>= 2;
const int32_t mask = (1 << bits) - 1;
@@ -841,7 +852,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
DCHECK(IsOri(instr_ori2));
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
@@ -855,7 +866,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_ori2 | (imm & kImm16Mask));
} else if (IsJ(instr) || IsJal(instr)) {
int32_t imm28 = target_pos - pos;
- DCHECK((imm28 & 3) == 0);
+ DCHECK_EQ(imm28 & 3, 0);
uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
DCHECK(is_uint26(imm26));
@@ -865,7 +876,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_at_put(pos, mark | (imm26 & kImm26Mask));
} else {
int32_t imm28 = target_pos - pos;
- DCHECK((imm28 & 3) == 0);
+ DCHECK_EQ(imm28 & 3, 0);
uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
DCHECK(is_uint26(imm26));
@@ -876,14 +887,14 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
}
}
-
-void Assembler::print(Label* L) {
+void Assembler::print(const Label* L) {
if (L->is_unused()) {
PrintF("unused label\n");
} else if (L->is_bound()) {
PrintF("bound label to %d\n", L->pos());
} else if (L->is_linked()) {
- Label l = *L;
+ Label l;
+ l.link_to(L->pos());
PrintF("unbound label");
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
@@ -927,7 +938,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (dist > branch_offset) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
}
CHECK((trampoline_pos - fixup_pos) <= branch_offset);
target_at_put(fixup_pos, trampoline_pos, false);
@@ -962,7 +973,7 @@ void Assembler::next(Label* L, bool is_internal) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK(link >= 0);
+ DCHECK_GE(link, 0);
L->link_to(link);
}
}
@@ -1240,7 +1251,7 @@ void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
MSARegister wt, MSARegister ws, MSARegister wd) {
DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
- DCHECK(df < 2);
+ DCHECK_LT(df, 2);
Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
(ws.code() << kWsShift) | (wd.code() << kWdShift);
emit(instr);
@@ -1325,7 +1336,7 @@ uint64_t Assembler::jump_address(Label* L) {
}
}
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
return imm;
}
@@ -1347,7 +1358,7 @@ uint64_t Assembler::jump_offset(Label* L) {
}
}
int64_t imm = target_pos - (pc_offset() + pad);
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
return static_cast<uint64_t>(imm);
}
@@ -1375,7 +1386,7 @@ int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
DCHECK(is_intn(offset, bits + 2));
- DCHECK((offset & 3) == 0);
+ DCHECK_EQ(offset & 3, 0);
return offset;
}
@@ -1390,7 +1401,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
int32_t imm18 = target_pos - at_offset;
- DCHECK((imm18 & 3) == 0);
+ DCHECK_EQ(imm18 & 3, 0);
int32_t imm16 = imm18 >> 2;
DCHECK(is_int16(imm16));
instr_at_put(at_offset, (imm16 & kImm16Mask));
@@ -1420,13 +1431,13 @@ void Assembler::bal(int16_t offset) {
void Assembler::bc(int32_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::balc(int32_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1446,14 +1457,14 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
@@ -1462,7 +1473,7 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
@@ -1487,7 +1498,7 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
@@ -1502,7 +1513,7 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
@@ -1510,14 +1521,14 @@ void Assembler::blezc(Register rt, int16_t offset) {
void Assembler::bltzc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
@@ -1526,7 +1537,7 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
@@ -1558,7 +1569,7 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
if (rs.code() >= rt.code()) {
GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
} else {
@@ -1568,7 +1579,7 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
if (rs.code() >= rt.code()) {
GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
} else {
@@ -1578,7 +1589,7 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
DCHECK(rt != ra);
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
@@ -1587,7 +1598,7 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
DCHECK(rt != ra);
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
@@ -1595,7 +1606,7 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
void Assembler::bgezall(Register rs, int16_t offset) {
- DCHECK(kArchVariant != kMips64r6);
+ DCHECK_NE(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1605,7 +1616,7 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
DCHECK(rt != ra);
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
@@ -1613,7 +1624,7 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
void Assembler::bgtzalc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
DCHECK(rt != ra);
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
@@ -1622,7 +1633,7 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
DCHECK(rt != ra);
GenInstrImmediate(ADDI, zero_reg, rt, offset,
@@ -1631,7 +1642,7 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
DCHECK(rt != ra);
GenInstrImmediate(DADDI, zero_reg, rt, offset,
@@ -1640,7 +1651,7 @@ void Assembler::bnezalc(Register rt, int16_t offset) {
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
if (rs.code() < rt.code()) {
GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
@@ -1651,14 +1662,14 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
void Assembler::beqzc(Register rs, int32_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
if (rs.code() < rt.code()) {
GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
@@ -1669,7 +1680,7 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
void Assembler::bnezc(Register rs, int32_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1735,13 +1746,13 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::jic(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(POP66, zero_reg, rt, offset);
}
void Assembler::jialc(Register rt, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(POP76, zero_reg, rt, offset);
}
@@ -1775,55 +1786,55 @@ void Assembler::mul(Register rd, Register rs, Register rt) {
void Assembler::muh(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
void Assembler::mulu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
void Assembler::muhu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
void Assembler::dmul(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}
void Assembler::dmuh(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}
void Assembler::dmulu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
void Assembler::mult(Register rs, Register rt) {
- DCHECK(kArchVariant != kMips64r6);
+ DCHECK_NE(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
void Assembler::multu(Register rs, Register rt) {
- DCHECK(kArchVariant != kMips64r6);
+ DCHECK_NE(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
@@ -1839,13 +1850,13 @@ void Assembler::div(Register rs, Register rt) {
void Assembler::div(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
void Assembler::mod(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
@@ -1856,13 +1867,13 @@ void Assembler::divu(Register rs, Register rt) {
void Assembler::divu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
void Assembler::modu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
@@ -1893,13 +1904,13 @@ void Assembler::ddiv(Register rs, Register rt) {
void Assembler::ddiv(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}
void Assembler::dmod(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
@@ -1910,13 +1921,13 @@ void Assembler::ddivu(Register rs, Register rt) {
void Assembler::ddivu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}
void Assembler::dmodu(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
@@ -2088,8 +2099,8 @@ void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(sa <= 3);
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_LE(sa, 3);
+ DCHECK_EQ(kArchVariant, kMips64r6);
Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
rd.code() << kRdShift | sa << kSaShift | LSA;
emit(instr);
@@ -2098,8 +2109,8 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(sa <= 3);
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_LE(sa, 3);
+ DCHECK_EQ(kArchVariant, kMips64r6);
Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
rd.code() << kRdShift | sa << kSaShift | DLSA;
emit(instr);
@@ -2123,7 +2134,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
- DCHECK(second_access_add_to_offset <= 7); // Must be <= 7.
+ DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
if (is_int16(src.offset()) &&
@@ -2268,14 +2279,14 @@ void Assembler::lwu(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
@@ -2297,14 +2308,14 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
@@ -2313,7 +2324,7 @@ void Assembler::ll(Register rd, const MemOperand& rs) {
DCHECK(is_int9(rs.offset_));
GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(is_int16(rs.offset_));
GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
}
@@ -2324,7 +2335,7 @@ void Assembler::lld(Register rd, const MemOperand& rs) {
DCHECK(is_int9(rs.offset_));
GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LLD_R6);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(is_int16(rs.offset_));
GenInstrImmediate(LLD, rs.rm(), rd, rs.offset_);
}
@@ -2335,7 +2346,7 @@ void Assembler::sc(Register rd, const MemOperand& rs) {
DCHECK(is_int9(rs.offset_));
GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
}
}
@@ -2345,7 +2356,7 @@ void Assembler::scd(Register rd, const MemOperand& rs) {
DCHECK(is_int9(rs.offset_));
GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SCD_R6);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SCD, rs.rm(), rd, rs.offset_);
}
}
@@ -2385,28 +2396,28 @@ void Assembler::dati(Register rs, int32_t j) {
void Assembler::ldl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
void Assembler::ldr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}
void Assembler::sdl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}
void Assembler::sdr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
@@ -2424,7 +2435,7 @@ void Assembler::sd(Register rd, const MemOperand& rs) {
// ---------PC-Relative instructions-----------
void Assembler::addiupc(Register rs, int32_t imm19) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int19(imm19));
uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
@@ -2432,7 +2443,7 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
void Assembler::lwpc(Register rs, int32_t offset19) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
@@ -2440,7 +2451,7 @@ void Assembler::lwpc(Register rs, int32_t offset19) {
void Assembler::lwupc(Register rs, int32_t offset19) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
@@ -2448,7 +2459,7 @@ void Assembler::lwupc(Register rs, int32_t offset19) {
void Assembler::ldpc(Register rs, int32_t offset18) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int18(offset18));
uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
GenInstrImmediate(PCREL, rs, imm21);
@@ -2456,7 +2467,7 @@ void Assembler::ldpc(Register rs, int32_t offset18) {
void Assembler::auipc(Register rs, int16_t imm16) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid());
uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
@@ -2464,7 +2475,7 @@ void Assembler::auipc(Register rs, int16_t imm16) {
void Assembler::aluipc(Register rs, int16_t imm16) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid());
uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
@@ -2475,7 +2486,7 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
- DCHECK((code & ~0xfffff) == 0);
+ DCHECK_EQ(code & ~0xfffff, 0);
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
@@ -2491,8 +2502,8 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
void Assembler::stop(const char* msg, uint32_t code) {
- DCHECK(code > kMaxWatchpointCode);
- DCHECK(code <= kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
break_(0x54321);
#else // V8_HOST_ARCH_MIPS
@@ -2652,7 +2663,7 @@ void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}
@@ -2660,7 +2671,7 @@ void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
@@ -2668,14 +2679,14 @@ void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
// GPR.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
// GPR.
void Assembler::selnez(Register rd, Register rs, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
@@ -2761,13 +2772,13 @@ void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::bitswap(Register rd, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}
void Assembler::dbitswap(Register rd, Register rt) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}
@@ -2781,7 +2792,7 @@ void Assembler::pref(int32_t hint, const MemOperand& rs) {
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(is_uint3(bp));
uint16_t sa = (ALIGN << kBp2Bits) | bp;
GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
@@ -2789,7 +2800,7 @@ void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(is_uint3(bp));
uint16_t sa = (DALIGN << kBp3Bits) | bp;
GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
@@ -2892,7 +2903,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
@@ -2938,53 +2949,53 @@ void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
@@ -2992,7 +3003,7 @@ void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
// FPR.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
@@ -3058,22 +3069,22 @@ void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
}
void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
}
void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
}
void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
}
@@ -3205,7 +3216,7 @@ void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
@@ -3265,20 +3276,20 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
void Assembler::class_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
void Assembler::class_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
@@ -3286,7 +3297,7 @@ void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
@@ -3327,8 +3338,8 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions for >= MIPSr6.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister fs, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ DCHECK_EQ(kArchVariant, kMips64r6);
+ DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
Instr instr = COP1 | fmt | ft.code() << kFtShift |
fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
emit(instr);
@@ -3347,14 +3358,14 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
emit(instr);
}
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
emit(instr);
}
@@ -3363,10 +3374,10 @@ void Assembler::bc1nez(int16_t offset, FPURegister ft) {
// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
- DCHECK(kArchVariant != kMips64r6);
+ DCHECK_NE(kArchVariant, kMips64r6);
DCHECK(is_uint3(cc));
DCHECK(fmt == S || fmt == D);
- DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
emit(instr);
@@ -3387,7 +3398,7 @@ void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- DCHECK(src2 == 0.0);
+ DCHECK_EQ(src2, 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
c(cond, D, src1, f14, 0);
@@ -3950,7 +3961,7 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
return 0; // Number of instructions patched.
}
imm += pc_delta;
- DCHECK((imm & 3) == 0);
+ DCHECK_EQ(imm & 3, 0);
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
@@ -3969,7 +3980,7 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
imm28 += pc_delta;
imm28 &= kImm28Mask;
instr &= ~kImm26Mask;
- DCHECK((imm28 & 3) == 0);
+ DCHECK_EQ(imm28 & 3, 0);
uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
instr_at_put(pc, instr | (imm26 & kImm26Mask));
return 1; // Number of instructions patched.
@@ -3983,7 +3994,7 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
uint64_t target =
static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
target &= kImm28Mask;
- DCHECK((imm28 & 3) == 0);
+ DCHECK_EQ(imm28 & 3, 0);
uint32_t imm26 = static_cast<uint32_t>(target >> 2);
// Check markings whether to emit j or jal.
uint32_t unbox = (instr & kJRawMark) ? J : JAL;
@@ -4081,14 +4092,14 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data, nullptr);
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
}
- DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
}
@@ -4119,7 +4130,7 @@ void Assembler::CheckTrampolinePool() {
}
DCHECK(!trampoline_emitted_);
- DCHECK(unbound_labels_count_ >= 0);
+ DCHECK_GE(unbound_labels_count_, 0);
if (unbound_labels_count_ > 0) {
// First we emit jump (2 instructions), then we emit trampoline pool.
{ BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4250,8 +4261,8 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
}
Register UseScratchRegisterScope::Acquire() {
- DCHECK(available_ != nullptr);
- DCHECK(*available_ != 0);
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
*available_ &= ~(1UL << index);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index ce47cb761e..cdb8be46cd 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -54,8 +54,9 @@ namespace internal {
V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
- V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7)
+ V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7) \
+ V(v0) V(v1)
#define DOUBLE_REGISTERS(V) \
V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
@@ -257,13 +258,13 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
FPURegister low() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
return FPURegister::from_code(code());
}
FPURegister high() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
// Find high reg of a Doubel-reg pair, which is reg + 1.
- DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
return FPURegister::from_code(code() + 1);
}
@@ -489,14 +490,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -535,7 +537,7 @@ class Assembler : public AssemblerBase {
return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
inline bool is_near_r6(Label* L) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
}
@@ -694,7 +696,7 @@ class Assembler : public AssemblerBase {
// sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
// marking, to avoid conflict with ssnop and ehb instructions.
void nop(unsigned int type = 0) {
- DCHECK(type < 32);
+ DCHECK_LT(type, 32);
Register nop_rt_reg = (type == 0) ? zero_reg : at;
sll(zero_reg, nop_rt_reg, type, true);
}
@@ -2057,12 +2059,8 @@ class Assembler : public AssemblerBase {
// few aliases, but mixing both does not look clean to me.
// Anyway we could surely implement this differently.
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa = 0,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
+ uint16_t sa = 0, SecondaryField func = nullptrSF);
void GenInstrRegister(Opcode opcode,
Register rs,
@@ -2071,33 +2069,20 @@ class Assembler : public AssemblerBase {
uint16_t lsb,
SecondaryField func);
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
-
- void GenInstrRegister(Opcode opcode,
- FPURegister fr,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
+ FPURegister fs, FPURegister fd,
+ SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
+ FPURegister fs, FPURegister fd,
+ SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPURegister fs, FPURegister fd,
+ SecondaryField func = nullptrSF);
+ void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPUControlRegister fs, SecondaryField func = nullptrSF);
void GenInstrImmediate(
Opcode opcode, Register rs, Register rt, int32_t j,
@@ -2191,7 +2176,7 @@ class Assembler : public AssemblerBase {
}
// Labels.
- void print(Label* L);
+ void print(const Label* L);
void bind_to(Label* L, int pos);
void next(Label* L, bool is_internal);
@@ -2292,7 +2277,6 @@ class Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
- friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 1025bcd928..5d8cee7787 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -7,11 +7,9 @@
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -39,60 +37,52 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
- Register input_reg = source();
Register result_reg = destination();
- int double_offset = offset();
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += 3 * kPointerSize;
-
- Register scratch =
- GetRegisterThatIsNotOneOf(input_reg, result_reg);
- Register scratch2 =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
- Register scratch3 =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
DoubleRegister double_scratch = kLithiumScratchDouble;
+ // Account for saved regs.
+ const int kArgumentOffset = 3 * kPointerSize;
+
__ Push(scratch, scratch2, scratch3);
- if (!skip_fastpath()) {
- // Load double input.
- __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
-
- // Clear cumulative exception flags and save the FCSR.
- __ cfc1(scratch2, FCSR);
- __ ctc1(zero_reg, FCSR);
-
- // Try a conversion to a signed integer.
- __ Trunc_w_d(double_scratch, double_scratch);
- // Move the converted value into the result register.
- __ mfc1(scratch3, double_scratch);
-
- // Retrieve and restore the FCSR.
- __ cfc1(scratch, FCSR);
- __ ctc1(scratch2, FCSR);
-
- // Check for overflow and NaNs.
- __ And(
- scratch, scratch,
- kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
- | kFCSRInvalidOpFlagMask);
- // If we had no exceptions then set result_reg and we are done.
- Label error;
- __ Branch(&error, ne, scratch, Operand(zero_reg));
- __ Move(result_reg, scratch3);
- __ Branch(&done);
- __ bind(&error);
- }
+
+ // Load double input.
+ __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
+
+ // Clear cumulative exception flags and save the FCSR.
+ __ cfc1(scratch2, FCSR);
+ __ ctc1(zero_reg, FCSR);
+
+ // Try a conversion to a signed integer.
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ mfc1(scratch3, double_scratch);
+
+ // Retrieve and restore the FCSR.
+ __ cfc1(scratch, FCSR);
+ __ ctc1(scratch2, FCSR);
+
+ // Check for overflow and NaNs.
+ __ And(
+ scratch, scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ Lw(input_low,
- MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
__ Lw(input_high,
- MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
@@ -176,49 +166,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ mov(t9, ra);
- __ pop(ra);
- __ PushSafepointRegisters();
- __ Jump(t9);
-}
-
-
-void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ mov(t9, ra);
- __ pop(ra);
- __ PopSafepointRegisters();
- __ Jump(t9);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles()) {
- __ MultiPushFPU(kCallerSavedFPU);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = a1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- __ MultiPopFPU(kCallerSavedFPU);
- }
-
- __ MultiPop(kJSCallerSaved | ra.bit());
- __ Ret();
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == a2);
@@ -315,7 +262,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+ __ BranchF(&done, nullptr, ne, double_result, kDoubleRegZero);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
@@ -339,38 +286,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
+Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreRegistersStateStub::GenerateAheadOfTime(isolate);
- RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
-void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
-void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
- StoreBufferOverflowStub(isolate, mode).GetCode();
}
@@ -494,17 +422,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles(), argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -539,12 +465,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&zero);
// Compute the handler entry address and jump to it.
- __ li(a1, Operand(pending_handler_code_address));
- __ Ld(a1, MemOperand(a1));
- __ li(a2, Operand(pending_handler_offset_address));
- __ Ld(a2, MemOperand(a2));
- __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Daddu(t9, a1, a2);
+ __ li(t9, Operand(pending_handler_entrypoint_address));
+ __ Ld(t9, MemOperand(t9));
__ Jump(t9);
}
@@ -704,111 +626,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-void StringHelper::GenerateFlatOneByteStringEquals(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ Ld(length, FieldMemOperand(left, String::kLengthOffset));
- __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Branch(&check_zero_length, eq, length, Operand(scratch2));
- __ bind(&strings_not_equal);
- // Can not put li in delayslot, it has multi instructions.
- __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&compare_chars, ne, length, Operand(zero_reg));
- DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
-
- // Compare characters.
- __ bind(&compare_chars);
-
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
- v0, &strings_not_equal);
-
- // Characters are equal.
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ Ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Dsubu(scratch3, scratch1, Operand(scratch2));
- Register length_delta = scratch3;
- __ slt(scratch4, scratch2, scratch1);
- __ Movn(scratch1, scratch2, scratch4);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- scratch4, v0, &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mov(scratch2, length_delta);
- __ mov(scratch4, zero_reg);
- __ mov(v0, zero_reg);
-
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- Label ret;
- __ Branch(&ret, eq, scratch2, Operand(scratch4));
- __ li(v0, Operand(Smi::FromInt(GREATER)));
- __ Branch(&ret, gt, scratch2, Operand(scratch4));
- __ li(v0, Operand(Smi::FromInt(LESS)));
- __ bind(&ret);
- __ Ret();
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Register scratch3,
- Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ Daddu(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ Daddu(left, left, Operand(scratch1));
- __ Daddu(right, right, Operand(scratch1));
- __ Dsubu(length, zero_reg, length);
- Register index = length; // index = -length;
-
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ Daddu(scratch3, left, index);
- __ Lbu(scratch1, MemOperand(scratch3));
- __ Daddu(scratch3, right, index);
- __ Lbu(scratch2, MemOperand(scratch3));
- __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
- __ Daddu(index, index, 1);
- __ Branch(&loop, ne, index, Operand(zero_reg));
-}
-
-
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Make place for arguments to fit C calling convention. Most of the callers
// of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
@@ -844,390 +661,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
- __ Dsubu(index, index, Operand(1));
- __ And(index, index,
- Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Dlsa(index, index, index, 1); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- Register tmp = properties;
-
- __ Dlsa(tmp, properties, index, kPointerSizeLog2);
- __ Ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- DCHECK(tmp != entity_name);
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ Branch(done, eq, entity_name, Operand(tmp));
-
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
-
- Label good;
- __ Branch(&good, eq, entity_name, Operand(tmp));
-
- // Check if the entry name is not a unique name.
- __ Ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ Lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ bind(&good);
-
- // Restore the properties.
- __ Ld(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- }
-
- const int spill_mask =
- (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
- a2.bit() | a1.bit() | a0.bit() | v0.bit());
-
- __ MultiPush(spill_mask);
- __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ li(a1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, eq, at, Operand(zero_reg));
- __ Branch(miss, ne, at, Operand(zero_reg));
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: NameDictionary to probe
- // a1: key
- // dictionary: NameDictionary to probe.
- // index: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = v0;
- Register dictionary = a0;
- Register key = a1;
- Register index = a2;
- Register mask = a3;
- Register hash = a4;
- Register undefined = a5;
- Register entry_key = a6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ Ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ SmiUntag(mask);
- __ Dsubu(mask, mask, Operand(1));
-
- __ Lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Daddu(index, hash, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- } else {
- __ mov(index, hash);
- }
- __ dsrl(index, index, Name::kHashShift);
- __ And(index, mask, index);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // index *= 3.
- __ Dlsa(index, index, index, 1);
-
- STATIC_ASSERT(kSmiTagSize == 1);
- __ Dlsa(index, dictionary, index, kPointerSizeLog2);
- __ Ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
-
- // Stop if found the property.
- __ Branch(&in_dictionary, eq, entry_key, Operand(key));
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ Ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ Lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ Ret(USE_DELAY_SLOT);
- __ mov(result, zero_reg);
- }
-
- __ bind(&in_dictionary);
- __ Ret(USE_DELAY_SLOT);
- __ li(result, 1);
-
- __ bind(&not_in_dictionary);
- __ Ret(USE_DELAY_SLOT);
- __ mov(result, zero_reg);
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- // Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- 2 * Assembler::kInstrSize);
-
- if (Assembler::IsBeq(first_instruction)) {
- return INCREMENTAL;
- }
-
- DCHECK(Assembler::IsBne(first_instruction));
-
- if (Assembler::IsBeq(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(Assembler::IsBne(second_instruction));
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- 4 * Assembler::kInstrSize);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch+nop instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch it
- // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
- // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
- // incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
- __ nop();
- __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
- __ nop();
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address = a0 == regs_.address() ? regs_.scratch0() : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != a0);
- __ Move(address, regs_.address());
- __ Move(a0, regs_.object());
- __ Move(a1, address);
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->push(ra);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(ra);
@@ -1235,7 +671,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
@@ -1462,7 +898,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ SmiTst(a4, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
@@ -1544,7 +980,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
@@ -1589,10 +1025,12 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(
- MacroAssembler* masm, Register function_address,
- ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
- MemOperand return_value_operand, MemOperand* context_restore_operand) {
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int32_t stack_space_offset,
+ MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@@ -1676,17 +1114,13 @@ static void CallApiFunctionAndReturn(
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ Ld(cp, *context_restore_operand);
- }
if (stack_space_offset != kInvalidStackOffset) {
- DCHECK(kCArgsSlotsSize == 0);
+ DCHECK_EQ(kCArgsSlotsSize, 0);
__ Ld(s0, MemOperand(sp, stack_space_offset));
} else {
__ li(s0, Operand(stack_space));
}
- __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
+ __ LeaveExitFrame(false, s0, NO_EMIT_RETURN,
stack_space_offset != kInvalidStackOffset);
// Check if the function scheduled an exception.
@@ -1716,7 +1150,6 @@ static void CallApiFunctionAndReturn(
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- a0 : callee
// -- a4 : call_data
// -- a2 : holder
// -- a1 : api_function_address
@@ -1726,21 +1159,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 8] : first argument
// -- sp[argc * 8] : receiver
- // -- sp[(argc + 1) * 8] : accessor_holder
// -----------------------------------
- Register callee = a0;
Register call_data = a4;
Register holder = a2;
Register api_function_address = a1;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1750,8 +1178,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // Save context, callee and call data.
- __ Push(context, callee, call_data);
+ // call data.
+ __ Push(call_data);
Register scratch = call_data;
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
@@ -1761,38 +1189,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Push isolate and holder.
__ Push(scratch, holder);
- // Enter a new context
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 8] : new_target
- // -- sp[FCA::kArgsLength * 8] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 8] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 8] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 8] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ Ld(accessor_holder,
- MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ Ld(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ Lbu(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch2, scratch2, Operand(1 << Map::kIsConstructor));
- __ Branch(&skip_looking_for_constructor, ne, scratch2, Operand(zero_reg));
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ Ld(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee.
- __ Ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
__ mov(scratch, sp);
@@ -1823,22 +1219,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument.
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
// TODO(adamk): Why are we clobbering this immediately?
const int32_t stack_space_offset = kInvalidStackOffset;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_offset, return_value_operand,
- &context_restore_operand);
+ stack_space_offset, return_value_operand);
}
@@ -1876,7 +1264,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
__ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
- DCHECK(Smi::kZero == nullptr);
+ DCHECK_NULL(Smi::kZero);
__ Sd(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
__ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
@@ -1911,7 +1299,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kInvalidStackOffset,
- return_value_operand, NULL);
+ return_value_operand);
}
#undef __
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index ca82b96c25..0513611664 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -8,274 +8,23 @@
namespace v8 {
namespace internal {
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in v0.
- static void GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4);
-
- // Compares two flat one-byte strings for equality and returns result in v0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Register scratch3,
- Label* chars_not_equal);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class StoreRegistersStateStub: public PlatformCodeStub {
- public:
- explicit StoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
-};
-
-
-class RestoreRegistersStateStub: public PlatformCodeStub {
- public:
- explicit RestoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- DCHECK(Assembler::IsBne(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- masm->MultiPushFPU(kCallerSavedFPU);
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- masm->MultiPopFPU(kCallerSavedFPU);
- }
- masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 5> {};
- class ValueBits: public BitField<int, 5, 5> {};
- class AddressBits: public BitField<int, 10, 5> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub: public PlatformCodeStub {
+// moved by GC.
+class DirectCEntryStub : public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 814c46a326..970e0efe56 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips64/codegen-mips64.h"
-
#if V8_TARGET_ARCH_MIPS64
#include <memory>
@@ -15,27 +13,26 @@
namespace v8 {
namespace internal {
-
#define __ masm.
-
#if defined(V8_HOST_ARCH_MIPS)
+
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
- // This code assumes that cache lines are 32 bytes and if the cache line is
- // larger it will not work correctly.
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
+ // This code assumes that cache lines are 32 bytes and if the cache line is
+ // larger it will not work correctly.
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
@@ -548,8 +545,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolte, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -559,12 +557,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
@@ -576,116 +574,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ Ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ And(at, result, Operand(kIsIndirectStringMask));
- __ Branch(&check_sequential, eq, at, Operand(zero_reg));
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ And(at, result, Operand(kStringRepresentationMask));
- __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
- __ Branch(&thin_string, eq, at, Operand(kThinStringTag));
-
- // Handle slices.
- __ Ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ Ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ dsra32(at, result, 0);
- __ Daddu(index, index, at);
- __ jmp(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ Ld(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ Ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(at, Heap::kempty_stringRootIndex);
- __ Branch(call_runtime, ne, result, Operand(at));
- // Get the first of the two strings and load its instance type.
- __ Ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ jmp(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(at, result, Operand(kStringRepresentationMask));
- __ Branch(&external_string, ne, at, Operand(zero_reg));
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Daddu(string,
- string,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ And(at, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound,
- at, Operand(zero_reg));
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(at, result, Operand(kShortExternalStringMask));
- __ Branch(call_runtime, ne, at, Operand(zero_reg));
- __ Ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(at, result, Operand(kStringEncodingMask));
- __ Branch(&one_byte, ne, at, Operand(zero_reg));
- // Two-byte string.
- __ Dlsa(at, string, index, 1);
- __ Lhu(result, MemOperand(at));
- __ jmp(&done);
- __ bind(&one_byte);
- // One_byte string.
- __ Daddu(at, string, index);
- __ Lbu(result, MemOperand(at));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/codegen-mips64.h b/deps/v8/src/mips64/codegen-mips64.h
deleted file mode 100644
index 48853de659..0000000000
--- a/deps/v8/src/mips64/codegen-mips64.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_H_
-#define V8_MIPS_CODEGEN_MIPS_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/mips64/constants-mips64.cc
index 9b497a7d9b..c087753aee 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/mips64/constants-mips64.cc
@@ -36,13 +36,11 @@ const char* Registers::names_[kNumSimuRegisters] = {
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
- {0, "zero"},
- {23, "cp"},
- {30, "s8"},
- {30, "s8_fp"},
- {kInvalidRegister, NULL}
-};
-
+ {0, "zero"},
+ {23, "cp"},
+ {30, "s8"},
+ {30, "s8_fp"},
+ {kInvalidRegister, nullptr}};
const char* Registers::Name(int reg) {
const char* result;
@@ -86,9 +84,7 @@ const char* FPURegisters::names_[kNumFPURegisters] = {
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
- {kInvalidRegister, NULL}
-};
-
+ {kInvalidRegister, nullptr}};
const char* FPURegisters::Name(int creg) {
const char* result;
@@ -128,7 +124,7 @@ const char* MSARegisters::names_[kNumMSARegisters] = {
"w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "w31"};
const MSARegisters::RegisterAlias MSARegisters::aliases_[] = {
- {kInvalidRegister, NULL}};
+ {kInvalidRegister, nullptr}};
const char* MSARegisters::Name(int creg) {
const char* result;
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 139f7514d8..0c107d1e1b 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -989,7 +989,7 @@ enum SecondaryField : uint32_t {
BIT_DF_w = ((2U << 5) << 16),
BIT_DF_d = ((0U << 6) << 16),
- NULLSF = 0U
+ nullptrSF = 0U
};
enum MSAMinorOpcode : uint32_t {
@@ -1452,22 +1452,22 @@ class InstructionGetters : public T {
}
inline int RdValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int BaseValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kBaseShift + kBaseBits - 1, kBaseShift);
}
inline int SaValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int LsaSaValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift);
}
@@ -1506,12 +1506,12 @@ class InstructionGetters : public T {
}
inline int Bp2Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
}
inline int Bp3Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift);
}
@@ -1553,7 +1553,7 @@ class InstructionGetters : public T {
}
inline int RdFieldRaw() const {
- DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType);
return this->InstructionBits() & kRdFieldMask;
}
@@ -1577,37 +1577,37 @@ class InstructionGetters : public T {
case REGIMM:
return RtValue();
default:
- return NULLSF;
+ return nullptrSF;
}
}
inline int32_t ImmValue(int bits) const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(bits - 1, 0);
}
inline int32_t Imm9Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm9Shift + kImm9Bits - 1, kImm9Shift);
}
inline int32_t Imm16Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm18Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
}
inline int32_t Imm19Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
}
inline int32_t Imm21Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
@@ -1618,27 +1618,27 @@ class InstructionGetters : public T {
}
inline int32_t MsaImm8Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImm8Shift + kMsaImm8Bits - 1, kMsaImm8Shift);
}
inline int32_t MsaImm5Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImm5Shift + kMsaImm5Bits - 1, kMsaImm5Shift);
}
inline int32_t MsaImm10Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImm10Shift + kMsaImm10Bits - 1, kMsaImm10Shift);
}
inline int32_t MsaImmMI10Value() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(kMsaImmMI10Shift + kMsaImmMI10Bits - 1, kMsaImmMI10Shift);
}
inline int32_t MsaBitDf() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
int32_t df_m = this->Bits(22, 16);
if (((df_m >> 6) & 1U) == 0) {
return 3;
@@ -1654,7 +1654,7 @@ class InstructionGetters : public T {
}
inline int32_t MsaBitMValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType);
return this->Bits(16 + this->MsaBitDf() + 3, 16);
}
@@ -1807,7 +1807,7 @@ InstructionBase::Type InstructionBase::InstructionType() const {
case LLD_R6:
case SC_R6:
case SCD_R6: {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
return kImmediateType;
}
case DBSHFL: {
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 642eabdfc8..506143fe73 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -104,7 +103,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
@@ -280,7 +279,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
__ Push(at);
} else {
- DCHECK(kArchVariant != kMips64r6);
+ DCHECK_NE(kArchVariant, kMips64r6);
// Uncommon case, the branch cannot reach.
// Create mini trampoline to reach the end of the table
for (int i = 0, j = 0; i < count(); i++, j++) {
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 2c35653e88..523e268532 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -689,7 +689,7 @@ void Decoder::PrintInstructionName(Instruction* instr) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'r');
+ DCHECK_EQ(format[0], 'r');
if (format[1] == 's') { // 'rs: Rs register.
int reg = instr->RsValue();
PrintRegister(reg);
@@ -710,7 +710,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'f');
+ DCHECK_EQ(format[0], 'f');
if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) {
if (format[1] == 's') { // 'fs: fs register.
int reg = instr->FsValue();
@@ -754,7 +754,7 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
// Handle all MSARegister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatMSARegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'w');
+ DCHECK_EQ(format[0], 'w');
if (format[1] == 's') {
int reg = instr->WsValue();
PrintMSARegister(reg);
@@ -2107,7 +2107,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- DCHECK(rt_reg > 0);
+ DCHECK_GT(rt_reg, 0);
if (rs_reg == 0) {
Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
@@ -2126,7 +2126,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- DCHECK(rt_reg > 0);
+ DCHECK_GT(rt_reg, 0);
if (rs_reg == 0) {
Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
diff --git a/deps/v8/src/mips64/frame-constants-mips64.cc b/deps/v8/src/mips64/frame-constants-mips64.cc
index 0d5348e526..5a19a0c364 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/mips64/frame-constants-mips64.cc
@@ -22,6 +22,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 679a10ad68..e55a0c57ed 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -56,9 +56,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-const Register StringCompareDescriptor::LeftRegister() { return a1; }
-const Register StringCompareDescriptor::RightRegister() { return a0; }
-
const Register ApiGetterDescriptor::HolderRegister() { return a0; }
const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
@@ -76,7 +73,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a2, a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
// static
@@ -85,13 +82,13 @@ const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
@@ -202,7 +199,7 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
@@ -216,7 +213,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -226,7 +223,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// a1 -- function
// a2 -- allocation site with elements kind
Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -236,7 +233,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// a1 -- function
// a2 -- allocation site with elements kind
Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -249,20 +246,20 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
@@ -279,10 +276,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a0, // callee
- a4, // call_data
- a2, // holder
- a1, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ a4, // call_data
+ a2, // holder
+ a1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -331,8 +328,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
v0, // the value to pass to the generator
- a1, // the JSGeneratorObject to resume
- a2 // the resume mode (tagged)
+ a1 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 06b2c262eb..25bc8baf80 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -10,7 +10,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
@@ -153,7 +153,7 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
if (num_unsaved > 0) {
Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
}
@@ -176,15 +176,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- DCHECK(cc == eq || cc == ne);
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
-}
-
-
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@@ -231,7 +222,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -242,7 +233,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -336,13 +327,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
@@ -366,40 +351,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch,
- SaveFPRegsMode fp_mode) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- li(t8, Operand(store_buffer));
- Ld(scratch, MemOperand(t8));
- // Store pointer to buffer and increment buffer top.
- Sd(address, MemOperand(scratch));
- Daddu(scratch, scratch, kPointerSize);
- // Write back new top of buffer.
- Sd(scratch, MemOperand(t8));
- // Call stub on end of buffer.
- // Check for end of buffer.
- And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
- DCHECK(scratch != t8);
- Ret(ne, t8, Operand(zero_reg));
- push(ra);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
- CallStub(&store_buffer_overflow);
- pop(ra);
- bind(&done);
- Ret();
-}
-
-
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -1139,7 +1090,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lw(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
@@ -1161,7 +1112,7 @@ void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lwu(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
Ulw(rd, rs);
Dext(rd, rd, 0, 32);
}
@@ -1174,7 +1125,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Sw(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
@@ -1190,7 +1141,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lh(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
@@ -1224,7 +1175,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lhu(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
@@ -1260,7 +1211,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
if (kArchVariant == kMips64r6) {
Sh(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
@@ -1287,7 +1238,7 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Ld(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
@@ -1323,7 +1274,7 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Sd(rd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
@@ -1347,7 +1298,7 @@ void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
if (kArchVariant == kMips64r6) {
Lwc1(fd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
Ulw(scratch, rs);
mtc1(scratch, fd);
}
@@ -1358,7 +1309,7 @@ void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
if (kArchVariant == kMips64r6) {
Swc1(fd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
mfc1(scratch, fd);
Usw(scratch, rs);
}
@@ -1370,7 +1321,7 @@ void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
if (kArchVariant == kMips64r6) {
Ldc1(fd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
Uld(scratch, rs);
dmtc1(scratch, fd);
}
@@ -1382,7 +1333,7 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
if (kArchVariant == kMips64r6) {
Sdc1(fd, rs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
dmfc1(scratch, fd);
Usd(scratch, rs);
}
@@ -1962,8 +1913,8 @@ void TurboAssembler::MultiPopFPU(RegList regs) {
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(pos + size < 33);
+ DCHECK_LT(pos, 32);
+ DCHECK_LT(pos + size, 33);
ext_(rt, rs, pos, size);
}
@@ -1982,9 +1933,9 @@ void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos,
void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(pos + size <= 32);
- DCHECK(size != 0);
+ DCHECK_LT(pos, 32);
+ DCHECK_LE(pos + size, 32);
+ DCHECK_NE(size, 0);
ins_(rt, rs, pos, size);
}
@@ -2041,7 +1992,7 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
// r6 neg_s changes the sign for NaN-like operands as well.
neg_s(fd, fs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
@@ -2064,7 +2015,7 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
// r6 neg_d changes the sign for NaN-like operands as well.
neg_d(fd, fs);
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
@@ -2279,7 +2230,7 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF(&simple_convert, NULL, lt, fd, scratch);
+ BranchF(&simple_convert, nullptr, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -2313,7 +2264,7 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF32(&simple_convert, NULL, lt, fd, scratch);
+ BranchF32(&simple_convert, nullptr, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -2845,7 +2796,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, NULL, eq, double_input, double_scratch);
+ BranchF(&done, nullptr, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
@@ -2924,7 +2875,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
Daddu(sp, sp, Operand(kDoubleSize));
pop(ra);
@@ -2940,7 +2891,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
(cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
- DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
+ DCHECK_EQ(kArchVariant, kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
@@ -3450,7 +3401,7 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
}
} else {
- DCHECK(offset == 0);
+ DCHECK_EQ(offset, 0);
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
return BranchShortHelperR6(0, L, cond, rs, rt);
} else {
@@ -3805,7 +3756,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
}
} else {
- DCHECK(offset == 0);
+ DCHECK_EQ(offset, 0);
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
} else {
@@ -4112,38 +4063,11 @@ void MacroAssembler::PopStackHandler() {
Sd(a1, MemOperand(scratch));
}
-
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- Branch(&succeed, eq, scratch, Operand(zero_reg));
- }
- Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
-
- bind(&succeed);
-}
-
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- li(value, Operand(cell));
- Ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
@@ -4468,24 +4392,11 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// ---------------------------------------------------------------------------
// Support functions.
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done);
- GetObjectType(result, temp, temp2);
- Branch(&done, ne, temp2, Operand(MAP_TYPE));
- ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- Branch(&loop);
- bind(&done);
-}
-
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
- Lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
@@ -4811,7 +4722,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
Lw(scratch1, MemOperand(scratch2));
@@ -4823,7 +4734,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
Lw(scratch1, MemOperand(scratch2));
@@ -4856,7 +4767,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -4887,7 +4798,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
// generated instructions is 10, so we use this as a maximum value.
static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
- DCHECK(abort_instructions <= kExpectedAbortInstructions);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
@@ -5021,7 +4932,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve place for the return address, stack space and an optional slot
// (used by the DirectCEntryStub to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
- DCHECK(stack_space >= 0);
+ DCHECK_GE(stack_space, 0);
Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
@@ -5036,9 +4947,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context, bool do_return,
+ bool do_return,
bool argument_count_is_length) {
// Optionally restore all double registers.
if (save_doubles) {
@@ -5058,11 +4968,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
Sd(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- if (restore_context) {
- li(t8, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- Ld(cp, MemOperand(t8));
- }
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ Ld(cp, MemOperand(t8));
+
#ifdef DEBUG
li(t8,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@@ -5576,101 +5485,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
}
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color;
- // Note that we are using two 4-byte aligned loads.
- LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, t9, Operand(mask_scratch));
- Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
- // Shift left 1 by adding.
- Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
- And(t8, t9, Operand(mask_scratch));
- Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
-
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- // addr_reg is divided into fields:
- // |63 page base 20|19 high 8|7 shift 3|2 0|
- // 'high' gives the index of the cell holding color bits for the object.
- // 'shift' gives the offset in the cell for this object's color.
- And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
- li(t8, Operand(1));
- dsllv(mask_reg, t8, mask_reg);
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Register load_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- // Note that we are using a 4-byte aligned 8-byte load.
- if (emit_debug_code()) {
- LoadWordPair(load_scratch,
- MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- } else {
- Lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- }
- And(t8, mask_scratch, load_scratch);
- Branch(value_is_white, eq, t8, Operand(zero_reg));
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- Ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- Ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- Ld(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- Ld(dst, FieldMemOperand(dst, offset));
-}
-
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
@@ -5719,48 +5533,6 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
- flush_cache_(flush_cache) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- if (flush_cache_ == FLUSH) {
- Assembler::FlushICache(masm_.isolate(), address_, size_);
- }
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- // masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::ChangeBranchCondition(Instr current_instr,
- uint32_t new_opcode) {
- current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
- masm_.emit(current_instr);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 1f1bb4bdb0..a29c79635c 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -144,7 +144,7 @@ inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
// n64, and used in RegExp code, and other places
// with more than 8 arguments.
inline MemOperand CFunctionArgumentOperand(int index) {
- DCHECK(index > kCArgSlotCount);
+ DCHECK_GT(index, kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
@@ -942,18 +942,6 @@ class MacroAssembler : public TurboAssembler {
bool IsNear(Label* L, Condition cond, int rs_reg);
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
@@ -985,51 +973,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -1108,7 +1051,7 @@ class MacroAssembler : public TurboAssembler {
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool restore_context, bool do_return = NO_EMIT_RETURN,
+ bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
// Make sure the stack is aligned. Only emits code in debug mode.
@@ -1167,22 +1110,10 @@ class MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
void GetObjectType(Register function,
Register map,
Register type_reg);
- // Get value of the weak cell.
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the
- // given miss label is the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1256,7 +1187,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// The int portion is upper 32-bits of 64-bit word.
dsra(dst, src, kSmiShift - scale);
} else {
- DCHECK(scale >= kSmiTagSize);
+ DCHECK_GE(scale, kSmiTagSize);
sll(dst, src, scale - kSmiTagSize);
}
}
@@ -1303,15 +1234,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // -------------------------------------------------------------------------
- // String utilities.
-
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template<typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
@@ -1331,18 +1253,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object, Register scratch,
- Condition cond, // ne for new space, eq otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -1351,43 +1261,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
friend class StandardFrame;
};
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- enum FlushICache {
- FLUSH,
- DONT_FLUSH
- };
-
- CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache = FLUSH);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Change the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
- FlushICache flush_cache_; // Whether to flush the I cache after patching.
-};
-
template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 65ed498e5a..e992efebf5 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -13,6 +13,7 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
+#include "src/macro-assembler.h"
#include "src/mips64/constants-mips64.h"
#include "src/mips64/simulator-mips64.h"
#include "src/ostreams.h"
@@ -163,7 +164,7 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
return false;
}
@@ -177,25 +178,25 @@ bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
- sim_->break_pc_ = NULL;
+ sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void MipsDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void MipsDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@@ -333,11 +334,11 @@ void MipsDebugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@@ -434,8 +435,8 @@ void MipsDebugger::Debug() {
PrintF("printobject <value>\n");
}
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int64_t* cur = NULL;
- int64_t* end = NULL;
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -488,8 +489,8 @@ void MipsDebugger::Debug() {
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -546,7 +547,7 @@ void MipsDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
+ if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "flags") == 0) {
@@ -622,8 +623,8 @@ void MipsDebugger::Debug() {
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -716,8 +717,8 @@ void MipsDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
- DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@@ -763,7 +764,7 @@ void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@@ -774,10 +775,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, size_t size) {
- DCHECK(size <= CachePage::kPageSize);
+ DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
- DCHECK((start & CachePage::kLineMask) == 0);
- DCHECK((size & CachePage::kLineMask) == 0);
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -818,7 +819,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
+ if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@@ -830,7 +831,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
pc_modified_ = false;
icount_ = 0;
break_count_ = 0;
- break_pc_ = NULL;
+ break_pc_ = nullptr;
break_instr_ = 0;
// Set up architecture state.
@@ -859,7 +860,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
- last_debugger_input_ = NULL;
+ last_debugger_input_ = nullptr;
}
@@ -880,7 +881,7 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
type_(type),
- next_(NULL) {
+ next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -899,8 +900,11 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
+ return current;
+ }
}
return new Redirection(isolate, external_function, type);
}
@@ -962,11 +966,11 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@@ -1971,7 +1975,7 @@ void Simulator::TraceMemWr(int64_t addr, T value) {
// on all the ReadXX functions, I don't think re-interpret cast does it.
int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
if (addr >=0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
addr, reinterpret_cast<intptr_t>(instr));
@@ -1991,7 +1995,7 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
addr, reinterpret_cast<intptr_t>(instr));
@@ -2011,7 +2015,7 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
addr, reinterpret_cast<intptr_t>(instr));
@@ -2031,7 +2035,7 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
addr, reinterpret_cast<intptr_t>(instr));
@@ -2051,7 +2055,7 @@ int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
- // This has to be a NULL-dereference, drop into debugger.
+ // This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
"\n",
addr, reinterpret_cast<intptr_t>(instr));
@@ -2520,8 +2524,8 @@ bool Simulator::IsStopInstruction(Instruction* instr) {
bool Simulator::IsEnabledStop(uint64_t code) {
- DCHECK(code <= kMaxStopCode);
- DCHECK(code > kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
return !(watched_stops_[code].count & kStopDisabledBit);
}
@@ -2541,7 +2545,7 @@ void Simulator::DisableStop(uint64_t code) {
void Simulator::IncreaseStopCounter(uint64_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %" PRId64
" has overflowed.\n"
@@ -2725,7 +2729,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr_.FunctionFieldRaw()) {
case RINT: {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
float result, temp_result;
double temp;
float upper = std::ceil(fs);
@@ -2775,11 +2779,11 @@ void Simulator::DecodeTypeRegisterSRsType() {
fs, ft));
break;
case MADDF_S:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_S:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_S:
@@ -2913,7 +2917,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
(posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
(negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
- DCHECK(result != 0);
+ DCHECK_NE(result, 0);
fResult = bit_cast<float>(result);
SetFPUFloatResult(fd_reg(), fResult);
@@ -3022,46 +3026,46 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
}
case MINA:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), FPUMax(ft, fs));
break;
case SEL:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(
fd_reg(),
(ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
break;
case SELNEZ_C:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUFloatResult(
fd_reg(),
(ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
break;
case MOVZ_C: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
if (rt() == 0) {
SetFPUFloatResult(fd_reg(), fs);
}
break;
}
case MOVN_C: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
if (rt() != 0) {
SetFPUFloatResult(fd_reg(), fs);
}
@@ -3102,7 +3106,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t fd_int = bit_cast<int64_t>(fd);
switch (instr_.FunctionFieldRaw()) {
case RINT: {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
double result, temp, temp_result;
double upper = std::ceil(fs);
double lower = std::floor(fs);
@@ -3139,26 +3143,26 @@ void Simulator::DecodeTypeRegisterDRsType() {
break;
}
case SEL:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
break;
case SELNEZ_C:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
break;
case MOVZ_C: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
if (rt() == 0) {
SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
case MOVN_C: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
if (rt() != 0) {
SetFPUDoubleResult(fd_reg(), fs);
}
@@ -3178,19 +3182,19 @@ void Simulator::DecodeTypeRegisterDRsType() {
break;
}
case MINA:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs));
break;
case ADD_D:
@@ -3206,11 +3210,11 @@ void Simulator::DecodeTypeRegisterDRsType() {
[](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
break;
case MADDF_D:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_D:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_D:
@@ -3444,7 +3448,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
(posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
(negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
- DCHECK(result != 0);
+ DCHECK_NE(result, 0);
dResult = bit_cast<double>(result);
SetFPUDoubleResult(fd_reg(), dResult);
@@ -3654,7 +3658,7 @@ void Simulator::DecodeTypeRegisterCOP1() {
break;
case CFC1:
// At the moment only FCSR is supported.
- DCHECK(fs_reg() == kFCSRRegister);
+ DCHECK_EQ(fs_reg(), kFCSRRegister);
SetResult(rt_reg(), FCSR_);
break;
case MFC1:
@@ -3670,12 +3674,12 @@ void Simulator::DecodeTypeRegisterCOP1() {
break;
case CTC1: {
// At the moment only FCSR is supported.
- DCHECK(fs_reg() == kFCSRRegister);
+ DCHECK_EQ(fs_reg(), kFCSRRegister);
uint32_t reg = static_cast<uint32_t>(rt());
if (kArchVariant == kMips64r6) {
FCSR_ = reg | kFCSRNaN2008FlagMask;
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
FCSR_ = reg & ~kFCSRNaN2008FlagMask;
}
TraceRegWr(FCSR_);
@@ -3715,7 +3719,7 @@ void Simulator::DecodeTypeRegisterCOP1() {
void Simulator::DecodeTypeRegisterCOP1X() {
switch (instr_.FunctionFieldRaw()) {
case MADD_S: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
float fr, ft, fs;
fr = get_fpu_register_float(fr_reg());
fs = get_fpu_register_float(fs_reg());
@@ -3724,7 +3728,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
break;
}
case MSUB_S: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
float fr, ft, fs;
fr = get_fpu_register_float(fr_reg());
fs = get_fpu_register_float(fs_reg());
@@ -3733,7 +3737,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
break;
}
case MADD_D: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
double fr, ft, fs;
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
@@ -3742,7 +3746,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
break;
}
case MSUB_D: {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
double fr, ft, fs;
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
@@ -3764,11 +3768,11 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (instr_.FunctionFieldRaw()) {
case SELEQZ_S:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetResult(rd_reg(), rt() == 0 ? rs() : 0);
break;
case SELNEZ_S:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetResult(rd_reg(), rt() != 0 ? rs() : 0);
break;
case JR: {
@@ -3904,7 +3908,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
SetResult(rd_reg(), rt() >> rs());
break;
case LSA: {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
int8_t sa = lsa_sa() + 1;
int32_t _rt = static_cast<int32_t>(rt());
int32_t _rs = static_cast<int32_t>(rs());
@@ -3914,29 +3918,29 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
}
case DLSA:
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt());
break;
case MFHI: // MFHI == CLZ on R6.
if (kArchVariant != kMips64r6) {
- DCHECK(sa() == 0);
+ DCHECK_EQ(sa(), 0);
alu_out = get_register(HI);
} else {
// MIPS spec: If no bits were set in GPR rs(), the result written to
// GPR rd() is 32.
- DCHECK(sa() == 1);
+ DCHECK_EQ(sa(), 1);
alu_out = base::bits::CountLeadingZeros32(static_cast<int32_t>(rs_u()));
}
SetResult(rd_reg(), alu_out);
break;
case MFLO: // MFLO == DCLZ on R6.
if (kArchVariant != kMips64r6) {
- DCHECK(sa() == 0);
+ DCHECK_EQ(sa(), 0);
alu_out = get_register(LO);
} else {
// MIPS spec: If no bits were set in GPR rs(), the result written to
// GPR rd() is 64.
- DCHECK(sa() == 1);
+ DCHECK_EQ(sa(), 1);
alu_out = base::bits::CountLeadingZeros64(static_cast<int64_t>(rs_u()));
}
SetResult(rd_reg(), alu_out);
@@ -4640,7 +4644,7 @@ int Simulator::DecodeMsaDataFormat() {
}
void Simulator::DecodeTypeMsaI8() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaI8Mask;
int8_t i8 = instr_.MsaImm8Value();
@@ -4790,7 +4794,7 @@ T Simulator::MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5) {
}
void Simulator::DecodeTypeMsaI5() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask;
msa_reg_t ws, wd;
@@ -4826,7 +4830,7 @@ void Simulator::DecodeTypeMsaI5() {
}
void Simulator::DecodeTypeMsaI10() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask;
int64_t s10 = (static_cast<int64_t>(instr_.MsaImm10Value()) << 54) >> 54;
@@ -4863,25 +4867,28 @@ void Simulator::DecodeTypeMsaI10() {
}
void Simulator::DecodeTypeMsaELM() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaLongerELMMask;
int32_t n = instr_.MsaElmNValue();
int64_t alu_out;
switch (opcode) {
case CTCMSA:
- DCHECK(sa() == kMSACSRRegister);
+ DCHECK_EQ(sa(), kMSACSRRegister);
MSACSR_ = bit_cast<uint32_t>(
static_cast<int32_t>(registers_[rd_reg()] & kMaxUInt32));
TraceRegWr(static_cast<int32_t>(MSACSR_));
break;
case CFCMSA:
- DCHECK(rd_reg() == kMSACSRRegister);
+ DCHECK_EQ(rd_reg(), kMSACSRRegister);
SetResult(sa(), static_cast<int64_t>(bit_cast<int32_t>(MSACSR_)));
break;
- case MOVE_V:
- UNIMPLEMENTED();
- break;
+ case MOVE_V: {
+ msa_reg_t ws;
+ get_msa_register(ws_reg(), &ws);
+ set_msa_register(wd_reg(), &ws);
+ TraceMSARegWr(&ws);
+ } break;
default:
opcode &= kMsaELMMask;
switch (opcode) {
@@ -4890,28 +4897,28 @@ void Simulator::DecodeTypeMsaELM() {
msa_reg_t ws;
switch (DecodeMsaDataFormat()) {
case MSA_BYTE:
- DCHECK(n < kMSALanesByte);
+ DCHECK_LT(n, kMSALanesByte);
get_msa_register(instr_.WsValue(), ws.b);
alu_out = static_cast<int32_t>(ws.b[n]);
SetResult(wd_reg(),
(opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
break;
case MSA_HALF:
- DCHECK(n < kMSALanesHalf);
+ DCHECK_LT(n, kMSALanesHalf);
get_msa_register(instr_.WsValue(), ws.h);
alu_out = static_cast<int32_t>(ws.h[n]);
SetResult(wd_reg(),
(opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
break;
case MSA_WORD:
- DCHECK(n < kMSALanesWord);
+ DCHECK_LT(n, kMSALanesWord);
get_msa_register(instr_.WsValue(), ws.w);
alu_out = static_cast<int32_t>(ws.w[n]);
SetResult(wd_reg(),
(opcode == COPY_U) ? alu_out & 0xFFFFFFFFu : alu_out);
break;
case MSA_DWORD:
- DCHECK(n < kMSALanesDword);
+ DCHECK_LT(n, kMSALanesDword);
get_msa_register(instr_.WsValue(), ws.d);
alu_out = static_cast<int64_t>(ws.d[n]);
SetResult(wd_reg(), alu_out);
@@ -4924,7 +4931,7 @@ void Simulator::DecodeTypeMsaELM() {
msa_reg_t wd;
switch (DecodeMsaDataFormat()) {
case MSA_BYTE: {
- DCHECK(n < kMSALanesByte);
+ DCHECK_LT(n, kMSALanesByte);
int64_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.b);
wd.b[n] = rs & 0xFFu;
@@ -4933,7 +4940,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_HALF: {
- DCHECK(n < kMSALanesHalf);
+ DCHECK_LT(n, kMSALanesHalf);
int64_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.h);
wd.h[n] = rs & 0xFFFFu;
@@ -4942,7 +4949,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_WORD: {
- DCHECK(n < kMSALanesWord);
+ DCHECK_LT(n, kMSALanesWord);
int64_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.w);
wd.w[n] = rs & 0xFFFFFFFFu;
@@ -4951,7 +4958,7 @@ void Simulator::DecodeTypeMsaELM() {
break;
}
case MSA_DWORD: {
- DCHECK(n < kMSALanesDword);
+ DCHECK_LT(n, kMSALanesDword);
int64_t rs = get_register(instr_.WsValue());
get_msa_register(instr_.WdValue(), wd.d);
wd.d[n] = rs;
@@ -4963,7 +4970,50 @@ void Simulator::DecodeTypeMsaELM() {
UNREACHABLE();
}
} break;
- case SLDI:
+ case SLDI: {
+ uint8_t v[32];
+ msa_reg_t ws;
+ msa_reg_t wd;
+ get_msa_register(ws_reg(), &ws);
+ get_msa_register(wd_reg(), &wd);
+#define SLDI_DF(s, k) \
+ for (unsigned i = 0; i < s; i++) { \
+ v[i] = ws.b[s * k + i]; \
+ v[i + s] = wd.b[s * k + i]; \
+ } \
+ for (unsigned i = 0; i < s; i++) { \
+ wd.b[s * k + i] = v[i + n]; \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ DCHECK(n < kMSALanesByte);
+ SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0)
+ break;
+ case MSA_HALF:
+ DCHECK(n < kMSALanesHalf);
+ for (int k = 0; k < 2; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k)
+ }
+ break;
+ case MSA_WORD:
+ DCHECK(n < kMSALanesWord);
+ for (int k = 0; k < 4; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k)
+ }
+ break;
+ case MSA_DWORD:
+ DCHECK(n < kMSALanesDword);
+ for (int k = 0; k < 8; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k)
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
+ } break;
+#undef SLDI_DF
case SPLATI:
case INSVE:
UNIMPLEMENTED();
@@ -5063,7 +5113,7 @@ T Simulator::MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m) {
}
void Simulator::DecodeTypeMsaBIT() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaBITMask;
int32_t m = instr_.MsaBitMValue();
@@ -5100,10 +5150,11 @@ void Simulator::DecodeTypeMsaBIT() {
default:
UNREACHABLE();
}
+#undef MSA_BIT_DF
}
void Simulator::DecodeTypeMsaMI10() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaMI10Mask;
int64_t s10 = (static_cast<int64_t>(instr_.MsaImmMI10Value()) << 54) >> 54;
@@ -5382,13 +5433,6 @@ T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
case DPSUB_U:
case SLD:
case SPLAT:
- case PCKEV:
- case PCKOD:
- case ILVL:
- case ILVR:
- case ILVEV:
- case ILVOD:
- case VSHF:
UNIMPLEMENTED();
break;
case SRAR: {
@@ -5400,108 +5444,616 @@ T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
int bit = wt_modulo == 0 ? 0 : (wsu >> (wt_modulo - 1)) & 1;
res = static_cast<T>((wsu >> wt_modulo) + bit);
} break;
+ default:
+ UNREACHABLE();
+ }
+ return res;
+}
+template <typename T_int, typename T_reg>
+void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt,
+ T_reg wd, const int i, const int num_of_lanes) {
+ T_int *ws_p, *wt_p, *wd_p;
+ ws_p = reinterpret_cast<T_int*>(ws);
+ wt_p = reinterpret_cast<T_int*>(wt);
+ wd_p = reinterpret_cast<T_int*>(wd);
+ switch (opcode) {
+ case PCKEV:
+ wd_p[i] = wt_p[2 * i];
+ wd_p[i + num_of_lanes / 2] = ws_p[2 * i];
+ break;
+ case PCKOD:
+ wd_p[i] = wt_p[2 * i + 1];
+ wd_p[i + num_of_lanes / 2] = ws_p[2 * i + 1];
+ break;
+ case ILVL:
+ wd_p[2 * i] = wt_p[i + num_of_lanes / 2];
+ wd_p[2 * i + 1] = ws_p[i + num_of_lanes / 2];
+ break;
+ case ILVR:
+ wd_p[2 * i] = wt_p[i];
+ wd_p[2 * i + 1] = ws_p[i];
+ break;
+ case ILVEV:
+ wd_p[2 * i] = wt_p[2 * i];
+ wd_p[2 * i + 1] = ws_p[2 * i];
+ break;
+ case ILVOD:
+ wd_p[2 * i] = wt_p[2 * i + 1];
+ wd_p[2 * i + 1] = ws_p[2 * i + 1];
+ break;
+ case VSHF: {
+ const int mask_not_valid = 0xc0;
+ const int mask_6_bits = 0x3f;
+ if ((wd_p[i] & mask_not_valid)) {
+ wd_p[i] = 0;
+ } else {
+ int k = (wd_p[i] & mask_6_bits) % (num_of_lanes * 2);
+ wd_p[i] = k >= num_of_lanes ? ws_p[k - num_of_lanes] : wt_p[k];
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <typename T_int, typename T_smaller_int, typename T_reg>
+void Msa3RInstrHelper_horizontal(const uint32_t opcode, T_reg ws, T_reg wt,
+ T_reg wd, const int i,
+ const int num_of_lanes) {
+ typedef typename std::make_unsigned<T_int>::type T_uint;
+ typedef typename std::make_unsigned<T_smaller_int>::type T_smaller_uint;
+ T_int* wd_p;
+ T_smaller_int *ws_p, *wt_p;
+ ws_p = reinterpret_cast<T_smaller_int*>(ws);
+ wt_p = reinterpret_cast<T_smaller_int*>(wt);
+ wd_p = reinterpret_cast<T_int*>(wd);
+ T_uint* wd_pu;
+ T_smaller_uint *ws_pu, *wt_pu;
+ ws_pu = reinterpret_cast<T_smaller_uint*>(ws);
+ wt_pu = reinterpret_cast<T_smaller_uint*>(wt);
+ wd_pu = reinterpret_cast<T_uint*>(wd);
+ switch (opcode) {
case HADD_S:
+ wd_p[i] =
+ static_cast<T_int>(ws_p[2 * i + 1]) + static_cast<T_int>(wt_p[2 * i]);
+ break;
case HADD_U:
+ wd_pu[i] = static_cast<T_uint>(ws_pu[2 * i + 1]) +
+ static_cast<T_uint>(wt_pu[2 * i]);
+ break;
case HSUB_S:
+ wd_p[i] =
+ static_cast<T_int>(ws_p[2 * i + 1]) - static_cast<T_int>(wt_p[2 * i]);
+ break;
case HSUB_U:
- UNIMPLEMENTED();
+ wd_pu[i] = static_cast<T_uint>(ws_pu[2 * i + 1]) -
+ static_cast<T_uint>(wt_pu[2 * i]);
break;
default:
UNREACHABLE();
}
- return res;
}
void Simulator::DecodeTypeMsa3R() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
msa_reg_t ws, wd, wt;
-
+ get_msa_register(ws_reg(), &ws);
+ get_msa_register(wt_reg(), &wt);
+ get_msa_register(wd_reg(), &wd);
+ switch (opcode) {
+ case HADD_S:
+ case HADD_U:
+ case HSUB_S:
+ case HSUB_U:
+#define HORIZONTAL_ARITHMETIC_DF(num_of_lanes, int_type, lesser_int_type) \
+ for (int i = 0; i < num_of_lanes; ++i) { \
+ Msa3RInstrHelper_horizontal<int_type, lesser_int_type>( \
+ opcode, &ws, &wt, &wd, i, num_of_lanes); \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ HORIZONTAL_ARITHMETIC_DF(kMSALanesHalf, int16_t, int8_t);
+ break;
+ case MSA_WORD:
+ HORIZONTAL_ARITHMETIC_DF(kMSALanesWord, int32_t, int16_t);
+ break;
+ case MSA_DWORD:
+ HORIZONTAL_ARITHMETIC_DF(kMSALanesDword, int64_t, int32_t);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef HORIZONTAL_ARITHMETIC_DF
+ case VSHF:
+#define VSHF_DF(num_of_lanes, int_type) \
+ for (int i = 0; i < num_of_lanes; ++i) { \
+ Msa3RInstrHelper_shuffle<int_type>(opcode, &ws, &wt, &wd, i, \
+ num_of_lanes); \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ VSHF_DF(kMSALanesByte, int8_t);
+ break;
+ case MSA_HALF:
+ VSHF_DF(kMSALanesHalf, int16_t);
+ break;
+ case MSA_WORD:
+ VSHF_DF(kMSALanesWord, int32_t);
+ break;
+ case MSA_DWORD:
+ VSHF_DF(kMSALanesDword, int64_t);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef VSHF_DF
+ break;
+ case PCKEV:
+ case PCKOD:
+ case ILVL:
+ case ILVR:
+ case ILVEV:
+ case ILVOD:
+#define INTERLEAVE_PACK_DF(num_of_lanes, int_type) \
+ for (int i = 0; i < num_of_lanes / 2; ++i) { \
+ Msa3RInstrHelper_shuffle<int_type>(opcode, &ws, &wt, &wd, i, \
+ num_of_lanes); \
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ INTERLEAVE_PACK_DF(kMSALanesByte, int8_t);
+ break;
+ case MSA_HALF:
+ INTERLEAVE_PACK_DF(kMSALanesHalf, int16_t);
+ break;
+ case MSA_WORD:
+ INTERLEAVE_PACK_DF(kMSALanesWord, int32_t);
+ break;
+ case MSA_DWORD:
+ INTERLEAVE_PACK_DF(kMSALanesDword, int64_t);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef INTERLEAVE_PACK_DF
+ default:
#define MSA_3R_DF(elem, num_of_lanes) \
- get_msa_register(instr_.WdValue(), wd.elem); \
- get_msa_register(instr_.WsValue(), ws.elem); \
- get_msa_register(instr_.WtValue(), wt.elem); \
for (int i = 0; i < num_of_lanes; i++) { \
wd.elem[i] = Msa3RInstrHelper(opcode, wd.elem[i], ws.elem[i], wt.elem[i]); \
- } \
- set_msa_register(instr_.WdValue(), wd.elem); \
- TraceMSARegWr(wd.elem);
+ }
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE:
- MSA_3R_DF(b, kMSALanesByte);
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ MSA_3R_DF(b, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ MSA_3R_DF(h, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ MSA_3R_DF(w, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ MSA_3R_DF(d, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef MSA_3R_DF
break;
- case MSA_HALF:
- MSA_3R_DF(h, kMSALanesHalf);
+ }
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
+}
+
+template <typename T_int, typename T_fp, typename T_reg>
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+ const T_int all_ones = static_cast<T_int>(-1);
+ const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
+ const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
+ switch (opcode) {
+ case FCUN: {
+ if (std::isnan(s_element) || std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCEQ: {
+ if (s_element != t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCUEQ: {
+ if (s_element == t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCLT: {
+ if (s_element >= t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCULT: {
+ if (s_element < t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCLE: {
+ if (s_element > t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCULE: {
+ if (s_element <= t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCOR: {
+ if (std::isnan(s_element) || std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FCUNE: {
+ if (s_element != t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = all_ones;
+ } else {
+ wd = 0;
+ }
+ } break;
+ case FCNE: {
+ if (s_element == t_element || std::isnan(s_element) ||
+ std::isnan(t_element)) {
+ wd = 0;
+ } else {
+ wd = all_ones;
+ }
+ } break;
+ case FADD:
+ wd = bit_cast<T_int>(s_element + t_element);
break;
- case MSA_WORD:
- MSA_3R_DF(w, kMSALanesWord);
+ case FSUB:
+ wd = bit_cast<T_int>(s_element - t_element);
break;
- case MSA_DWORD:
- MSA_3R_DF(d, kMSALanesDword);
+ case FMUL:
+ wd = bit_cast<T_int>(s_element * t_element);
+ break;
+ case FDIV: {
+ if (t_element == 0) {
+ wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ wd = bit_cast<T_int>(s_element / t_element);
+ }
+ } break;
+ case FMADD:
+ wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ break;
+ case FMSUB:
+ wd = bit_cast<T_int>(
+ std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ break;
+ case FEXP2:
+ wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ break;
+ case FMIN:
+ wd = bit_cast<T_int>(std::min(s_element, t_element));
+ break;
+ case FMAX:
+ wd = bit_cast<T_int>(std::max(s_element, t_element));
+ break;
+ case FMIN_A: {
+ wd = bit_cast<T_int>(
+ std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
+ } break;
+ case FMAX_A: {
+ wd = bit_cast<T_int>(
+ std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
+ } break;
+ case FSOR:
+ case FSUNE:
+ case FSNE:
+ case FSAF:
+ case FSUN:
+ case FSEQ:
+ case FSUEQ:
+ case FSLT:
+ case FSULT:
+ case FSLE:
+ case FSULE:
+ UNIMPLEMENTED();
break;
default:
UNREACHABLE();
}
-#undef MSA_3R_DF
+}
+
+template <typename T_int, typename T_int_dbl, typename T_reg>
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+ // typedef typename std::make_unsigned<T_int>::type T_uint;
+ typedef typename std::make_unsigned<T_int_dbl>::type T_uint_dbl;
+ const T_int max_int = std::numeric_limits<T_int>::max();
+ const T_int min_int = std::numeric_limits<T_int>::min();
+ const int shift = kBitsPerByte * sizeof(T_int) - 1;
+ const T_int_dbl reg_s = ws;
+ const T_int_dbl reg_t = wt;
+ T_int_dbl product, result;
+ product = reg_s * reg_t;
+ switch (opcode) {
+ case MUL_Q: {
+ const T_int_dbl min_fix_dbl =
+ bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
+ const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
+ if (product == min_fix_dbl) {
+ product = max_fix_dbl;
+ }
+ wd = static_cast<T_int>(product >> shift);
+ } break;
+ case MADD_Q: {
+ result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ case MSUB_Q: {
+ result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ case MULR_Q: {
+ const T_int_dbl min_fix_dbl =
+ bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
+ const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
+ if (product == min_fix_dbl) {
+ wd = static_cast<T_int>(max_fix_dbl >> shift);
+ break;
+ }
+ wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ } break;
+ case MADDR_Q: {
+ result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ (1 << (shift - 1))) >>
+ shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ case MSUBR_Q: {
+ result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ (1 << (shift - 1))) >>
+ shift;
+ wd = static_cast<T_int>(
+ result > max_int ? max_int : result < min_int ? min_int : result);
+ } break;
+ default:
+ UNREACHABLE();
+ }
}
void Simulator::DecodeTypeMsa3RF() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ msa_reg_t wd, ws, wt;
+ if (opcode != FCAF) {
+ get_msa_register(ws_reg(), &ws);
+ get_msa_register(wt_reg(), &wt);
+ }
switch (opcode) {
case FCAF:
- case FCUN:
- case FCEQ:
- case FCUEQ:
- case FCLT:
- case FCULT:
- case FCLE:
- case FCULE:
- case FSAF:
- case FSUN:
- case FSEQ:
- case FSUEQ:
- case FSLT:
- case FSULT:
- case FSLE:
- case FSULE:
- case FADD:
- case FSUB:
- case FMUL:
- case FDIV:
- case FMADD:
- case FMSUB:
- case FEXP2:
+ wd.d[0] = 0;
+ wd.d[1] = 0;
+ break;
case FEXDO:
+#define PACK_FLOAT16(sign, exp, frac) \
+ static_cast<uint16_t>(((sign) << 15) + ((exp) << 10) + (frac))
+#define FEXDO_DF(source, dst) \
+ do { \
+ element = source; \
+ aSign = element >> 31; \
+ aExp = element >> 23 & 0xFF; \
+ aFrac = element & 0x007FFFFF; \
+ if (aExp == 0xFF) { \
+ if (aFrac) { \
+ /* Input is a NaN */ \
+ dst = 0x7DFFU; \
+ break; \
+ } \
+ /* Infinity */ \
+ dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ break; \
+ } else if (aExp == 0 && aFrac == 0) { \
+ dst = PACK_FLOAT16(aSign, 0, 0); \
+ break; \
+ } else { \
+ int maxexp = 29; \
+ uint32_t mask; \
+ uint32_t increment; \
+ bool rounding_bumps_exp; \
+ aFrac |= 0x00800000; \
+ aExp -= 0x71; \
+ if (aExp < 1) { \
+ /* Will be denormal in halfprec */ \
+ mask = 0x00ffffff; \
+ if (aExp >= -11) { \
+ mask >>= 11 + aExp; \
+ } \
+ } else { \
+ /* Normal number in halfprec */ \
+ mask = 0x00001fff; \
+ } \
+ switch (MSACSR_ & 3) { \
+ case kRoundToNearest: \
+ increment = (mask + 1) >> 1; \
+ if ((aFrac & mask) == increment) { \
+ increment = aFrac & (increment << 1); \
+ } \
+ break; \
+ case kRoundToPlusInf: \
+ increment = aSign ? 0 : mask; \
+ break; \
+ case kRoundToMinusInf: \
+ increment = aSign ? mask : 0; \
+ break; \
+ case kRoundToZero: \
+ increment = 0; \
+ break; \
+ } \
+ rounding_bumps_exp = (aFrac + increment >= 0x01000000); \
+ if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \
+ dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ break; \
+ } \
+ aFrac += increment; \
+ if (rounding_bumps_exp) { \
+ aFrac >>= 1; \
+ aExp++; \
+ } \
+ if (aExp < -10) { \
+ dst = PACK_FLOAT16(aSign, 0, 0); \
+ break; \
+ } \
+ if (aExp < 0) { \
+ aFrac >>= -aExp; \
+ aExp = 0; \
+ } \
+ dst = PACK_FLOAT16(aSign, aExp, aFrac >> 13); \
+ } \
+ } while (0);
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ uint_fast32_t element;
+ uint_fast32_t aSign, aFrac;
+ int_fast32_t aExp;
+ FEXDO_DF(ws.uw[i], wd.uh[i + kMSALanesHalf / 2])
+ FEXDO_DF(wt.uw[i], wd.uh[i])
+ }
+ break;
+ case MSA_WORD:
+ for (int i = 0; i < kMSALanesDword; i++) {
+ wd.w[i + kMSALanesWord / 2] = bit_cast<int32_t>(
+ static_cast<float>(bit_cast<double>(ws.d[i])));
+ wd.w[i] = bit_cast<int32_t>(
+ static_cast<float>(bit_cast<double>(wt.d[i])));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef PACK_FLOAT16
+#undef FEXDO_DF
case FTQ:
- case FMIN:
- case FMIN_A:
- case FMAX:
- case FMAX_A:
- case FCOR:
- case FCUNE:
- case FCNE:
- case MUL_Q:
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, element, fixed_point); \
+ dst = fixed_point; \
+ }
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ float element;
+ FTQ_DF(ws.w[i], wd.h[i + kMSALanesHalf / 2], float, int16_t)
+ FTQ_DF(wt.w[i], wd.h[i], float, int16_t)
+ }
+ break;
+ case MSA_WORD:
+ double element;
+ for (int i = 0; i < kMSALanesDword; i++) {
+ FTQ_DF(ws.d[i], wd.w[i + kMSALanesWord / 2], double, int32_t)
+ FTQ_DF(wt.d[i], wd.w[i], double, int32_t)
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef FTQ_DF
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+ }
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+ }
case MADD_Q:
case MSUB_Q:
- case FSOR:
- case FSUNE:
- case FSNE:
- case MULR_Q:
case MADDR_Q:
case MSUBR_Q:
- UNIMPLEMENTED();
+ get_msa_register(wd_reg(), &wd); // fall-through
+ case MUL_Q:
+ case MULR_Q:
+ switch (DecodeMsaDataFormat()) {
+ case MSA_HALF:
+ MSA_3RF_DF2(int16_t, int32_t, kMSALanesHalf, ws.h[i], wt.h[i],
+ wd.h[i])
+ break;
+ case MSA_WORD:
+ MSA_3RF_DF2(int32_t, int64_t, kMSALanesWord, ws.w[i], wt.w[i],
+ wd.w[i])
+ break;
+ default:
+ UNREACHABLE();
+ }
break;
default:
- UNREACHABLE();
+ if (opcode == FMADD || opcode == FMSUB) {
+ get_msa_register(wd_reg(), &wd);
+ }
+ switch (DecodeMsaDataFormat()) {
+ case MSA_WORD:
+ MSA_3RF_DF(int32_t, float, kMSALanesWord, ws.w[i], wt.w[i], wd.w[i])
+ break;
+ case MSA_DWORD:
+ MSA_3RF_DF(int64_t, double, kMSALanesDword, ws.d[i], wt.d[i], wd.d[i])
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+#undef MSA_3RF_DF
+#undef MSA_3RF_DF2
}
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
}
void Simulator::DecodeTypeMsaVec() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
msa_reg_t wd, ws, wt;
@@ -5544,7 +6096,7 @@ void Simulator::DecodeTypeMsaVec() {
}
void Simulator::DecodeTypeMsa2R() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
msa_reg_t wd, ws;
@@ -5594,7 +6146,7 @@ void Simulator::DecodeTypeMsa2R() {
get_msa_register(instr_.WsValue(), ws.elem); \
for (int i = 0; i < num_of_lanes; i++) { \
uint64_t u64elem = static_cast<uint64_t>(ws.elem[i]); \
- wd.elem[i] = base::bits::CountPopulation64(u64elem); \
+ wd.elem[i] = base::bits::CountPopulation(u64elem); \
} \
set_msa_register(instr_.WdValue(), wd.elem); \
TraceMSARegWr(wd.elem)
@@ -5766,8 +6318,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
dst = 0;
- } else if (element > max_int || element < min_int) {
- dst = element > max_int ? max_int : min_int;
+ } else if (element >= max_int || element <= min_int) {
+ dst = element >= max_int ? max_int : min_int;
} else {
dst = static_cast<T_int>(std::trunc(element));
}
@@ -5778,8 +6330,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
dst = 0;
- } else if (element > max_int || element < 0) {
- dst = element > max_int ? max_int : 0;
+ } else if (element >= max_int || element <= 0) {
+ dst = element >= max_int ? max_int : 0;
} else {
dst = static_cast<T_uint>(std::trunc(element));
}
@@ -5888,8 +6440,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
return 0;
}
-template <typename T_int, typename T_fp, typename T_reg, typename T_i>
-T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, T_i i) {
+template <typename T_int, typename T_fp, typename T_reg>
+T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
switch (opcode) {
#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
@@ -5965,7 +6517,7 @@ T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, T_i i) {
}
void Simulator::DecodeTypeMsa2RF() {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
msa_reg_t wd, ws;
@@ -6119,6 +6671,30 @@ void Simulator::DecodeTypeImmediate() {
}
};
+ auto BranchHelper_MSA = [this, &next_pc, imm16,
+ &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int64_t current_pc = get_pc();
+ const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte;
+ if (do_branch) {
+ if (FLAG_debug_code) {
+ int16_t bits = imm16 & 0xfc;
+ if (imm16 >= 0) {
+ CHECK_EQ(bits, 0);
+ } else {
+ CHECK_EQ(bits ^ 0xfc, 0);
+ }
+ }
+ // jump range :[pc + kInstrSize - 512 * kInstrSize,
+ // pc + kInstrSize + 511 * kInstrSize]
+ int16_t offset = static_cast<int16_t>(imm16 << (bitsIn16Int - 10)) >>
+ (bitsIn16Int - 12);
+ next_pc = current_pc + offset + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
int64_t current_pc = get_pc();
CheckForbiddenSlot(current_pc);
@@ -6160,18 +6736,66 @@ void Simulator::DecodeTypeImmediate() {
case BC1NEZ:
BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
- case BZ_V:
+ case BZ_V: {
+ msa_reg_t wt;
+ get_msa_register(wt_reg(), &wt);
+ BranchHelper_MSA(wt.d[0] == 0 && wt.d[1] == 0);
+ } break;
+#define BZ_DF(witdh, lanes) \
+ { \
+ msa_reg_t wt; \
+ get_msa_register(wt_reg(), &wt); \
+ int i; \
+ for (i = 0; i < lanes; ++i) { \
+ if (wt.witdh[i] == 0) { \
+ break; \
+ } \
+ } \
+ BranchHelper_MSA(i != lanes); \
+ }
case BZ_B:
+ BZ_DF(b, kMSALanesByte)
+ break;
case BZ_H:
+ BZ_DF(h, kMSALanesHalf)
+ break;
case BZ_W:
+ BZ_DF(w, kMSALanesWord)
+ break;
case BZ_D:
- case BNZ_V:
+ BZ_DF(d, kMSALanesDword)
+ break;
+#undef BZ_DF
+ case BNZ_V: {
+ msa_reg_t wt;
+ get_msa_register(wt_reg(), &wt);
+ BranchHelper_MSA(wt.d[0] != 0 || wt.d[1] != 0);
+ } break;
+#define BNZ_DF(witdh, lanes) \
+ { \
+ msa_reg_t wt; \
+ get_msa_register(wt_reg(), &wt); \
+ int i; \
+ for (i = 0; i < lanes; ++i) { \
+ if (wt.witdh[i] == 0) { \
+ break; \
+ } \
+ } \
+ BranchHelper_MSA(i == lanes); \
+ }
case BNZ_B:
+ BNZ_DF(b, kMSALanesByte)
+ break;
case BNZ_H:
+ BNZ_DF(h, kMSALanesHalf)
+ break;
case BNZ_W:
+ BNZ_DF(w, kMSALanesWord)
+ break;
case BNZ_D:
- UNIMPLEMENTED();
+ BNZ_DF(d, kMSALanesDword)
break;
+#undef BNZ_DF
default:
UNREACHABLE();
}
@@ -6376,7 +7000,7 @@ void Simulator::DecodeTypeImmediate() {
case LUI:
if (rs_reg != 0) {
// AUI instruction.
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
int32_t alu32_out = static_cast<int32_t>(rs + (se_imm16 << 16));
SetResult(rt_reg, static_cast<int64_t>(alu32_out));
} else {
@@ -6387,8 +7011,8 @@ void Simulator::DecodeTypeImmediate() {
}
break;
case DAUI:
- DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs_reg != 0);
+ DCHECK_EQ(kArchVariant, kMips64r6);
+ DCHECK_NE(rs_reg, 0);
SetResult(rt_reg, rs + (se_imm16 << 16));
break;
// ------------- Memory instructions.
@@ -6513,26 +7137,26 @@ void Simulator::DecodeTypeImmediate() {
}
case LL: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
break;
}
case SC: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr_.instr());
set_register(rt_reg, 1);
break;
}
case LLD: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
set_register(rt_reg, ReadD(rs + se_imm16, instr_.instr()));
break;
}
case SCD: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kArchVariant, kMips64r2);
WriteD(rs + se_imm16, rt, instr_.instr());
set_register(rt_reg, 1);
break;
@@ -6624,7 +7248,7 @@ void Simulator::DecodeTypeImmediate() {
switch (instr_.FunctionFieldRaw()) {
case LL_R6: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
set_register(rt_reg, ReadW(base + offset9, instr_.instr()));
@@ -6632,7 +7256,7 @@ void Simulator::DecodeTypeImmediate() {
}
case LLD_R6: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
set_register(rt_reg, ReadD(base + offset9, instr_.instr()));
@@ -6640,7 +7264,7 @@ void Simulator::DecodeTypeImmediate() {
}
case SC_R6: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
WriteW(base + offset9, static_cast<int32_t>(rt), instr_.instr());
@@ -6649,7 +7273,7 @@ void Simulator::DecodeTypeImmediate() {
}
case SCD_R6: {
// LL/SC sequence cannot be simulated properly
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK_EQ(kArchVariant, kMips64r6);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
WriteD(base + offset9, rt, instr_.instr());
@@ -6888,7 +7512,7 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up arguments.
// First four arguments passed in registers in both ABI's.
- DCHECK(argument_count >= 4);
+ DCHECK_GE(argument_count, 4);
set_register(a0, va_arg(parameters, int64_t));
set_register(a1, va_arg(parameters, int64_t));
set_register(a2, va_arg(parameters, int64_t));
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index f15659c7b3..ec6c39e288 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -111,15 +111,19 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(HeapObject* obj, int offset) {
- if (offset < kSize) return true;
+ if (offset < kSizeWithoutPrototype) return true;
+ if (offset < kSizeWithPrototype && obj->map()->has_prototype_slot()) {
+ return true;
+ }
return IsValidSlotImpl(obj, offset);
}
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, kPropertiesOrHashOffset, kSize, v);
- IterateBodyImpl(obj, kSize, object_size, v);
+ int header_size = JSFunction::cast(obj)->GetHeaderSize();
+ IteratePointers(obj, kPropertiesOrHashOffset, header_size, v);
+ IterateBodyImpl(obj, header_size, object_size, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -351,9 +355,10 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
kSourcePositionTableOffset);
STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
- kTypeFeedbackInfoOffset);
- STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize ==
- kNextCodeLinkOffset);
+ kProtectedInstructionsOffset);
+ STATIC_ASSERT(kProtectedInstructionsOffset + kPointerSize ==
+ kCodeDataContainerOffset);
+ STATIC_ASSERT(kCodeDataContainerOffset + kPointerSize == kDataStart);
static bool IsValidSlot(HeapObject* obj, int offset) {
// Slots in code can't be invalid because we never trim code objects.
@@ -369,12 +374,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- IteratePointers(obj, kRelocationInfoOffset, kNextCodeLinkOffset, v);
- v->VisitNextCodeLink(Code::cast(obj),
- HeapObject::RawField(obj, kNextCodeLinkOffset));
-
// GC does not visit data/code in the header and in the body directly.
- STATIC_ASSERT(Code::kNextCodeLinkOffset + kPointerSize == kDataStart);
+ IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
RelocIterator it(Code::cast(obj), mode_mask);
Isolate* isolate = obj->GetIsolate();
@@ -455,6 +456,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return ReturnType();
case PROPERTY_ARRAY_TYPE:
return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3);
+ case DESCRIPTOR_ARRAY_TYPE:
+ return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3);
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
case FEEDBACK_VECTOR_TYPE:
@@ -533,6 +536,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return Op::template apply<
SmallOrderedHashTable<SmallOrderedHashMap>::BodyDescriptor>(p1, p2,
p3);
+ case CODE_DATA_CONTAINER_TYPE:
+ return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3);
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FILLER_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e403fe9b25..f1f49d5c45 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -13,7 +13,7 @@
#include "src/layout-descriptor.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
-#include "src/objects/bigint-inl.h"
+#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module.h"
@@ -89,6 +89,9 @@ void HeapObject::HeapObjectVerify() {
case BYTECODE_ARRAY_TYPE:
BytecodeArray::cast(this)->BytecodeArrayVerify();
break;
+ case DESCRIPTOR_ARRAY_TYPE:
+ DescriptorArray::cast(this)->DescriptorArrayVerify();
+ break;
case TRANSITION_ARRAY_TYPE:
TransitionArray::cast(this)->TransitionArrayVerify();
break;
@@ -238,6 +241,9 @@ void HeapObject::HeapObjectVerify() {
case SMALL_ORDERED_HASH_MAP_TYPE:
SmallOrderedHashMap::cast(this)->SmallOrderedHashTableVerify();
break;
+ case CODE_DATA_CONTAINER_TYPE:
+ CodeDataContainer::cast(this)->CodeDataContainerVerify();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -263,7 +269,7 @@ void HeapObject::VerifyHeapPointer(Object* p) {
void Symbol::SymbolVerify() {
CHECK(IsSymbol());
CHECK(HasHashCode());
- CHECK(Hash() > 0u);
+ CHECK_GT(Hash(), 0);
CHECK(name()->IsUndefined(GetIsolate()) || name()->IsString());
}
@@ -304,7 +310,7 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
CHECK(external_pointer() ==
ExternalReference::fixed_typed_array_base_data_offset().address());
} else {
- CHECK(base_pointer() == nullptr);
+ CHECK_NULL(base_pointer());
}
}
@@ -326,16 +332,15 @@ void JSObject::JSObjectVerify() {
int actual_unused_property_fields = map()->GetInObjectProperties() +
property_array()->length() -
map()->NextFreePropertyIndex();
- if (map()->unused_property_fields() != actual_unused_property_fields) {
+ if (map()->UnusedPropertyFields() != actual_unused_property_fields) {
// There are two reasons why this can happen:
// - in the middle of StoreTransitionStub when the new extended backing
// store is already set into the object and the allocation of the
// MutableHeapNumber triggers GC while the map isn't updated yet.
// - deletion of the last property can leave additional backing store
// capacity behind.
- CHECK_GT(actual_unused_property_fields, map()->unused_property_fields());
- int delta =
- actual_unused_property_fields - map()->unused_property_fields();
+ CHECK_GT(actual_unused_property_fields, map()->UnusedPropertyFields());
+ int delta = actual_unused_property_fields - map()->UnusedPropertyFields();
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
DescriptorArray* descriptors = map()->instance_descriptors();
@@ -435,7 +440,7 @@ void Map::DictionaryMapVerify() {
CHECK(is_dictionary_map());
CHECK_EQ(kInvalidEnumCacheSentinel, EnumLength());
CHECK_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
- CHECK_EQ(0, unused_property_fields());
+ CHECK_EQ(0, UnusedPropertyFields());
CHECK_EQ(Map::GetVisitorId(this), visitor_id());
}
@@ -449,13 +454,6 @@ void FixedArray::FixedArrayVerify() {
Object* e = get(i);
VerifyPointer(e);
}
- Heap* heap = GetHeap();
- if (this == heap->empty_descriptor_array()) {
- DescriptorArray* descriptors = DescriptorArray::cast(this);
- CHECK_EQ(2, length());
- CHECK_EQ(0, descriptors->number_of_descriptors());
- CHECK_EQ(heap->empty_enum_cache(), descriptors->GetEnumCache());
- }
}
void PropertyArray::PropertyArrayVerify() {
@@ -486,12 +484,22 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
}
+void DescriptorArray::DescriptorArrayVerify() {
+ FixedArrayVerify();
+ if (number_of_descriptors_storage() == 0) {
+ Heap* heap = GetHeap();
+ CHECK_EQ(heap->empty_descriptor_array(), this);
+ CHECK_EQ(2, length());
+ CHECK_EQ(0, number_of_descriptors());
+ CHECK_EQ(heap->empty_enum_cache(), GetEnumCache());
+ } else {
+ CHECK_LT(2, length());
+ CHECK_LE(LengthFor(number_of_descriptors()), length());
+ }
+}
void TransitionArray::TransitionArrayVerify() {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- VerifyPointer(e);
- }
+ FixedArrayVerify();
CHECK_LE(LengthFor(number_of_transitions()), length());
}
@@ -675,7 +683,7 @@ void ConsString::ConsStringVerify() {
CHECK(this->first()->IsString());
CHECK(this->second() == GetHeap()->empty_string() ||
this->second()->IsString());
- CHECK(this->length() >= ConsString::kMinLength);
+ CHECK_GE(this->length(), ConsString::kMinLength);
CHECK(this->length() == this->first()->length() + this->second()->length());
if (this->IsFlat()) {
// A flat cons can only be created by String::SlowFlatten.
@@ -693,7 +701,7 @@ void ThinString::ThinStringVerify() {
void SlicedString::SlicedStringVerify() {
CHECK(!this->parent()->IsConsString());
CHECK(!this->parent()->IsSlicedString());
- CHECK(this->length() >= SlicedString::kMinLength);
+ CHECK_GE(this->length(), SlicedString::kMinLength);
}
@@ -714,9 +722,11 @@ void JSBoundFunction::JSBoundFunctionVerify() {
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
- VerifyObjectField(kPrototypeOrInitialMapOffset);
CHECK(code()->IsCode());
CHECK(map()->is_callable());
+ if (has_prototype_slot()) {
+ VerifyObjectField(kPrototypeOrInitialMapOffset);
+ }
}
@@ -739,23 +749,19 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
Isolate* isolate = GetIsolate();
CHECK(function_data()->IsUndefined(isolate) || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData() ||
- HasLazyDeserializationBuiltinId());
+ HasLazyDeserializationBuiltinId() || HasPreParsedScopeData());
CHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId() ||
HasInferredName());
int expected_map_index = Context::FunctionMapIndex(
- language_mode(), kind(), has_shared_name(), needs_home_object());
+ language_mode(), kind(), true, has_shared_name(), needs_home_object());
CHECK_EQ(expected_map_index, function_map_index());
if (scope_info()->length() > 0) {
CHECK(kind() == scope_info()->function_kind());
CHECK_EQ(kind() == kModule, scope_info()->scope_type() == MODULE_SCOPE);
}
-
- CHECK(preparsed_scope_data()->IsNull(isolate) ||
- preparsed_scope_data()->IsPreParsedScopeData());
- VerifyObjectField(kPreParsedScopeDataOffset);
}
@@ -764,7 +770,6 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
JSObjectVerify();
VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
- CHECK_EQ(GetHeap()->empty_fixed_array(), raw_properties_or_hash());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -794,7 +799,7 @@ void Oddball::OddballVerify() {
// Hidden oddballs have negative smis.
const int kLeastHiddenOddballNumber = -7;
CHECK_LE(value, 1);
- CHECK(value >= kLeastHiddenOddballNumber);
+ CHECK_GE(value, kLeastHiddenOddballNumber);
}
if (map() == heap->undefined_map()) {
CHECK(this == heap->undefined_value());
@@ -840,12 +845,18 @@ void WeakCell::WeakCellVerify() {
VerifyObjectField(kValueOffset);
}
+void CodeDataContainer::CodeDataContainerVerify() {
+ CHECK(IsCodeDataContainer());
+ VerifyObjectField(kNextCodeLinkOffset);
+ CHECK(next_code_link()->IsCode() ||
+ next_code_link()->IsUndefined(GetIsolate()));
+}
void Code::CodeVerify() {
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
kCodeAlignment));
relocation_info()->ObjectVerify();
- Address last_gc_pc = NULL;
+ Address last_gc_pc = nullptr;
Isolate* isolate = GetIsolate();
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify(isolate);
@@ -855,8 +866,6 @@ void Code::CodeVerify() {
last_gc_pc = it.rinfo()->pc();
}
}
- CHECK(raw_type_feedback_info() == Smi::kZero ||
- raw_type_feedback_info()->IsSmi() == is_stub());
}
@@ -925,7 +934,7 @@ void JSArray::JSArrayVerify() {
CHECK(length()->ToArrayLength(&array_length));
}
if (array_length != 0) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements());
+ NumberDictionary* dict = NumberDictionary::cast(elements());
// The dictionary can never have more elements than the array length + 1.
// If the backing store grows the verification might be triggered with
// the old length in place.
@@ -941,7 +950,7 @@ void JSSet::JSSetVerify() {
CHECK(IsJSSet());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(GetIsolate()));
+ CHECK(table()->IsOrderedHashSet() || table()->IsUndefined(GetIsolate()));
// TODO(arv): Verify OrderedHashTable too.
}
@@ -950,7 +959,7 @@ void JSMap::JSMapVerify() {
CHECK(IsJSMap());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(GetIsolate()));
+ CHECK(table()->IsOrderedHashMap() || table()->IsUndefined(GetIsolate()));
// TODO(arv): Verify OrderedHashTable too.
}
@@ -959,7 +968,7 @@ void JSSetIterator::JSSetIteratorVerify() {
CHECK(IsJSSetIterator());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable());
+ CHECK(table()->IsOrderedHashSet());
CHECK(index()->IsSmi());
}
@@ -968,7 +977,7 @@ void JSMapIterator::JSMapIteratorVerify() {
CHECK(IsJSMapIterator());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable());
+ CHECK(table()->IsOrderedHashMap());
CHECK(index()->IsSmi());
}
@@ -1130,7 +1139,6 @@ void JSProxy::JSProxyVerify() {
Isolate* isolate = GetIsolate();
CHECK_EQ(target()->IsCallable(), map()->is_callable());
CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
- CHECK(hash()->IsSmi() || hash()->IsUndefined(isolate));
CHECK(map()->prototype()->IsNull(isolate));
// There should be no properties on a Proxy.
CHECK_EQ(0, map()->NumberOfOwnDescriptors());
@@ -1222,7 +1230,13 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestVerify() {
next()->ObjectVerify();
}
-void BigInt::BigIntVerify() { CHECK(IsBigInt()); }
+void BigInt::BigIntVerify() {
+ CHECK(IsBigInt());
+ CHECK_GE(length(), 0);
+ CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
+ // TODO(neis): Somewhere check that MSD is non-zero. Doesn't hold during some
+ // operations that allocate which is why we can't test it here.
+}
void JSModuleNamespace::JSModuleNamespaceVerify() {
CHECK(IsJSModuleNamespace());
@@ -1255,6 +1269,7 @@ void Module::ModuleVerify() {
VerifyPointer(module_namespace());
VerifyPointer(requested_modules());
VerifyPointer(script());
+ VerifyPointer(import_meta());
VerifyPointer(exception());
VerifySmiField(kHashOffset);
VerifySmiField(kStatusOffset);
@@ -1275,6 +1290,8 @@ void Module::ModuleVerify() {
CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
+ CHECK(import_meta()->IsTheHole(GetIsolate()) || import_meta()->IsJSObject());
+
CHECK_NE(hash(), 0);
}
@@ -1411,8 +1428,10 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
Isolate* isolate = GetIsolate();
for (int i = 0; i < length(); i++) {
Object* e = FixedArray::get(i);
- if (e->IsMap()) {
- Map::cast(e)->DictionaryMapVerify();
+ if (e->IsWeakCell()) {
+ if (!WeakCell::cast(e)->cleared()) {
+ Map::cast(WeakCell::cast(e)->value())->DictionaryMapVerify();
+ }
} else {
CHECK(e->IsUndefined(isolate));
}
@@ -1452,7 +1471,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
if (HasFastProperties()) {
info->number_of_objects_with_fast_properties_++;
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
- info->number_of_fast_unused_fields_ += map()->unused_property_fields();
+ info->number_of_fast_unused_fields_ += map()->UnusedPropertyFields();
} else if (IsJSGlobalObject()) {
GlobalDictionary* dict = JSGlobalObject::cast(this)->global_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
@@ -1498,7 +1517,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
case DICTIONARY_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
- SeededNumberDictionary* dict = element_dictionary();
+ NumberDictionary* dict = element_dictionary();
info->number_of_slow_used_elements_ += dict->NumberOfElements();
info->number_of_slow_unused_elements_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -1552,7 +1571,7 @@ void JSObject::SpillInformation::Print() {
bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
if (valid_entries == -1) valid_entries = number_of_descriptors();
- Name* current_key = NULL;
+ Name* current_key = nullptr;
uint32_t current = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
Name* key = GetSortedKey(i);
@@ -1573,8 +1592,8 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
- DCHECK(valid_entries == -1);
- Name* prev_key = NULL;
+ DCHECK_EQ(valid_entries, -1);
+ Name* prev_key = nullptr;
PropertyKind prev_kind = kData;
PropertyAttributes prev_attributes = NONE;
uint32_t prev_hash = 0;
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index f29c4d8c49..77ad087268 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -33,9 +33,11 @@
#include "src/lookup.h"
#include "src/objects.h"
#include "src/objects/arguments-inl.h"
-#include "src/objects/bigint-inl.h"
+#include "src/objects/bigint.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/hash-table.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
#include "src/objects/regexp-match-info.h"
@@ -66,28 +68,28 @@ Smi* PropertyDetails::AsSmi() const {
int PropertyDetails::field_width_in_words() const {
- DCHECK(location() == kField);
+ DCHECK_EQ(location(), kField);
if (!FLAG_unbox_double_fields) return 1;
if (kDoubleSize == kPointerSize) return 1;
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
+TYPE_CHECKER(BigInt, BIGINT_TYPE)
TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
-TYPE_CHECKER(CallHandlerInfo, TUPLE2_TYPE)
+TYPE_CHECKER(CallHandlerInfo, TUPLE3_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
-TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
+TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
+TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
TYPE_CHECKER(HashTable, HASH_TABLE_TYPE)
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
TYPE_CHECKER(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
@@ -100,10 +102,8 @@ TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
-TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
@@ -113,6 +113,7 @@ TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PreParsedScopeData, TUPLE2_TYPE)
TYPE_CHECKER(PropertyArray, PROPERTY_ARRAY_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
+TYPE_CHECKER(PropertyDescriptorObject, FIXED_ARRAY_TYPE)
TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
@@ -139,10 +140,13 @@ bool HeapObject::IsFixedArrayBase() const {
bool HeapObject::IsFixedArray() const {
InstanceType instance_type = map()->instance_type();
- return instance_type == FIXED_ARRAY_TYPE || instance_type == HASH_TABLE_TYPE;
+ return instance_type >= FIRST_FIXED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_ARRAY_TYPE;
}
-bool HeapObject::IsSloppyArgumentsElements() const { return IsFixedArray(); }
+bool HeapObject::IsSloppyArgumentsElements() const {
+ return IsFixedArrayExact();
+}
bool HeapObject::IsJSSloppyArgumentsObject() const {
return IsJSArgumentsObject();
@@ -153,7 +157,11 @@ bool HeapObject::IsJSGeneratorObject() const {
IsJSAsyncGeneratorObject();
}
-bool HeapObject::IsBoilerplateDescription() const { return IsFixedArray(); }
+bool HeapObject::IsBoilerplateDescription() const {
+ return IsFixedArrayExact();
+}
+
+bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
bool HeapObject::IsExternal() const {
return map()->FindRootMap() == GetHeap()->external_map();
@@ -269,6 +277,8 @@ bool HeapObject::IsExternalTwoByteString() const {
bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
+bool Object::IsNumeric() const { return IsNumber() || IsBigInt(); }
+
bool HeapObject::IsFiller() const {
InstanceType instance_type = map()->instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
@@ -316,29 +326,23 @@ bool HeapObject::IsJSWeakCollection() const {
bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
-bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
-
-bool HeapObject::IsPropertyDescriptorObject() const { return IsFixedArray(); }
+bool HeapObject::IsPromiseCapability() const { return IsTuple3(); }
bool HeapObject::IsEnumCache() const { return IsTuple2(); }
-bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
+bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
-bool HeapObject::IsArrayList() const { return IsFixedArray(); }
+bool HeapObject::IsArrayList() const { return IsFixedArrayExact(); }
-bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
+bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
bool Object::IsLayoutDescriptor() const { return IsSmi() || IsByteArray(); }
-bool HeapObject::IsFeedbackVector() const {
- return map() == GetHeap()->feedback_vector_map();
-}
-
-bool HeapObject::IsFeedbackMetadata() const { return IsFixedArray(); }
+bool HeapObject::IsFeedbackMetadata() const { return IsFixedArrayExact(); }
-bool HeapObject::IsDeoptimizationInputData() const {
+bool HeapObject::IsDeoptimizationData() const {
// Must be a fixed array.
- if (!IsFixedArray()) return false;
+ if (!IsFixedArrayExact()) return false;
// There's no sure way to detect the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts we can
@@ -347,19 +351,19 @@ bool HeapObject::IsDeoptimizationInputData() const {
int length = FixedArray::cast(this)->length();
if (length == 0) return true;
- length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
- return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
+ length -= DeoptimizationData::kFirstDeoptEntryIndex;
+ return length >= 0 && length % DeoptimizationData::kDeoptEntrySize == 0;
}
bool HeapObject::IsHandlerTable() const {
- if (!IsFixedArray()) return false;
+ if (!IsFixedArrayExact()) return false;
// There's actually no way to see the difference between a fixed array and
// a handler table array.
return true;
}
bool HeapObject::IsTemplateList() const {
- if (!IsFixedArray()) return false;
+ if (!IsFixedArrayExact()) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
if (FixedArray::cast(this)->length() < 1) return false;
@@ -367,7 +371,7 @@ bool HeapObject::IsTemplateList() const {
}
bool HeapObject::IsDependentCode() const {
- if (!IsFixedArray()) return false;
+ if (!IsFixedArrayExact()) return false;
// There's actually no way to see the difference between a fixed array and
// a dependent codes array.
return true;
@@ -418,28 +422,29 @@ bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
-template <>
-inline bool Is<JSArray>(Object* obj) {
- return obj->IsJSArray();
+bool HeapObject::IsWeakHashTable() const {
+ return map() == GetHeap()->weak_hash_table_map();
}
-bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }
-
bool HeapObject::IsDictionary() const {
return IsHashTable() && this != GetHeap()->string_table();
}
-bool Object::IsNameDictionary() const { return IsDictionary(); }
-
-bool Object::IsGlobalDictionary() const { return IsDictionary(); }
+bool HeapObject::IsGlobalDictionary() const {
+ return map() == GetHeap()->global_dictionary_map();
+}
-bool Object::IsSeededNumberDictionary() const { return IsDictionary(); }
+bool HeapObject::IsNameDictionary() const {
+ return map() == GetHeap()->name_dictionary_map();
+}
-bool HeapObject::IsUnseededNumberDictionary() const {
- return map() == GetHeap()->unseeded_number_dictionary_map();
+bool HeapObject::IsNumberDictionary() const {
+ return map() == GetHeap()->number_dictionary_map();
}
-bool HeapObject::IsStringTable() const { return IsHashTable(); }
+bool HeapObject::IsStringTable() const {
+ return map() == GetHeap()->string_table_map();
+}
bool HeapObject::IsStringSet() const { return IsHashTable(); }
@@ -455,13 +460,13 @@ bool HeapObject::IsMapCache() const { return IsHashTable(); }
bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
-bool HeapObject::IsOrderedHashTable() const {
- return map() == GetHeap()->ordered_hash_table_map();
+bool HeapObject::IsOrderedHashSet() const {
+ return map() == GetHeap()->ordered_hash_set_map();
}
-bool Object::IsOrderedHashSet() const { return IsOrderedHashTable(); }
-
-bool Object::IsOrderedHashMap() const { return IsOrderedHashTable(); }
+bool HeapObject::IsOrderedHashMap() const {
+ return map() == GetHeap()->ordered_hash_map_map();
+}
bool Object::IsSmallOrderedHashTable() const {
return IsSmallOrderedHashSet() || IsSmallOrderedHashMap();
@@ -538,7 +543,6 @@ bool Object::IsMinusZero() const {
// ------------------------------------
// Cast operations
-CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(AccessCheckInfo)
CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(AccessorPair)
@@ -549,14 +553,10 @@ CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(BoilerplateDescription)
CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(CallHandlerInfo)
CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(Code)
CAST_ACCESSOR(ConstantElementsPair)
CAST_ACCESSOR(ContextExtension)
-CAST_ACCESSOR(DeoptimizationInputData)
-CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EnumCache)
CAST_ACCESSOR(FixedArray)
@@ -566,13 +566,8 @@ CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FunctionTemplateInfo)
CAST_ACCESSOR(GlobalDictionary)
-CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(InterceptorInfo)
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayBuffer)
-CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSArrayIterator)
CAST_ACCESSOR(JSAsyncFromSyncIterator)
CAST_ACCESSOR(JSAsyncGeneratorObject)
CAST_ACCESSOR(JSBoundFunction)
@@ -589,11 +584,9 @@ CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
@@ -616,7 +609,7 @@ CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(PrototypeInfo)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(SeededNumberDictionary)
+CAST_ACCESSOR(NumberDictionary)
CAST_ACCESSOR(SmallOrderedHashMap)
CAST_ACCESSOR(SmallOrderedHashSet)
CAST_ACCESSOR(Smi)
@@ -632,7 +625,6 @@ CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
CAST_ACCESSOR(TypeFeedbackInfo)
-CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
@@ -778,8 +770,16 @@ MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
// static
MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
- if (input->IsNumber()) return input;
- return ConvertToNumber(HeapObject::cast(*input)->GetIsolate(), input);
+ if (input->IsNumber()) return input; // Shortcut.
+ return ConvertToNumberOrNumeric(HeapObject::cast(*input)->GetIsolate(), input,
+ Conversion::kToNumber);
+}
+
+// static
+MaybeHandle<Object> Object::ToNumeric(Handle<Object> input) {
+ if (input->IsNumber() || input->IsBigInt()) return input; // Shortcut.
+ return ConvertToNumberOrNumeric(HeapObject::cast(*input)->GetIsolate(), input,
+ Conversion::kToNumeric);
}
// static
@@ -963,7 +963,7 @@ Heap* HeapObject::GetHeap() const {
Heap* heap = MemoryChunk::FromAddress(
reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
->heap();
- SLOW_DCHECK(heap != NULL);
+ SLOW_DCHECK(heap != nullptr);
return heap;
}
@@ -985,7 +985,7 @@ void HeapObject::set_map(Map* value) {
}
set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
- // TODO(1600) We are passing NULL as a slot because maps can never be on
+ // TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
}
@@ -1004,7 +1004,7 @@ void HeapObject::synchronized_set_map(Map* value) {
}
synchronized_set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
- // TODO(1600) We are passing NULL as a slot because maps can never be on
+ // TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
}
@@ -1024,8 +1024,8 @@ void HeapObject::set_map_no_write_barrier(Map* value) {
void HeapObject::set_map_after_allocation(Map* value, WriteBarrierMode mode) {
set_map_word(MapWord::FromMap(value));
if (mode != SKIP_WRITE_BARRIER) {
- DCHECK(value != nullptr);
- // TODO(1600) We are passing NULL as a slot because maps can never be on
+ DCHECK_NOT_NULL(value);
+ // TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
}
@@ -1214,7 +1214,7 @@ inline void AllocationSite::set_memento_found_count(int count) {
DCHECK((GetHeap()->MaxSemiSpaceSize() /
(Heap::kMinObjectSizeInWords * kPointerSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
- DCHECK(count < MementoFoundCountBits::kMax);
+ DCHECK_LT(count, MementoFoundCountBits::kMax);
set_pretenure_data(MementoFoundCountBits::update(value, count));
}
@@ -1451,12 +1451,16 @@ void WeakCell::initialize(HeapObject* val) {
bool WeakCell::cleared() const { return value() == Smi::kZero; }
-int JSObject::GetHeaderSize() {
+int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
+
+int JSObject::GetHeaderSize(const Map* map) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
- InstanceType type = map()->instance_type();
- return type == JS_OBJECT_TYPE ? JSObject::kHeaderSize : GetHeaderSize(type);
+ InstanceType instance_type = map->instance_type();
+ return instance_type == JS_OBJECT_TYPE
+ ? JSObject::kHeaderSize
+ : GetHeaderSize(instance_type, map->has_prototype_slot());
}
inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
@@ -1467,8 +1471,7 @@ inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
int JSObject::GetEmbedderFieldCount(const Map* map) {
int instance_size = map->instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
- InstanceType instance_type = map->instance_type();
- return ((instance_size - GetHeaderSize(instance_type)) >> kPointerSizeLog2) -
+ return ((instance_size - GetHeaderSize(map)) >> kPointerSizeLog2) -
map->GetInObjectProperties();
}
@@ -1641,7 +1644,7 @@ void JSObject::InitializeBody(Map* map, int start_offset,
int offset = start_offset;
if (filler_value != pre_allocated_value) {
int end_of_pre_allocated_offset =
- size - (map->unused_property_fields() * kPointerSize);
+ size - (map->UnusedPropertyFields() * kPointerSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(this, offset, pre_allocated_value);
@@ -1655,7 +1658,7 @@ void JSObject::InitializeBody(Map* map, int start_offset,
}
bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
- if (unused_property_fields() != 0) return false;
+ if (UnusedPropertyFields() != 0) return false;
if (is_prototype_map()) return false;
int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
int limit = Max(minimum, GetInObjectProperties());
@@ -1736,7 +1739,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
- DCHECK(IsFixedArray() || IsTransitionArray());
+ DCHECK(IsFixedArray());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
@@ -1859,16 +1862,16 @@ void WeakFixedArray::set_last_used_index(int index) {
template <class T>
T* WeakFixedArray::Iterator::Next() {
- if (list_ != NULL) {
+ if (list_ != nullptr) {
// Assert that list did not change during iteration.
DCHECK_EQ(last_used_index_, list_->last_used_index());
while (index_ < list_->Length()) {
Object* item = list_->Get(index_++);
if (item != Empty()) return T::cast(item);
}
- list_ = NULL;
+ list_ = nullptr;
}
- return NULL;
+ return nullptr;
}
int ArrayList::Length() const {
@@ -1963,6 +1966,26 @@ AllocationAlignment HeapObject::RequiredAlignment() const {
return kWordAligned;
}
+bool HeapObject::NeedsRehashing() const {
+ switch (map()->instance_type()) {
+ case DESCRIPTOR_ARRAY_TYPE:
+ return DescriptorArray::cast(this)->number_of_descriptors() > 1;
+ case TRANSITION_ARRAY_TYPE:
+ return TransitionArray::cast(this)->number_of_entries() > 1;
+ case HASH_TABLE_TYPE:
+ if (IsOrderedHashMap()) {
+ return OrderedHashMap::cast(this)->NumberOfElements() > 0;
+ } else if (IsOrderedHashSet()) {
+ return OrderedHashSet::cast(this)->NumberOfElements() > 0;
+ }
+ return true;
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
void FixedArray::set(int index,
Object* value,
@@ -2037,17 +2060,15 @@ Object** FixedArray::RawFieldOfElementAt(int index) {
ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
-int DescriptorArray::number_of_descriptors() {
+int DescriptorArray::number_of_descriptors() const {
return Smi::ToInt(get(kDescriptorLengthIndex));
}
-
-int DescriptorArray::number_of_descriptors_storage() {
+int DescriptorArray::number_of_descriptors_storage() const {
return (length() - kFirstIndex) / kEntrySize;
}
-
-int DescriptorArray::NumberOfSlackDescriptors() {
+int DescriptorArray::NumberOfSlackDescriptors() const {
return number_of_descriptors_storage() - number_of_descriptors();
}
@@ -2056,8 +2077,7 @@ void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
}
-
-inline int DescriptorArray::number_of_entries() {
+inline int DescriptorArray::number_of_entries() const {
return number_of_descriptors();
}
@@ -2073,7 +2093,7 @@ EnumCache* DescriptorArray::GetEnumCache() {
template <SearchMode search_mode, typename T>
int BinarySearch(T* array, Name* name, int valid_entries,
int* out_insertion_index) {
- DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == NULL);
+ DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr);
int low = 0;
int high = array->number_of_entries() - 1;
uint32_t hash = name->hash_field();
@@ -2175,7 +2195,8 @@ int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
int DescriptorArray::Search(Name* name, int valid_descriptors) {
DCHECK(name->IsUniqueName());
- return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, NULL);
+ return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors,
+ nullptr);
}
int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
@@ -2200,7 +2221,7 @@ PropertyDetails Map::GetLastDescriptorDetails() const {
int Map::LastAdded() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK(number_of_own_descriptors > 0);
+ DCHECK_GT(number_of_own_descriptors, 0);
return number_of_own_descriptors - 1;
}
@@ -2218,7 +2239,7 @@ int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
void Map::SetEnumLength(int length) {
if (length != kInvalidEnumCacheSentinel) {
- DCHECK(length >= 0);
+ DCHECK_GE(length, 0);
DCHECK(length <= NumberOfOwnDescriptors());
}
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
@@ -2308,12 +2329,12 @@ PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
- DCHECK(GetDetails(descriptor_number).location() == kField);
+ DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
return GetDetails(descriptor_number).field_index();
}
FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
- DCHECK(GetDetails(descriptor_number).location() == kField);
+ DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
Object* wrapped_type = GetValue(descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
@@ -2413,8 +2434,8 @@ void HashTableBase::SetNumberOfDeletedElements(int nod) {
}
template <typename Key>
-Map* BaseShape<Key>::GetMap(Isolate* isolate) {
- return isolate->heap()->hash_table_map();
+int BaseShape<Key>::GetMapRootIndex() {
+ return Heap::kHashTableMapRootIndex;
}
template <typename Derived, typename Shape>
@@ -2491,14 +2512,17 @@ uint32_t StringTableShape::HashForObject(Isolate* isolate, Object* object) {
return String::cast(object)->Hash();
}
-bool SeededNumberDictionary::requires_slow_elements() {
+int StringTableShape::GetMapRootIndex() {
+ return Heap::kStringTableMapRootIndex;
+}
+
+bool NumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask);
}
-
-uint32_t SeededNumberDictionary::max_number_key() {
+uint32_t NumberDictionary::max_number_key() {
DCHECK(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return 0;
@@ -2506,8 +2530,7 @@ uint32_t SeededNumberDictionary::max_number_key() {
return value >> kRequiresSlowElementsTagSize;
}
-
-void SeededNumberDictionary::set_requires_slow_elements() {
+void NumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
@@ -2568,72 +2591,6 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-BailoutId DeoptimizationInputData::BytecodeOffset(int i) {
- return BailoutId(BytecodeOffsetRaw(i)->value());
-}
-
-void DeoptimizationInputData::SetBytecodeOffset(int i, BailoutId value) {
- SetBytecodeOffsetRaw(i, Smi::FromInt(value.ToInt()));
-}
-
-
-int DeoptimizationInputData::DeoptCount() {
- return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
-}
-
-
-int HandlerTable::GetRangeStart(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeStartIndex));
-}
-
-int HandlerTable::GetRangeEnd(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeEndIndex));
-}
-
-int HandlerTable::GetRangeHandler(int index) const {
- return HandlerOffsetField::decode(
- Smi::ToInt(get(index * kRangeEntrySize + kRangeHandlerIndex)));
-}
-
-int HandlerTable::GetRangeData(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeDataIndex));
-}
-
-void HandlerTable::SetRangeStart(int index, int value) {
- set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
-}
-
-
-void HandlerTable::SetRangeEnd(int index, int value) {
- set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
-}
-
-
-void HandlerTable::SetRangeHandler(int index, int offset,
- CatchPrediction prediction) {
- int value = HandlerOffsetField::encode(offset) |
- HandlerPredictionField::encode(prediction);
- set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeData(int index, int value) {
- set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
-}
-
-
-void HandlerTable::SetReturnOffset(int index, int value) {
- set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetReturnHandler(int index, int offset) {
- int value = HandlerOffsetField::encode(offset);
- set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
-}
-
-int HandlerTable::NumberOfRangeEntries() const {
- return length() / kRangeEntrySize;
-}
-
template <typename Derived, typename Shape>
HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
SLOW_DCHECK(obj->IsHashTable());
@@ -2691,7 +2648,7 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
- (!GetHeap()->deserialization_complete() && map() == NULL));
+ (!GetHeap()->deserialization_complete() && map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
return reinterpret_cast<FreeSpace*>(
Memory::Address_at(address() + kNextOffset));
@@ -2700,7 +2657,7 @@ FreeSpace* FreeSpace::next() {
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
- (!GetHeap()->deserialization_complete() && map() == NULL));
+ (!GetHeap()->deserialization_complete() && map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
base::Relaxed_Store(
reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
@@ -2777,132 +2734,6 @@ Address ByteArray::GetDataStartAddress() {
return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
-byte BytecodeArray::get(int index) {
- DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-void BytecodeArray::set(int index, byte value) {
- DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
-}
-
-void BytecodeArray::set_frame_size(int frame_size) {
- DCHECK_GE(frame_size, 0);
- DCHECK(IsAligned(frame_size, static_cast<unsigned>(kPointerSize)));
- WRITE_INT_FIELD(this, kFrameSizeOffset, frame_size);
-}
-
-int BytecodeArray::frame_size() const {
- return READ_INT_FIELD(this, kFrameSizeOffset);
-}
-
-int BytecodeArray::register_count() const {
- return frame_size() / kPointerSize;
-}
-
-void BytecodeArray::set_parameter_count(int number_of_parameters) {
- DCHECK_GE(number_of_parameters, 0);
- // Parameter count is stored as the size on stack of the parameters to allow
- // it to be used directly by generated code.
- WRITE_INT_FIELD(this, kParameterSizeOffset,
- (number_of_parameters << kPointerSizeLog2));
-}
-
-interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
- const {
- int register_operand =
- READ_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset);
- if (register_operand == 0) {
- return interpreter::Register::invalid_value();
- } else {
- return interpreter::Register::FromOperand(register_operand);
- }
-}
-
-void BytecodeArray::set_incoming_new_target_or_generator_register(
- interpreter::Register incoming_new_target_or_generator_register) {
- if (!incoming_new_target_or_generator_register.is_valid()) {
- WRITE_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset, 0);
- } else {
- DCHECK(incoming_new_target_or_generator_register.index() <
- register_count());
- DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
- WRITE_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset,
- incoming_new_target_or_generator_register.ToOperand());
- }
-}
-
-int BytecodeArray::interrupt_budget() const {
- return READ_INT_FIELD(this, kInterruptBudgetOffset);
-}
-
-void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
- DCHECK_GE(interrupt_budget, 0);
- WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
-}
-
-int BytecodeArray::osr_loop_nesting_level() const {
- return READ_INT8_FIELD(this, kOSRNestingLevelOffset);
-}
-
-void BytecodeArray::set_osr_loop_nesting_level(int depth) {
- DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
- STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WRITE_INT8_FIELD(this, kOSRNestingLevelOffset, depth);
-}
-
-BytecodeArray::Age BytecodeArray::bytecode_age() const {
- // Bytecode is aged by the concurrent marker.
- return static_cast<Age>(RELAXED_READ_INT8_FIELD(this, kBytecodeAgeOffset));
-}
-
-void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
- DCHECK_GE(age, kFirstBytecodeAge);
- DCHECK_LE(age, kLastBytecodeAge);
- STATIC_ASSERT(kLastBytecodeAge <= kMaxInt8);
- // Bytecode is aged by the concurrent marker.
- RELAXED_WRITE_INT8_FIELD(this, kBytecodeAgeOffset, static_cast<int8_t>(age));
-}
-
-int BytecodeArray::parameter_count() const {
- // Parameter count is stored as the size on stack of the parameters to allow
- // it to be used directly by generated code.
- return READ_INT_FIELD(this, kParameterSizeOffset) >> kPointerSizeLog2;
-}
-
-ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
-ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(BytecodeArray, source_position_table, Object,
- kSourcePositionTableOffset)
-
-void BytecodeArray::clear_padding() {
- int data_size = kHeaderSize + length();
- memset(address() + data_size, 0, SizeFor(length()) - data_size);
-}
-
-Address BytecodeArray::GetFirstBytecodeAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
-}
-
-ByteArray* BytecodeArray::SourcePositionTable() {
- Object* maybe_table = source_position_table();
- if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
- return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table();
-}
-
-int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
-
-int BytecodeArray::SizeIncludingMetadata() {
- int size = BytecodeArraySize();
- size += constant_pool()->Size();
- size += handler_table()->Size();
- size += SourcePositionTable()->Size();
- return size;
-}
-
ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
@@ -3147,59 +2978,78 @@ Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
-int Map::visitor_id() const { return READ_BYTE_FIELD(this, kVisitorIdOffset); }
+VisitorId Map::visitor_id() const {
+ return static_cast<VisitorId>(READ_BYTE_FIELD(this, kVisitorIdOffset));
+}
-void Map::set_visitor_id(int id) {
+void Map::set_visitor_id(VisitorId id) {
DCHECK_LE(0, id);
DCHECK_LT(id, 256);
WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
}
+int Map::instance_size_in_words() const {
+ return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeInWordsOffset);
+}
+
+void Map::set_instance_size_in_words(int value) {
+ RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeInWordsOffset,
+ static_cast<byte>(value));
+}
+
int Map::instance_size() const {
- return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
+ return instance_size_in_words() << kPointerSizeLog2;
}
-int Map::inobject_properties_or_constructor_function_index() const {
- return RELAXED_READ_BYTE_FIELD(
- this, kInObjectPropertiesOrConstructorFunctionIndexOffset);
+void Map::set_instance_size(int value) {
+ DCHECK_EQ(0, value & (kPointerSize - 1));
+ value >>= kPointerSizeLog2;
+ DCHECK(0 <= value && value < 256);
+ set_instance_size_in_words(value);
}
+int Map::inobject_properties_start_or_constructor_function_index() const {
+ return RELAXED_READ_BYTE_FIELD(
+ this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+}
-void Map::set_inobject_properties_or_constructor_function_index(int value) {
+void Map::set_inobject_properties_start_or_constructor_function_index(
+ int value) {
DCHECK_LE(0, value);
DCHECK_LT(value, 256);
- RELAXED_WRITE_BYTE_FIELD(this,
- kInObjectPropertiesOrConstructorFunctionIndexOffset,
- static_cast<byte>(value));
+ RELAXED_WRITE_BYTE_FIELD(
+ this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
}
-int Map::GetInObjectProperties() const {
+int Map::GetInObjectPropertiesStartInWords() const {
DCHECK(IsJSObjectMap());
- return inobject_properties_or_constructor_function_index();
+ return inobject_properties_start_or_constructor_function_index();
}
+void Map::SetInObjectPropertiesStartInWords(int value) {
+ DCHECK(IsJSObjectMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
-void Map::SetInObjectProperties(int value) {
+int Map::GetInObjectProperties() const {
DCHECK(IsJSObjectMap());
- set_inobject_properties_or_constructor_function_index(value);
+ return instance_size_in_words() - GetInObjectPropertiesStartInWords();
}
int Map::GetConstructorFunctionIndex() const {
DCHECK(IsPrimitiveMap());
- return inobject_properties_or_constructor_function_index();
+ return inobject_properties_start_or_constructor_function_index();
}
void Map::SetConstructorFunctionIndex(int value) {
DCHECK(IsPrimitiveMap());
- set_inobject_properties_or_constructor_function_index(value);
+ set_inobject_properties_start_or_constructor_function_index(value);
}
int Map::GetInObjectPropertyOffset(int index) const {
- // Adjust for the number of properties stored in the object.
- index -= GetInObjectProperties();
- DCHECK(index <= 0);
- return instance_size() + (index * kPointerSize);
+ return (GetInObjectPropertiesStartInWords() + index) * kPointerSize;
}
@@ -3214,8 +3064,8 @@ int HeapObject::SizeFromMap(Map* map) const {
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
InstanceType instance_type = map->instance_type();
- if (instance_type == FIXED_ARRAY_TYPE || instance_type == HASH_TABLE_TYPE ||
- instance_type == TRANSITION_ARRAY_TYPE) {
+ if (instance_type >= FIRST_FIXED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_ARRAY_TYPE) {
return FixedArray::SizeFor(
reinterpret_cast<const FixedArray*>(this)->synchronized_length());
}
@@ -3274,35 +3124,114 @@ int HeapObject::SizeFromMap(Map* map) const {
return reinterpret_cast<const Code*>(this)->CodeSize();
}
+InstanceType Map::instance_type() const {
+ return static_cast<InstanceType>(
+ READ_UINT16_FIELD(this, kInstanceTypeOffset));
+}
-void Map::set_instance_size(int value) {
- DCHECK_EQ(0, value & (kPointerSize - 1));
- value >>= kPointerSizeLog2;
- DCHECK(0 <= value && value < 256);
- RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
+
+void Map::set_instance_type(InstanceType value) {
+ WRITE_UINT16_FIELD(this, kInstanceTypeOffset, value);
}
+int Map::UnusedPropertyFields() const {
+ int value = used_or_unused_instance_size_in_words();
+ DCHECK_IMPLIES(!IsJSObjectMap(), value == 0);
+ int unused;
+ if (value >= JSObject::kFieldsAdded) {
+ unused = instance_size_in_words() - value;
+ } else {
+ // For out of object properties "used_or_unused_instance_size_in_words"
+ // byte encodes the slack in the property array.
+ unused = value;
+ }
+ return unused;
+}
-void Map::clear_unused() { WRITE_BYTE_FIELD(this, kUnusedOffset, 0); }
+int Map::used_or_unused_instance_size_in_words() const {
+ return READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
+}
-InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
+void Map::set_used_or_unused_instance_size_in_words(int value) {
+ DCHECK_LE(0, value);
+ DCHECK_LE(value, 255);
+ WRITE_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset,
+ static_cast<byte>(value));
}
+int Map::UsedInstanceSize() const {
+ int words = used_or_unused_instance_size_in_words();
+ if (words < JSObject::kFieldsAdded) {
+ // All in-object properties are used and the words is tracking the slack
+ // in the property array.
+ return instance_size();
+ }
+ return words * kPointerSize;
+}
-void Map::set_instance_type(InstanceType value) {
- WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
+void Map::SetInObjectUnusedPropertyFields(int value) {
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ if (!IsJSObjectMap()) {
+ DCHECK_EQ(0, value);
+ set_used_or_unused_instance_size_in_words(0);
+ DCHECK_EQ(0, UnusedPropertyFields());
+ return;
+ }
+ DCHECK_LE(0, value);
+ DCHECK_LE(value, GetInObjectProperties());
+ int used_inobject_properties = GetInObjectProperties() - value;
+ set_used_or_unused_instance_size_in_words(
+ GetInObjectPropertyOffset(used_inobject_properties) / kPointerSize);
+ DCHECK_EQ(value, UnusedPropertyFields());
}
-int Map::unused_property_fields() const {
- return READ_BYTE_FIELD(this, kUnusedPropertyFieldsOffset);
+void Map::SetOutOfObjectUnusedPropertyFields(int value) {
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ DCHECK_LE(0, value);
+ DCHECK_LT(value, JSObject::kFieldsAdded);
+ // For out of object properties "used_instance_size_in_words" byte encodes
+ // the slack in the property array.
+ set_used_or_unused_instance_size_in_words(value);
+ DCHECK_EQ(value, UnusedPropertyFields());
}
+void Map::CopyUnusedPropertyFields(Map* map) {
+ set_used_or_unused_instance_size_in_words(
+ map->used_or_unused_instance_size_in_words());
+ DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+}
-void Map::set_unused_property_fields(int value) {
- WRITE_BYTE_FIELD(this, kUnusedPropertyFieldsOffset, Min(value, 255));
+void Map::AccountAddedPropertyField() {
+ // Update used instance size and unused property fields number.
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+#ifdef DEBUG
+ int new_unused = UnusedPropertyFields() - 1;
+ if (new_unused < 0) new_unused += JSObject::kFieldsAdded;
+#endif
+ int value = used_or_unused_instance_size_in_words();
+ if (value >= JSObject::kFieldsAdded) {
+ if (value == instance_size_in_words()) {
+ AccountAddedOutOfObjectPropertyField(0);
+ } else {
+ // The property is added in-object, so simply increment the counter.
+ set_used_or_unused_instance_size_in_words(value + 1);
+ }
+ } else {
+ AccountAddedOutOfObjectPropertyField(value);
+ }
+ DCHECK_EQ(new_unused, UnusedPropertyFields());
}
+void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
+ unused_in_property_array--;
+ if (unused_in_property_array < 0) {
+ unused_in_property_array += JSObject::kFieldsAdded;
+ }
+ DCHECK_GE(unused_in_property_array, 0);
+ DCHECK_LT(unused_in_property_array, JSObject::kFieldsAdded);
+ set_used_or_unused_instance_size_in_words(unused_in_property_array);
+ DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
+}
byte Map::bit_field() const { return READ_BYTE_FIELD(this, kBitFieldOffset); }
@@ -3329,6 +3258,7 @@ void Map::set_non_instance_prototype(bool value) {
}
bool Map::has_non_instance_prototype() const {
+ if (!has_prototype_slot()) return false;
return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
}
@@ -3346,6 +3276,8 @@ bool Map::is_constructor() const {
return ((1 << kIsConstructor) & bit_field()) != 0;
}
+BOOL_ACCESSORS(Map, bit_field, has_prototype_slot, kHasPrototypeSlot)
+
void Map::set_has_hidden_prototype(bool value) {
set_bit_field3(HasHiddenPrototype::update(bit_field3(), value));
}
@@ -3426,8 +3358,8 @@ bool Map::should_be_fast_prototype_map() const {
}
void Map::set_elements_kind(ElementsKind elements_kind) {
- DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
- DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
+ DCHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
+ DCHECK_LE(kElementsKindCount, 1 << Map::ElementsKindBits::kSize);
set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
DCHECK(this->elements_kind() == elements_kind);
}
@@ -3506,6 +3438,9 @@ bool Map::is_callable() const {
void Map::deprecate() {
set_bit_field3(Deprecated::update(bit_field3(), true));
+ if (FLAG_trace_maps) {
+ LOG(GetIsolate(), MapEvent("Deprecate", this, nullptr));
+ }
}
bool Map::is_deprecated() const { return Deprecated::decode(bit_field3()); }
@@ -3627,418 +3562,6 @@ bool Map::IsSpecialReceiverMap() const {
return result;
}
-DependentCode* DependentCode::next_link() {
- return DependentCode::cast(get(kNextLinkIndex));
-}
-
-
-void DependentCode::set_next_link(DependentCode* next) {
- set(kNextLinkIndex, next);
-}
-
-int DependentCode::flags() { return Smi::ToInt(get(kFlagsIndex)); }
-
-void DependentCode::set_flags(int flags) {
- set(kFlagsIndex, Smi::FromInt(flags));
-}
-
-
-int DependentCode::count() { return CountField::decode(flags()); }
-
-void DependentCode::set_count(int value) {
- set_flags(CountField::update(flags(), value));
-}
-
-
-DependentCode::DependencyGroup DependentCode::group() {
- return static_cast<DependencyGroup>(GroupField::decode(flags()));
-}
-
-
-void DependentCode::set_group(DependentCode::DependencyGroup group) {
- set_flags(GroupField::update(flags(), static_cast<int>(group)));
-}
-
-
-void DependentCode::set_object_at(int i, Object* object) {
- set(kCodesStartIndex + i, object);
-}
-
-
-Object* DependentCode::object_at(int i) {
- return get(kCodesStartIndex + i);
-}
-
-
-void DependentCode::clear_at(int i) {
- set_undefined(kCodesStartIndex + i);
-}
-
-
-void DependentCode::copy(int from, int to) {
- set(kCodesStartIndex + to, get(kCodesStartIndex + from));
-}
-
-Code::Kind Code::kind() const {
- return KindField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
-}
-
-void Code::initialize_flags(Kind kind) {
- WRITE_UINT32_FIELD(this, kFlagsOffset, KindField::encode(kind));
-}
-
-void Code::set_kind(Kind kind) {
- STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
- uint32_t previous = READ_UINT32_FIELD(this, kFlagsOffset);
- uint32_t updated_value = KindField::update(previous, kind);
- WRITE_UINT32_FIELD(this, kFlagsOffset, updated_value);
-}
-
-// For initialization.
-void Code::set_raw_kind_specific_flags1(int value) {
- WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
-}
-
-
-void Code::set_raw_kind_specific_flags2(int value) {
- WRITE_INT_FIELD(this, kKindSpecificFlags2Offset, value);
-}
-
-inline bool Code::is_interpreter_trampoline_builtin() const {
- Builtins* builtins = GetIsolate()->builtins();
- bool is_interpreter_trampoline =
- (this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
- this == builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
- this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch));
- DCHECK_IMPLIES(is_interpreter_trampoline, !Builtins::IsLazy(builtin_index()));
- return is_interpreter_trampoline;
-}
-
-inline bool Code::checks_optimization_marker() const {
- Builtins* builtins = GetIsolate()->builtins();
- bool checks_marker =
- (this == builtins->builtin(Builtins::kCompileLazy) ||
- this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
- this == builtins->builtin(Builtins::kCheckOptimizationMarker));
- DCHECK_IMPLIES(checks_marker, !Builtins::IsLazy(builtin_index()));
- return checks_marker ||
- (kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());
-}
-
-inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
-}
-
-inline void Code::set_has_unwinding_info(bool state) {
- uint32_t previous = READ_UINT32_FIELD(this, kFlagsOffset);
- uint32_t updated_value = HasUnwindingInfoField::update(previous, state);
- WRITE_UINT32_FIELD(this, kFlagsOffset, updated_value);
-}
-
-inline bool Code::has_tagged_params() const {
- int flags = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- return HasTaggedStackField::decode(flags);
-}
-
-inline void Code::set_has_tagged_params(bool value) {
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = HasTaggedStackField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-inline bool Code::is_turbofanned() const {
- return IsTurbofannedField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-inline void Code::set_is_turbofanned(bool value) {
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = IsTurbofannedField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-inline bool Code::can_have_weak_objects() const {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- return CanHaveWeakObjectsField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-inline void Code::set_can_have_weak_objects(bool value) {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CanHaveWeakObjectsField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-inline bool Code::is_construct_stub() const {
- DCHECK(kind() == BUILTIN);
- return IsConstructStubField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-inline void Code::set_is_construct_stub(bool value) {
- DCHECK(kind() == BUILTIN);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = IsConstructStubField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-inline bool Code::is_promise_rejection() const {
- DCHECK(kind() == BUILTIN);
- return IsPromiseRejectionField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-inline void Code::set_is_promise_rejection(bool value) {
- DCHECK(kind() == BUILTIN);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = IsPromiseRejectionField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-inline bool Code::is_exception_caught() const {
- DCHECK(kind() == BUILTIN);
- return IsExceptionCaughtField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-inline void Code::set_is_exception_caught(bool value) {
- DCHECK(kind() == BUILTIN);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = IsExceptionCaughtField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
- if (is_promise_rejection()) return HandlerTable::PROMISE;
- if (is_exception_caught()) return HandlerTable::CAUGHT;
- return HandlerTable::UNCAUGHT;
-}
-
-int Code::builtin_index() const {
- int index = READ_INT_FIELD(this, kBuiltinIndexOffset);
- DCHECK(index == -1 || Builtins::IsBuiltinId(index));
- return index;
-}
-
-void Code::set_builtin_index(int index) {
- DCHECK(index == -1 || Builtins::IsBuiltinId(index));
- WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
-}
-
-bool Code::is_builtin() const { return builtin_index() != -1; }
-
-unsigned Code::stack_slots() const {
- DCHECK(is_turbofanned());
- return StackSlotsField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_stack_slots(unsigned slots) {
- CHECK(slots <= (1 << kStackSlotsBitCount));
- DCHECK(is_turbofanned());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = StackSlotsField::update(previous, slots);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-unsigned Code::safepoint_table_offset() const {
- DCHECK(is_turbofanned());
- return SafepointTableOffsetField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_safepoint_table_offset(unsigned offset) {
- CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- DCHECK(is_turbofanned());
- DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = SafepointTableOffsetField::update(previous, offset);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
-bool Code::marked_for_deoptimization() const {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- return MarkedForDeoptimizationField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_marked_for_deoptimization(bool flag) {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = MarkedForDeoptimizationField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-bool Code::deopt_already_counted() const {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- return DeoptAlreadyCountedField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-void Code::set_deopt_already_counted(bool flag) {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = DeoptAlreadyCountedField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-bool Code::is_stub() const { return kind() == STUB; }
-bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
-bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
-
-Address Code::constant_pool() {
- Address constant_pool = NULL;
- if (FLAG_enable_embedded_constant_pool) {
- int offset = constant_pool_offset();
- if (offset < instruction_size()) {
- constant_pool = FIELD_ADDR(this, kHeaderSize + offset);
- }
- }
- return constant_pool;
-}
-
-Code* Code::GetCodeFromTargetAddress(Address address) {
- HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
- // GetCodeFromTargetAddress might be called when marking objects during mark
- // sweep. reinterpret_cast is therefore used instead of the more appropriate
- // Code::cast. Code::cast does not work when the object's map is
- // marked.
- Code* result = reinterpret_cast<Code*>(code);
- return result;
-}
-
-Object* Code::GetObjectFromCodeEntry(Address code_entry) {
- return HeapObject::FromAddress(code_entry - Code::kHeaderSize);
-}
-
-Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
- return GetObjectFromCodeEntry(Memory::Address_at(location_of_address));
-}
-
-
-bool Code::CanContainWeakObjects() {
- return is_optimized_code() && can_have_weak_objects();
-}
-
-
-bool Code::IsWeakObject(Object* object) {
- return (CanContainWeakObjects() && IsWeakObjectInOptimizedCode(object));
-}
-
-
-bool Code::IsWeakObjectInOptimizedCode(Object* object) {
- if (object->IsMap()) {
- return Map::cast(object)->CanTransition();
- }
- if (object->IsCell()) {
- object = Cell::cast(object)->value();
- } else if (object->IsPropertyCell()) {
- object = PropertyCell::cast(object)->value();
- }
- if (object->IsJSReceiver() || object->IsContext()) {
- return true;
- }
- return false;
-}
-
-
-int AbstractCode::instruction_size() {
- if (IsCode()) {
- return GetCode()->instruction_size();
- } else {
- return GetBytecodeArray()->length();
- }
-}
-
-ByteArray* AbstractCode::source_position_table() {
- if (IsCode()) {
- return GetCode()->SourcePositionTable();
- } else {
- return GetBytecodeArray()->SourcePositionTable();
- }
-}
-
-void AbstractCode::set_source_position_table(ByteArray* source_position_table) {
- if (IsCode()) {
- GetCode()->set_source_position_table(source_position_table);
- } else {
- GetBytecodeArray()->set_source_position_table(source_position_table);
- }
-}
-
-Object* AbstractCode::stack_frame_cache() {
- Object* maybe_table;
- if (IsCode()) {
- maybe_table = GetCode()->source_position_table();
- } else {
- maybe_table = GetBytecodeArray()->source_position_table();
- }
- if (maybe_table->IsSourcePositionTableWithFrameCache()) {
- return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->stack_frame_cache();
- }
- return Smi::kZero;
-}
-
-int AbstractCode::SizeIncludingMetadata() {
- if (IsCode()) {
- return GetCode()->SizeIncludingMetadata();
- } else {
- return GetBytecodeArray()->SizeIncludingMetadata();
- }
-}
-int AbstractCode::ExecutableSize() {
- if (IsCode()) {
- return GetCode()->ExecutableSize();
- } else {
- return GetBytecodeArray()->BytecodeArraySize();
- }
-}
-
-Address AbstractCode::instruction_start() {
- if (IsCode()) {
- return GetCode()->instruction_start();
- } else {
- return GetBytecodeArray()->GetFirstBytecodeAddress();
- }
-}
-
-Address AbstractCode::instruction_end() {
- if (IsCode()) {
- return GetCode()->instruction_end();
- } else {
- return GetBytecodeArray()->GetFirstBytecodeAddress() +
- GetBytecodeArray()->length();
- }
-}
-
-bool AbstractCode::contains(byte* inner_pointer) {
- return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
-}
-
-AbstractCode::Kind AbstractCode::kind() {
- if (IsCode()) {
- return static_cast<AbstractCode::Kind>(GetCode()->kind());
- } else {
- return INTERPRETED_FUNCTION;
- }
-}
-
-Code* AbstractCode::GetCode() { return Code::cast(this); }
-
-BytecodeArray* AbstractCode::GetBytecodeArray() {
- return BytecodeArray::cast(this);
-}
-
Object* Map::prototype() const {
return READ_FIELD(this, kPrototypeOffset);
}
@@ -4051,12 +3574,14 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
}
LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
+ DCHECK(FLAG_unbox_double_fields);
Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
return LayoutDescriptor::cast_gc_safe(layout_desc);
}
bool Map::HasFastPointerLayout() const {
+ DCHECK(FLAG_unbox_double_fields);
Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
@@ -4073,7 +3598,7 @@ void Map::UpdateDescriptors(DescriptorArray* descriptors,
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
CHECK(layout_descriptor()->IsConsistentWithMap(this));
- CHECK(visitor_id() == Map::GetVisitorId(this));
+ CHECK_EQ(Map::GetVisitorId(this), visitor_id());
}
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
@@ -4105,7 +3630,8 @@ void Map::InitializeDescriptors(DescriptorArray* descriptors,
ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-ACCESSORS(Map, layout_descriptor, LayoutDescriptor, kLayoutDescriptorOffset)
+ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
+ kLayoutDescriptorOffset, FLAG_unbox_double_fields)
void Map::set_bit_field3(uint32_t bits) {
if (kInt32Size != kPointerSize) {
@@ -4136,11 +3662,15 @@ void Map::AppendDescriptor(Descriptor* desc) {
if (desc->GetKey()->IsInterestingSymbol()) {
set_may_have_interesting_symbols(true);
}
+ PropertyDetails details = desc->GetDetails();
+ if (details.location() == kField) {
+ DCHECK_GT(UnusedPropertyFields(), 0);
+ AccountAddedPropertyField();
+ }
// This function does not support appending double field descriptors and
// it should never try to (otherwise, layout descriptor must be updated too).
#ifdef DEBUG
- PropertyDetails details = desc->GetDetails();
DCHECK(details.location() != kField || !details.representation().IsDouble());
#endif
}
@@ -4220,7 +3750,7 @@ void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
- map->unused_property_fields());
+ map->UnusedPropertyFields());
}
Object* JSBoundFunction::raw_bound_target_function() const {
@@ -4239,10 +3769,9 @@ ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
-ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-SMI_ACCESSORS(AccessorInfo, flag, kFlagOffset)
+ACCESSORS(AccessorInfo, name, Name, kNameOffset)
+SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
@@ -4364,6 +3893,7 @@ BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
+ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
@@ -4482,7 +4012,7 @@ SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
kSourcePositionTableIndex)
ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- UnseededNumberDictionary, kStackFrameCacheIndex)
+ NumberDictionary, kStackFrameCacheIndex)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -4559,6 +4089,7 @@ bool JSFunction::IsInOptimizationQueue() {
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
+ if (!has_prototype_slot()) return;
if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
initial_map()->CompleteInobjectSlackTracking();
}
@@ -4570,6 +4101,8 @@ bool Map::IsInobjectSlackTrackingInProgress() const {
void Map::InobjectSlackTrackingStep() {
+ // Slack tracking should only be performed on an initial map.
+ DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
if (!IsInobjectSlackTrackingInProgress()) return;
int counter = construction_counter();
set_construction_counter(counter - 1);
@@ -4628,7 +4161,9 @@ bool JSFunction::has_feedback_vector() const {
JSFunction::FeedbackVectorState JSFunction::GetFeedbackVectorState(
Isolate* isolate) const {
Cell* cell = feedback_vector_cell();
- if (cell == isolate->heap()->undefined_cell()) {
+ if (shared()->HasAsmWasmData()) {
+ return NO_VECTOR_NEEDED;
+ } else if (cell == isolate->heap()->undefined_cell()) {
return TOP_LEVEL_SCRIPT_NEEDS_VECTOR;
} else if (cell->value() == isolate->heap()->undefined_value() ||
!has_feedback_vector()) {
@@ -4659,9 +4194,12 @@ void JSFunction::set_context(Object* value) {
WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
}
-ACCESSORS(JSFunction, prototype_or_initial_map, Object,
- kPrototypeOrInitialMapOffset)
+ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
+ kPrototypeOrInitialMapOffset, map()->has_prototype_slot())
+bool JSFunction::has_prototype_slot() const {
+ return map()->has_prototype_slot();
+}
Map* JSFunction::initial_map() {
return Map::cast(prototype_or_initial_map());
@@ -4669,17 +4207,20 @@ Map* JSFunction::initial_map() {
bool JSFunction::has_initial_map() {
+ DCHECK(has_prototype_slot());
return prototype_or_initial_map()->IsMap();
}
bool JSFunction::has_instance_prototype() {
+ DCHECK(has_prototype_slot());
return has_initial_map() ||
!prototype_or_initial_map()->IsTheHole(GetIsolate());
}
bool JSFunction::has_prototype() {
+ DCHECK(has_prototype_slot());
return map()->has_non_instance_prototype() || has_instance_prototype();
}
@@ -4715,7 +4256,6 @@ bool JSFunction::is_compiled() {
ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-ACCESSORS(JSProxy, hash, Object, kHashOffset)
bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
@@ -4804,329 +4344,8 @@ SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
-INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
-#define CODE_ACCESSORS(name, type, offset) \
- ACCESSORS_CHECKED2(Code, name, type, offset, true, \
- !GetHeap()->InNewSpace(value))
-CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
-CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
-CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
-CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
-CODE_ACCESSORS(raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
-CODE_ACCESSORS(next_code_link, Object, kNextCodeLinkOffset)
-#undef CODE_ACCESSORS
-
-void Code::WipeOutHeader() {
- WRITE_FIELD(this, kRelocationInfoOffset, nullptr);
- WRITE_FIELD(this, kHandlerTableOffset, nullptr);
- WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
- WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
- // Do not wipe out major/minor keys on a code stub or IC
- if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, nullptr);
- }
- WRITE_FIELD(this, kNextCodeLinkOffset, nullptr);
-}
-
-void Code::clear_padding() {
- memset(address() + kHeaderPaddingStart, 0, kHeaderSize - kHeaderPaddingStart);
- Address data_end =
- has_unwinding_info() ? unwinding_info_end() : instruction_end();
- memset(data_end, 0, CodeSize() - (data_end - address()));
-}
-
-ByteArray* Code::SourcePositionTable() const {
- Object* maybe_table = source_position_table();
- if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
- return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table();
-}
-
-uint32_t Code::stub_key() const {
- DCHECK(is_stub());
- Smi* smi_key = Smi::cast(raw_type_feedback_info());
- return static_cast<uint32_t>(smi_key->value());
-}
-
-
-void Code::set_stub_key(uint32_t key) {
- DCHECK(is_stub());
- set_raw_type_feedback_info(Smi::FromInt(key));
-}
-
-byte* Code::instruction_start() const {
- return const_cast<byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
-}
-
-byte* Code::instruction_end() const {
- return instruction_start() + instruction_size();
-}
-
-int Code::GetUnwindingInfoSizeOffset() const {
- DCHECK(has_unwinding_info());
- return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
-}
-
-int Code::unwinding_info_size() const {
- DCHECK(has_unwinding_info());
- return static_cast<int>(
- READ_UINT64_FIELD(this, GetUnwindingInfoSizeOffset()));
-}
-
-void Code::set_unwinding_info_size(int value) {
- DCHECK(has_unwinding_info());
- WRITE_UINT64_FIELD(this, GetUnwindingInfoSizeOffset(), value);
-}
-
-byte* Code::unwinding_info_start() const {
- DCHECK(has_unwinding_info());
- return const_cast<byte*>(
- FIELD_ADDR_CONST(this, GetUnwindingInfoSizeOffset())) +
- kInt64Size;
-}
-
-byte* Code::unwinding_info_end() const {
- DCHECK(has_unwinding_info());
- return unwinding_info_start() + unwinding_info_size();
-}
-
-int Code::body_size() const {
- int unpadded_body_size =
- has_unwinding_info()
- ? static_cast<int>(unwinding_info_end() - instruction_start())
- : instruction_size();
- return RoundUp(unpadded_body_size, kObjectAlignment);
-}
-
-int Code::SizeIncludingMetadata() const {
- int size = CodeSize();
- size += relocation_info()->Size();
- size += deoptimization_data()->Size();
- size += handler_table()->Size();
- return size;
-}
-
-ByteArray* Code::unchecked_relocation_info() const {
- return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
-}
-
-byte* Code::relocation_start() const {
- return unchecked_relocation_info()->GetDataStartAddress();
-}
-
-int Code::relocation_size() const {
- return unchecked_relocation_info()->length();
-}
-
-byte* Code::entry() const { return instruction_start(); }
-
-bool Code::contains(byte* inner_pointer) {
- return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
-}
-
-int Code::ExecutableSize() const {
- // Check that the assumptions about the layout of the code object holds.
- DCHECK_EQ(static_cast<int>(instruction_start() - address()),
- Code::kHeaderSize);
- return instruction_size() + Code::kHeaderSize;
-}
-
-int Code::CodeSize() const { return SizeFor(body_size()); }
-
-ACCESSORS(JSArray, length, Object, kLengthOffset)
-
-
-void* JSArrayBuffer::backing_store() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-
-void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
-}
-
-
-ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
-
-void* JSArrayBuffer::allocation_base() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kAllocationBaseOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-void JSArrayBuffer::set_allocation_base(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kAllocationBaseOffset, ptr);
-}
-
-size_t JSArrayBuffer::allocation_length() const {
- return *reinterpret_cast<const size_t*>(
- FIELD_ADDR_CONST(this, kAllocationLengthOffset));
-}
-
-void JSArrayBuffer::set_allocation_length(size_t value) {
- (*reinterpret_cast<size_t*>(FIELD_ADDR(this, kAllocationLengthOffset))) =
- value;
-}
-
-ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
- using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
- return has_guard_region() ? AllocationMode::kReservation
- : AllocationMode::kNormal;
-}
-
-void JSArrayBuffer::set_bit_field(uint32_t bits) {
- if (kInt32Size != kPointerSize) {
-#if V8_TARGET_LITTLE_ENDIAN
- WRITE_UINT32_FIELD(this, kBitFieldSlot + kInt32Size, 0);
-#else
- WRITE_UINT32_FIELD(this, kBitFieldSlot, 0);
-#endif
- }
- WRITE_UINT32_FIELD(this, kBitFieldOffset, bits);
-}
-
-
-uint32_t JSArrayBuffer::bit_field() const {
- return READ_UINT32_FIELD(this, kBitFieldOffset);
-}
-
-
-bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
-
-
-void JSArrayBuffer::set_is_external(bool value) {
- set_bit_field(IsExternal::update(bit_field(), value));
-}
-
-
-bool JSArrayBuffer::is_neuterable() {
- return IsNeuterable::decode(bit_field());
-}
-
-
-void JSArrayBuffer::set_is_neuterable(bool value) {
- set_bit_field(IsNeuterable::update(bit_field(), value));
-}
-bool JSArrayBuffer::was_neutered() { return WasNeutered::decode(bit_field()); }
-
-
-void JSArrayBuffer::set_was_neutered(bool value) {
- set_bit_field(WasNeutered::update(bit_field(), value));
-}
-
-
-bool JSArrayBuffer::is_shared() { return IsShared::decode(bit_field()); }
-
-
-void JSArrayBuffer::set_is_shared(bool value) {
- set_bit_field(IsShared::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::has_guard_region() const {
- return HasGuardRegion::decode(bit_field());
-}
-
-void JSArrayBuffer::set_has_guard_region(bool value) {
- set_bit_field(HasGuardRegion::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_growable() {
- return IsGrowable::decode(bit_field());
-}
-
-void JSArrayBuffer::set_is_growable(bool value) {
- set_bit_field(IsGrowable::update(bit_field(), value));
-}
-
-Object* JSArrayBufferView::byte_offset() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kByteOffsetOffset));
-}
-
-
-void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kByteOffsetOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteOffsetOffset, value, mode);
-}
-
-
-Object* JSArrayBufferView::byte_length() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kByteLengthOffset));
-}
-
-
-void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kByteLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteLengthOffset, value, mode);
-}
-
-
-ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
-#ifdef VERIFY_HEAP
-ACCESSORS(JSArrayBufferView, raw_byte_offset, Object, kByteOffsetOffset)
-ACCESSORS(JSArrayBufferView, raw_byte_length, Object, kByteLengthOffset)
-#endif
-
-
-bool JSArrayBufferView::WasNeutered() const {
- return JSArrayBuffer::cast(buffer())->was_neutered();
-}
-
-
-Object* JSTypedArray::length() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kLengthOffset));
-}
-
-
-uint32_t JSTypedArray::length_value() const {
- if (WasNeutered()) return 0;
- uint32_t index = 0;
- CHECK(Object::cast(READ_FIELD(this, kLengthOffset))->ToArrayLength(&index));
- return index;
-}
-
-
-void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
-}
-
-// static
-MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
- Handle<Object> receiver,
- const char* method_name) {
- if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
- const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
- THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
- }
-
- Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- if (V8_UNLIKELY(array->WasNeutered())) {
- const MessageTemplate::Template message =
- MessageTemplate::kDetachedOperation;
- Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method_name);
- THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
- }
-
- // spec describes to return `buffer`, but it may disrupt current
- // implementations, and it's much useful to return array for now.
- return array;
-}
-
-#ifdef VERIFY_HEAP
-ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
-#endif
-
ACCESSORS(PromiseCapability, promise, Object, kPromiseOffset)
ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
@@ -5141,65 +4360,6 @@ SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
-ACCESSORS(JSRegExp, data, Object, kDataOffset)
-ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
-ACCESSORS(JSRegExp, source, Object, kSourceOffset)
-ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
-
-JSRegExp::Type JSRegExp::TypeTag() {
- Object* data = this->data();
- if (data->IsUndefined(GetIsolate())) return JSRegExp::NOT_COMPILED;
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
-}
-
-
-int JSRegExp::CaptureCount() {
- switch (TypeTag()) {
- case ATOM:
- return 0;
- case IRREGEXP:
- return Smi::ToInt(DataAt(kIrregexpCaptureCountIndex));
- default:
- UNREACHABLE();
- }
-}
-
-
-JSRegExp::Flags JSRegExp::GetFlags() {
- DCHECK(this->data()->IsFixedArray());
- Object* data = this->data();
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
- return Flags(smi->value());
-}
-
-
-String* JSRegExp::Pattern() {
- DCHECK(this->data()->IsFixedArray());
- Object* data = this->data();
- String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
- return pattern;
-}
-
-Object* JSRegExp::CaptureNameMap() {
- DCHECK(this->data()->IsFixedArray());
- DCHECK_EQ(TypeTag(), IRREGEXP);
- Object* value = DataAt(kIrregexpCaptureNameMapIndex);
- DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
- return value;
-}
-
-Object* JSRegExp::DataAt(int index) {
- DCHECK(TypeTag() != NOT_COMPILED);
- return FixedArray::cast(data())->get(index);
-}
-
-
-void JSRegExp::SetDataAt(int index, Object* value) {
- DCHECK(TypeTag() != NOT_COMPILED);
- DCHECK(index >= kDataIndex); // Only implementation data can be set this way.
- FixedArray::cast(data())->set(index, value);
-}
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
@@ -5293,7 +4453,7 @@ bool JSObject::HasFixedTypedArrayElements() {
#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
bool JSObject::HasFixed##Type##Elements() { \
HeapObject* array = elements(); \
- DCHECK(array != NULL); \
+ DCHECK_NOT_NULL(array); \
if (!array->IsHeapObject()) return false; \
return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
}
@@ -5323,10 +4483,9 @@ GlobalDictionary* JSGlobalObject::global_dictionary() {
return GlobalDictionary::cast(raw_properties_or_hash());
}
-
-SeededNumberDictionary* JSObject::element_dictionary() {
+NumberDictionary* JSObject::element_dictionary() {
DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
- return SeededNumberDictionary::cast(elements());
+ return NumberDictionary::cast(elements());
}
// static
@@ -5435,8 +4594,9 @@ void JSReceiver::initialize_properties() {
}
bool JSReceiver::HasFastProperties() const {
- DCHECK_EQ(raw_properties_or_hash()->IsDictionary(),
- map()->is_dictionary_map());
+ DCHECK(
+ raw_properties_or_hash()->IsSmi() ||
+ (raw_properties_or_hash()->IsDictionary() == map()->is_dictionary_map()));
return !map()->is_dictionary_map();
}
@@ -5549,68 +4709,17 @@ inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
return kSize + embedder_field_count * kPointerSize;
}
-Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate) {
- return IsJSProxy() ? JSProxy::cast(this)->GetOrCreateIdentityHash(isolate)
- : JSObject::cast(this)->GetOrCreateIdentityHash(isolate);
-}
-
-Object* JSReceiver::GetIdentityHash(Isolate* isolate) {
- return IsJSProxy() ? JSProxy::cast(this)->GetIdentityHash()
- : JSObject::cast(this)->GetIdentityHash(isolate);
-}
-
-
-bool AccessorInfo::all_can_read() {
- return BooleanBit::get(flag(), kAllCanReadBit);
-}
-
-
-void AccessorInfo::set_all_can_read(bool value) {
- set_flag(BooleanBit::set(flag(), kAllCanReadBit, value));
-}
-
-
-bool AccessorInfo::all_can_write() {
- return BooleanBit::get(flag(), kAllCanWriteBit);
-}
-
-
-void AccessorInfo::set_all_can_write(bool value) {
- set_flag(BooleanBit::set(flag(), kAllCanWriteBit, value));
-}
-
-
-bool AccessorInfo::is_special_data_property() {
- return BooleanBit::get(flag(), kSpecialDataProperty);
-}
-
-
-void AccessorInfo::set_is_special_data_property(bool value) {
- set_flag(BooleanBit::set(flag(), kSpecialDataProperty, value));
-}
-
-bool AccessorInfo::replace_on_access() {
- return BooleanBit::get(flag(), kReplaceOnAccess);
-}
-
-void AccessorInfo::set_replace_on_access(bool value) {
- set_flag(BooleanBit::set(flag(), kReplaceOnAccess, value));
-}
-
-bool AccessorInfo::is_sloppy() { return BooleanBit::get(flag(), kIsSloppy); }
-
-void AccessorInfo::set_is_sloppy(bool value) {
- set_flag(BooleanBit::set(flag(), kIsSloppy, value));
-}
-
-PropertyAttributes AccessorInfo::property_attributes() {
- return AttributesField::decode(static_cast<uint32_t>(flag()));
-}
-
-
-void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
- set_flag(AttributesField::update(flag(), attributes));
-}
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, all_can_read,
+ AccessorInfo::AllCanReadBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, all_can_write,
+ AccessorInfo::AllCanWriteBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, is_special_data_property,
+ AccessorInfo::IsSpecialDataPropertyBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, replace_on_access,
+ AccessorInfo::ReplaceOnAccessBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, is_sloppy, AccessorInfo::IsSloppyBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, initial_property_attributes,
+ AccessorInfo::InitialAttributesBits)
bool FunctionTemplateInfo::IsTemplateFor(JSObject* object) {
return IsTemplateFor(object->map());
@@ -5693,8 +4802,16 @@ Object* GlobalDictionaryShape::Unwrap(Object* object) {
return PropertyCell::cast(object)->name();
}
+int GlobalDictionaryShape::GetMapRootIndex() {
+ return Heap::kGlobalDictionaryMapRootIndex;
+}
+
Name* NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
+int NameDictionaryShape::GetMapRootIndex() {
+ return Heap::kNameDictionaryMapRootIndex;
+}
+
PropertyCell* GlobalDictionary::CellAt(int entry) {
DCHECK(KeyAt(entry)->IsPropertyCell());
return PropertyCell::cast(KeyAt(entry));
@@ -5730,31 +4847,19 @@ bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
return key == static_cast<uint32_t>(other->Number());
}
-uint32_t UnseededNumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
- return ComputeIntegerHash(key);
-}
-
-uint32_t UnseededNumberDictionaryShape::HashForObject(Isolate* isolate,
- Object* other) {
- DCHECK(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
-}
-
-Map* UnseededNumberDictionaryShape::GetMap(Isolate* isolate) {
- return isolate->heap()->unseeded_number_dictionary_map();
-}
-
-uint32_t SeededNumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
+uint32_t NumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
return ComputeIntegerHash(key, isolate->heap()->HashSeed());
}
-uint32_t SeededNumberDictionaryShape::HashForObject(Isolate* isolate,
- Object* other) {
+uint32_t NumberDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
DCHECK(other->IsNumber());
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()),
isolate->heap()->HashSeed());
}
+int NumberDictionaryShape::GetMapRootIndex() {
+ return Heap::kNumberDictionaryMapRootIndex;
+}
Handle<Object> NumberDictionaryShape::AsHandle(Isolate* isolate, uint32_t key) {
return isolate->factory()->NewNumberFromUint(key);
@@ -5833,16 +4938,13 @@ Handle<ObjectHashTable> ObjectHashTable::Shrink(Handle<ObjectHashTable> table) {
return DerivedHashTable::Shrink(table);
}
-template <int entrysize>
-bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) {
+bool WeakHashTableShape::IsMatch(Handle<Object> key, Object* other) {
if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
return key->IsWeakCell() ? WeakCell::cast(*key)->value() == other
: *key == other;
}
-template <int entrysize>
-uint32_t WeakHashTableShape<entrysize>::Hash(Isolate* isolate,
- Handle<Object> key) {
+uint32_t WeakHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
intptr_t hash =
key->IsWeakCell()
? reinterpret_cast<intptr_t>(WeakCell::cast(*key)->value())
@@ -5850,21 +4952,20 @@ uint32_t WeakHashTableShape<entrysize>::Hash(Isolate* isolate,
return (uint32_t)(hash & 0xFFFFFFFF);
}
-template <int entrysize>
-uint32_t WeakHashTableShape<entrysize>::HashForObject(Isolate* isolate,
- Object* other) {
+uint32_t WeakHashTableShape::HashForObject(Isolate* isolate, Object* other) {
if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
intptr_t hash = reinterpret_cast<intptr_t>(other);
return (uint32_t)(hash & 0xFFFFFFFF);
}
-
-template <int entrysize>
-Handle<Object> WeakHashTableShape<entrysize>::AsHandle(Isolate* isolate,
- Handle<Object> key) {
+Handle<Object> WeakHashTableShape::AsHandle(Isolate* isolate,
+ Handle<Object> key) {
return key;
}
+int WeakHashTableShape::GetMapRootIndex() {
+ return Heap::kWeakHashTableMapRootIndex;
+}
int Map::SlackForArraySize(int old_size, int size_limit) {
const int max_slack = size_limit - old_size;
@@ -5876,46 +4977,6 @@ int Map::SlackForArraySize(int old_size, int size_limit) {
return Min(max_slack, old_size / 4);
}
-
-void JSArray::set_length(Smi* length) {
- // Don't need a write barrier for a Smi.
- set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
-}
-
-
-bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
- return new_length > kMaxFastArrayLength;
-}
-
-
-bool JSArray::AllowsSetLength() {
- bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
- DCHECK(result == !HasFixedTypedArrayElements());
- return result;
-}
-
-
-void JSArray::SetContent(Handle<JSArray> array,
- Handle<FixedArrayBase> storage) {
- EnsureCanContainElements(array, storage, storage->length(),
- ALLOW_COPIED_DOUBLE_ELEMENTS);
-
- DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() &&
- IsDoubleElementsKind(array->GetElementsKind())) ||
- ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
- (IsObjectElementsKind(array->GetElementsKind()) ||
- (IsSmiElementsKind(array->GetElementsKind()) &&
- Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
- array->set_elements(*storage);
- array->set_length(Smi::FromInt(storage->length()));
-}
-
-
-bool JSArray::HasArrayPrototype(Isolate* isolate) {
- return map()->prototype() == *isolate->initial_array_prototype();
-}
-
-
int TypeFeedbackInfo::ic_total_count() {
int current = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
return ICTotalCountField::decode(current);
@@ -6076,10 +5137,6 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
-ACCESSORS(JSArrayIterator, object, Object, kIteratedObjectOffset)
-ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
-ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
-
ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
kSyncIteratorOffset)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 33928a5aa7..2ac24f823d 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -16,6 +16,7 @@
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -97,6 +98,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case BYTECODE_ARRAY_TYPE:
BytecodeArray::cast(this)->BytecodeArrayPrint(os);
break;
+ case DESCRIPTOR_ARRAY_TYPE:
+ DescriptorArray::cast(this)->PrintDescriptors(os);
+ break;
case TRANSITION_ARRAY_TYPE:
TransitionArray::cast(this)->TransitionArrayPrint(os);
break;
@@ -171,6 +175,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case CODE_TYPE:
Code::cast(this)->CodePrint(os);
break;
+ case CODE_DATA_CONTAINER_TYPE:
+ CodeDataContainer::cast(this)->CodeDataContainerPrint(os);
+ break;
case JS_PROXY_TYPE:
JSProxy::cast(this)->JSProxyPrint(os);
break;
@@ -382,7 +389,7 @@ void PrintFixedArrayElements(std::ostream& os, T* array) {
void PrintDictionaryElements(std::ostream& os, FixedArrayBase* elements) {
// Print some internal fields
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements);
+ NumberDictionary* dict = NumberDictionary::cast(elements);
if (dict->requires_slow_elements()) {
os << "\n - requires_slow_elements";
} else {
@@ -487,6 +494,10 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
<< ElementsKindToString(obj->map()->elements_kind());
if (obj->elements()->IsCowArray()) os << " (COW)";
os << "]";
+ Object* hash = obj->GetHash();
+ if (hash->IsSmi()) {
+ os << "\n - hash = " << Brief(hash);
+ }
if (obj->GetEmbedderFieldCount() > 0) {
os << "\n - embedder fields: " << obj->GetEmbedderFieldCount();
}
@@ -569,7 +580,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - inobject properties: " << GetInObjectProperties();
}
os << "\n - elements kind: " << ElementsKindToString(elements_kind());
- os << "\n - unused property fields: " << unused_property_fields();
+ os << "\n - unused property fields: " << UnusedPropertyFields();
os << "\n - enum length: ";
if (EnumLength() == kInvalidEnumCacheSentinel) {
os << "invalid";
@@ -587,6 +598,10 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_undetectable()) os << "\n - undetectable";
if (is_callable()) os << "\n - callable";
if (is_constructor()) os << "\n - constructor";
+ if (has_prototype_slot()) {
+ os << "\n - has_prototype_slot";
+ if (has_non_instance_prototype()) os << " (non-instance prototype)";
+ }
if (is_access_check_needed()) os << "\n - access_check_needed";
if (!is_extensible()) os << "\n - non-extensible";
if (is_prototype_map()) {
@@ -742,74 +757,8 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
FeedbackSlot slot = iter.Next();
FeedbackSlotKind kind = iter.kind();
- os << "\n Slot " << slot << " " << kind;
- os << " ";
- switch (kind) {
- case FeedbackSlotKind::kLoadProperty: {
- LoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kLoadGlobalInsideTypeof:
- case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
- LoadGlobalICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kLoadKeyed: {
- KeyedLoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kCall: {
- CallICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict: {
- KeyedStoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kBinaryOp: {
- BinaryOpICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kCompareOp: {
- CompareICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kForIn: {
- ForInICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- StoreDataPropertyInLiteralICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kCreateClosure:
- case FeedbackSlotKind::kLiteral:
- case FeedbackSlotKind::kTypeProfile:
- break;
- case FeedbackSlotKind::kInvalid:
- case FeedbackSlotKind::kKindsNumber:
- UNREACHABLE();
- break;
- }
+ os << "\n Slot " << slot << " " << kind << " ";
+ FeedbackSlotPrint(os, slot, kind);
int entry_size = iter.entry_size();
for (int i = 0; i < entry_size; i++) {
@@ -820,6 +769,85 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
+ FeedbackSlot slot) { // NOLINT
+ FeedbackSlotPrint(os, slot, GetKind(slot));
+}
+
+void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
+ FeedbackSlotKind kind) { // NOLINT
+ switch (kind) {
+ case FeedbackSlotKind::kLoadProperty: {
+ LoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
+ LoadGlobalICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kLoadKeyed: {
+ KeyedLoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kCall: {
+ CallICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict: {
+ StoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict: {
+ KeyedStoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kBinaryOp: {
+ BinaryOpICNexus nexus(this, slot);
+ os << "BinaryOp:" << nexus.GetBinaryOperationFeedback();
+ break;
+ }
+ case FeedbackSlotKind::kCompareOp: {
+ CompareICNexus nexus(this, slot);
+ os << "CompareOp:" << nexus.GetCompareOperationFeedback();
+ break;
+ }
+ case FeedbackSlotKind::kForIn: {
+ ForInICNexus nexus(this, slot);
+ os << "ForIn:" << nexus.GetForInFeedback();
+ break;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ InstanceOfICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ StoreDataPropertyInLiteralICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kTypeProfile:
+ break;
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
+ }
+}
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSValue");
@@ -915,8 +943,6 @@ void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
target()->ShortPrint(os);
os << "\n - handler = ";
handler()->ShortPrint(os);
- os << "\n - hash = ";
- hash()->ShortPrint(os);
os << "\n";
}
@@ -1062,8 +1088,19 @@ std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "Function");
- os << "\n - initial_map = ";
- if (has_initial_map()) os << Brief(initial_map());
+ os << "\n - function prototype = ";
+ if (has_prototype_slot()) {
+ if (has_prototype()) {
+ os << Brief(prototype());
+ if (map()->has_non_instance_prototype()) {
+ os << " (non-instance prototype)";
+ }
+ }
+ os << "\n - initial_map = ";
+ if (has_initial_map()) os << Brief(initial_map());
+ } else {
+ os << "<no-prototype-slot>";
+ }
os << "\n - shared_info = " << Brief(shared());
os << "\n - name = " << Brief(shared()->name());
os << "\n - formal_parameter_count = "
@@ -1077,6 +1114,14 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - bytecode = " << shared()->bytecode_array();
}
}
+ if (WasmExportedFunction::IsWasmExportedFunction(this)) {
+ WasmExportedFunction* function = WasmExportedFunction::cast(this);
+ os << "\n - WASM instance "
+ << reinterpret_cast<void*>(function->instance());
+ os << "\n context "
+ << reinterpret_cast<void*>(function->instance()->wasm_context()->get());
+ os << "\n - WASM function index " << function->function_index();
+ }
shared()->PrintSourceCode(os);
JSObjectPrintBody(os, this);
os << "\n - feedback vector: ";
@@ -1094,7 +1139,7 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
int start = start_position();
int length = end_position() - start;
std::unique_ptr<char[]> source_string = source->ToCString(
- DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, NULL);
+ DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, nullptr);
os << source_string.get();
}
}
@@ -1108,6 +1153,9 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "<no-shared-name>";
}
os << "\n - kind = " << kind();
+ if (needs_home_object()) {
+ os << "\n - needs_home_object";
+ }
os << "\n - function_map_index = " << function_map_index();
os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
os << "\n - expected_nof_properties = " << expected_nof_properties();
@@ -1157,7 +1205,6 @@ void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
if (!GetIsolate()->bootstrapper()->IsActive()) {
os << "\n - native context = " << Brief(native_context());
}
- os << "\n - hash = " << Brief(hash());
JSObjectPrintBody(os, this);
}
@@ -1246,11 +1293,16 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
os << "\n";
#ifdef ENABLE_DISASSEMBLER
if (FLAG_use_verbose_printer) {
- Disassemble(NULL, os);
+ Disassemble(nullptr, os);
}
#endif
}
+void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "CodeDataContainer");
+ os << "\n - kind_specific_flags: " << kind_specific_flags();
+ os << "\n";
+}
void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
os << "foreign address : " << reinterpret_cast<void*>(foreign_address());
@@ -1261,7 +1313,7 @@ void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessorInfo");
os << "\n - name: " << Brief(name());
- os << "\n - flag: " << flag();
+ os << "\n - flags: " << flags();
os << "\n - getter: " << Brief(getter());
os << "\n - setter: " << Brief(setter());
os << "\n - js_getter: " << Brief(js_getter());
@@ -1340,6 +1392,7 @@ void Module::ModulePrint(std::ostream& os) { // NOLINT
os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
os << "\n - script: " << Brief(script());
+ os << "\n - import_meta: " << Brief(import_meta());
os << "\n - status: " << status();
os << "\n - exception: " << Brief(exception());
os << "\n";
@@ -1358,6 +1411,7 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - registry slot: " << registry_slot();
os << "\n - validity cell: " << Brief(validity_cell());
os << "\n - object create map: " << Brief(object_create_map());
+ os << "\n - should_be_fast_map: " << should_be_fast_map();
os << "\n";
}
@@ -1584,8 +1638,7 @@ void PreParsedScopeData::PreParsedScopeDataPrint(std::ostream& os) { // NOLINT
#endif // OBJECT_PRINT
-#if V8_TRACE_MAPS
-
+// TODO(cbruni): remove once the new maptracer is in place.
void Name::NameShortPrint() {
if (this->IsString()) {
PrintF("%s", String::cast(this)->ToCString().get());
@@ -1600,7 +1653,7 @@ void Name::NameShortPrint() {
}
}
-
+// TODO(cbruni): remove once the new maptracer is in place.
int Name::NameShortPrint(Vector<char> str) {
if (this->IsString()) {
return SNPrintF(str, "%s", String::cast(this)->ToCString().get());
@@ -1615,31 +1668,20 @@ int Name::NameShortPrint(Vector<char> str) {
}
}
-#endif // V8_TRACE_MAPS
-
-#if defined(DEBUG) || defined(OBJECT_PRINT)
-// This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-Latin1 characters
-// will be ignored in the output.
-char* String::ToAsciiArray() {
- // Static so that subsequent calls frees previously allocated space.
- // This also means that previous results will be overwritten.
- static char* buffer = NULL;
- if (buffer != NULL) delete[] buffer;
- buffer = new char[length() + 1];
- WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
- buffer[length()] = 0;
- return buffer;
-}
-
-
-void DescriptorArray::Print() {
- OFStream os(stdout);
- this->PrintDescriptors(os);
- os << std::flush;
+void Map::PrintMapDetails(std::ostream& os, JSObject* holder) {
+ DisallowHeapAllocation no_gc;
+#ifdef OBJECT_PRINT
+ this->MapPrint(os);
+#else
+ os << "Map=" << reinterpret_cast<void*>(this);
+#endif
+ os << "\n";
+ instance_descriptors()->PrintDescriptors(os);
+ if (is_dictionary_map() && holder != nullptr) {
+ os << holder->property_dictionary() << "\n";
+ }
}
-
void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
HandleScope scope(GetIsolate());
os << "Descriptor array #" << number_of_descriptors() << ":";
@@ -1680,6 +1722,26 @@ void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
}
}
+#if defined(DEBUG) || defined(OBJECT_PRINT)
+// This method is only meant to be called from gdb for debugging purposes.
+// Since the string can also be in two-byte encoding, non-Latin1 characters
+// will be ignored in the output.
+char* String::ToAsciiArray() {
+ // Static so that subsequent calls frees previously allocated space.
+ // This also means that previous results will be overwritten.
+ static char* buffer = nullptr;
+ if (buffer != nullptr) delete[] buffer;
+ buffer = new char[length() + 1];
+ WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
+ buffer[length()] = 0;
+ return buffer;
+}
+
+void DescriptorArray::Print() {
+ OFStream os(stdout);
+ this->PrintDescriptors(os);
+ os << std::flush;
+}
// static
void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name* key,
Map* target, Object* raw_target) {
@@ -1722,6 +1784,7 @@ void TransitionArray::Print() {
Print(os);
}
+// TODO(ishell): unify with TransitionArrayPrint().
void TransitionArray::Print(std::ostream& os) {
int num_transitions = number_of_transitions();
os << "Transition array #" << num_transitions << ":";
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index d46612f782..7b3c632a44 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -25,7 +25,6 @@
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/compilation-dependencies.h"
#include "src/compiler.h"
#include "src/counters-inl.h"
@@ -54,6 +53,8 @@
#include "src/map-updater.h"
#include "src/messages.h"
#include "src/objects-body-descriptors-inl.h"
+#include "src/objects/bigint.h"
+#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
@@ -69,6 +70,7 @@
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
#include "src/utils-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -83,6 +85,24 @@
namespace v8 {
namespace internal {
+bool ComparisonResultToBool(Operation op, ComparisonResult result) {
+ switch (op) {
+ case Operation::kLessThan:
+ return result == ComparisonResult::kLessThan;
+ case Operation::kLessThanOrEqual:
+ return result == ComparisonResult::kLessThan ||
+ result == ComparisonResult::kEqual;
+ case Operation::kGreaterThan:
+ return result == ComparisonResult::kGreaterThan;
+ case Operation::kGreaterThanOrEqual:
+ return result == ComparisonResult::kGreaterThan ||
+ result == ComparisonResult::kEqual;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
switch (instance_type) {
#define WRITE_TYPE(TYPE) \
@@ -155,8 +175,9 @@ MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
}
// static
-MaybeHandle<Object> Object::ConvertToNumber(Isolate* isolate,
- Handle<Object> input) {
+MaybeHandle<Object> Object::ConvertToNumberOrNumeric(Isolate* isolate,
+ Handle<Object> input,
+ Conversion mode) {
while (true) {
if (input->IsNumber()) {
return input;
@@ -171,6 +192,12 @@ MaybeHandle<Object> Object::ConvertToNumber(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
Object);
}
+ if (input->IsBigInt()) {
+ if (mode == Conversion::kToNumeric) return input;
+ DCHECK_EQ(mode, Conversion::kToNumber);
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kBigIntToNumber),
+ Object);
+ }
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kNumber),
@@ -181,8 +208,9 @@ MaybeHandle<Object> Object::ConvertToNumber(Isolate* isolate,
// static
MaybeHandle<Object> Object::ConvertToInteger(Isolate* isolate,
Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
if (input->IsSmi()) return input;
return isolate->factory()->NewNumber(DoubleToInteger(input->Number()));
}
@@ -190,8 +218,9 @@ MaybeHandle<Object> Object::ConvertToInteger(Isolate* isolate,
// static
MaybeHandle<Object> Object::ConvertToInt32(Isolate* isolate,
Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
if (input->IsSmi()) return input;
return isolate->factory()->NewNumberFromInt(DoubleToInt32(input->Number()));
}
@@ -199,8 +228,9 @@ MaybeHandle<Object> Object::ConvertToInt32(Isolate* isolate,
// static
MaybeHandle<Object> Object::ConvertToUint32(Isolate* isolate,
Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
return isolate->factory()->NewNumberFromUint(DoubleToUint32(input->Number()));
}
@@ -313,7 +343,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
Handle<Object> input) {
DisallowJavascriptExecution no_js(isolate);
- if (input->IsString() || input->IsNumber() || input->IsOddball()) {
+ if (input->IsString() || input->IsNumeric() || input->IsOddball()) {
return Object::ToString(isolate, input).ToHandleChecked();
} else if (input->IsFunction()) {
// -- F u n c t i o n
@@ -459,6 +489,7 @@ bool Object::BooleanValue() {
if (IsUndetectable()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(this)->value());
+ if (IsBigInt()) return BigInt::cast(this)->ToBoolean();
return true;
}
@@ -479,7 +510,6 @@ ComparisonResult NumberCompare(double x, double y) {
}
}
-
bool NumberEquals(double x, double y) {
// Must check explicitly for NaN's on Windows, but -0 works fine.
if (std::isnan(x)) return false;
@@ -487,18 +517,25 @@ bool NumberEquals(double x, double y) {
return x == y;
}
-
bool NumberEquals(const Object* x, const Object* y) {
return NumberEquals(x->Number(), y->Number());
}
-
bool NumberEquals(Handle<Object> x, Handle<Object> y) {
return NumberEquals(*x, *y);
}
-} // namespace
+ComparisonResult Reverse(ComparisonResult result) {
+ if (result == ComparisonResult::kLessThan) {
+ return ComparisonResult::kGreaterThan;
+ }
+ if (result == ComparisonResult::kGreaterThan) {
+ return ComparisonResult::kLessThan;
+ }
+ return result;
+}
+} // anonymous namespace
// static
Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
@@ -513,19 +550,30 @@ Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
String::Compare(Handle<String>::cast(x), Handle<String>::cast(y)));
}
// ES6 section 7.2.11 Abstract Relational Comparison step 6.
- if (!Object::ToNumber(x).ToHandle(&x) || !Object::ToNumber(y).ToHandle(&y)) {
+ if (!Object::ToNumeric(x).ToHandle(&x) ||
+ !Object::ToNumeric(y).ToHandle(&y)) {
return Nothing<ComparisonResult>();
}
- return Just(NumberCompare(x->Number(), y->Number()));
+
+ bool x_is_number = x->IsNumber();
+ bool y_is_number = y->IsNumber();
+ if (x_is_number && y_is_number) {
+ return Just(NumberCompare(x->Number(), y->Number()));
+ } else if (!x_is_number && !y_is_number) {
+ return Just(BigInt::CompareToBigInt(Handle<BigInt>::cast(x),
+ Handle<BigInt>::cast(y)));
+ } else if (x_is_number) {
+ return Just(Reverse(BigInt::CompareToNumber(Handle<BigInt>::cast(y), x)));
+ } else {
+ return Just(BigInt::CompareToNumber(Handle<BigInt>::cast(x), y));
+ }
}
// static
Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
- // This is the generic version of Abstract Equality Comparison; a version in
- // JavaScript land is available in the EqualStub and NotEqualStub. Whenever
- // you change something functionality wise in here, remember to update the
- // TurboFan code stubs as well.
+ // This is the generic version of Abstract Equality Comparison. Must be in
+ // sync with CodeStubAssembler::Equal.
while (true) {
if (x->IsNumber()) {
if (y->IsNumber()) {
@@ -534,6 +582,8 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
} else if (y->IsString()) {
return Just(NumberEquals(x, String::ToNumber(Handle<String>::cast(y))));
+ } else if (y->IsBigInt()) {
+ return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
} else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
@@ -552,6 +602,9 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (y->IsBoolean()) {
x = String::ToNumber(Handle<String>::cast(x));
return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
+ } else if (y->IsBigInt()) {
+ return Just(BigInt::EqualToString(Handle<BigInt>::cast(y),
+ Handle<String>::cast(x)));
} else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
@@ -568,6 +621,9 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (y->IsString()) {
y = String::ToNumber(Handle<String>::cast(y));
return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
+ } else if (y->IsBigInt()) {
+ x = Oddball::ToNumber(Handle<Oddball>::cast(x));
+ return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
} else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
@@ -588,6 +644,11 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else {
return Just(false);
}
+ } else if (x->IsBigInt()) {
+ if (y->IsBigInt()) {
+ return Just(BigInt::EqualToBigInt(BigInt::cast(*x), BigInt::cast(*y)));
+ }
+ return Equals(y, x);
} else if (x->IsJSReceiver()) {
if (y->IsJSReceiver()) {
return Just(x.is_identical_to(y));
@@ -613,6 +674,9 @@ bool Object::StrictEquals(Object* that) {
} else if (this->IsString()) {
if (!that->IsString()) return false;
return String::cast(this)->Equals(String::cast(that));
+ } else if (this->IsBigInt()) {
+ if (!that->IsBigInt()) return false;
+ return BigInt::EqualToBigInt(BigInt::cast(this), BigInt::cast(that));
}
return this == that;
}
@@ -627,7 +691,7 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
}
if (object->IsString()) return isolate->factory()->string_string();
if (object->IsSymbol()) return isolate->factory()->symbol_string();
- if (object->IsString()) return isolate->factory()->string_string();
+ if (object->IsBigInt()) return isolate->factory()->bigint_string();
if (object->IsCallable()) return isolate->factory()->function_string();
return isolate->factory()->object_string();
}
@@ -662,7 +726,7 @@ MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
- return isolate->factory()->NewNumber(modulo(lhs->Number(), rhs->Number()));
+ return isolate->factory()->NewNumber(Modulo(lhs->Number(), rhs->Number()));
}
@@ -1258,15 +1322,25 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
: isolate->factory()->empty_string();
}
Handle<Code> code = BUILTIN_CODE(isolate, HandleApiCall);
- bool is_constructor = !info->remove_prototype();
+ bool is_constructor;
+ FunctionKind function_kind;
+ if (!info->remove_prototype()) {
+ is_constructor = true;
+ function_kind = kNormalFunction;
+ } else {
+ is_constructor = false;
+ function_kind = kConciseMethod;
+ }
Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
- name_string, code, is_constructor);
+ name_string, code, is_constructor, function_kind);
if (is_constructor) {
result->SetConstructStub(*BUILTIN_CODE(isolate, JSConstructStubApi));
}
result->set_length(info->length());
- if (class_name->IsString()) result->set_instance_class_name(*class_name);
+ if (class_name->IsString()) {
+ result->set_instance_class_name(String::cast(*class_name));
+ }
result->set_api_func_data(*info);
result->DontAdaptArguments();
DCHECK(result->IsApiFunction());
@@ -1365,7 +1439,8 @@ void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
isolate->counters()->cow_arrays_converted()->Increment();
}
-int JSObject::GetHeaderSize(InstanceType type) {
+int JSObject::GetHeaderSize(InstanceType type,
+ bool function_has_prototype_slot) {
switch (type) {
case JS_OBJECT_TYPE:
case JS_API_OBJECT_TYPE:
@@ -1382,7 +1457,8 @@ int JSObject::GetHeaderSize(InstanceType type) {
case JS_BOUND_FUNCTION_TYPE:
return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
- return JSFunction::kSize;
+ return function_has_prototype_slot ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
case JS_VALUE_TYPE:
return JSValue::kSize;
case JS_DATE_TYPE:
@@ -1540,7 +1616,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
}
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
- Object::DONT_THROW);
+ kDontThrow);
Handle<Object> result = args.Call(call_fun, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) return isolate->factory()->undefined_value();
@@ -1591,6 +1667,13 @@ Address AccessorInfo::redirected_getter() const {
return redirect(GetIsolate(), accessor, ACCESSOR_GETTER);
}
+Address CallHandlerInfo::redirected_callback() const {
+ Address address = v8::ToCData<Address>(callback());
+ ApiFunction fun(address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+ return ExternalReference(&fun, type, GetIsolate()).address();
+}
+
bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
Handle<AccessorInfo> info,
Handle<Map> map) {
@@ -1657,7 +1740,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
// (signalling an exception) or a boolean Oddball.
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
if (result.is_null()) return Just(true);
- DCHECK(result->BooleanValue() || should_throw == DONT_THROW);
+ DCHECK(result->BooleanValue() || should_throw == kDontThrow);
return Just(result->BooleanValue());
}
@@ -1705,7 +1788,7 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
return MaybeHandle<Object>();
}
- return Execution::Call(isolate, getter, receiver, 0, NULL);
+ return Execution::Call(isolate, getter, receiver, 0, nullptr);
}
@@ -1767,7 +1850,7 @@ MaybeHandle<Object> GetPropertyWithInterceptorInternal(
isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, Object::DONT_THROW);
+ *holder, kDontThrow);
if (it->IsElement()) {
uint32_t index = it->index();
@@ -1811,7 +1894,7 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
Nothing<PropertyAttributes>());
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, Object::DONT_THROW);
+ *holder, kDontThrow);
if (!interceptor->query()->IsUndefined(isolate)) {
Handle<Object> result;
if (it->IsElement()) {
@@ -1857,7 +1940,7 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
Maybe<bool> SetPropertyWithInterceptorInternal(
LookupIterator* it, Handle<InterceptorInfo> interceptor,
- Object::ShouldThrow should_throw, Handle<Object> value) {
+ ShouldThrow should_throw, Handle<Object> value) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -1901,7 +1984,7 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
Maybe<bool> DefinePropertyWithInterceptorInternal(
LookupIterator* it, Handle<InterceptorInfo> interceptor,
- Object::ShouldThrow should_throw, PropertyDescriptor& desc) {
+ ShouldThrow should_throw, PropertyDescriptor& desc) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -2109,7 +2192,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
} else {
PropertyDetails original_details = dictionary->DetailsAt(entry);
int enumeration_index = original_details.dictionary_index();
- DCHECK(enumeration_index > 0);
+ DCHECK_GT(enumeration_index, 0);
details = details.set_index(enumeration_index);
dictionary->SetEntry(entry, *name, *value, details);
}
@@ -2214,8 +2297,9 @@ MUST_USE_RESULT Maybe<bool> FastAssign(
if (use_set) {
LookupIterator it(target, next_key, target);
- Maybe<bool> result = Object::SetProperty(
- &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ Maybe<bool> result =
+ Object::SetProperty(&it, prop_value, LanguageMode::kStrict,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED);
if (result.IsNothing()) return result;
if (stable) stable = from->map() == *map;
} else {
@@ -2229,9 +2313,8 @@ MUST_USE_RESULT Maybe<bool> FastAssign(
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, target, next_key, &success, LookupIterator::OWN);
CHECK(success);
- CHECK(
- JSObject::CreateDataProperty(&it, prop_value, Object::THROW_ON_ERROR)
- .FromJust());
+ CHECK(JSObject::CreateDataProperty(&it, prop_value, kThrowOnError)
+ .FromJust());
}
}
@@ -2277,8 +2360,9 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
// 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
Handle<Object> status;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, status, Runtime::SetObjectProperty(
- isolate, target, next_key, prop_value, STRICT),
+ isolate, status,
+ Runtime::SetObjectProperty(isolate, target, next_key, prop_value,
+ LanguageMode::kStrict),
Nothing<bool>());
} else {
if (excluded_properties != nullptr &&
@@ -2291,8 +2375,7 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, target, next_key, &success, LookupIterator::OWN);
CHECK(success);
- CHECK(JSObject::CreateDataProperty(&it, prop_value,
- Object::THROW_ON_ERROR)
+ CHECK(JSObject::CreateDataProperty(&it, prop_value, kThrowOnError)
.FromJust());
}
}
@@ -2341,11 +2424,12 @@ Object* GetSimpleHash(Object* object) {
if (object->IsHeapNumber()) {
double num = HeapNumber::cast(object)->value();
if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
- if (i::IsMinusZero(num)) num = 0;
- if (IsSmiDouble(num)) {
- return Smi::FromInt(FastD2I(num))->GetHash();
- }
- uint32_t hash = ComputeLongHash(double_to_uint64(num));
+ // Use ComputeIntegerHash for all values in Signed32 range, including -0,
+ // which is considered equal to 0 because collections use SameValueZero.
+ int32_t inum = FastD2I(num);
+ uint32_t hash = (FastI2D(inum) == num)
+ ? ComputeIntegerHash(inum)
+ : ComputeLongHash(double_to_uint64(num));
return Smi::FromInt(hash & Smi::kMaxValue);
}
if (object->IsName()) {
@@ -2396,8 +2480,6 @@ Smi* Object::GetOrCreateHash(Isolate* isolate) {
bool Object::SameValue(Object* other) {
if (other == this) return true;
- // The object is either a number, a name, an odd-ball,
- // a real JS object, or a Harmony proxy.
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
@@ -2412,7 +2494,7 @@ bool Object::SameValue(Object* other) {
return String::cast(this)->Equals(String::cast(other));
}
if (IsBigInt() && other->IsBigInt()) {
- return BigInt::Equal(BigInt::cast(this), BigInt::cast(other));
+ return BigInt::EqualToBigInt(BigInt::cast(this), BigInt::cast(other));
}
return false;
}
@@ -2421,8 +2503,6 @@ bool Object::SameValue(Object* other) {
bool Object::SameValueZero(Object* other) {
if (other == this) return true;
- // The object is either a number, a name, an odd-ball,
- // a real JS object, or a Harmony proxy.
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
@@ -2433,6 +2513,9 @@ bool Object::SameValueZero(Object* other) {
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
+ if (IsBigInt() && other->IsBigInt()) {
+ return BigInt::EqualToBigInt(BigInt::cast(this), BigInt::cast(other));
+ }
return false;
}
@@ -2530,6 +2613,10 @@ bool Object::IterationHasObservableEffects() {
JSArray* array = JSArray::cast(this);
Isolate* isolate = array->GetIsolate();
+#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+ if (isolate->force_slow_path()) return true;
+#endif
+
// Check that we have the original ArrayPrototype.
if (!array->map()->prototype()->IsJSObject()) return true;
JSObject* array_proto = JSObject::cast(array->map()->prototype());
@@ -2554,7 +2641,7 @@ bool Object::IterationHasObservableEffects() {
// the prototype. This could have different results if the prototype has been
// changed.
if (IsHoleyElementsKind(array_kind) &&
- isolate->IsFastArrayConstructorPrototypeChainIntact()) {
+ isolate->IsNoElementsProtectorIntact()) {
return false;
}
return true;
@@ -2593,7 +2680,7 @@ void Smi::SmiPrint(std::ostream& os) const { // NOLINT
Handle<String> String::SlowFlatten(Handle<ConsString> cons,
PretenureFlag pretenure) {
- DCHECK(cons->second()->length() != 0);
+ DCHECK_NE(cons->second()->length(), 0);
// TurboFan can create cons strings with empty first parts.
while (cons->first()->length() == 0) {
@@ -2646,9 +2733,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
DCHECK(static_cast<size_t>(this->length()) == resource->length());
ScopedVector<uc16> smart_chars(this->length());
String::WriteToFlat(this, smart_chars.start(), 0, this->length());
- DCHECK(memcmp(smart_chars.start(),
- resource->data(),
- resource->length() * sizeof(smart_chars[0])) == 0);
+ DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ resource->length() * sizeof(smart_chars[0])));
}
#endif // DEBUG
int size = this->Size(); // Byte size of the original string.
@@ -2720,9 +2806,8 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
}
ScopedVector<char> smart_chars(this->length());
String::WriteToFlat(this, smart_chars.start(), 0, this->length());
- DCHECK(memcmp(smart_chars.start(),
- resource->data(),
- resource->length() * sizeof(smart_chars[0])) == 0);
+ DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ resource->length() * sizeof(smart_chars[0])));
}
#endif // DEBUG
int size = this->Size(); // Byte size of the original string.
@@ -3063,6 +3148,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case DESCRIPTOR_ARRAY_TYPE:
return kVisitFixedArray;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -3120,6 +3206,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case SMALL_ORDERED_HASH_SET_TYPE:
return kVisitSmallOrderedHashSet;
+ case CODE_DATA_CONTAINER_TYPE:
+ return kVisitCodeDataContainer;
+
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
@@ -3341,6 +3430,10 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case BYTECODE_ARRAY_TYPE:
os << "<BytecodeArray[" << BytecodeArray::cast(this)->length() << "]>";
break;
+ case DESCRIPTOR_ARRAY_TYPE:
+ os << "<DescriptorArray[" << DescriptorArray::cast(this)->length()
+ << "]>";
+ break;
case TRANSITION_ARRAY_TYPE:
os << "<TransitionArray[" << TransitionArray::cast(this)->length()
<< "]>";
@@ -3386,7 +3479,11 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
#undef MAKE_STRUCT_CASE
case CODE_TYPE: {
Code* code = Code::cast(this);
- os << "<Code " << Code::Kind2String(code->kind()) << ">";
+ os << "<Code " << Code::Kind2String(code->kind());
+ if (code->is_stub()) {
+ os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
+ }
+ os << ">";
break;
}
case ODDBALL_TYPE: {
@@ -3549,6 +3646,61 @@ String* JSReceiver::class_name() {
return GetHeap()->Object_string();
}
+bool HeapObject::CanBeRehashed() const {
+ DCHECK(NeedsRehashing());
+ switch (map()->instance_type()) {
+ case HASH_TABLE_TYPE:
+ // TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
+ return IsNameDictionary() || IsGlobalDictionary() ||
+ IsNumberDictionary() || IsStringTable() || IsWeakHashTable();
+ case DESCRIPTOR_ARRAY_TYPE:
+ return true;
+ case TRANSITION_ARRAY_TYPE:
+ return true;
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ return SmallOrderedHashMap::cast(this)->NumberOfElements() == 0;
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return SmallOrderedHashMap::cast(this)->NumberOfElements() == 0;
+ default:
+ return false;
+ }
+ return false;
+}
+
+void HeapObject::RehashBasedOnMap() {
+ switch (map()->instance_type()) {
+ case HASH_TABLE_TYPE:
+ if (IsNameDictionary()) {
+ NameDictionary::cast(this)->Rehash();
+ } else if (IsNumberDictionary()) {
+ NumberDictionary::cast(this)->Rehash();
+ } else if (IsGlobalDictionary()) {
+ GlobalDictionary::cast(this)->Rehash();
+ } else if (IsStringTable()) {
+ StringTable::cast(this)->Rehash();
+ } else if (IsWeakHashTable()) {
+ WeakHashTable::cast(this)->Rehash();
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case DESCRIPTOR_ARRAY_TYPE:
+ DCHECK_LE(1, DescriptorArray::cast(this)->number_of_descriptors());
+ DescriptorArray::cast(this)->Sort();
+ break;
+ case TRANSITION_ARRAY_TYPE:
+ TransitionArray::cast(this)->Sort();
+ break;
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ DCHECK_EQ(0, SmallOrderedHashMap::cast(this)->NumberOfElements());
+ break;
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ DCHECK_EQ(0, SmallOrderedHashSet::cast(this)->NumberOfElements());
+ break;
+ default:
+ break;
+ }
+}
// static
Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
@@ -3563,8 +3715,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Object* maybe_constructor = receiver->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- String* name = constructor->shared()->name();
- if (name->length() == 0) name = constructor->shared()->inferred_name();
+ String* name = constructor->shared()->DebugName();
if (name->length() != 0 &&
!name->Equals(isolate->heap()->Object_string())) {
return handle(name, isolate);
@@ -3591,8 +3742,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Handle<String> result = isolate->factory()->Object_string();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(*maybe_constructor);
- String* name = constructor->shared()->name();
- if (name->length() == 0) name = constructor->shared()->inferred_name();
+ String* name = constructor->shared()->DebugName();
if (name->length() > 0) result = handle(name, isolate);
}
@@ -3603,9 +3753,6 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Handle<Context> JSReceiver::GetCreationContext() {
JSReceiver* receiver = this;
- while (receiver->IsJSBoundFunction()) {
- receiver = JSBoundFunction::cast(receiver)->bound_target_function();
- }
// Externals are JSObjects with null as a constructor.
DCHECK(!receiver->IsExternal());
Object* constructor = receiver->map()->GetConstructor();
@@ -3678,11 +3825,7 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
Descriptor d = Descriptor::DataField(name, index, attributes, constness,
representation, wrapped_type);
Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
- int unused_property_fields = new_map->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += JSObject::kFieldsAdded;
- }
- new_map->set_unused_property_fields(unused_property_fields);
+ new_map->AccountAddedPropertyField();
return new_map;
}
@@ -3756,7 +3899,7 @@ bool Map::TransitionRequiresSynchronizationWithGC(Map* target) const {
bool Map::InstancesNeedRewriting(Map* target) const {
int target_number_of_fields = target->NumberOfFields();
int target_inobject = target->GetInObjectProperties();
- int target_unused = target->unused_property_fields();
+ int target_unused = target->UnusedPropertyFields();
int old_number_of_fields;
return InstancesNeedRewriting(target, target_number_of_fields,
@@ -3856,7 +3999,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
PropertyDetails details = new_map->GetLastDescriptorDetails();
int target_index = details.field_index() - new_map->GetInObjectProperties();
int property_array_length = object->property_array()->length();
- bool have_space = old_map->unused_property_fields() > 0 ||
+ bool have_space = old_map->UnusedPropertyFields() > 0 ||
(details.location() == kField && target_index >= 0 &&
property_array_length > target_index);
// Either new_map adds an kDescriptor property, or a kField property for
@@ -3884,7 +4027,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// This migration is a transition from a map that has run out of property
// space. Extend the backing store.
- int grow_by = new_map->unused_property_fields() + 1;
+ int grow_by = new_map->UnusedPropertyFields() + 1;
Handle<PropertyArray> old_storage(object->property_array());
Handle<PropertyArray> new_storage =
isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
@@ -3898,7 +4041,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
- DCHECK(target_index >= 0); // Must be a backing store index.
+ DCHECK_GE(target_index, 0); // Must be a backing store index.
new_storage->set(target_index, *value);
// From here on we cannot fail and we shouldn't GC anymore.
@@ -3913,7 +4056,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int old_number_of_fields;
int number_of_fields = new_map->NumberOfFields();
int inobject = new_map->GetInObjectProperties();
- int unused = new_map->unused_property_fields();
+ int unused = new_map->UnusedPropertyFields();
// Nothing to do if no functions were converted to fields and no smis were
// converted to doubles.
@@ -4050,7 +4193,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
- DCHECK(instance_size_delta >= 0);
+ DCHECK_GE(instance_size_delta, 0);
if (instance_size_delta > 0) {
Address address = object->address();
@@ -4133,7 +4276,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
- DCHECK(instance_size_delta >= 0);
+ DCHECK_GE(instance_size_delta, 0);
if (instance_size_delta > 0) {
heap->CreateFillerObjectAt(object->address() + new_instance_size,
@@ -4298,11 +4441,7 @@ Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
attributes, Representation::Tagged());
descriptors->Replace(modify_index, &d);
if (details.location() != kField) {
- int unused_property_fields = new_map->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += JSObject::kFieldsAdded;
- }
- new_map->set_unused_property_fields(unused_property_fields);
+ new_map->AccountAddedPropertyField();
}
} else {
DCHECK(details.attributes() == attributes);
@@ -4598,7 +4737,7 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
if (from_kind != to_kind) {
// Try to follow existing elements kind transitions.
root_map = root_map->LookupElementsTransitionMap(to_kind);
- if (root_map == NULL) return MaybeHandle<Map>();
+ if (root_map == nullptr) return MaybeHandle<Map>();
// From here on, use the map with correct elements kind as root map.
}
Map* new_map = root_map->TryReplayPropertyTransitions(*old_map);
@@ -4715,7 +4854,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
it->UpdateProtector();
DCHECK(it->IsFound());
ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -4745,7 +4884,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
} else {
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
- if (!maybe_attributes.IsJust()) return Nothing<bool>();
+ if (maybe_attributes.IsNothing()) return Nothing<bool>();
if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
@@ -4813,7 +4952,7 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
}
ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
return AddDataProperty(it, value, NONE, should_throw, store_mode);
}
@@ -4836,7 +4975,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
// property.
ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
if (!it->GetReceiver()->IsJSReceiver()) {
return WriteToReadOnlyProperty(it, value, should_throw);
@@ -5136,31 +5275,37 @@ Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
return Map::TransitionToPrototype(map, prototype);
}
+// static
+MaybeHandle<Map> Map::TryGetObjectCreateMap(Handle<HeapObject> prototype) {
+ Isolate* isolate = prototype->GetIsolate();
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() == *prototype) return map;
+ if (prototype->IsNull(isolate)) {
+ return isolate->slow_object_with_null_prototype_map();
+ }
+ if (!prototype->IsJSObject()) return MaybeHandle<Map>();
+ Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+ if (!js_prototype->map()->is_prototype_map()) return MaybeHandle<Map>();
+ Handle<PrototypeInfo> info =
+ Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+ if (!info->HasObjectCreateMap()) return MaybeHandle<Map>();
+ return handle(info->ObjectCreateMap(), isolate);
+}
+
template <class T>
static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
Handle<typename T::Array> array,
int valid_descriptors) {
int nof_callbacks = callbacks->length();
- Isolate* isolate = array->GetIsolate();
- // Ensure the keys are unique names before writing them into the
- // instance descriptor. Since it may cause a GC, it has to be done before we
- // temporarily put the heap in an invalid state while appending descriptors.
- for (int i = 0; i < nof_callbacks; ++i) {
- Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
- if (entry->name()->IsUniqueName()) continue;
- Handle<String> key =
- isolate->factory()->InternalizeString(
- Handle<String>(String::cast(entry->name())));
- entry->set_name(*key);
- }
-
// Fill in new callback descriptors. Process the callbacks from
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
Handle<Name> key(Name::cast(entry->name()));
+ DCHECK(key->IsUniqueName());
// Check if a descriptor with this name already exists before writing.
if (!T::Contains(key, entry, valid_descriptors, array)) {
T::Insert(key, entry, valid_descriptors, array);
@@ -5171,27 +5316,6 @@ static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
return valid_descriptors;
}
-struct DescriptorArrayAppender {
- typedef DescriptorArray Array;
- static bool Contains(Handle<Name> key,
- Handle<AccessorInfo> entry,
- int valid_descriptors,
- Handle<DescriptorArray> array) {
- DisallowHeapAllocation no_gc;
- return array->Search(*key, valid_descriptors) != DescriptorArray::kNotFound;
- }
- static void Insert(Handle<Name> key,
- Handle<AccessorInfo> entry,
- int valid_descriptors,
- Handle<DescriptorArray> array) {
- DisallowHeapAllocation no_gc;
- Descriptor d =
- Descriptor::AccessorConstant(key, entry, entry->property_attributes());
- array->Append(&d);
- }
-};
-
-
struct FixedArrayAppender {
typedef FixedArray Array;
static bool Contains(Handle<Name> key,
@@ -5213,17 +5337,6 @@ struct FixedArrayAppender {
};
-void Map::AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors) {
- int nof = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> array(map->instance_descriptors());
- Handle<TemplateList> callbacks = Handle<TemplateList>::cast(descriptors);
- DCHECK_GE(array->NumberOfSlackDescriptors(), callbacks->length());
- nof = AppendUniqueCallbacks<DescriptorArrayAppender>(callbacks, array, nof);
- map->SetNumberOfOwnDescriptors(nof);
-}
-
-
int AccessorInfo::AppendUnique(Handle<Object> descriptors,
Handle<FixedArray> array,
int valid_descriptors) {
@@ -5541,7 +5654,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
Factory* factory = isolate->factory();
Handle<String> trap_name = factory->set_string();
ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
if (proxy->IsRevoked()) {
isolate->Throw(
@@ -5588,7 +5701,7 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
LanguageMode language_mode) {
DCHECK(!name->IsPrivate());
ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
Isolate* isolate = proxy->GetIsolate();
STACK_CHECK(isolate, Nothing<bool>());
Factory* factory = isolate->factory();
@@ -5824,7 +5937,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
int number_of_fields = map->NumberOfFields();
int inobject = map->GetInObjectProperties();
- int unused = map->unused_property_fields();
+ int unused = map->UnusedPropertyFields();
int total_size = number_of_fields + unused;
int external = total_size - inobject;
// Allocate mutable double boxes if necessary. It is always necessary if we
@@ -5922,7 +6035,7 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
DCHECK(!it.IsFound());
DCHECK(object->map()->is_extensible() || name->IsPrivate());
#endif
- CHECK(AddDataProperty(&it, value, attributes, THROW_ON_ERROR,
+ CHECK(AddDataProperty(&it, value, attributes, kThrowOnError,
CERTAINLY_NOT_STORE_FROM_KEYED)
.IsJust());
}
@@ -5935,8 +6048,8 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
AccessorInfoHandling handling) {
- MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(
- it, value, attributes, THROW_ON_ERROR, handling));
+ MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(it, value, attributes,
+ kThrowOnError, handling));
return value;
}
@@ -6071,7 +6184,7 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
case LookupIterator::INTERCEPTOR: {
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithInterceptor(it);
- if (!result.IsJust()) return result;
+ if (result.IsNothing()) return result;
if (result.FromJust() != ABSENT) return result;
break;
}
@@ -6100,19 +6213,23 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
PropertyNormalizationMode mode) {
DisallowHeapAllocation no_gc;
Object* value = FixedArray::get(GetIndex(fast_map));
- if (!value->IsMap() ||
- !Map::cast(value)->EquivalentToForNormalization(*fast_map, mode)) {
+ if (!value->IsWeakCell() || WeakCell::cast(value)->cleared()) {
return MaybeHandle<Map>();
}
- return handle(Map::cast(value));
-}
+ Map* normalized_map = Map::cast(WeakCell::cast(value)->value());
+ if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
+ return MaybeHandle<Map>();
+ }
+ return handle(normalized_map);
+}
-void NormalizedMapCache::Set(Handle<Map> fast_map,
- Handle<Map> normalized_map) {
+void NormalizedMapCache::Set(Handle<Map> fast_map, Handle<Map> normalized_map,
+ Handle<WeakCell> normalized_map_weak_cell) {
DisallowHeapAllocation no_gc;
DCHECK(normalized_map->is_dictionary_map());
- FixedArray::set(GetIndex(fast_map), *normalized_map);
+ DCHECK_EQ(normalized_map_weak_cell->value(), *normalized_map);
+ FixedArray::set(GetIndex(fast_map), *normalized_map_weak_cell);
}
@@ -6187,19 +6304,15 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
NotifyMapChange(old_map, new_map, isolate);
-#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
- PrintF("[TraceMaps: SlowToFast from= %p to= %p reason= %s ]\n",
- reinterpret_cast<void*>(*old_map), reinterpret_cast<void*>(*new_map),
- reason);
+ LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
}
-#endif
if (instance_descriptor_length == 0) {
DisallowHeapAllocation no_gc;
DCHECK_LE(unused_property_fields, inobject_props);
// Transform the object.
- new_map->set_unused_property_fields(inobject_props);
+ new_map->SetInObjectUnusedPropertyFields(inobject_props);
object->synchronized_set_map(*new_map);
object->SetProperties(isolate->heap()->empty_fixed_array());
// Check that it really works.
@@ -6291,7 +6404,11 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DisallowHeapAllocation no_gc;
new_map->InitializeDescriptors(*descriptors, *layout_descriptor);
- new_map->set_unused_property_fields(unused_property_fields);
+ if (number_of_allocated_fields == 0) {
+ new_map->SetInObjectUnusedPropertyFields(unused_property_fields);
+ } else {
+ new_map->SetOutOfObjectUnusedPropertyFields(unused_property_fields);
+ }
// Transform the object.
object->synchronized_set_map(*new_map);
@@ -6303,7 +6420,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DCHECK(object->HasFastProperties());
}
-void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
+void JSObject::RequireSlowElements(NumberDictionary* dictionary) {
if (dictionary->requires_slow_elements()) return;
dictionary->set_requires_slow_elements();
if (map()->is_prototype_map()) {
@@ -6313,9 +6430,7 @@ void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
}
}
-
-Handle<SeededNumberDictionary> JSObject::NormalizeElements(
- Handle<JSObject> object) {
+Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
DCHECK(!object->HasFixedTypedArrayElements());
Isolate* isolate = object->GetIsolate();
bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
@@ -6328,7 +6443,7 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
}
if (elements->IsDictionary()) {
- return handle(SeededNumberDictionary::cast(elements), isolate);
+ return handle(NumberDictionary::cast(elements), isolate);
}
}
@@ -6336,7 +6451,7 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
object->HasFastArgumentsElements() ||
object->HasFastStringWrapperElements());
- Handle<SeededNumberDictionary> dictionary =
+ Handle<NumberDictionary> dictionary =
object->GetElementsAccessor()->Normalize(object);
// Switch to using the dictionary as the backing storage for elements.
@@ -6407,10 +6522,14 @@ int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) {
return PropertyArray::cast(properties)->Hash();
}
- if (properties->IsDictionary()) {
+ if (properties->IsNameDictionary()) {
return NameDictionary::cast(properties)->Hash();
}
+ if (properties->IsGlobalDictionary()) {
+ return GlobalDictionary::cast(properties)->Hash();
+ }
+
#ifdef DEBUG
FixedArray* empty_fixed_array = isolate->heap()->empty_fixed_array();
FixedArray* empty_property_dictionary =
@@ -6452,22 +6571,8 @@ void JSReceiver::SetProperties(HeapObject* properties) {
set_raw_properties_or_hash(new_properties);
}
-template <typename ProxyType>
-Smi* GetOrCreateIdentityHashHelper(Isolate* isolate, ProxyType* proxy) {
- DisallowHeapAllocation no_gc;
- Object* maybe_hash = proxy->hash();
- if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
-
- Smi* hash = Smi::FromInt(isolate->GenerateIdentityHash(Smi::kMaxValue));
- proxy->set_hash(hash);
- return hash;
-}
-
-Object* JSObject::GetIdentityHash(Isolate* isolate) {
+Object* JSReceiver::GetIdentityHash(Isolate* isolate) {
DisallowHeapAllocation no_gc;
- if (IsJSGlobalProxy()) {
- return JSGlobalProxy::cast(this)->hash();
- }
int hash = GetIdentityHashHelper(isolate, this);
if (hash == PropertyArray::kNoHashSentinel) {
@@ -6477,30 +6582,26 @@ Object* JSObject::GetIdentityHash(Isolate* isolate) {
return Smi::FromInt(hash);
}
-Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate) {
+// static
+Smi* JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver* key) {
DisallowHeapAllocation no_gc;
- if (IsJSGlobalProxy()) {
- return GetOrCreateIdentityHashHelper(isolate, JSGlobalProxy::cast(this));
- }
-
- Object* hash_obj = GetIdentityHash(isolate);
- if (!hash_obj->IsUndefined(isolate)) {
- return Smi::cast(hash_obj);
- }
-
int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
- SetIdentityHash(hash);
+ key->SetIdentityHash(hash);
return Smi::FromInt(hash);
}
-Object* JSProxy::GetIdentityHash() { return hash(); }
+Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
-Smi* JSProxy::GetOrCreateIdentityHash(Isolate* isolate) {
- return GetOrCreateIdentityHashHelper(isolate, this);
-}
+ Object* hash_obj = GetIdentityHash(isolate);
+ if (!hash_obj->IsUndefined(isolate)) {
+ return Smi::cast(hash_obj);
+ }
+ return JSReceiver::CreateIdentityHash(isolate, this);
+}
Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
ShouldThrow should_throw) {
@@ -6607,7 +6708,7 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
return Just(false);
case LookupIterator::INTERCEPTOR: {
ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
Maybe<bool> result =
JSObject::DeletePropertyWithInterceptor(it, should_throw);
// An exception was thrown in the interceptor. Propagate.
@@ -6691,7 +6792,7 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
}
// 6. Let success be DefinePropertyOrThrow(O,key, desc).
Maybe<bool> success = DefineOwnProperty(
- isolate, Handle<JSReceiver>::cast(object), key, &desc, THROW_ON_ERROR);
+ isolate, Handle<JSReceiver>::cast(object), key, &desc, kThrowOnError);
// 7. ReturnIfAbrupt(success).
MAYBE_RETURN(success, isolate->heap()->exception());
CHECK(success.FromJust());
@@ -6740,7 +6841,7 @@ MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
isolate, props, next_key, &success, LookupIterator::OWN);
DCHECK(success);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
+ if (maybe.IsNothing()) return MaybeHandle<Object>();
PropertyAttributes attrs = maybe.FromJust();
// 7c. If propDesc is not undefined and propDesc.[[Enumerable]] is true:
if (attrs == ABSENT) continue;
@@ -6768,16 +6869,15 @@ MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
// 8c. Let status be DefinePropertyOrThrow(O, P, desc).
Maybe<bool> status =
DefineOwnProperty(isolate, Handle<JSReceiver>::cast(object),
- desc->name(), desc, THROW_ON_ERROR);
+ desc->name(), desc, kThrowOnError);
// 8d. ReturnIfAbrupt(status).
- if (!status.IsJust()) return MaybeHandle<Object>();
+ if (status.IsNothing()) return MaybeHandle<Object>();
CHECK(status.FromJust());
}
// 9. Return o.
return object;
}
-
// static
Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
Handle<JSReceiver> object,
@@ -6880,7 +6980,7 @@ Maybe<bool> JSReceiver::IsCompatiblePropertyDescriptor(
// 1. Return ValidateAndApplyPropertyDescriptor(undefined, undefined,
// Extensible, Desc, Current).
return ValidateAndApplyPropertyDescriptor(
- isolate, NULL, extensible, desc, current, should_throw, property_name);
+ isolate, nullptr, extensible, desc, current, should_throw, property_name);
}
@@ -6891,9 +6991,9 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
PropertyDescriptor* desc, PropertyDescriptor* current,
ShouldThrow should_throw, Handle<Name> property_name) {
// We either need a LookupIterator, or a property name.
- DCHECK((it == NULL) != property_name.is_null());
+ DCHECK((it == nullptr) != property_name.is_null());
Handle<JSObject> object;
- if (it != NULL) object = Handle<JSObject>::cast(it->GetReceiver());
+ if (it != nullptr) object = Handle<JSObject>::cast(it->GetReceiver());
bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc);
bool desc_is_accessor_descriptor =
PropertyDescriptor::IsAccessorDescriptor(desc);
@@ -6904,9 +7004,10 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
if (current->is_empty()) {
// 2a. If extensible is false, return false.
if (!extensible) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kDefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kDefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
}
// 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
// (This is equivalent to !IsAccessorDescriptor(desc).)
@@ -6918,7 +7019,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
// [[Configurable]] attribute values are described by Desc. If the value
// of an attribute field of Desc is absent, the attribute of the newly
// created property is set to its default value.
- if (it != NULL) {
+ if (it != nullptr) {
if (!desc->has_writable()) desc->set_writable(false);
if (!desc->has_enumerable()) desc->set_enumerable(false);
if (!desc->has_configurable()) desc->set_configurable(false);
@@ -6939,7 +7040,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
// [[Configurable]] attribute values are described by Desc. If the value
// of an attribute field of Desc is absent, the attribute of the newly
// created property is set to its default value.
- if (it != NULL) {
+ if (it != nullptr) {
if (!desc->has_enumerable()) desc->set_enumerable(false);
if (!desc->has_configurable()) desc->set_configurable(false);
Handle<Object> getter(
@@ -6980,17 +7081,19 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
if (!current->configurable()) {
// 5a. Return false, if the [[Configurable]] field of Desc is true.
if (desc->has_configurable() && desc->configurable()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
}
// 5b. Return false, if the [[Enumerable]] field of Desc is present and the
// [[Enumerable]] fields of current and Desc are the Boolean negation of
// each other.
if (desc->has_enumerable() && desc->enumerable() != current->enumerable()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
}
}
@@ -7005,9 +7108,10 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
} else if (current_is_data_descriptor != desc_is_data_descriptor) {
// 7a. Return false, if the [[Configurable]] field of current is false.
if (!current->configurable()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
}
// 7b. If IsDataDescriptor(current) is true, then:
if (current_is_data_descriptor) {
@@ -7037,7 +7141,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ it != nullptr ? it->GetName() : property_name));
}
// 8a ii. If the [[Writable]] field of current is false, then:
if (!current->writable()) {
@@ -7047,7 +7151,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ it != nullptr ? it->GetName() : property_name));
}
}
}
@@ -7064,7 +7168,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ it != nullptr ? it->GetName() : property_name));
}
// 9a ii. Return false, if the [[Get]] field of Desc is present and
// SameValue(Desc.[[Get]], current.[[Get]]) is false.
@@ -7072,13 +7176,13 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != NULL ? it->GetName() : property_name));
+ it != nullptr ? it->GetName() : property_name));
}
}
}
// 10. If O is not undefined, then:
- if (it != NULL) {
+ if (it != nullptr) {
// 10a. For each field of Desc that is present, set the corresponding
// attribute of the property named P of object O to the value of the field.
PropertyAttributes attrs = NONE;
@@ -7249,7 +7353,7 @@ Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
Maybe<bool> succeeded =
OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
// 3h. Assert: succeeded is not an abrupt completion.
- // In our case, if should_throw == THROW_ON_ERROR, it can be!
+ // In our case, if should_throw == kThrowOnError, it can be!
// 3i. If succeeded is false, return false.
if (succeeded.IsNothing() || !succeeded.FromJust()) return succeeded;
// 3j. If index >= oldLen, then:
@@ -7491,7 +7595,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
// targetDesc) is false, throw a TypeError exception.
Maybe<bool> valid =
IsCompatiblePropertyDescriptor(isolate, extensible_target, desc,
- &target_desc, property_name, DONT_THROW);
+ &target_desc, property_name, kDontThrow);
MAYBE_RETURN(valid, Nothing<bool>());
if (!valid.FromJust()) {
isolate->Throw(*isolate->factory()->NewTypeError(
@@ -7584,7 +7688,7 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, Object::DONT_THROW);
+ *holder, kDontThrow);
if (it->IsElement()) {
uint32_t index = it->index();
v8::IndexedPropertyDescriptorCallback descriptorCallback =
@@ -7776,7 +7880,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
// resultDesc, targetDesc).
Maybe<bool> valid =
IsCompatiblePropertyDescriptor(isolate, extensible_target.FromJust(),
- desc, &target_desc, name, DONT_THROW);
+ desc, &target_desc, name, kDontThrow);
MAYBE_RETURN(valid, Nothing<bool>());
// 16. If valid is false, throw a TypeError exception.
if (!valid.FromJust()) {
@@ -7813,8 +7917,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
}
} else {
DCHECK(kind == DICTIONARY_ELEMENTS || kind == SLOW_STRING_WRAPPER_ELEMENTS);
- Object* key =
- SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
+ Object* key = NumberDictionary::cast(elements)->SlowReverseLookup(object);
if (!key->IsUndefined(isolate)) return true;
}
return false;
@@ -7982,7 +8085,7 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
for (int i = 0; i < keys->length(); ++i) {
Handle<Object> key(keys->get(i), isolate);
MAYBE_RETURN(
- DefineOwnProperty(isolate, receiver, key, &no_conf, THROW_ON_ERROR),
+ DefineOwnProperty(isolate, receiver, key, &no_conf, kThrowOnError),
Nothing<bool>());
}
return Just(true);
@@ -8000,7 +8103,7 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
? no_conf
: no_conf_no_write;
MAYBE_RETURN(
- DefineOwnProperty(isolate, receiver, key, &desc, THROW_ON_ERROR),
+ DefineOwnProperty(isolate, receiver, key, &desc, kThrowOnError),
Nothing<bool>());
}
}
@@ -8064,7 +8167,7 @@ bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
if (IsDictionaryElementsKind(kind)) {
return TestDictionaryPropertiesIntegrityLevel(
- SeededNumberDictionary::cast(object->elements()), object->GetIsolate(),
+ NumberDictionary::cast(object->elements()), object->GetIsolate(),
level);
}
@@ -8226,7 +8329,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
if (!object->HasFixedTypedArrayElements()) {
// If there are fast elements we normalize.
- Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ Handle<NumberDictionary> dictionary = NormalizeElements(object);
DCHECK(object->HasDictionaryElements() ||
object->HasSlowArgumentsElements());
@@ -8382,7 +8485,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
RETURN_FAILURE(isolate, should_throw, NewTypeError(message));
}
- Handle<SeededNumberDictionary> new_element_dictionary;
+ Handle<NumberDictionary> new_element_dictionary;
if (!object->HasFixedTypedArrayElements() &&
!object->HasDictionaryElements() &&
!object->HasSlowStringWrapperElements()) {
@@ -8407,7 +8510,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<Map> old_map(object->map(), isolate);
TransitionsAccessor transitions(old_map);
Map* transition = transitions.SearchSpecial(*transition_marker);
- if (transition != NULL) {
+ if (transition != nullptr) {
Handle<Map> transition_map(transition, isolate);
DCHECK(transition_map->has_dictionary_elements() ||
transition_map->has_fixed_typed_array_elements() ||
@@ -8471,8 +8574,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
- Handle<SeededNumberDictionary> dictionary(object->element_dictionary(),
- isolate);
+ Handle<NumberDictionary> dictionary(object->element_dictionary(), isolate);
// Make sure we never go back to the fast case
object->RequireSlowElements(*dictionary);
if (attrs != NONE) {
@@ -8545,8 +8647,8 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
if (method->IsCallable()) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, method, receiver, 0, NULL),
- Object);
+ isolate, result,
+ Execution::Call(isolate, method, receiver, 0, nullptr), Object);
if (result->IsPrimitive()) return result;
}
}
@@ -8604,8 +8706,7 @@ bool JSObject::HasEnumerableElements() {
return length > 0;
}
case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* elements =
- SeededNumberDictionary::cast(object->elements());
+ NumberDictionary* elements = NumberDictionary::cast(object->elements());
return elements->NumberOfEnumerableProperties() > 0;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -8822,7 +8923,7 @@ bool Map::DictionaryElementsInPrototypeChainOnly() {
if (current->HasSlowArgumentsElements()) {
FixedArray* parameter_map = FixedArray::cast(current->elements());
Object* arguments = parameter_map->get(1);
- if (SeededNumberDictionary::cast(arguments)->requires_slow_elements()) {
+ if (NumberDictionary::cast(arguments)->requires_slow_elements()) {
return true;
}
}
@@ -8877,11 +8978,11 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
return isolate->factory()->undefined_value();
}
-
MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
- Handle<AccessorInfo> info) {
+ Handle<Name> name,
+ Handle<AccessorInfo> info,
+ PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
- Handle<Name> name(Name::cast(info->name()), isolate);
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -8913,7 +9014,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
return it.factory()->undefined_value();
}
- it.TransitionToAccessorPair(info, info->property_attributes());
+ it.TransitionToAccessorPair(info, attributes);
return object;
}
@@ -8964,11 +9065,12 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
-
-Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
+Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size,
+ int inobject_properties) {
Isolate* isolate = map->GetIsolate();
- Handle<Map> result =
- isolate->factory()->NewMap(map->instance_type(), instance_size);
+ Handle<Map> result = isolate->factory()->NewMap(
+ map->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
+ inobject_properties);
Handle<Object> prototype(map->prototype(), isolate);
Map::SetPrototype(result, prototype);
result->set_constructor_or_backpointer(map->GetConstructor());
@@ -9014,39 +9116,35 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
if (new_map->is_prototype_map()) {
// For prototype maps, the PrototypeInfo is not copied.
- DCHECK(memcmp(fresh->address(), new_map->address(),
- kTransitionsOrPrototypeInfoOffset) == 0);
- DCHECK(fresh->raw_transitions() == Smi::kZero);
+ DCHECK_EQ(0, memcmp(fresh->address(), new_map->address(),
+ kTransitionsOrPrototypeInfoOffset));
+ DCHECK_EQ(fresh->raw_transitions(), Smi::kZero);
STATIC_ASSERT(kDescriptorsOffset ==
kTransitionsOrPrototypeInfoOffset + kPointerSize);
- DCHECK(memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
- HeapObject::RawField(*new_map, kDescriptorsOffset),
- kDependentCodeOffset - kDescriptorsOffset) == 0);
+ DCHECK_EQ(0, memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
+ HeapObject::RawField(*new_map, kDescriptorsOffset),
+ kDependentCodeOffset - kDescriptorsOffset));
} else {
- DCHECK(memcmp(fresh->address(), new_map->address(),
- Map::kDependentCodeOffset) == 0);
+ DCHECK_EQ(0, memcmp(fresh->address(), new_map->address(),
+ Map::kDependentCodeOffset));
}
STATIC_ASSERT(Map::kWeakCellCacheOffset ==
Map::kDependentCodeOffset + kPointerSize);
int offset = Map::kWeakCellCacheOffset + kPointerSize;
- DCHECK(memcmp(fresh->address() + offset,
- new_map->address() + offset,
- Map::kSize - offset) == 0);
+ DCHECK_EQ(0, memcmp(fresh->address() + offset,
+ new_map->address() + offset, Map::kSize - offset));
}
#endif
} else {
new_map = Map::CopyNormalized(fast_map, mode);
if (use_cache) {
- cache->Set(fast_map, new_map);
+ Handle<WeakCell> cell = Map::WeakCellForMap(new_map);
+ cache->Set(fast_map, new_map, cell);
isolate->counters()->maps_normalized()->Increment();
}
-#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
- PrintF("[TraceMaps: Normalize from= %p to= %p reason= %s ]\n",
- reinterpret_cast<void*>(*fast_map),
- reinterpret_cast<void*>(*new_map), reason);
+ LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
}
-#endif
}
fast_map->NotifyLeafMapLayoutChange();
return new_map;
@@ -9060,12 +9158,12 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
new_instance_size -= map->GetInObjectProperties() * kPointerSize;
}
- Handle<Map> result = RawCopy(map, new_instance_size);
-
- if (mode != CLEAR_INOBJECT_PROPERTIES) {
- result->SetInObjectProperties(map->GetInObjectProperties());
- }
-
+ Handle<Map> result = RawCopy(
+ map, new_instance_size,
+ mode == CLEAR_INOBJECT_PROPERTIES ? 0 : map->GetInObjectProperties());
+ // Clear the unused_property_fields explicitly as this field should not
+ // be accessed for normalized maps.
+ result->SetInObjectUnusedPropertyFields(0);
result->set_dictionary_map(true);
result->set_migration_target(false);
result->set_may_have_interesting_symbols(true);
@@ -9128,14 +9226,13 @@ Handle<Map> Map::CopyInitialMapNormalized(Handle<Map> map,
// static
Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
- int in_object_properties,
+ int inobject_properties,
int unused_property_fields) {
EnsureInitialMap(map);
- Handle<Map> result = RawCopy(map, instance_size);
+ Handle<Map> result = RawCopy(map, instance_size, inobject_properties);
// Please note instance_type and instance_size are set when allocated.
- result->SetInObjectProperties(in_object_properties);
- result->set_unused_property_fields(unused_property_fields);
+ result->SetInObjectUnusedPropertyFields(unused_property_fields);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors > 0) {
@@ -9145,7 +9242,7 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
result->SetNumberOfOwnDescriptors(number_of_own_descriptors);
DCHECK_EQ(result->NumberOfFields(),
- in_object_properties - unused_property_fields);
+ result->GetInObjectProperties() - result->UnusedPropertyFields());
}
return result;
@@ -9153,12 +9250,13 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
- Handle<Map> result = RawCopy(map, map->instance_size());
+ Handle<Map> result =
+ RawCopy(map, map->instance_size(),
+ map->IsJSObjectMap() ? map->GetInObjectProperties() : 0);
// Please note instance_type and instance_size are set when allocated.
if (map->IsJSObjectMap()) {
- result->SetInObjectProperties(map->GetInObjectProperties());
- result->set_unused_property_fields(map->unused_property_fields());
+ result->CopyUnusedPropertyFields(*map);
}
map->NotifyLeafMapLayoutChange();
return result;
@@ -9211,34 +9309,6 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
return result;
}
-#if V8_TRACE_MAPS
-
-// static
-void Map::TraceTransition(const char* what, Map* from, Map* to, Name* name) {
- if (FLAG_trace_maps) {
- PrintF("[TraceMaps: %s from= %p to= %p name= ", what,
- reinterpret_cast<void*>(from), reinterpret_cast<void*>(to));
- name->NameShortPrint();
- PrintF(" ]\n");
- }
-}
-
-
-// static
-void Map::TraceAllTransitions(Map* map) {
- DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(map, &no_gc);
- int num_transitions = transitions.NumberOfTransitions();
- for (int i = -0; i < num_transitions; ++i) {
- Map* target = transitions.GetTarget(i);
- Name* key = transitions.GetKey(i);
- Map::TraceTransition("Transition", map, target, key);
- Map::TraceAllTransitions(target);
- }
-}
-
-#endif // V8_TRACE_MAPS
-
void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag) {
Isolate* isolate = parent->GetIsolate();
@@ -9249,6 +9319,11 @@ void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
// Do not track transitions during bootstrap except for element transitions.
if (isolate->bootstrapper()->IsActive() &&
!name.is_identical_to(isolate->factory()->elements_transition_symbol())) {
+ if (FLAG_trace_maps) {
+ LOG(isolate,
+ MapEvent("Transition", *parent, *child,
+ child->is_prototype_map() ? "prototype" : "", *name));
+ }
return;
}
if (!parent->GetBackPointer()->IsUndefined(isolate)) {
@@ -9262,14 +9337,14 @@ void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
}
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
-#if V8_TRACE_MAPS
- Map::TraceTransition("NoTransition", *parent, *child, *name);
-#endif
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Transition", *parent, *child, "prototype", *name));
+ }
} else {
TransitionsAccessor(parent).Insert(name, child, flag);
-#if V8_TRACE_MAPS
- Map::TraceTransition("Transition", *parent, *child, *name);
-#endif
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Transition", *parent, *child, "", *name));
+ }
}
}
@@ -9304,18 +9379,14 @@ Handle<Map> Map::CopyReplaceDescriptors(
} else {
result->InitializeDescriptors(*descriptors, *layout_descriptor);
}
-#if V8_TRACE_MAPS
if (FLAG_trace_maps &&
// Mirror conditions above that did not call ConnectTransition().
(map->is_prototype_map() ||
!(flag == INSERT_TRANSITION &&
TransitionsAccessor(map).CanHaveMoreTransitions()))) {
- PrintF("[TraceMaps: ReplaceDescriptors from= %p to= %p reason= %s ]\n",
- reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*result),
- reason);
+ LOG(map->GetIsolate(), MapEvent("ReplaceDescriptors", *map, *result, reason,
+ maybe_name.is_null() ? nullptr : *name));
}
-#endif
-
return result;
}
@@ -9338,9 +9409,13 @@ Handle<Map> Map::AddMissingTransitions(
// Number of unused properties is temporarily incorrect and the layout
// descriptor could unnecessarily be in slow mode but we will fix after
// all the other intermediate maps are created.
+ // Also the last map might have interesting symbols, we temporarily set
+ // the flag and clear it right before the descriptors are installed. This
+ // makes heap verification happy and ensures the flag ends up accurate.
Handle<Map> last_map = CopyDropDescriptors(split_map);
last_map->InitializeDescriptors(*descriptors, *full_layout_descriptor);
- last_map->set_unused_property_fields(0);
+ last_map->SetInObjectUnusedPropertyFields(0);
+ last_map->set_may_have_interesting_symbols(true);
// During creation of intermediate maps we violate descriptors sharing
// invariant since the last map is not yet connected to the transition tree
@@ -9354,6 +9429,7 @@ Handle<Map> Map::AddMissingTransitions(
map = new_map;
}
map->NotifyLeafMapLayoutChange();
+ last_map->set_may_have_interesting_symbols(false);
InstallDescriptors(map, last_map, nof_descriptors - 1, descriptors,
full_layout_descriptor);
return last_map;
@@ -9370,16 +9446,11 @@ void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
child->set_instance_descriptors(*descriptors);
child->SetNumberOfOwnDescriptors(new_descriptor + 1);
-
- int unused_property_fields = parent->unused_property_fields();
+ child->CopyUnusedPropertyFields(*parent);
PropertyDetails details = descriptors->GetDetails(new_descriptor);
if (details.location() == kField) {
- unused_property_fields = parent->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += JSObject::kFieldsAdded;
- }
+ child->AccountAddedPropertyField();
}
- child->set_unused_property_fields(unused_property_fields);
if (FLAG_unbox_double_fields) {
Handle<LayoutDescriptor> layout_descriptor =
@@ -9414,14 +9485,14 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
!map->CanHaveFastTransitionableElementsKind(),
IsDictionaryElementsKind(kind) || IsTerminalElementsKind(kind));
- Map* maybe_elements_transition_map = NULL;
+ Map* maybe_elements_transition_map = nullptr;
if (flag == INSERT_TRANSITION) {
// Ensure we are requested to add elements kind transition "near the root".
DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
maybe_elements_transition_map = map->ElementsTransitionMap();
- DCHECK(maybe_elements_transition_map == NULL ||
+ DCHECK(maybe_elements_transition_map == nullptr ||
(maybe_elements_transition_map->elements_kind() ==
DICTIONARY_ELEMENTS &&
kind == DICTIONARY_ELEMENTS));
@@ -9432,7 +9503,7 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
bool insert_transition = flag == INSERT_TRANSITION &&
TransitionsAccessor(map).CanHaveMoreTransitions() &&
- maybe_elements_transition_map == NULL;
+ maybe_elements_transition_map == nullptr;
if (insert_transition) {
Handle<Map> new_map = CopyForTransition(map, "CopyAsElementsKind");
@@ -9462,13 +9533,13 @@ Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
Handle<Map> function_map(Map::cast(
isolate->native_context()->get(shared_info->function_map_index())));
- STATIC_ASSERT(LANGUAGE_END == 2);
- DCHECK_EQ(STRICT, shared_info->language_mode());
+ STATIC_ASSERT(LanguageModeSize == 2);
+ DCHECK_EQ(LanguageMode::kStrict, shared_info->language_mode());
Handle<Symbol> transition_symbol =
isolate->factory()->strict_function_transition_symbol();
Map* maybe_transition =
TransitionsAccessor(initial_map).SearchSpecial(*transition_symbol);
- if (maybe_transition != NULL) {
+ if (maybe_transition != nullptr) {
return handle(maybe_transition, isolate);
}
initial_map->NotifyLeafMapLayoutChange();
@@ -9478,9 +9549,10 @@ Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
Handle<Map> map =
Map::CopyInitialMap(function_map, initial_map->instance_size(),
initial_map->GetInObjectProperties(),
- initial_map->unused_property_fields());
+ initial_map->UnusedPropertyFields());
map->SetConstructor(initial_map->GetConstructor());
map->set_prototype(initial_map->prototype());
+ map->set_construction_counter(initial_map->construction_counter());
if (TransitionsAccessor(initial_map).CanHaveMoreTransitions()) {
Map::ConnectTransition(initial_map, map, transition_symbol,
@@ -9512,14 +9584,10 @@ Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
new_map->InitializeDescriptors(*new_descriptors, *new_layout_descriptor);
}
-#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
- PrintF("[TraceMaps: CopyForTransition from= %p to= %p reason= %s ]\n",
- reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*new_map),
- reason);
+ LOG(map->GetIsolate(),
+ MapEvent("CopyForTransition", *map, *new_map, reason));
}
-#endif
-
return new_map;
}
@@ -9552,9 +9620,10 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
JSObject::kHeaderSize + kPointerSize * inobject_properties;
// Adjust the map with the extra inobject properties.
- copy->SetInObjectProperties(inobject_properties);
- copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(new_instance_size);
+ copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kPointerSize);
+ DCHECK_EQ(copy->GetInObjectProperties(), inobject_properties);
+ copy->SetInObjectUnusedPropertyFields(inobject_properties);
copy->set_visitor_id(Map::GetVisitorId(*copy));
return copy;
}
@@ -9665,7 +9734,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
Map* maybe_transition =
TransitionsAccessor(map).SearchTransition(*name, kData, attributes);
- if (maybe_transition != NULL) {
+ if (maybe_transition != nullptr) {
*created_new_map = false;
Handle<Map> transition(maybe_transition);
int descriptor = transition->LastAdded();
@@ -9789,7 +9858,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Map* maybe_transition =
TransitionsAccessor(map).SearchTransition(*name, kAccessor, attributes);
- if (maybe_transition != NULL) {
+ if (maybe_transition != nullptr) {
Handle<Map> transition(maybe_transition, isolate);
DescriptorArray* descriptors = transition->instance_descriptors();
int descriptor = transition->LastAdded();
@@ -9983,7 +10052,11 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
int insertion_index,
TransitionFlag flag) {
Handle<Name> key = descriptor->GetKey();
- DCHECK(*key == descriptors->GetKey(insertion_index));
+ DCHECK_EQ(*key, descriptors->GetKey(insertion_index));
+ // This function does not support replacing property fields as
+ // that would break property field counters.
+ DCHECK_NE(kField, descriptor->GetDetails().location());
+ DCHECK_NE(kField, descriptors->GetDetails(insertion_index).location());
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
descriptors, map->NumberOfOwnDescriptors());
@@ -10077,7 +10150,7 @@ Handle<WeakFixedArray> WeakFixedArray::Add(Handle<Object> maybe_array,
for (int i = first_index;;) {
if (array->IsEmptySlot((i))) {
WeakFixedArray::Set(array, i, value);
- if (assigned_index != NULL) *assigned_index = i;
+ if (assigned_index != nullptr) *assigned_index = i;
return array;
}
if (FLAG_trace_weak_arrays) {
@@ -10096,7 +10169,7 @@ Handle<WeakFixedArray> WeakFixedArray::Add(Handle<Object> maybe_array,
PrintF("[WeakFixedArray: growing to size %d ]\n", new_length);
}
WeakFixedArray::Set(new_array, length, value);
- if (assigned_index != NULL) *assigned_index = length;
+ if (assigned_index != nullptr) *assigned_index = length;
return new_array;
}
@@ -10167,7 +10240,7 @@ bool WeakFixedArray::Remove(Handle<HeapObject> value) {
// static
Handle<WeakFixedArray> WeakFixedArray::Allocate(
Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from) {
- DCHECK(0 <= size);
+ DCHECK_LE(0, size);
Handle<FixedArray> result =
isolate->factory()->NewUninitializedFixedArray(size + kFirstIndex);
int index = 0;
@@ -10294,14 +10367,19 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
// static
Handle<FrameArray> FrameArray::AppendWasmFrame(
Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
- int wasm_function_index, Handle<AbstractCode> code, int offset, int flags) {
+ int wasm_function_index, WasmCodeWrapper code, int offset, int flags) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
Handle<FrameArray> array = EnsureSpace(in, new_length);
array->SetWasmInstance(frame_count, *wasm_instance);
array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
// code will be a null handle for interpreted wasm frames.
- if (!code.is_null()) array->SetCode(frame_count, *code);
+ if (!code.IsCodeObject()) {
+ array->SetIsWasmInterpreterFrame(frame_count, Smi::FromInt(code.is_null()));
+ } else {
+ if (!code.is_null())
+ array->SetCode(frame_count, AbstractCode::cast(*code.GetCode()));
+ }
array->SetOffset(frame_count, Smi::FromInt(offset));
array->SetFlags(frame_count, Smi::FromInt(flags));
array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
@@ -10320,15 +10398,14 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int number_of_descriptors,
int slack,
PretenureFlag pretenure) {
- DCHECK(0 <= number_of_descriptors);
+ DCHECK_LE(0, number_of_descriptors);
Factory* factory = isolate->factory();
// Do not use DescriptorArray::cast on incomplete object.
int size = number_of_descriptors + slack;
if (size == 0) return factory->empty_descriptor_array();
// Allocate the array of keys.
- Handle<FixedArray> result =
- factory->NewFixedArray(LengthFor(size), pretenure);
-
+ Handle<FixedArray> result = factory->NewFixedArrayWithMap(
+ Heap::kDescriptorArrayMapRootIndex, LengthFor(size), pretenure);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
result->set(kEnumCacheIndex, isolate->heap()->empty_enum_cache());
return Handle<DescriptorArray>::cast(result);
@@ -10439,15 +10516,19 @@ Handle<Object> AccessorPair::GetComponent(Handle<AccessorPair> accessor_pair,
return handle(accessor, isolate);
}
-Handle<DeoptimizationInputData> DeoptimizationInputData::New(
- Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure) {
- return Handle<DeoptimizationInputData>::cast(
- isolate->factory()->NewFixedArray(LengthFor(deopt_entry_count),
- pretenure));
+Handle<DeoptimizationData> DeoptimizationData::New(Isolate* isolate,
+ int deopt_entry_count,
+ PretenureFlag pretenure) {
+ return Handle<DeoptimizationData>::cast(isolate->factory()->NewFixedArray(
+ LengthFor(deopt_entry_count), pretenure));
}
+Handle<DeoptimizationData> DeoptimizationData::Empty(Isolate* isolate) {
+ return Handle<DeoptimizationData>::cast(
+ isolate->factory()->empty_fixed_array());
+}
-SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
+SharedFunctionInfo* DeoptimizationData::GetInlinedFunction(int index) {
if (index == -1) {
return SharedFunctionInfo::cast(SharedFunctionInfo());
} else {
@@ -10499,6 +10580,9 @@ int HandlerTable::LookupReturn(int pc_offset) {
return -1;
}
+Handle<HandlerTable> HandlerTable::Empty(Isolate* isolate) {
+ return Handle<HandlerTable>::cast(isolate->factory()->empty_fixed_array());
+}
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
@@ -10583,7 +10667,7 @@ bool AreDigits(const uint8_t* s, int from, int to) {
int ParseDecimalInteger(const uint8_t* s, int from, int to) {
- DCHECK(to - from < 10); // Overflow is not possible.
+ DCHECK_LT(to - from, 10); // Overflow is not possible.
DCHECK(from < to);
int d = s[from] - '0';
@@ -10596,7 +10680,6 @@ int ParseDecimalInteger(const uint8_t* s, int from, int to) {
} // namespace
-
// static
Handle<Object> String::ToNumber(Handle<String> subject) {
Isolate* const isolate = subject->GetIsolate();
@@ -10697,7 +10780,7 @@ String::FlatContent String::GetFlatContent() {
}
return FlatContent(start + offset, length);
} else {
- DCHECK(shape.encoding_tag() == kTwoByteStringTag);
+ DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
const uc16* start;
if (shape.representation_tag() == kSeqStringTag) {
start = SeqTwoByteString::cast(string)->GetChars();
@@ -10788,7 +10871,7 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
- while (current != NULL) {
+ while (current != nullptr) {
current->PostGarbageCollection();
current = current->prev_;
}
@@ -10804,7 +10887,7 @@ int Relocatable::ArchiveSpacePerThread() {
// Archive statics that are thread-local.
char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
*reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
- isolate->set_relocatable_top(NULL);
+ isolate->set_relocatable_top(nullptr);
return to + ArchiveSpacePerThread();
}
@@ -10827,7 +10910,7 @@ void Relocatable::Iterate(Isolate* isolate, RootVisitor* v) {
void Relocatable::Iterate(RootVisitor* v, Relocatable* top) {
Relocatable* current = top;
- while (current != NULL) {
+ while (current != nullptr) {
current->IterateInstance(v);
current = current->prev_;
}
@@ -10851,7 +10934,7 @@ FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
void FlatStringReader::PostGarbageCollection() {
- if (str_ == NULL) return;
+ if (str_ == nullptr) return;
Handle<String> str(str_);
DCHECK(str->IsFlat());
DisallowHeapAllocation no_gc;
@@ -10868,7 +10951,7 @@ void FlatStringReader::PostGarbageCollection() {
void ConsStringIterator::Initialize(ConsString* cons_string, int offset) {
- DCHECK(cons_string != NULL);
+ DCHECK_NOT_NULL(cons_string);
root_ = cons_string;
consumed_ = offset;
// Force stack blown condition to trigger restart.
@@ -10879,19 +10962,19 @@ void ConsStringIterator::Initialize(ConsString* cons_string, int offset) {
String* ConsStringIterator::Continue(int* offset_out) {
- DCHECK(depth_ != 0);
+ DCHECK_NE(depth_, 0);
DCHECK_EQ(0, *offset_out);
bool blew_stack = StackBlown();
- String* string = NULL;
+ String* string = nullptr;
// Get the next leaf if there is one.
if (!blew_stack) string = NextLeaf(&blew_stack);
// Restart search from root.
if (blew_stack) {
- DCHECK(string == NULL);
+ DCHECK_NULL(string);
string = Search(offset_out);
}
// Ensure future calls return null immediately.
- if (string == NULL) Reset(NULL);
+ if (string == nullptr) Reset(nullptr);
return string;
}
@@ -10938,15 +11021,15 @@ String* ConsStringIterator::Search(int* offset_out) {
// This happens only if we have asked for an offset outside the string.
if (length == 0) {
// Reset so future operations will return null immediately.
- Reset(NULL);
- return NULL;
+ Reset(nullptr);
+ return nullptr;
}
// Tell the stack we're done descending.
AdjustMaximumDepth();
// Pop stack so next iteration is in correct place.
Pop();
}
- DCHECK(length != 0);
+ DCHECK_NE(length, 0);
// Adjust return values and exit.
consumed_ = offset + length;
*offset_out = consumed - offset;
@@ -10961,12 +11044,12 @@ String* ConsStringIterator::NextLeaf(bool* blew_stack) {
// Tree traversal complete.
if (depth_ == 0) {
*blew_stack = false;
- return NULL;
+ return nullptr;
}
// We've lost track of higher nodes.
if (StackBlown()) {
*blew_stack = true;
- return NULL;
+ return nullptr;
}
// Go right.
ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)];
@@ -11238,12 +11321,12 @@ class RawStringComparator<uint8_t, uint8_t> {
class StringComparator {
class State {
public:
- State() : is_one_byte_(true), length_(0), buffer8_(NULL) {}
+ State() : is_one_byte_(true), length_(0), buffer8_(nullptr) {}
void Init(String* string) {
ConsString* cons_string = String::VisitFlat(this, string);
iter_.Reset(cons_string);
- if (cons_string != NULL) {
+ if (cons_string != nullptr) {
int offset;
string = iter_.Next(&offset);
String::VisitFlat(this, string, offset);
@@ -11278,7 +11361,7 @@ class StringComparator {
int offset;
String* next = iter_.Next(&offset);
DCHECK_EQ(0, offset);
- DCHECK(next != NULL);
+ DCHECK_NOT_NULL(next);
String::VisitFlat(this, next);
}
@@ -11557,7 +11640,7 @@ int SearchString(Isolate* isolate, String::FlatContent receiver_content,
int String::IndexOf(Isolate* isolate, Handle<String> receiver,
Handle<String> search, int start_index) {
- DCHECK(0 <= start_index);
+ DCHECK_LE(0, start_index);
DCHECK(start_index <= receiver->length());
uint32_t search_length = search->length();
@@ -11757,7 +11840,7 @@ template <typename schar, typename pchar>
int StringMatchBackwards(Vector<const schar> subject,
Vector<const pchar> pattern, int idx) {
int pattern_length = pattern.length();
- DCHECK(pattern_length >= 1);
+ DCHECK_GE(pattern_length, 1);
DCHECK(idx + pattern_length <= subject.length());
if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
@@ -11940,7 +12023,7 @@ uint32_t String::ComputeAndSetHash() {
// Check the hash code is there.
DCHECK(HasHashCode());
uint32_t result = field >> kHashShift;
- DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
+ DCHECK_NE(result, 0); // Ensure that the hash value of 0 is never computed.
return result;
}
@@ -12014,15 +12097,15 @@ void SeqTwoByteString::clear_padding() {
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
// be zero.
- DCHECK(length > 0);
- DCHECK(length <= String::kMaxArrayIndexSize);
+ DCHECK_GT(length, 0);
+ DCHECK_LE(length, String::kMaxArrayIndexSize);
DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
value <<= String::ArrayIndexValueBits::kShift;
value |= length << String::ArrayIndexLengthBits::kShift;
- DCHECK((value & String::kIsNotArrayIndexMask) == 0);
+ DCHECK_EQ(value & String::kIsNotArrayIndexMask, 0);
DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
Name::ContainsCachedArrayIndex(value));
return value;
@@ -12265,13 +12348,14 @@ void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
break;
}
case HAS_VECTOR:
+ case NO_VECTOR_NEEDED:
// Nothing to do.
break;
}
}
static void GetMinInobjectSlack(Map* map, void* data) {
- int slack = map->unused_property_fields();
+ int slack = map->UnusedPropertyFields();
if (*reinterpret_cast<int*>(data) > slack) {
*reinterpret_cast<int*>(data) = slack;
}
@@ -12279,16 +12363,16 @@ static void GetMinInobjectSlack(Map* map, void* data) {
static void ShrinkInstanceSize(Map* map, void* data) {
+ int slack = *reinterpret_cast<int*>(data);
+ DCHECK_GE(slack, 0);
#ifdef DEBUG
int old_visitor_id = Map::GetVisitorId(map);
+ int new_unused = map->UnusedPropertyFields() - slack;
#endif
- int slack = *reinterpret_cast<int*>(data);
- DCHECK_GE(slack, 0);
- map->SetInObjectProperties(map->GetInObjectProperties() - slack);
- map->set_unused_property_fields(map->unused_property_fields() - slack);
map->set_instance_size(map->instance_size() - slack * kPointerSize);
map->set_construction_counter(Map::kNoSlackTracking);
DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
+ DCHECK_EQ(new_unused, map->UnusedPropertyFields());
}
static void StopSlackTracking(Map* map, void* data) {
@@ -12296,11 +12380,11 @@ static void StopSlackTracking(Map* map, void* data) {
}
void Map::CompleteInobjectSlackTracking() {
+ DisallowHeapAllocation no_gc;
// Has to be an initial map.
DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
- int slack = unused_property_fields();
- DisallowHeapAllocation no_gc;
+ int slack = UnusedPropertyFields();
TransitionsAccessor transitions(this, &no_gc);
transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
if (slack != 0) {
@@ -12345,24 +12429,22 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
}
// static
-void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
+void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
+ bool enable_setup_mode) {
if (object->IsJSGlobalObject()) return;
- if (PrototypeBenefitsFromNormalization(object)) {
+ if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
}
- Handle<Map> previous_map(object->map());
if (object->map()->is_prototype_map()) {
if (object->map()->should_be_fast_prototype_map() &&
!object->HasFastProperties()) {
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
} else {
- if (object->map() == *previous_map) {
- Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
- JSObject::MigrateToMap(object, new_map);
- }
+ Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
+ JSObject::MigrateToMap(object, new_map);
object->map()->set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
@@ -12604,13 +12686,14 @@ Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
}
// static
-void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype) {
+void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
+ bool enable_prototype_setup_mode) {
RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
bool is_hidden = false;
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
- JSObject::OptimizeAsPrototype(prototype_jsobj);
+ JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
Object* maybe_constructor = prototype_jsobj->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
@@ -12758,13 +12841,10 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
-#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
- PrintF("[TraceMaps: InitialMap map= %p SFI= %d_%s ]\n",
- reinterpret_cast<void*>(*map), function->shared()->unique_id(),
- function->shared()->DebugName()->ToCString().get());
+ LOG(map->GetIsolate(), MapEvent("InitialMap", nullptr, *map, "",
+ function->shared()->DebugName()));
}
-#endif
}
@@ -12846,6 +12926,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
+ DCHECK(function->has_prototype_slot());
DCHECK(function->IsConstructor() ||
IsResumableFunction(function->shared()->kind()));
if (function->has_initial_map()) return;
@@ -12872,11 +12953,13 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
}
int instance_size;
- int in_object_properties;
- CalculateInstanceSizeHelper(instance_type, 0, expected_nof_properties,
- &instance_size, &in_object_properties);
+ int inobject_properties;
+ CalculateInstanceSizeHelper(instance_type, false, 0, expected_nof_properties,
+ &instance_size, &inobject_properties);
- Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
+ Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size,
+ TERMINAL_FAST_ELEMENTS_KIND,
+ inobject_properties);
// Fetch or allocate prototype.
Handle<Object> prototype;
@@ -12885,8 +12968,6 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
} else {
prototype = isolate->factory()->NewFunctionPrototype(function);
}
- map->SetInObjectProperties(in_object_properties);
- map->set_unused_property_fields(in_object_properties);
DCHECK(map->has_fast_object_elements());
// Finally link initial map and constructor function.
@@ -12930,7 +13011,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
int embedder_fields =
JSObject::GetEmbedderFieldCount(*constructor_initial_map);
int pre_allocated = constructor_initial_map->GetInObjectProperties() -
- constructor_initial_map->unused_property_fields();
+ constructor_initial_map->UnusedPropertyFields();
int instance_size;
int in_object_properties;
CalculateInstanceSizeForDerivedClass(function, instance_type,
@@ -13079,16 +13160,16 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
}
// Check if we should print {function} as a class.
- Handle<Object> class_start_position = JSReceiver::GetDataProperty(
- function, isolate->factory()->class_start_position_symbol());
- if (class_start_position->IsSmi()) {
- Handle<Object> class_end_position = JSReceiver::GetDataProperty(
- function, isolate->factory()->class_end_position_symbol());
+ Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
+ function, isolate->factory()->class_positions_symbol());
+ if (maybe_class_positions->IsTuple2()) {
+ Tuple2* class_positions = Tuple2::cast(*maybe_class_positions);
+ int start_position = Smi::ToInt(class_positions->value1());
+ int end_position = Smi::ToInt(class_positions->value2());
Handle<String> script_source(
String::cast(Script::cast(shared_info->script())->source()), isolate);
- return isolate->factory()->NewSubString(
- script_source, Handle<Smi>::cast(class_start_position)->value(),
- Handle<Smi>::cast(class_end_position)->value());
+ return isolate->factory()->NewSubString(script_source, start_position,
+ end_position);
}
// Check if we have source code for the {function}.
@@ -13165,7 +13246,7 @@ int Script::GetEvalPosition() {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(eval_from_shared());
position = shared->abstract_code()->SourcePosition(-position);
}
- DCHECK(position >= 0);
+ DCHECK_GE(position, 0);
set_eval_from_position(position);
}
return position;
@@ -13434,8 +13515,8 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
if (shared->script() == *script_object) return;
Isolate* isolate = shared->GetIsolate();
- if (reset_preparsed_scope_data) {
- shared->set_preparsed_scope_data(isolate->heap()->null_value());
+ if (reset_preparsed_scope_data && shared->HasPreParsedScopeData()) {
+ shared->ClearPreParsedScopeData();
}
// Add shared function info to new script's list. If a collection occurs,
@@ -13538,9 +13619,8 @@ void SharedFunctionInfo::set_debugger_hints(int value) {
}
String* SharedFunctionInfo::DebugName() {
- String* n = name();
- if (String::cast(n)->length() == 0) return inferred_name();
- return String::cast(n);
+ if (name()->length() == 0) return inferred_name();
+ return name();
}
bool SharedFunctionInfo::HasNoSideEffect() {
@@ -13629,11 +13709,12 @@ int SharedFunctionInfo::SourceSize() {
}
void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
+ bool has_prototype_slot,
int requested_embedder_fields,
int requested_in_object_properties,
int* instance_size,
int* in_object_properties) {
- int header_size = JSObject::GetHeaderSize(instance_type);
+ int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
DCHECK_LE(requested_embedder_fields,
(JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
*instance_size =
@@ -13669,7 +13750,7 @@ void JSFunction::CalculateInstanceSizeForDerivedClass(
break;
}
}
- CalculateInstanceSizeHelper(instance_type, requested_embedder_fields,
+ CalculateInstanceSizeHelper(instance_type, true, requested_embedder_fields,
expected_nof_properties, instance_size,
in_object_properties);
}
@@ -13710,7 +13791,7 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
- DCHECK(reason != kNoReason);
+ DCHECK_NE(reason, kNoReason);
set_compiler_hints(
DisabledOptimizationReasonBits::update(compiler_hints(), reason));
@@ -13739,7 +13820,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_inferred_name(*lit->inferred_name());
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
- shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
// shared_info->set_kind(lit->kind());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
@@ -13749,7 +13829,10 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
}
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_function_literal_id(lit->function_literal_id());
-
+ DCHECK_IMPLIES(lit->requires_instance_fields_initializer(),
+ IsClassConstructor(lit->kind()));
+ shared_info->set_requires_instance_fields_initializer(
+ lit->requires_instance_fields_initializer());
// For lazy parsed functions, the following flags will be inaccurate since we
// don't have the information yet. They're set later in
// SetSharedFunctionFlagsFromLiteral (compiler.cc), when the function is
@@ -13811,7 +13894,7 @@ void SharedFunctionInfo::SetConstructStub(Code* code) {
void Map::StartInobjectSlackTracking() {
DCHECK(!IsInobjectSlackTrackingInProgress());
- if (unused_property_fields() == 0) return;
+ if (UnusedPropertyFields() == 0) return;
set_construction_counter(Map::kSlackTrackingCounterStart);
}
@@ -13832,12 +13915,6 @@ void ObjectVisitor::VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) {
}
-void Code::InvalidateRelocation() {
- InvalidateEmbeddedObjects();
- set_relocation_info(GetHeap()->empty_byte_array());
-}
-
-
void Code::InvalidateEmbeddedObjects() {
HeapObject* undefined = GetHeap()->undefined_value();
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -13925,7 +14002,7 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
namespace {
template <typename Code>
void SetStackFrameCacheCommon(Handle<Code> code,
- Handle<UnseededNumberDictionary> cache) {
+ Handle<NumberDictionary> cache) {
Handle<Object> maybe_table(code->source_position_table(), code->GetIsolate());
if (maybe_table->IsSourcePositionTableWithFrameCache()) {
Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
@@ -13943,7 +14020,7 @@ void SetStackFrameCacheCommon(Handle<Code> code,
// static
void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<UnseededNumberDictionary> cache) {
+ Handle<NumberDictionary> cache) {
if (abstract_code->IsCode()) {
SetStackFrameCacheCommon(handle(abstract_code->GetCode()), cache);
} else {
@@ -14005,7 +14082,8 @@ void JSFunction::ClearTypeFeedbackInfo() {
FeedbackVector* vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector->ClearSlots(isolate)) {
- IC::OnFeedbackChanged(isolate, vector, this);
+ IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), this,
+ "ClearTypeFeedbackInfo");
}
}
}
@@ -14023,8 +14101,8 @@ void Code::PrintDeoptLocation(FILE* out, Address pc) {
bool Code::CanDeoptAt(Address pc) {
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(deoptimization_data());
+ DeoptimizationData* deopt_data =
+ DeoptimizationData::cast(deoptimization_data());
Address code_start_address = instruction_start();
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
@@ -14059,9 +14137,9 @@ const char* AbstractCode::Kind2String(Kind kind) {
Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
DCHECK(code->kind() == OPTIMIZED_FUNCTION);
WeakCell* raw_cell = code->CachedWeakCell();
- if (raw_cell != NULL) return Handle<WeakCell>(raw_cell);
+ if (raw_cell != nullptr) return Handle<WeakCell>(raw_cell);
Handle<WeakCell> cell = code->GetIsolate()->factory()->NewWeakCell(code);
- DeoptimizationInputData::cast(code->deoptimization_data())
+ DeoptimizationData::cast(code->deoptimization_data())
->SetWeakCellCache(*cell);
return cell;
}
@@ -14069,20 +14147,20 @@ Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
WeakCell* Code::CachedWeakCell() {
DCHECK(kind() == OPTIMIZED_FUNCTION);
Object* weak_cell_cache =
- DeoptimizationInputData::cast(deoptimization_data())->WeakCellCache();
+ DeoptimizationData::cast(deoptimization_data())->WeakCellCache();
if (weak_cell_cache->IsWeakCell()) {
DCHECK(this == WeakCell::cast(weak_cell_cache)->value());
return WeakCell::cast(weak_cell_cache);
}
- return NULL;
+ return nullptr;
}
bool Code::Inlines(SharedFunctionInfo* sfi) {
// We can only check for inlining for optimized code.
DCHECK(is_optimized_code());
DisallowHeapAllocation no_gc;
- DeoptimizationInputData* const data =
- DeoptimizationInputData::cast(deoptimization_data());
+ DeoptimizationData* const data =
+ DeoptimizationData::cast(deoptimization_data());
if (data->length() == 0) return false;
if (data->SharedFunctionInfo() == sfi) return true;
FixedArray* const literals = data->LiteralArray();
@@ -14160,8 +14238,12 @@ void print_pc(std::ostream& os, int pc) {
}
} // anonymous namespace
-void DeoptimizationInputData::DeoptimizationInputDataPrint(
- std::ostream& os) { // NOLINT
+void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
+ if (length() == 0) {
+ os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
+ return;
+ }
+
disasm::NameConverter converter;
int const inlined_function_count = InlinedFunctionCount()->value();
os << "Inlined functions (count = " << inlined_function_count << ")\n";
@@ -14254,15 +14336,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME: {
- int shared_info_id = iterator.Next();
- Object* shared_info = LiteralArray()->get(shared_info_id);
- os << "{function=" << Brief(SharedFunctionInfo::cast(shared_info)
- ->DebugName()) << "}";
- break;
- }
-
case Translation::REGISTER: {
int reg_code = iterator.Next();
os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
@@ -14404,7 +14477,8 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "kind = " << Kind2String(kind()) << "\n";
if (is_stub()) {
const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
- os << "major_key = " << (n == NULL ? "null" : n) << "\n";
+ os << "major_key = " << (n == nullptr ? "null" : n) << "\n";
+ os << "minor_key = " << CodeStub::MinorKeyFromKey(this->stub_key()) << "\n";
}
if ((name != nullptr) && (name[0] != '\0')) {
os << "name = " << name << "\n";
@@ -14425,6 +14499,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "stack_slots = " << stack_slots() << "\n";
}
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
+ os << "address = " << static_cast<const void*>(this) << "\n";
os << "Instructions (size = " << instruction_size() << ")\n";
{
@@ -14443,8 +14518,8 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
Disassembler::Decode(isolate, &os, begin, end, this);
if (constant_pool_offset < size) {
- int constant_pool_size = size - constant_pool_offset;
- DCHECK((constant_pool_size & kPointerAlignmentMask) == 0);
+ int constant_pool_size = safepoint_offset - constant_pool_offset;
+ DCHECK_EQ(constant_pool_size & kPointerAlignmentMask, 0);
os << "\nConstant Pool (size = " << constant_pool_size << ")\n";
Vector<char> buf = Vector<char>::New(50);
intptr_t* ptr = reinterpret_cast<intptr_t*>(begin + constant_pool_offset);
@@ -14468,9 +14543,9 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
if (kind() == OPTIMIZED_FUNCTION) {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(this->deoptimization_data());
- data->DeoptimizationInputDataPrint(os);
+ DeoptimizationData* data =
+ DeoptimizationData::cast(this->deoptimization_data());
+ data->DeoptimizationDataPrint(os);
}
os << "\n";
@@ -14613,7 +14688,7 @@ bool BytecodeArray::IsOld() const {
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
- DCHECK(capacity >= 0);
+ DCHECK_GE(capacity, 0);
array->GetIsolate()->factory()->NewJSArrayStorage(
array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
@@ -14891,8 +14966,8 @@ void DependentCode::SetMarkedForDeoptimization(Code* code,
code->set_marked_for_deoptimization(true);
if (FLAG_trace_deopt &&
(code->deoptimization_data() != code->GetHeap()->empty_fixed_array())) {
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
+ DeoptimizationData* deopt_data =
+ DeoptimizationData::cast(code->deoptimization_data());
CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
PrintF(scope.file(), "[marking dependent code 0x%08" V8PRIxPTR
" (opt #%d) for deoptimization, reason: %s]\n",
@@ -15103,7 +15178,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// Set the new prototype of the object.
- isolate->UpdateArrayProtectorOnSetPrototype(real_receiver);
+ isolate->UpdateNoElementsProtectorOnSetPrototype(real_receiver);
Handle<Map> new_map = Map::TransitionToPrototype(map, value);
DCHECK(new_map->prototype() == *value);
@@ -15171,10 +15246,9 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
// If the fast-case backing storage takes up much more memory than a
// dictionary backing storage would, the object should have slow elements.
int used_elements = object->GetFastElementsUsage();
- uint32_t size_threshold =
- SeededNumberDictionary::kPreferFastElementsSizeFactor *
- SeededNumberDictionary::ComputeCapacity(used_elements) *
- SeededNumberDictionary::kEntrySize;
+ uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
+ NumberDictionary::ComputeCapacity(used_elements) *
+ NumberDictionary::kEntrySize;
return size_threshold <= *new_capacity;
}
@@ -15201,7 +15275,7 @@ static ElementsKind BestFittingFastElementsKind(JSObject* object) {
return FAST_STRING_WRAPPER_ELEMENTS;
}
DCHECK(object->HasDictionaryElements());
- SeededNumberDictionary* dictionary = object->element_dictionary();
+ NumberDictionary* dictionary = object->element_dictionary();
ElementsKind kind = HOLEY_SMI_ELEMENTS;
for (int i = 0; i < dictionary->Capacity(); i++) {
Object* key = dictionary->KeyAt(i);
@@ -15217,9 +15291,8 @@ static ElementsKind BestFittingFastElementsKind(JSObject* object) {
return kind;
}
-
static bool ShouldConvertToFastElements(JSObject* object,
- SeededNumberDictionary* dictionary,
+ NumberDictionary* dictionary,
uint32_t index,
uint32_t* new_capacity) {
// If properties with non-standard attributes or accessors were added, we
@@ -15241,7 +15314,7 @@ static bool ShouldConvertToFastElements(JSObject* object,
*new_capacity = Max(index + 1, *new_capacity);
uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
- SeededNumberDictionary::kEntrySize;
+ NumberDictionary::kEntrySize;
// Turn fast if the dictionary only saves 50% space.
return 2 * dictionary_size >= *new_capacity;
@@ -15254,7 +15327,7 @@ MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes) {
MAYBE_RETURN_NULL(
- AddDataElement(object, index, value, attributes, THROW_ON_ERROR));
+ AddDataElement(object, index, value, attributes, kThrowOnError));
return value;
}
@@ -15287,10 +15360,9 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (attributes != NONE) {
kind = dictionary_kind;
- } else if (elements->IsSeededNumberDictionary()) {
- kind = ShouldConvertToFastElements(*object,
- SeededNumberDictionary::cast(elements),
- index, &new_capacity)
+ } else if (elements->IsNumberDictionary()) {
+ kind = ShouldConvertToFastElements(
+ *object, NumberDictionary::cast(elements), index, &new_capacity)
? BestFittingFastElementsKind(*object)
: dictionary_kind;
} else if (ShouldConvertToSlowElements(
@@ -15429,7 +15501,7 @@ const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
case kZombie: return "zombie";
default: UNREACHABLE();
}
- return NULL;
+ return nullptr;
}
template <AllocationSiteUpdateMode update_or_check>
@@ -15446,7 +15518,7 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
AllocationMemento* memento =
heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
- if (memento == NULL) return false;
+ if (memento == nullptr) return false;
// Walk through to the Allocation Site
site = handle(memento->GetAllocationSite());
@@ -15596,7 +15668,6 @@ void Dictionary<Derived, Shape>::Print(std::ostream& os) {
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
- if (!Shape::IsLive(isolate, k)) continue;
if (!dictionary->ToKey(isolate, i, &k)) continue;
os << "\n ";
if (k->IsString()) {
@@ -15612,6 +15683,7 @@ template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::Print() {
OFStream os(stdout);
Print(os);
+ os << std::endl;
}
#endif
@@ -15757,7 +15829,7 @@ class StringSharedKey : public HashTableKey {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
array->set(0, *shared_);
array->set(1, *source_);
- array->set(2, Smi::FromInt(language_mode_));
+ array->set(2, Smi::FromEnum(language_mode_));
array->set(3, Smi::FromInt(position_));
array->set_map(isolate->heap()->fixed_cow_array_map());
return array;
@@ -15809,11 +15881,7 @@ JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
flag = JSRegExp::kMultiline;
break;
case 's':
- if (FLAG_harmony_regexp_dotall) {
- flag = JSRegExp::kDotAll;
- } else {
- return JSRegExp::Flags(0);
- }
+ flag = JSRegExp::kDotAll;
break;
case 'u':
flag = JSRegExp::kUnicode;
@@ -15967,10 +16035,12 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
SKIP_WRITE_BARRIER);
} else {
// Map has changed, so use generic, but slower, method.
- RETURN_ON_EXCEPTION(isolate, JSReceiver::SetProperty(
- regexp, factory->lastIndex_string(),
- Handle<Smi>(Smi::kZero, isolate), STRICT),
- JSRegExp);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSReceiver::SetProperty(regexp, factory->lastIndex_string(),
+ Handle<Smi>(Smi::kZero, isolate),
+ LanguageMode::kStrict),
+ JSRegExp);
}
return regexp;
@@ -16081,7 +16151,7 @@ template <typename Derived, typename Shape>
Handle<Derived> HashTable<Derived, Shape>::New(
Isolate* isolate, int at_least_space_for, PretenureFlag pretenure,
MinimumCapacity capacity_option) {
- DCHECK(0 <= at_least_space_for);
+ DCHECK_LE(0, at_least_space_for);
DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
base::bits::IsPowerOfTwo(at_least_space_for));
@@ -16099,8 +16169,10 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
Isolate* isolate, int capacity, PretenureFlag pretenure) {
Factory* factory = isolate->factory();
int length = EntryToIndex(capacity);
- Handle<FixedArray> array = factory->NewFixedArray(length, pretenure);
- array->set_map_no_write_barrier(Shape::GetMap(isolate));
+ Heap::RootListIndex map_root_index =
+ static_cast<Heap::RootListIndex>(Shape::GetMapRootIndex());
+ Handle<FixedArray> array =
+ factory->NewFixedArrayWithMap(map_root_index, length, pretenure);
Handle<Derived> table = Handle<Derived>::cast(array);
table->SetNumberOfElements(0);
@@ -16302,7 +16374,7 @@ template class HashTable<CompilationCacheTable, CompilationCacheShape>;
template class HashTable<ObjectHashTable, ObjectHashTableShape>;
-template class HashTable<WeakHashTable, WeakHashTableShape<2>>;
+template class HashTable<WeakHashTable, WeakHashTableShape>;
template class HashTable<TemplateMap, TemplateMapShape>;
@@ -16310,14 +16382,11 @@ template class Dictionary<NameDictionary, NameDictionaryShape>;
template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- HashTable<SeededNumberDictionary, SeededNumberDictionaryShape>;
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>;
-
-template class Dictionary<UnseededNumberDictionary,
- UnseededNumberDictionaryShape>;
+ Dictionary<NumberDictionary, NumberDictionaryShape>;
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
@@ -16327,19 +16396,12 @@ template Handle<GlobalDictionary>
BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
-template Handle<SeededNumberDictionary>
- Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>::AtPut(
- Handle<SeededNumberDictionary>, uint32_t, Handle<Object>,
- PropertyDetails);
-
-template Handle<UnseededNumberDictionary>
- Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape>::AtPut(
- Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>,
- PropertyDetails);
+template Handle<NumberDictionary>
+ Dictionary<NumberDictionary, NumberDictionaryShape>::AtPut(
+ Handle<NumberDictionary>, uint32_t, Handle<Object>, PropertyDetails);
-template Object*
-Dictionary<SeededNumberDictionary,
- SeededNumberDictionaryShape>::SlowReverseLookup(Object* value);
+template Object* Dictionary<
+ NumberDictionary, NumberDictionaryShape>::SlowReverseLookup(Object* value);
template Object* Dictionary<
NameDictionary, NameDictionaryShape>::SlowReverseLookup(Object* value);
@@ -16348,17 +16410,9 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape>::DeleteEntry(
Handle<NameDictionary>, int);
-template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>::DeleteEntry(
- Handle<SeededNumberDictionary>, int);
-
-template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape>::
- DeleteEntry(Handle<UnseededNumberDictionary>, int);
-
-template Handle<UnseededNumberDictionary>
-HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape>::New(
- Isolate*, int, PretenureFlag, MinimumCapacity);
+template Handle<NumberDictionary>
+Dictionary<NumberDictionary, NumberDictionaryShape>::DeleteEntry(
+ Handle<NumberDictionary>, int);
template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
@@ -16373,10 +16427,6 @@ HashTable<ObjectHashSet, ObjectHashSetShape>::New(Isolate*, int n,
template Handle<NameDictionary> HashTable<
NameDictionary, NameDictionaryShape>::Shrink(Handle<NameDictionary>);
-template Handle<UnseededNumberDictionary>
- HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape>::Shrink(
- Handle<UnseededNumberDictionary>);
-
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::Add(
Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
@@ -16389,15 +16439,9 @@ BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::Add(
template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash();
-template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>::Add(
- Handle<SeededNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
- int*);
-
-template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape>::Add(
- Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
- int*);
+template Handle<NumberDictionary>
+Dictionary<NumberDictionary, NumberDictionaryShape>::Add(
+ Handle<NumberDictionary>, uint32_t, Handle<Object>, PropertyDetails, int*);
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
@@ -16433,9 +16477,8 @@ template void
BaseNameDictionary<NameDictionary, NameDictionaryShape>::CollectKeysTo(
Handle<NameDictionary> dictionary, KeyAccumulator* keys);
-template int
-Dictionary<SeededNumberDictionary,
- SeededNumberDictionaryShape>::NumberOfEnumerableProperties();
+template int Dictionary<NumberDictionary,
+ NumberDictionaryShape>::NumberOfEnumerableProperties();
namespace {
@@ -16764,6 +16807,9 @@ void MigrateExternalStringResource(Isolate* isolate, String* from, String* to) {
}
void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
+ DCHECK_NE(string, internalized);
+ DCHECK(internalized->IsInternalizedString());
+
if (string->IsExternalString()) {
if (internalized->IsExternalOneByteString()) {
MigrateExternalStringResource<ExternalOneByteString>(isolate, string,
@@ -16779,23 +16825,21 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
}
}
- if (!string->IsInternalizedString()) {
- DisallowHeapAllocation no_gc;
- int old_size = string->Size();
- isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
- bool one_byte = internalized->IsOneByteRepresentation();
- Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
- : isolate->factory()->thin_string_map();
- DCHECK(old_size >= ThinString::kSize);
- string->synchronized_set_map(*map);
- ThinString* thin = ThinString::cast(string);
- thin->set_actual(internalized);
- Address thin_end = thin->address() + ThinString::kSize;
- int size_delta = old_size - ThinString::kSize;
- if (size_delta != 0) {
- Heap* heap = isolate->heap();
- heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
- }
+ DisallowHeapAllocation no_gc;
+ int old_size = string->Size();
+ isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
+ bool one_byte = internalized->IsOneByteRepresentation();
+ Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
+ : isolate->factory()->thin_string_map();
+ DCHECK_GE(old_size, ThinString::kSize);
+ string->synchronized_set_map(*map);
+ ThinString* thin = ThinString::cast(string);
+ thin->set_actual(internalized);
+ Address thin_end = thin->address() + ThinString::kSize;
+ int size_delta = old_size - ThinString::kSize;
+ if (size_delta != 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
}
}
@@ -16810,7 +16854,9 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> result = LookupKey(isolate, &key);
if (FLAG_thin_strings) {
- MakeStringThin(*string, *result, isolate);
+ if (!string->IsInternalizedString()) {
+ MakeStringThin(*string, *result, isolate);
+ }
} else { // !FLAG_thin_strings
if (string->IsConsString()) {
Handle<ConsString> cons = Handle<ConsString>::cast(string);
@@ -17005,6 +17051,7 @@ Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
return Smi::FromInt(ResultSentinel::kUnsupported);
}
+ DCHECK(!string->IsInternalizedString());
int entry = table->FindEntry(isolate, &key, key.Hash());
if (entry != kNotFound) {
String* internalized = String::cast(table->KeyAt(entry));
@@ -17018,11 +17065,16 @@ Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
return Smi::FromInt(ResultSentinel::kNotFound);
}
-String* StringTable::LookupKeyIfExists(Isolate* isolate, StringTableKey* key) {
+String* StringTable::ForwardStringIfExists(Isolate* isolate,
+ StringTableKey* key,
+ String* string) {
Handle<StringTable> table = isolate->factory()->string_table();
int entry = table->FindEntry(isolate, key);
- if (entry != kNotFound) return String::cast(table->KeyAt(entry));
- return NULL;
+ if (entry == kNotFound) return nullptr;
+
+ String* canonical = String::cast(table->KeyAt(entry));
+ if (canonical != string) MakeStringThin(string, canonical, isolate);
+ return canonical;
}
Handle<StringSet> StringSet::New(Isolate* isolate) {
@@ -17123,7 +17175,7 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
}
// Can we reuse an entry?
- DCHECK(entry < 0);
+ DCHECK_LT(entry, 0);
int length = old_literals_map->length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
if (WeakCell::cast(old_literals_map->get(i + kLiteralContextOffset))
@@ -17428,6 +17480,23 @@ Handle<Derived> Dictionary<Derived, Shape>::AtPut(Handle<Derived> dictionary,
}
template <typename Derived, typename Shape>
+Handle<Derived>
+BaseNameDictionary<Derived, Shape>::AddNoUpdateNextEnumerationIndex(
+ Handle<Derived> dictionary, Key key, Handle<Object> value,
+ PropertyDetails details, int* entry_out) {
+ // Insert element at empty or deleted entry
+ return Dictionary<Derived, Shape>::Add(dictionary, key, value, details,
+ entry_out);
+}
+
+// GCC workaround: Explicitly instantiate template method for NameDictionary
+// to avoid "undefined reference" issues during linking.
+template Handle<NameDictionary>
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::
+ AddNoUpdateNextEnumerationIndex(Handle<NameDictionary>, Handle<Name>,
+ Handle<Object>, PropertyDetails, int*);
+
+template <typename Derived, typename Shape>
Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
Handle<Derived> dictionary, Key key, Handle<Object> value,
PropertyDetails details, int* entry_out) {
@@ -17438,7 +17507,7 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
int index = dictionary->NextEnumerationIndex();
details = details.set_index(index);
dictionary->SetNextEnumerationIndex(index + 1);
- return Dictionary<Derived, Shape>::Add(dictionary, key, value, details,
+ return AddNoUpdateNextEnumerationIndex(dictionary, key, value, details,
entry_out);
}
@@ -17466,7 +17535,7 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(Handle<Derived> dictionary,
return dictionary;
}
-bool SeededNumberDictionary::HasComplexElements() {
+bool NumberDictionary::HasComplexElements() {
if (!requires_slow_elements()) return false;
Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
@@ -17481,8 +17550,8 @@ bool SeededNumberDictionary::HasComplexElements() {
return false;
}
-void SeededNumberDictionary::UpdateMaxNumberKey(
- uint32_t key, Handle<JSObject> dictionary_holder) {
+void NumberDictionary::UpdateMaxNumberKey(uint32_t key,
+ Handle<JSObject> dictionary_holder) {
DisallowHeapAllocation no_allocation;
// If the dictionary requires slow elements an element has already
// been added at a high index.
@@ -17504,15 +17573,14 @@ void SeededNumberDictionary::UpdateMaxNumberKey(
}
}
-Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
- Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, Handle<JSObject> dictionary_holder,
- PropertyDetails details) {
+Handle<NumberDictionary> NumberDictionary::Set(
+ Handle<NumberDictionary> dictionary, uint32_t key, Handle<Object> value,
+ Handle<JSObject> dictionary_holder, PropertyDetails details) {
dictionary->UpdateMaxNumberKey(key, dictionary_holder);
return AtPut(dictionary, key, value, details);
}
-void SeededNumberDictionary::CopyValuesTo(FixedArray* elements) {
+void NumberDictionary::CopyValuesTo(FixedArray* elements) {
Isolate* isolate = this->GetIsolate();
int pos = 0;
int capacity = this->Capacity();
@@ -17527,13 +17595,6 @@ void SeededNumberDictionary::CopyValuesTo(FixedArray* elements) {
DCHECK_EQ(pos, elements->length());
}
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- return AtPut(dictionary, key, value, PropertyDetails::Empty());
-}
-
template <typename Derived, typename Shape>
int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
Isolate* isolate = this->GetIsolate();
@@ -17629,8 +17690,6 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
array->set(array_size++, Smi::FromInt(i));
}
- DCHECK_EQ(array_size, length);
-
EnumIndexComparator<Derived> cmp(raw_dictionary);
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
@@ -17911,10 +17970,9 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
int num_buckets = capacity / kLoadFactor;
- Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
+ static_cast<Heap::RootListIndex>(Derived::GetMapRootIndex()),
kHashTableStartIndex + num_buckets + (capacity * kEntrySize), pretenure);
- backing_store->set_map_no_write_barrier(
- isolate->heap()->ordered_hash_table_map());
Handle<Derived> table = Handle<Derived>::cast(backing_store);
for (int i = 0; i < num_buckets; ++i) {
table->set(kHashTableStartIndex + i, Smi::FromInt(kNotFound));
@@ -17970,7 +18028,8 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
Derived* table, Object* key) {
- DCHECK(table->IsOrderedHashTable());
+ DCHECK((entrysize == 1 && table->IsOrderedHashSet()) ||
+ (entrysize == 2 && table->IsOrderedHashMap()));
DisallowHeapAllocation no_gc;
int entry = table->FindEntry(isolate, key);
return entry != kNotFound;
@@ -18030,6 +18089,14 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
return result;
}
+HeapObject* OrderedHashSet::GetEmpty(Isolate* isolate) {
+ return isolate->heap()->empty_ordered_hash_set();
+}
+
+HeapObject* OrderedHashMap::GetEmpty(Isolate* isolate) {
+ return isolate->heap()->empty_ordered_hash_map();
+}
+
template <class Derived, int entrysize>
Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
Handle<Derived> table, int new_capacity) {
@@ -18104,7 +18171,7 @@ Object* OrderedHashMap::GetHash(Isolate* isolate, Object* key) {
// If the object does not have an identity hash, it was never used as a key
if (hash->IsUndefined(isolate)) return Smi::FromInt(-1);
DCHECK(hash->IsSmi());
- DCHECK(Smi::cast(hash)->value() >= 0);
+ DCHECK_GE(Smi::cast(hash)->value(), 0);
return hash;
}
@@ -18218,7 +18285,7 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
capacity * Derived::kEntrySize);
} else {
for (int i = 0; i < capacity; i++) {
- for (int j = 0; j < Derived::kEntrySize; i++) {
+ for (int j = 0; j < Derived::kEntrySize; j++) {
SetDataEntry(i, j, isolate->heap()->the_hole_value());
}
}
@@ -18448,7 +18515,7 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
if (index < used_capacity) return true;
- set_table(isolate->heap()->empty_ordered_hash_table());
+ set_table(TableType::GetEmpty(isolate));
return false;
}
@@ -18617,7 +18684,7 @@ Object* JSDate::GetField(Object* object, Smi* index) {
Object* JSDate::DoGetField(FieldIndex index) {
- DCHECK(index != kDateValue);
+ DCHECK_NE(index, kDateValue);
DateCache* date_cache = GetIsolate()->date_cache();
@@ -18655,7 +18722,7 @@ Object* JSDate::DoGetField(FieldIndex index) {
int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
- DCHECK(index == kTimeInDay);
+ DCHECK_EQ(index, kTimeInDay);
return Smi::FromInt(time_in_day_ms);
}
@@ -18663,7 +18730,7 @@ Object* JSDate::DoGetField(FieldIndex index) {
Object* JSDate::GetUTCField(FieldIndex index,
double value,
DateCache* date_cache) {
- DCHECK(index >= kFirstUTCField);
+ DCHECK_GE(index, kFirstUTCField);
if (std::isnan(value)) return GetIsolate()->heap()->nan_value();
@@ -18682,7 +18749,7 @@ Object* JSDate::GetUTCField(FieldIndex index,
date_cache->YearMonthDayFromDays(days, &year, &month, &day);
if (index == kYearUTC) return Smi::FromInt(year);
if (index == kMonthUTC) return Smi::FromInt(month);
- DCHECK(index == kDayUTC);
+ DCHECK_EQ(index, kDayUTC);
return Smi::FromInt(day);
}
@@ -18830,7 +18897,7 @@ void JSArrayBuffer::FreeBackingStore() {
using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
const size_t length = allocation_length();
const AllocationMode mode = allocation_mode();
- GetIsolate()->array_buffer_allocator()->Free(allocation_base(), length, mode);
+ FreeBackingStore(GetIsolate(), {allocation_base(), length, mode});
// Zero out the backing store and allocation base to avoid dangling
// pointers.
@@ -18842,6 +18909,12 @@ void JSArrayBuffer::FreeBackingStore() {
set_allocation_length(0);
}
+// static
+void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
+ isolate->array_buffer_allocator()->Free(allocation.allocation_base,
+ allocation.length, allocation.mode);
+}
+
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
bool is_external, void* data, size_t allocated_length,
SharedFlag shared) {
@@ -18853,8 +18926,8 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
bool is_external, void* allocation_base,
size_t allocation_length, void* data,
size_t byte_length, SharedFlag shared) {
- DCHECK(array_buffer->GetEmbedderFieldCount() ==
- v8::ArrayBuffer::kEmbedderFieldCount);
+ DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
+ v8::ArrayBuffer::kEmbedderFieldCount);
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
array_buffer->SetEmbedderField(i, Smi::kZero);
}
@@ -18894,7 +18967,7 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
size_t allocated_length,
bool initialize, SharedFlag shared) {
void* data;
- CHECK(isolate->array_buffer_allocator() != NULL);
+ CHECK_NOT_NULL(isolate->array_buffer_allocator());
if (allocated_length != 0) {
if (allocated_length >= MB)
isolate->counters()->array_buffer_big_allocations()->AddSample(
@@ -18908,13 +18981,13 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
data = isolate->array_buffer_allocator()->AllocateUninitialized(
allocated_length);
}
- if (data == NULL) {
+ if (data == nullptr) {
isolate->counters()->array_buffer_new_size_failures()->AddSample(
ConvertToMb(allocated_length));
return false;
}
} else {
- data = NULL;
+ data = nullptr;
}
const bool is_external = false;
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 895d92ba31..a9be023ec8 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -175,6 +175,11 @@ namespace internal {
struct InliningPosition;
class PropertyDescriptorObject;
+enum KeyedAccessLoadMode {
+ STANDARD_LOAD,
+ LOAD_IGNORE_OUT_OF_BOUNDS,
+};
+
enum KeyedAccessStoreMode {
STANDARD_STORE,
STORE_TRANSITION_TO_OBJECT,
@@ -356,7 +361,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROMISE_REACTION_JOB_INFO_TYPE) \
- V(PROMISE_CAPABILITY_TYPE) \
V(DEBUG_INFO_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
@@ -368,8 +372,9 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ASYNC_GENERATOR_REQUEST_TYPE) \
V(FIXED_ARRAY_TYPE) \
V(HASH_TABLE_TYPE) \
- V(FEEDBACK_VECTOR_TYPE) \
+ V(DESCRIPTOR_ARRAY_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
+ V(FEEDBACK_VECTOR_TYPE) \
V(PROPERTY_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
V(CELL_TYPE) \
@@ -377,6 +382,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(PROPERTY_CELL_TYPE) \
V(SMALL_ORDERED_HASH_MAP_TYPE) \
V(SMALL_ORDERED_HASH_SET_TYPE) \
+ V(CODE_DATA_CONTAINER_TYPE) \
\
V(JS_PROXY_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
@@ -539,7 +545,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
promise_resolve_thenable_job_info) \
V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
promise_reaction_job_info) \
- V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
V(DEBUG_INFO, DebugInfo, debug_info) \
V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
@@ -550,12 +555,11 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request)
-// We use the full 8 bits of the instance_type field to encode heap object
-// instance types. The high-order bit (bit 7) is set if the object is not a
-// string, and cleared if it is a string.
-const uint32_t kIsNotStringMask = 0x80;
+// We use the full 16 bits of the instance_type field to encode heap object
+// instance types. All the high-order bits (bit 7-15) are cleared if the object
+// is a string, and contain set bits if it is not a string.
+const uint32_t kIsNotStringMask = 0xff80;
const uint32_t kStringTag = 0x0;
-const uint32_t kNotStringTag = 0x80;
// Bit 6 indicates that the object is an internalized string (if set) or not.
// Bit 7 has to be clear as well.
@@ -615,7 +619,7 @@ static inline bool IsShortcutCandidate(int type) {
return ((type & kShortcutTypeMask) == kShortcutTypeTag);
}
-enum InstanceType : uint8_t {
+enum InstanceType : uint16_t {
// String types.
INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
kInternalizedTag, // FIRST_PRIMITIVE_TYPE
@@ -666,7 +670,10 @@ enum InstanceType : uint8_t {
kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
// Non-string names
- SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
+ SYMBOL_TYPE =
+ 1 + (kIsNotInternalizedMask | kShortExternalStringMask |
+ kOneByteDataHintMask | kStringEncodingMask |
+ kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
// Other primitives (cannot contain non-map-word pointers to heap objects).
HEAP_NUMBER_TYPE,
@@ -709,7 +716,6 @@ enum InstanceType : uint8_t {
ALIASED_ARGUMENTS_ENTRY_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROMISE_REACTION_JOB_INFO_TYPE,
- PROMISE_CAPABILITY_TYPE,
DEBUG_INFO_TYPE,
STACK_FRAME_INFO_TYPE,
PROTOTYPE_INFO_TYPE,
@@ -719,10 +725,11 @@ enum InstanceType : uint8_t {
MODULE_TYPE,
MODULE_INFO_ENTRY_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
- FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
HASH_TABLE_TYPE,
+ DESCRIPTOR_ARRAY_TYPE,
+ TRANSITION_ARRAY_TYPE, // LAST_FIXED_ARRAY_TYPE
FEEDBACK_VECTOR_TYPE,
- TRANSITION_ARRAY_TYPE,
PROPERTY_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
CELL_TYPE,
@@ -730,6 +737,7 @@ enum InstanceType : uint8_t {
PROPERTY_CELL_TYPE,
SMALL_ORDERED_HASH_MAP_TYPE,
SMALL_ORDERED_HASH_SET_TYPE,
+ CODE_DATA_CONTAINER_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -794,6 +802,9 @@ enum InstanceType : uint8_t {
LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE,
LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
+ // Boundaries for testing if given HeapObject is a subclass of FixedArray.
+ FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
+ LAST_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
@@ -835,8 +846,10 @@ enum InstanceType : uint8_t {
LAST_MAP_ITERATOR_TYPE = JS_MAP_VALUE_ITERATOR_TYPE,
};
+STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
+STATIC_ASSERT(JS_SPECIAL_API_OBJECT_TYPE == Internals::kJSSpecialApiObjectType);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
@@ -893,17 +906,6 @@ enum FixedArraySubInstanceType {
LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
};
-
-// TODO(bmeurer): Remove this in favor of the ComparisonResult below.
-enum CompareResult {
- LESS = -1,
- EQUAL = 0,
- GREATER = 1,
-
- NOT_EQUAL = GREATER
-};
-
-
// Result of an abstract relational comparison of x and y, implemented according
// to ES6 section 7.2.11 Abstract Relational Comparison.
enum class ComparisonResult {
@@ -913,12 +915,15 @@ enum class ComparisonResult {
kUndefined // at least one of x or y was undefined or NaN
};
+// (Returns false whenever {result} is kUndefined.)
+bool ComparisonResultToBool(Operation op, ComparisonResult result);
class AbstractCode;
class AccessorPair;
class AllocationSite;
class Cell;
class ConsString;
+class DependentCode;
class ElementsAccessor;
class EnumCache;
class FixedArrayBase;
@@ -963,7 +968,8 @@ template <class C> inline bool Is(Object* obj);
V(LayoutDescriptor) \
V(HeapObject) \
V(Primitive) \
- V(Number)
+ V(Number) \
+ V(Numeric)
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
V(AbstractCode) \
@@ -979,14 +985,16 @@ template <class C> inline bool Is(Object* obj);
V(Callable) \
V(CallHandlerInfo) \
V(Cell) \
+ V(ClassBoilerplate) \
V(Code) \
+ V(CodeDataContainer) \
V(CompilationCacheTable) \
V(ConsString) \
V(ConstantElementsPair) \
V(Constructor) \
V(Context) \
V(CoverageInfo) \
- V(DeoptimizationInputData) \
+ V(DeoptimizationData) \
V(DependentCode) \
V(DescriptorArray) \
V(EnumCache) \
@@ -999,6 +1007,7 @@ template <class C> inline bool Is(Object* obj);
V(Filler) \
V(FixedArray) \
V(FixedArrayBase) \
+ V(FixedArrayExact) \
V(FixedDoubleArray) \
V(FixedFloat32Array) \
V(FixedFloat64Array) \
@@ -1014,6 +1023,7 @@ template <class C> inline bool Is(Object* obj);
V(FrameArray) \
V(FreeSpace) \
V(Function) \
+ V(GlobalDictionary) \
V(HandlerTable) \
V(HeapNumber) \
V(InternalizedString) \
@@ -1057,18 +1067,23 @@ template <class C> inline bool Is(Object* obj);
V(ModuleInfo) \
V(MutableHeapNumber) \
V(Name) \
+ V(NameDictionary) \
V(NativeContext) \
V(NormalizedMapCache) \
V(ObjectHashSet) \
V(ObjectHashTable) \
V(Oddball) \
+ V(OrderedHashMap) \
+ V(OrderedHashSet) \
V(PreParsedScopeData) \
+ V(PromiseCapability) \
V(PropertyArray) \
V(PropertyCell) \
V(PropertyDescriptorObject) \
V(RegExpMatchInfo) \
V(ScopeInfo) \
V(ScriptContextTable) \
+ V(NumberDictionary) \
V(SeqOneByteString) \
V(SeqString) \
V(SeqTwoByteString) \
@@ -1093,7 +1108,6 @@ template <class C> inline bool Is(Object* obj);
V(TypeFeedbackInfo) \
V(Undetectable) \
V(UniqueName) \
- V(UnseededNumberDictionary) \
V(WasmInstanceObject) \
V(WasmMemoryObject) \
V(WasmModuleObject) \
@@ -1104,8 +1118,7 @@ template <class C> inline bool Is(Object* obj);
#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) \
V(Dictionary) \
- V(HashTable) \
- V(OrderedHashTable)
+ V(HashTable)
#define HEAP_OBJECT_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
@@ -1156,11 +1169,11 @@ class Object {
CERTAINLY_NOT_STORE_FROM_KEYED
};
- enum ShouldThrow { THROW_ON_ERROR, DONT_THROW };
+ enum class Conversion { kToNumber, kToNumeric };
#define RETURN_FAILURE(isolate, should_throw, call) \
do { \
- if ((should_throw) == DONT_THROW) { \
+ if ((should_throw) == kDontThrow) { \
return Just(false); \
} else { \
isolate->Throw(*isolate->factory()->call); \
@@ -1182,11 +1195,6 @@ class Object {
// ES6, #sec-isarray. NOT to be confused with %_IsArray.
INLINE(MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object));
- INLINE(bool IsNameDictionary() const);
- INLINE(bool IsGlobalDictionary() const);
- INLINE(bool IsSeededNumberDictionary() const);
- INLINE(bool IsOrderedHashSet() const);
- INLINE(bool IsOrderedHashMap() const);
INLINE(bool IsSmallOrderedHashTable() const);
// Extract the number.
@@ -1268,6 +1276,9 @@ class Object {
MUST_USE_RESULT static inline MaybeHandle<Object> ToNumber(
Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToNumeric(
+ Handle<Object> input);
+
// ES6 section 7.1.4 ToInteger
MUST_USE_RESULT static inline MaybeHandle<Object> ToInteger(
Isolate* isolate, Handle<Object> input);
@@ -1376,10 +1387,10 @@ class Object {
V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
LookupIterator* it);
- // ES6 [[Set]] (when passed DONT_THROW)
+ // ES6 [[Set]] (when passed kDontThrow)
// Invariants for this and related functions (unless stated otherwise):
// 1) When the result is Nothing, an exception is pending.
- // 2) When passed THROW_ON_ERROR, the result is never Just(false).
+ // 2) When passed kThrowOnError, the result is never Just(false).
// In some cases, an exception is thrown regardless of the ShouldThrow
// argument. These cases are either in accordance with the spec or not
// covered by it (eg., concerning API callbacks).
@@ -1537,8 +1548,8 @@ class Object {
Isolate* isolate, Handle<Object> value);
MUST_USE_RESULT static MaybeHandle<String> ConvertToString(
Isolate* isolate, Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToNumber(
- Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToNumberOrNumeric(
+ Isolate* isolate, Handle<Object> input, Conversion mode);
MUST_USE_RESULT static MaybeHandle<Object> ConvertToInteger(
Isolate* isolate, Handle<Object> input);
MUST_USE_RESULT static MaybeHandle<Object> ConvertToInt32(
@@ -1813,6 +1824,19 @@ class HeapObject: public Object {
inline AllocationAlignment RequiredAlignment() const;
+ // Whether the object needs rehashing. That is the case if the object's
+ // content depends on FLAG_hash_seed. When the object is deserialized into
+ // a heap with a different hash seed, these objects need to adapt.
+ inline bool NeedsRehashing() const;
+
+ // Rehashing support is not implemented for all objects that need rehashing.
+ // With objects that need rehashing but cannot be rehashed, rehashing has to
+ // be disabled.
+ bool CanBeRehashed() const;
+
+ // Rehash the object based on the layout inferred from its map.
+ void RehashBasedOnMap();
+
// Layout description.
// First field in a heap object is map.
static const int kMapOffset = Object::kHeaderSize;
@@ -1952,6 +1976,7 @@ class PropertyArray : public HeapObject {
static const int kLengthFieldSize = 10;
class LengthField : public BitField<int, 0, kLengthFieldSize> {};
+ static const int kMaxLength = LengthField::kMax;
class HashField : public BitField<int, kLengthFieldSize,
kSmiValueSize - kLengthFieldSize - 1> {};
@@ -2057,15 +2082,15 @@ class JSReceiver: public HeapObject {
// Implementation of ES6 [[Delete]]
MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSReceiver> object, Handle<Name> name,
- LanguageMode language_mode = SLOPPY);
+ LanguageMode language_mode = LanguageMode::kSloppy);
MUST_USE_RESULT static Maybe<bool> DeleteProperty(
Handle<JSReceiver> object, Handle<Name> name,
- LanguageMode language_mode = SLOPPY);
+ LanguageMode language_mode = LanguageMode::kSloppy);
MUST_USE_RESULT static Maybe<bool> DeleteProperty(LookupIterator* it,
LanguageMode language_mode);
MUST_USE_RESULT static Maybe<bool> DeleteElement(
Handle<JSReceiver> object, uint32_t index,
- LanguageMode language_mode = SLOPPY);
+ LanguageMode language_mode = LanguageMode::kSloppy);
MUST_USE_RESULT static Object* DefineProperty(Isolate* isolate,
Handle<Object> object,
@@ -2079,7 +2104,7 @@ class JSReceiver: public HeapObject {
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
PropertyDescriptor* desc, ShouldThrow should_throw);
- // ES6 7.3.4 (when passed DONT_THROW)
+ // ES6 7.3.4 (when passed kDontThrow)
MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
@@ -2110,7 +2135,7 @@ class JSReceiver: public HeapObject {
typedef PropertyAttributes IntegrityLevel;
- // ES6 7.3.14 (when passed DONT_THROW)
+ // ES6 7.3.14 (when passed kDontThrow)
// 'level' must be SEALED or FROZEN.
MUST_USE_RESULT static Maybe<bool> SetIntegrityLevel(
Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
@@ -2120,7 +2145,7 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<bool> TestIntegrityLevel(
Handle<JSReceiver> object, IntegrityLevel lvl);
- // ES6 [[PreventExtensions]] (when passed DONT_THROW)
+ // ES6 [[PreventExtensions]] (when passed kDontThrow)
MUST_USE_RESULT static Maybe<bool> PreventExtensions(
Handle<JSReceiver> object, ShouldThrow should_throw);
@@ -2163,11 +2188,12 @@ class JSReceiver: public HeapObject {
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet.
- inline Object* GetIdentityHash(Isolate* isolate);
+ Object* GetIdentityHash(Isolate* isolate);
// Retrieves a permanent object identity hash code. May create and store a
// hash code if needed and none exists.
- inline Smi* GetOrCreateIdentityHash(Isolate* isolate);
+ static Smi* CreateIdentityHash(Isolate* isolate, JSReceiver* key);
+ Smi* GetOrCreateIdentityHash(Isolate* isolate);
// Stores the hash code. The hash passed in must be masked with
// JSReceiver::kHashMask.
@@ -2276,7 +2302,7 @@ class JSObject: public JSReceiver {
inline bool HasSlowStringWrapperElements();
bool HasEnumerableElements();
- inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
+ inline NumberDictionary* element_dictionary(); // Gets slow elements.
// Requires: HasFastElements().
static void EnsureWritableFastElements(Handle<JSObject> object);
@@ -2318,7 +2344,7 @@ class JSObject: public JSReceiver {
// cannot.
MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
LookupIterator* it, Handle<Object> value,
- ShouldThrow should_throw = DONT_THROW);
+ ShouldThrow should_throw = kDontThrow);
static void AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, PropertyAttributes attributes);
@@ -2355,7 +2381,8 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
- static void OptimizeAsPrototype(Handle<JSObject> object);
+ static void OptimizeAsPrototype(Handle<JSObject> object,
+ bool enable_setup_mode = true);
static void ReoptimizeIfPrototype(Handle<JSObject> object);
static void MakePrototypesFast(Handle<Object> receiver,
WhereToStart where_to_start, Isolate* isolate);
@@ -2404,8 +2431,8 @@ class JSObject: public JSReceiver {
// Defines an AccessorInfo property on the given object.
MUST_USE_RESULT static MaybeHandle<Object> SetAccessor(
- Handle<JSObject> object,
- Handle<AccessorInfo> info);
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> info,
+ PropertyAttributes attributes);
// The result must be checked first for exceptions. If there's no exception,
// the output parameter |done| indicates whether the interceptor has a result
@@ -2469,8 +2496,12 @@ class JSObject: public JSReceiver {
// Get the header size for a JSObject. Used to compute the index of
// embedder fields as well as the number of embedder fields.
- static int GetHeaderSize(InstanceType instance_type);
- inline int GetHeaderSize();
+ // The |function_has_prototype_slot| parameter is needed only for
+ // JSFunction objects.
+ static int GetHeaderSize(InstanceType instance_type,
+ bool function_has_prototype_slot = false);
+ static inline int GetHeaderSize(const Map* map);
+ inline int GetHeaderSize() const;
static inline int GetEmbedderFieldCount(const Map* map);
inline int GetEmbedderFieldCount() const;
@@ -2507,11 +2538,10 @@ class JSObject: public JSReceiver {
const char* reason);
// Convert and update the elements backing store to be a
- // SeededNumberDictionary dictionary. Returns the backing after conversion.
- static Handle<SeededNumberDictionary> NormalizeElements(
- Handle<JSObject> object);
+ // NumberDictionary dictionary. Returns the backing after conversion.
+ static Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
- void RequireSlowElements(SeededNumberDictionary* dictionary);
+ void RequireSlowElements(NumberDictionary* dictionary);
// Transform slow named properties to fast variants.
static void MigrateSlowToFast(Handle<JSObject> object,
@@ -2651,6 +2681,8 @@ class JSObject: public JSReceiver {
// its size by more than the 1 entry necessary, so sequentially adding fields
// to the same object requires fewer allocations and copies.
static const int kFieldsAdded = 3;
+ STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
+ PropertyArray::kMaxLength);
// Layout description.
static const int kElementsOffset = JSReceiver::kHeaderSize;
@@ -2692,10 +2724,6 @@ class JSObject: public JSReceiver {
ElementsKind kind,
Object* object);
- Object* GetIdentityHash(Isolate* isolate);
-
- Smi* GetOrCreateIdentityHash(Isolate* isolate);
-
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
template <PropertyAttributes attrs>
@@ -2900,6 +2928,14 @@ class FixedArray: public FixedArrayBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
+// FixedArray alias added only because of IsFixedArrayExact() predicate, which
+// checks for the exact instance type FIXED_ARRAY_TYPE instead of a range
+// check: [FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE].
+class FixedArrayExact final : public FixedArray {
+ public:
+ DECL_CAST(FixedArrayExact)
+};
+
// FixedDoubleArray describes fixed-sized arrays with element type double.
class FixedDoubleArray: public FixedArrayBase {
public:
@@ -2957,7 +2993,7 @@ class WeakFixedArray : public FixedArray {
// ensure this themselves if necessary.
static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
Handle<HeapObject> value,
- int* assigned_index = NULL);
+ int* assigned_index = nullptr);
// Returns true if an entry was found and removed.
bool Remove(Handle<HeapObject> value);
@@ -2979,7 +3015,9 @@ class WeakFixedArray : public FixedArray {
class Iterator {
public:
- explicit Iterator(Object* maybe_array) : list_(NULL) { Reset(maybe_array); }
+ explicit Iterator(Object* maybe_array) : list_(nullptr) {
+ Reset(maybe_array);
+ }
void Reset(Object* maybe_array);
template <class T>
@@ -3073,90 +3111,7 @@ enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
template <SearchMode search_mode, typename T>
inline int Search(T* array, Name* name, int valid_entries = 0,
- int* out_insertion_index = NULL);
-
-// HandlerTable is a fixed array containing entries for exception handlers in
-// the code object it is associated with. The tables comes in two flavors:
-// 1) Based on ranges: Used for unoptimized code. Contains one entry per
-// exception handler and a range representing the try-block covered by that
-// handler. Layout looks as follows:
-// [ range-start , range-end , handler-offset , handler-data ]
-// 2) Based on return addresses: Used for turbofanned code. Contains one entry
-// per call-site that could throw an exception. Layout looks as follows:
-// [ return-address-offset , handler-offset ]
-class HandlerTable : public FixedArray {
- public:
- // Conservative prediction whether a given handler will locally catch an
- // exception or cause a re-throw to outside the code boundary. Since this is
- // undecidable it is merely an approximation (e.g. useful for debugger).
- enum CatchPrediction {
- UNCAUGHT, // The handler will (likely) rethrow the exception.
- CAUGHT, // The exception will be caught by the handler.
- PROMISE, // The exception will be caught and cause a promise rejection.
- DESUGARING, // The exception will be caught, but both the exception and the
- // catching are part of a desugaring and should therefore not
- // be visible to the user (we won't notify the debugger of such
- // exceptions).
- ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
- // in the desugaring of an async function, so special
- // async/await handling in the debugger can take place.
- };
-
- // Getters for handler table based on ranges.
- inline int GetRangeStart(int index) const;
- inline int GetRangeEnd(int index) const;
- inline int GetRangeHandler(int index) const;
- inline int GetRangeData(int index) const;
-
- // Setters for handler table based on ranges.
- inline void SetRangeStart(int index, int value);
- inline void SetRangeEnd(int index, int value);
- inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
- inline void SetRangeData(int index, int value);
-
- // Setters for handler table based on return addresses.
- inline void SetReturnOffset(int index, int value);
- inline void SetReturnHandler(int index, int offset);
-
- // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
- // the start of the potentially throwing instruction (using return addresses
- // for this value would be invalid).
- int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
-
- // Lookup handler in a table based on return addresses.
- int LookupReturn(int pc_offset);
-
- // Returns the number of entries in the table.
- inline int NumberOfRangeEntries() const;
-
- // Returns the required length of the underlying fixed array.
- static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
- static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
-
- DECL_CAST(HandlerTable)
-
-#ifdef ENABLE_DISASSEMBLER
- void HandlerTableRangePrint(std::ostream& os); // NOLINT
- void HandlerTableReturnPrint(std::ostream& os); // NOLINT
-#endif
-
- private:
- // Layout description for handler table based on ranges.
- static const int kRangeStartIndex = 0;
- static const int kRangeEndIndex = 1;
- static const int kRangeHandlerIndex = 2;
- static const int kRangeDataIndex = 3;
- static const int kRangeEntrySize = 4;
-
- // Layout description for handler table based on return addresses.
- static const int kReturnOffsetIndex = 0;
- static const int kReturnHandlerIndex = 1;
- static const int kReturnEntrySize = 2;
-
- // Encoding of the {handler} field.
- class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
- class HandlerOffsetField : public BitField<int, 3, 29> {};
-};
+ int* out_insertion_index = nullptr);
// ByteArray represents fixed sized byte arrays. Used for the relocation info
// that is attached to code objects.
@@ -3192,7 +3147,7 @@ class ByteArray: public FixedArrayBase {
// have.
static int LengthFor(int size_in_bytes) {
DCHECK(IsAligned(size_in_bytes, kPointerSize));
- DCHECK(size_in_bytes >= kHeaderSize);
+ DCHECK_GE(size_in_bytes, kHeaderSize);
return size_in_bytes - kHeaderSize;
}
@@ -3254,138 +3209,6 @@ class PodArray : public ByteArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
};
-// BytecodeArray represents a sequence of interpreter bytecodes.
-class BytecodeArray : public FixedArrayBase {
- public:
- enum Age {
- kNoAgeBytecodeAge = 0,
- kQuadragenarianBytecodeAge,
- kQuinquagenarianBytecodeAge,
- kSexagenarianBytecodeAge,
- kSeptuagenarianBytecodeAge,
- kOctogenarianBytecodeAge,
- kAfterLastBytecodeAge,
- kFirstBytecodeAge = kNoAgeBytecodeAge,
- kLastBytecodeAge = kAfterLastBytecodeAge - 1,
- kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
- kIsOldBytecodeAge = kSexagenarianBytecodeAge
- };
-
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length);
- }
-
- // Setter and getter
- inline byte get(int index);
- inline void set(int index, byte value);
-
- // Returns data start address.
- inline Address GetFirstBytecodeAddress();
-
- // Accessors for frame size.
- inline int frame_size() const;
- inline void set_frame_size(int frame_size);
-
- // Accessor for register count (derived from frame_size).
- inline int register_count() const;
-
- // Accessors for parameter count (including implicit 'this' receiver).
- inline int parameter_count() const;
- inline void set_parameter_count(int number_of_parameters);
-
- // Register used to pass the incoming new.target or generator object from the
- // fucntion call.
- inline interpreter::Register incoming_new_target_or_generator_register()
- const;
- inline void set_incoming_new_target_or_generator_register(
- interpreter::Register incoming_new_target_or_generator_register);
-
- // Accessors for profiling count.
- inline int interrupt_budget() const;
- inline void set_interrupt_budget(int interrupt_budget);
-
- // Accessors for OSR loop nesting level.
- inline int osr_loop_nesting_level() const;
- inline void set_osr_loop_nesting_level(int depth);
-
- // Accessors for bytecode's code age.
- inline Age bytecode_age() const;
- inline void set_bytecode_age(Age age);
-
- // Accessors for the constant pool.
- DECL_ACCESSORS(constant_pool, FixedArray)
-
- // Accessors for handler table containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
-
- // Accessors for source position table containing mappings between byte code
- // offset and source position or SourcePositionTableWithFrameCache.
- DECL_ACCESSORS(source_position_table, Object)
-
- inline ByteArray* SourcePositionTable();
-
- DECL_CAST(BytecodeArray)
-
- // Dispatched behavior.
- inline int BytecodeArraySize();
-
- inline int instruction_size();
-
- // Returns the size of bytecode and its metadata. This includes the size of
- // bytecode, constant pool, source position table, and handler table.
- inline int SizeIncludingMetadata();
-
- int SourcePosition(int offset);
- int SourceStatementPosition(int offset);
-
- DECL_PRINTER(BytecodeArray)
- DECL_VERIFIER(BytecodeArray)
-
- void Disassemble(std::ostream& os);
-
- void CopyBytecodesTo(BytecodeArray* to);
-
- // Bytecode aging
- bool IsOld() const;
- void MakeOlder();
-
- // Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic.
- inline void clear_padding();
-
- // Layout description.
-#define BYTECODE_ARRAY_FIELDS(V) \
- /* Pointer fields. */ \
- V(kConstantPoolOffset, kPointerSize) \
- V(kHandlerTableOffset, kPointerSize) \
- V(kSourcePositionTableOffset, kPointerSize) \
- V(kFrameSizeOffset, kIntSize) \
- V(kParameterSizeOffset, kIntSize) \
- V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
- V(kInterruptBudgetOffset, kIntSize) \
- V(kOSRNestingLevelOffset, kCharSize) \
- V(kBytecodeAgeOffset, kCharSize) \
- /* Total size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- BYTECODE_ARRAY_FIELDS)
-#undef BYTECODE_ARRAY_FIELDS
-
- // Maximal memory consumption for a single BytecodeArray.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single BytecodeArray.
- static const int kMaxLength = kMaxSize - kHeaderSize;
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
-};
-
-
// FreeSpace are fixed-size free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
@@ -3537,92 +3360,6 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
#undef FIXED_TYPED_ARRAY_TRAITS
-// DeoptimizationInputData is a fixed array used to hold the deoptimization
-// data for optimized code. It also contains information about functions that
-// were inlined. If N different functions were inlined then first N elements of
-// the literal array will contain these functions.
-//
-// It can be empty.
-class DeoptimizationInputData: public FixedArray {
- public:
- // Layout description. Indices in the array.
- static const int kTranslationByteArrayIndex = 0;
- static const int kInlinedFunctionCountIndex = 1;
- static const int kLiteralArrayIndex = 2;
- static const int kOsrBytecodeOffsetIndex = 3;
- static const int kOsrPcOffsetIndex = 4;
- static const int kOptimizationIdIndex = 5;
- static const int kSharedFunctionInfoIndex = 6;
- static const int kWeakCellCacheIndex = 7;
- static const int kInliningPositionsIndex = 8;
- static const int kFirstDeoptEntryIndex = 9;
-
- // Offsets of deopt entry elements relative to the start of the entry.
- static const int kBytecodeOffsetRawOffset = 0;
- static const int kTranslationIndexOffset = 1;
- static const int kPcOffset = 2;
- static const int kDeoptEntrySize = 3;
-
- // Simple element accessors.
-#define DECL_ELEMENT_ACCESSORS(name, type) \
- inline type* name(); \
- inline void Set##name(type* value);
-
- DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
- DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
- DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
- DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
- DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
- DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
- DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
- DECL_ELEMENT_ACCESSORS(WeakCellCache, Object)
- DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
-
-#undef DECL_ELEMENT_ACCESSORS
-
-// Accessors for elements of the ith deoptimization entry.
-#define DECL_ENTRY_ACCESSORS(name, type) \
- inline type* name(int i); \
- inline void Set##name(int i, type* value);
-
- DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
- DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
- DECL_ENTRY_ACCESSORS(Pc, Smi)
-
-#undef DECL_ENTRY_ACCESSORS
-
- inline BailoutId BytecodeOffset(int i);
-
- inline void SetBytecodeOffset(int i, BailoutId value);
-
- inline int DeoptCount();
-
- static const int kNotInlinedIndex = -1;
-
- // Returns the inlined function at the given position in LiteralArray, or the
- // outer function if index == kNotInlinedIndex.
- class SharedFunctionInfo* GetInlinedFunction(int index);
-
- // Allocates a DeoptimizationInputData.
- static Handle<DeoptimizationInputData> New(Isolate* isolate,
- int deopt_entry_count,
- PretenureFlag pretenure);
-
- DECL_CAST(DeoptimizationInputData)
-
-#ifdef ENABLE_DISASSEMBLER
- void DeoptimizationInputDataPrint(std::ostream& os); // NOLINT
-#endif
-
- private:
- static int IndexForEntry(int i) {
- return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
- }
-
-
- static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
-};
-
class TemplateList : public FixedArray {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
@@ -3638,622 +3375,57 @@ class TemplateList : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
};
-// Code describes objects with on-the-fly generated machine code.
-class Code: public HeapObject {
- public:
- // Opaque data type for encapsulating code flags like kind, inline
- // cache state, and arguments count.
- typedef uint32_t Flags;
-
-#define CODE_KIND_LIST(V) \
- V(OPTIMIZED_FUNCTION) \
- V(BYTECODE_HANDLER) \
- V(STUB) \
- V(BUILTIN) \
- V(REGEXP) \
- V(WASM_FUNCTION) \
- V(WASM_TO_JS_FUNCTION) \
- V(JS_TO_WASM_FUNCTION) \
- V(WASM_INTERPRETER_ENTRY) \
- V(C_WASM_ENTRY)
-
- enum Kind {
-#define DEFINE_CODE_KIND_ENUM(name) name,
- CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
-#undef DEFINE_CODE_KIND_ENUM
- NUMBER_OF_KINDS
- };
-
- static const char* Kind2String(Kind kind);
-
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
- // Printing
- static const char* ICState2String(InlineCacheState state);
-#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
-#ifdef ENABLE_DISASSEMBLER
- void Disassemble(const char* name, std::ostream& os); // NOLINT
-#endif // ENABLE_DISASSEMBLER
-
- // [instruction_size]: Size of the native instructions
- inline int instruction_size() const;
- inline void set_instruction_size(int value);
-
- // [relocation_info]: Code relocation information
- DECL_ACCESSORS(relocation_info, ByteArray)
- void InvalidateRelocation();
- void InvalidateEmbeddedObjects();
-
- // [handler_table]: Fixed array containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
-
- // [deoptimization_data]: Array containing data for deopt.
- DECL_ACCESSORS(deoptimization_data, FixedArray)
-
- // [source_position_table]: ByteArray for the source positions table or
- // SourcePositionTableWithFrameCache.
- DECL_ACCESSORS(source_position_table, Object)
-
- inline ByteArray* SourcePositionTable() const;
-
- // [trap_handler_index]: An index into the trap handler's master list of code
- // objects.
- DECL_ACCESSORS(trap_handler_index, Smi)
-
- // [raw_type_feedback_info]: This field stores various things, depending on
- // the kind of the code object.
- // STUB and ICs => major/minor key as Smi.
- // TODO(mvstanton): rename raw_type_feedback_info to stub_key, since the
- // field is no longer overloaded.
- DECL_ACCESSORS(raw_type_feedback_info, Object)
- inline uint32_t stub_key() const;
- inline void set_stub_key(uint32_t key);
-
- // [next_code_link]: Link for lists of optimized or deoptimized code.
- // Note that storage for this field is overlapped with typefeedback_info.
- DECL_ACCESSORS(next_code_link, Object)
-
- // [constant_pool offset]: Offset of the constant pool.
- // Valid for FLAG_enable_embedded_constant_pool only
- inline int constant_pool_offset() const;
- inline void set_constant_pool_offset(int offset);
-
- // Unchecked accessors to be used during GC.
- inline ByteArray* unchecked_relocation_info() const;
-
- inline int relocation_size() const;
-
- // [kind]: Access to specific code kind.
- inline Kind kind() const;
- inline void set_kind(Kind kind);
-
- inline bool is_stub() const;
- inline bool is_optimized_code() const;
- inline bool is_wasm_code() const;
-
- inline void set_raw_kind_specific_flags1(int value);
- inline void set_raw_kind_specific_flags2(int value);
-
- // Testers for interpreter builtins.
- inline bool is_interpreter_trampoline_builtin() const;
-
- // Tells whether the code checks the optimization marker in the function's
- // feedback vector.
- inline bool checks_optimization_marker() const;
-
- // [has_tagged_params]: For compiled code or builtins: Tells whether the
- // outgoing parameters of this code are tagged pointers. True for other kinds.
- inline bool has_tagged_params() const;
- inline void set_has_tagged_params(bool value);
-
- // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
- // code object was generated by the TurboFan optimizing compiler.
- inline bool is_turbofanned() const;
- inline void set_is_turbofanned(bool value);
-
- // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
- // embedded objects in code should be treated weakly.
- inline bool can_have_weak_objects() const;
- inline void set_can_have_weak_objects(bool value);
-
- // [is_construct_stub]: For kind BUILTIN, tells whether the code object
- // represents a hand-written construct stub
- // (e.g., NumberConstructor_ConstructStub).
- inline bool is_construct_stub() const;
- inline void set_is_construct_stub(bool value);
-
- // [builtin_index]: For builtins, tells which builtin index the code object
- // has. The builtin index is a non-negative integer for builtins, and -1
- // otherwise.
- inline int builtin_index() const;
- inline void set_builtin_index(int id);
- inline bool is_builtin() const;
-
- // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
- // reserved in the code prologue.
- inline unsigned stack_slots() const;
- inline void set_stack_slots(unsigned slots);
-
- // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
- // the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_offset() const;
- inline void set_safepoint_table_offset(unsigned offset);
-
- // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
- // the code is going to be deoptimized because of dead embedded maps.
- inline bool marked_for_deoptimization() const;
- inline void set_marked_for_deoptimization(bool flag);
-
- // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
- // the code was already deoptimized.
- inline bool deopt_already_counted() const;
- inline void set_deopt_already_counted(bool flag);
-
- // [is_promise_rejection]: For kind BUILTIN tells whether the
- // exception thrown by the code will lead to promise rejection or
- // uncaught if both this and is_exception_caught is set.
- // Use GetBuiltinCatchPrediction to access this.
- inline void set_is_promise_rejection(bool flag);
-
- // [is_exception_caught]: For kind BUILTIN tells whether the
- // exception thrown by the code will be caught internally or
- // uncaught if both this and is_promise_rejection is set.
- // Use GetBuiltinCatchPrediction to access this.
- inline void set_is_exception_caught(bool flag);
-
- // [constant_pool]: The constant pool for this function.
- inline Address constant_pool();
-
- // Get the safepoint entry for the given pc.
- SafepointEntry GetSafepointEntry(Address pc);
-
- // The entire code object including its header is copied verbatim to the
- // snapshot so that it can be written in one, fast, memcpy during
- // deserialization. The deserializer will overwrite some pointers, rather
- // like a runtime linker, but the random allocation addresses used in the
- // mksnapshot process would still be present in the unlinked snapshot data,
- // which would make snapshot production non-reproducible. This method wipes
- // out the to-be-overwritten header data for reproducible snapshots.
- inline void WipeOutHeader();
-
- // Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic.
- inline void clear_padding();
- // Initialize the flags field. Similar to clear_padding above this ensure that
- // the snapshot content is deterministic.
- inline void initialize_flags(Kind kind);
-
- // Convert a target address into a code object.
- static inline Code* GetCodeFromTargetAddress(Address address);
-
- // Convert an entry address into an object.
- static inline Object* GetObjectFromEntryAddress(Address location_of_address);
-
- // Convert a code entry into an object.
- static inline Object* GetObjectFromCodeEntry(Address code_entry);
-
- // Returns the address of the first instruction.
- inline byte* instruction_start() const;
-
- // Returns the address right after the last instruction.
- inline byte* instruction_end() const;
-
- // Returns the size of the instructions, padding, relocation and unwinding
- // information.
- inline int body_size() const;
-
- // Returns the size of code and its metadata. This includes the size of code
- // relocation information, deoptimization data and handler table.
- inline int SizeIncludingMetadata() const;
-
- // Returns the address of the first relocation info (read backwards!).
- inline byte* relocation_start() const;
-
- // [has_unwinding_info]: Whether this code object has unwinding information.
- // If it doesn't, unwinding_information_start() will point to invalid data.
- //
- // The body of all code objects has the following layout.
- //
- // +--------------------------+ <-- instruction_start()
- // | instructions |
- // | ... |
- // +--------------------------+
- // | relocation info |
- // | ... |
- // +--------------------------+ <-- instruction_end()
- //
- // If has_unwinding_info() is false, instruction_end() points to the first
- // memory location after the end of the code object. Otherwise, the body
- // continues as follows:
- //
- // +--------------------------+
- // | padding to the next |
- // | 8-byte aligned address |
- // +--------------------------+ <-- instruction_end()
- // | [unwinding_info_size] |
- // | as uint64_t |
- // +--------------------------+ <-- unwinding_info_start()
- // | unwinding info |
- // | ... |
- // +--------------------------+ <-- unwinding_info_end()
- //
- // and unwinding_info_end() points to the first memory location after the end
- // of the code object.
- //
- DECL_BOOLEAN_ACCESSORS(has_unwinding_info)
-
- // [unwinding_info_size]: Size of the unwinding information.
- inline int unwinding_info_size() const;
- inline void set_unwinding_info_size(int value);
-
- // Returns the address of the unwinding information, if any.
- inline byte* unwinding_info_start() const;
-
- // Returns the address right after the end of the unwinding information.
- inline byte* unwinding_info_end() const;
-
- // Code entry point.
- inline byte* entry() const;
-
- // Returns true if pc is inside this object's instructions.
- inline bool contains(byte* pc);
-
- // Relocate the code by delta bytes. Called to signal that this code
- // object has been moved by delta bytes.
- void Relocate(intptr_t delta);
-
- // Migrate code described by desc.
- void CopyFrom(const CodeDesc& desc);
-
- // Returns the object size for a given body (used for allocation).
- static int SizeFor(int body_size) {
- DCHECK_SIZE_TAG_ALIGNED(body_size);
- return RoundUp(kHeaderSize + body_size, kCodeAlignment);
- }
-
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- inline int ExecutableSize() const;
-
- DECL_CAST(Code)
-
- // Dispatched behavior.
- inline int CodeSize() const;
-
- DECL_PRINTER(Code)
- DECL_VERIFIER(Code)
-
- void PrintDeoptLocation(FILE* out, Address pc);
- bool CanDeoptAt(Address pc);
-
- inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
-#ifdef VERIFY_HEAP
- void VerifyEmbeddedObjectsDependency();
-#endif
-
-#ifdef DEBUG
- enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
- void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
-#endif // DEBUG
-
- inline bool CanContainWeakObjects();
-
- inline bool IsWeakObject(Object* object);
-
- static inline bool IsWeakObjectInOptimizedCode(Object* object);
-
- static Handle<WeakCell> WeakCellFor(Handle<Code> code);
- WeakCell* CachedWeakCell();
-
- // Return true if the function is inlined in the code.
- bool Inlines(SharedFunctionInfo* sfi);
-
- class OptimizedCodeIterator {
- public:
- explicit OptimizedCodeIterator(Isolate* isolate);
- Code* Next();
-
- private:
- Context* next_context_;
- Code* current_code_;
- Isolate* isolate_;
-
- DisallowHeapAllocation no_gc;
- DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator)
- };
-
- static const int kConstantPoolSize =
- FLAG_enable_embedded_constant_pool ? kIntSize : 0;
-
- // Layout description.
- static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
- static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
- static const int kDeoptimizationDataOffset =
- kHandlerTableOffset + kPointerSize;
- static const int kSourcePositionTableOffset =
- kDeoptimizationDataOffset + kPointerSize;
- // For FUNCTION kind, we store the type feedback info here.
- static const int kTypeFeedbackInfoOffset =
- kSourcePositionTableOffset + kPointerSize;
- static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
- static const int kInstructionSizeOffset = kNextCodeLinkOffset + kPointerSize;
- static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
- static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
- static const int kKindSpecificFlags2Offset =
- kKindSpecificFlags1Offset + kIntSize;
- static const int kConstantPoolOffset = kKindSpecificFlags2Offset + kIntSize;
- static const int kBuiltinIndexOffset =
- kConstantPoolOffset + kConstantPoolSize;
- static const int kTrapHandlerIndex = kBuiltinIndexOffset + kIntSize;
- static const int kHeaderPaddingStart = kTrapHandlerIndex + kPointerSize;
-
- // Add padding to align the instruction start following right after
- // the Code object header.
- static const int kHeaderSize =
- (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
-
- // Data or code not directly visited by GC directly starts here.
- // The serializer needs to copy bytes starting from here verbatim.
- // Objects embedded into code is visited via reloc info.
- static const int kDataStart = kInstructionSizeOffset;
-
- inline int GetUnwindingInfoSizeOffset() const;
-
- class BodyDescriptor;
-
- // Flags layout. BitField<type, shift, size>.
- class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
- class KindField : public BitField<Kind, HasUnwindingInfoField::kNext, 5> {};
- STATIC_ASSERT(NUMBER_OF_KINDS <= KindField::kMax);
-
- // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
- static const int kStackSlotsFirstBit = 0;
- static const int kStackSlotsBitCount = 24;
- static const int kMarkedForDeoptimizationBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kDeoptAlreadyCountedBit = kMarkedForDeoptimizationBit + 1;
- static const int kIsTurbofannedBit = kDeoptAlreadyCountedBit + 1;
- static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
- // Could be moved to overlap previous bits when we need more space.
- static const int kIsConstructStub = kCanHaveWeakObjects + 1;
- static const int kIsPromiseRejection = kIsConstructStub + 1;
- static const int kIsExceptionCaught = kIsPromiseRejection + 1;
-
- STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kIsExceptionCaught + 1 <= 32);
-
- class StackSlotsField: public BitField<int,
- kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
- class MarkedForDeoptimizationField
- : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
- class DeoptAlreadyCountedField
- : public BitField<bool, kDeoptAlreadyCountedBit, 1> {}; // NOLINT
- class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
- }; // NOLINT
- class CanHaveWeakObjectsField
- : public BitField<bool, kCanHaveWeakObjects, 1> {}; // NOLINT
- class IsConstructStubField : public BitField<bool, kIsConstructStub, 1> {
- }; // NOLINT
- class IsPromiseRejectionField
- : public BitField<bool, kIsPromiseRejection, 1> {}; // NOLINT
- class IsExceptionCaughtField : public BitField<bool, kIsExceptionCaught, 1> {
- }; // NOLINT
-
- // KindSpecificFlags2 layout (ALL)
- static const int kHasTaggedStackBit = 0;
- class HasTaggedStackField : public BitField<bool, kHasTaggedStackBit, 1> {};
-
- // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
- static const int kSafepointTableOffsetFirstBit = kHasTaggedStackBit + 1;
- static const int kSafepointTableOffsetBitCount = 30;
-
- STATIC_ASSERT(kSafepointTableOffsetFirstBit +
- kSafepointTableOffsetBitCount <= 32);
- STATIC_ASSERT(1 + kSafepointTableOffsetBitCount <= 32);
-
- class SafepointTableOffsetField: public BitField<int,
- kSafepointTableOffsetFirstBit,
- kSafepointTableOffsetBitCount> {}; // NOLINT
-
- static const int kArgumentsBits = 16;
- static const int kMaxArguments = (1 << kArgumentsBits) - 1;
-
- private:
- friend class RelocIterator;
-
- bool is_promise_rejection() const;
- bool is_exception_caught() const;
+class PrototypeInfo;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
+// An abstract superclass, a marker class really, for simple structure classes.
+// It doesn't carry much functionality but allows struct classes to be
+// identified in the type system.
+class Struct: public HeapObject {
+ public:
+ inline void InitializeBody(int object_size);
+ DECL_CAST(Struct)
+ void BriefPrintDetails(std::ostream& os);
};
-class AbstractCode : public HeapObject {
+class Tuple2 : public Struct {
public:
- // All code kinds and INTERPRETED_FUNCTION.
- enum Kind {
-#define DEFINE_CODE_KIND_ENUM(name) name,
- CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
-#undef DEFINE_CODE_KIND_ENUM
- INTERPRETED_FUNCTION,
- NUMBER_OF_KINDS
- };
-
- static const char* Kind2String(Kind kind);
-
- int SourcePosition(int offset);
- int SourceStatementPosition(int offset);
-
- // Returns the address of the first instruction.
- inline Address instruction_start();
-
- // Returns the address right after the last instruction.
- inline Address instruction_end();
-
- // Returns the size of the code instructions.
- inline int instruction_size();
-
- // Return the source position table.
- inline ByteArray* source_position_table();
-
- // Set the source position table.
- inline void set_source_position_table(ByteArray* source_position_table);
-
- inline Object* stack_frame_cache();
- static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<UnseededNumberDictionary> cache);
- void DropStackFrameCache();
-
- // Returns the size of instructions and the metadata.
- inline int SizeIncludingMetadata();
-
- // Returns true if pc is inside this object's instructions.
- inline bool contains(byte* pc);
+ DECL_ACCESSORS(value1, Object)
+ DECL_ACCESSORS(value2, Object)
- // Returns the AbstractCode::Kind of the code.
- inline Kind kind();
+ DECL_CAST(Tuple2)
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- inline int ExecutableSize();
+ // Dispatched behavior.
+ DECL_PRINTER(Tuple2)
+ DECL_VERIFIER(Tuple2)
+ void BriefPrintDetails(std::ostream& os);
- DECL_CAST(AbstractCode)
- inline Code* GetCode();
- inline BytecodeArray* GetBytecodeArray();
+ static const int kValue1Offset = HeapObject::kHeaderSize;
+ static const int kValue2Offset = kValue1Offset + kPointerSize;
+ static const int kSize = kValue2Offset + kPointerSize;
- // Max loop nesting marker used to postpose OSR. We don't take loop
- // nesting that is deeper than 5 levels into account.
- static const int kMaxLoopNestingMarker = 6;
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple2);
};
-// Dependent code is a singly linked list of fixed arrays. Each array contains
-// code objects in weak cells for one dependent group. The suffix of the array
-// can be filled with the undefined value if the number of codes is less than
-// the length of the array.
-//
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// |
-// V
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// |
-// V
-// empty_fixed_array()
-//
-// The list of fixed arrays is ordered by dependency groups.
-
-class DependentCode: public FixedArray {
+class Tuple3 : public Tuple2 {
public:
- enum DependencyGroup {
- // Group of code that weakly embed this map and depend on being
- // deoptimized when the map is garbage collected.
- kWeakCodeGroup,
- // Group of code that embed a transition to this map, and depend on being
- // deoptimized when the transition is replaced by a new version.
- kTransitionGroup,
- // Group of code that omit run-time prototype checks for prototypes
- // described by this map. The group is deoptimized whenever an object
- // described by this map changes shape (and transitions to a new map),
- // possibly invalidating the assumptions embedded in the code.
- kPrototypeCheckGroup,
- // Group of code that depends on global property values in property cells
- // not being changed.
- kPropertyCellChangedGroup,
- // Group of code that omit run-time checks for field(s) introduced by
- // this map, i.e. for the field type.
- kFieldOwnerGroup,
- // Group of code that omit run-time type checks for initial maps of
- // constructors.
- kInitialMapChangedGroup,
- // Group of code that depends on tenuring information in AllocationSites
- // not being changed.
- kAllocationSiteTenuringChangedGroup,
- // Group of code that depends on element transition information in
- // AllocationSites not being changed.
- kAllocationSiteTransitionChangedGroup
- };
-
- static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
- static const int kNextLinkIndex = 0;
- static const int kFlagsIndex = 1;
- static const int kCodesStartIndex = 2;
-
- bool Contains(DependencyGroup group, WeakCell* code_cell);
- bool IsEmpty(DependencyGroup group);
-
- static Handle<DependentCode> InsertCompilationDependencies(
- Handle<DependentCode> entries, DependencyGroup group,
- Handle<Foreign> info);
-
- static Handle<DependentCode> InsertWeakCode(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<WeakCell> code_cell);
-
- void UpdateToFinishedCode(DependencyGroup group, Foreign* info,
- WeakCell* code_cell);
-
- void RemoveCompilationDependencies(DependentCode::DependencyGroup group,
- Foreign* info);
-
- void DeoptimizeDependentCodeGroup(Isolate* isolate,
- DependentCode::DependencyGroup group);
+ DECL_ACCESSORS(value3, Object)
- bool MarkCodeForDeoptimization(Isolate* isolate,
- DependentCode::DependencyGroup group);
+ DECL_CAST(Tuple3)
- // The following low-level accessors should only be used by this class
- // and the mark compact collector.
- inline DependentCode* next_link();
- inline void set_next_link(DependentCode* next);
- inline int count();
- inline void set_count(int value);
- inline DependencyGroup group();
- inline void set_group(DependencyGroup group);
- inline Object* object_at(int i);
- inline void set_object_at(int i, Object* object);
- inline void clear_at(int i);
- inline void copy(int from, int to);
- DECL_CAST(DependentCode)
+ // Dispatched behavior.
+ DECL_PRINTER(Tuple3)
+ DECL_VERIFIER(Tuple3)
+ void BriefPrintDetails(std::ostream& os);
- static const char* DependencyGroupName(DependencyGroup group);
- static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
+ static const int kValue3Offset = Tuple2::kSize;
+ static const int kSize = kValue3Offset + kPointerSize;
private:
- static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Object> object);
- static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
- Handle<DependentCode> next);
- static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
- // Compact by removing cleared weak cells and return true if there was
- // any cleared weak cell.
- bool Compact();
- static int Grow(int number_of_entries) {
- if (number_of_entries < 5) return number_of_entries + 1;
- return number_of_entries * 5 / 4;
- }
- inline int flags();
- inline void set_flags(int flags);
- class GroupField : public BitField<int, 0, 3> {};
- class CountField : public BitField<int, 3, 27> {};
- STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
-};
-
-class PrototypeInfo;
-
-// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to be
-// identified in the type system.
-class Struct: public HeapObject {
- public:
- inline void InitializeBody(int object_size);
- DECL_CAST(Struct)
- void BriefPrintDetails(std::ostream& os);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
};
-class PromiseCapability : public Struct {
+class PromiseCapability : public Tuple3 {
public:
DECL_CAST(PromiseCapability)
DECL_PRINTER(PromiseCapability)
@@ -4263,10 +3435,10 @@ class PromiseCapability : public Struct {
DECL_ACCESSORS(resolve, Object)
DECL_ACCESSORS(reject, Object)
- static const int kPromiseOffset = Struct::kHeaderSize;
- static const int kResolveOffset = kPromiseOffset + kPointerSize;
- static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kSize = kRejectOffset + kPointerSize;
+ static const int kPromiseOffset = Tuple3::kValue1Offset;
+ static const int kResolveOffset = Tuple3::kValue2Offset;
+ static const int kRejectOffset = Tuple3::kValue3Offset;
+ static const int kSize = Tuple3::kSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
@@ -4413,44 +3585,6 @@ class PrototypeInfo : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
};
-class Tuple2 : public Struct {
- public:
- DECL_ACCESSORS(value1, Object)
- DECL_ACCESSORS(value2, Object)
-
- DECL_CAST(Tuple2)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple2)
- DECL_VERIFIER(Tuple2)
- void BriefPrintDetails(std::ostream& os);
-
- static const int kValue1Offset = HeapObject::kHeaderSize;
- static const int kValue2Offset = kValue1Offset + kPointerSize;
- static const int kSize = kValue2Offset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple2);
-};
-
-class Tuple3 : public Tuple2 {
- public:
- DECL_ACCESSORS(value3, Object)
-
- DECL_CAST(Tuple3)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple3)
- DECL_VERIFIER(Tuple3)
- void BriefPrintDetails(std::ostream& os);
-
- static const int kValue3Offset = Tuple2::kSize;
- static const int kSize = kValue3Offset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
-};
-
// Pair used to store both a ScopeInfo and an extension object in the extension
// slot of a block, catch, or with context. Needed in the rare case where a
// declaration block scope (a "varblock" as used to desugar parameter
@@ -4818,6 +3952,8 @@ class JSFunction: public JSObject {
static const int kLengthDescriptorIndex = 0;
static const int kNameDescriptorIndex = 1;
+ // Home object descriptor index when function has a [[HomeObject]] slot.
+ static const int kMaybeHomeObjectDescriptorIndex = 2;
// [context]: The context for this function.
inline Context* context();
@@ -4896,7 +4032,8 @@ class JSFunction: public JSObject {
enum FeedbackVectorState {
TOP_LEVEL_SCRIPT_NEEDS_VECTOR,
NEEDS_VECTOR,
- HAS_VECTOR
+ HAS_VECTOR,
+ NO_VECTOR_NEEDED
};
inline FeedbackVectorState GetFeedbackVectorState(Isolate* isolate) const;
@@ -4909,6 +4046,8 @@ class JSFunction: public JSObject {
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
+ inline bool has_prototype_slot() const;
+
// The initial map for an object created by this constructor.
inline Map* initial_map();
static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
@@ -4948,6 +4087,7 @@ class JSFunction: public JSObject {
int requested_embedder_fields, int* instance_size,
int* in_object_properties);
static void CalculateInstanceSizeHelper(InstanceType instance_type,
+ bool has_prototype_slot,
int requested_embedder_fields,
int requested_in_object_properties,
int* instance_size,
@@ -4979,15 +4119,21 @@ class JSFunction: public JSObject {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
- // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
- // kSize) is weak and has special handling during garbage collection.
- static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
- static const int kSharedFunctionInfoOffset =
- kPrototypeOrInitialMapOffset + kPointerSize;
- static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
- static const int kFeedbackVectorOffset = kContextOffset + kPointerSize;
- static const int kCodeOffset = kFeedbackVectorOffset + kPointerSize;
- static const int kSize = kCodeOffset + kPointerSize;
+// Layout description.
+#define JS_FUNCTION_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kSharedFunctionInfoOffset, kPointerSize) \
+ V(kContextOffset, kPointerSize) \
+ V(kFeedbackVectorOffset, kPointerSize) \
+ V(kEndOfStrongFieldsOffset, 0) \
+ V(kCodeOffset, kPointerSize) \
+ /* Size of JSFunction object without prototype field. */ \
+ V(kSizeWithoutPrototype, 0) \
+ V(kPrototypeOrInitialMapOffset, kPointerSize) \
+ /* Size of JSFunction object with prototype field. */ \
+ V(kSizeWithPrototype, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_FUNCTION_FIELDS)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
@@ -5008,9 +4154,6 @@ class JSGlobalProxy : public JSObject {
// It is null value if this object is not used by any context.
DECL_ACCESSORS(native_context, Object)
- // [hash]: The hash code property (undefined if not initialized yet).
- DECL_ACCESSORS(hash, Object)
-
DECL_CAST(JSGlobalProxy)
inline bool IsDetachedFrom(JSGlobalObject* global) const;
@@ -5023,8 +4166,7 @@ class JSGlobalProxy : public JSObject {
// Layout description.
static const int kNativeContextOffset = JSObject::kHeaderSize;
- static const int kHashOffset = kNativeContextOffset + kPointerSize;
- static const int kSize = kHashOffset + kPointerSize;
+ static const int kSize = kNativeContextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
@@ -5328,129 +4470,6 @@ class JSPromise : public JSObject {
STATIC_ASSERT(v8::Promise::kRejected == 2);
};
-// Regular expressions
-// The regular expression holds a single reference to a FixedArray in
-// the kDataOffset field.
-// The FixedArray contains the following data:
-// - tag : type of regexp implementation (not compiled yet, atom or irregexp)
-// - reference to the original source string
-// - reference to the original flag string
-// If it is an atom regexp
-// - a reference to a literal string to search for
-// If it is an irregexp regexp:
-// - a reference to code for Latin1 inputs (bytecode or compiled), or a smi
-// used for tracking the last usage (used for regexp code flushing).
-// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
-// used for tracking the last usage (used for regexp code flushing).
-// - max number of registers used by irregexp implementations.
-// - number of capture registers (output values) of the regexp.
-class JSRegExp: public JSObject {
- public:
- // Meaning of Type:
- // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
- // ATOM: A simple string to match against using an indexOf operation.
- // IRREGEXP: Compiled with Irregexp.
- enum Type { NOT_COMPILED, ATOM, IRREGEXP };
- enum Flag {
- kNone = 0,
- kGlobal = 1 << 0,
- kIgnoreCase = 1 << 1,
- kMultiline = 1 << 2,
- kSticky = 1 << 3,
- kUnicode = 1 << 4,
- kDotAll = 1 << 5,
- // Update FlagCount when adding new flags.
- };
- typedef base::Flags<Flag> Flags;
-
- static int FlagCount() { return FLAG_harmony_regexp_dotall ? 6 : 5; }
-
- DECL_ACCESSORS(data, Object)
- DECL_ACCESSORS(flags, Object)
- DECL_ACCESSORS(last_index, Object)
- DECL_ACCESSORS(source, Object)
-
- V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Handle<String> source,
- Flags flags);
- static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
-
- static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
- Handle<String> source, Flags flags);
- static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
- Handle<String> source,
- Handle<String> flags_string);
-
- inline Type TypeTag();
- // Number of captures (without the match itself).
- inline int CaptureCount();
- inline Flags GetFlags();
- inline String* Pattern();
- inline Object* CaptureNameMap();
- inline Object* DataAt(int index);
- // Set implementation data after the object has been prepared.
- inline void SetDataAt(int index, Object* value);
-
- static int code_index(bool is_latin1) {
- if (is_latin1) {
- return kIrregexpLatin1CodeIndex;
- } else {
- return kIrregexpUC16CodeIndex;
- }
- }
-
- DECL_CAST(JSRegExp)
-
- // Dispatched behavior.
- DECL_PRINTER(JSRegExp)
- DECL_VERIFIER(JSRegExp)
-
- static const int kDataOffset = JSObject::kHeaderSize;
- static const int kSourceOffset = kDataOffset + kPointerSize;
- static const int kFlagsOffset = kSourceOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
- static const int kLastIndexOffset = kSize; // In-object field.
-
- // Indices in the data array.
- static const int kTagIndex = 0;
- static const int kSourceIndex = kTagIndex + 1;
- static const int kFlagsIndex = kSourceIndex + 1;
- static const int kDataIndex = kFlagsIndex + 1;
- // The data fields are used in different ways depending on the
- // value of the tag.
- // Atom regexps (literal strings).
- static const int kAtomPatternIndex = kDataIndex;
-
- static const int kAtomDataSize = kAtomPatternIndex + 1;
-
- // Irregexp compiled code or bytecode for Latin1. If compilation
- // fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpLatin1CodeIndex = kDataIndex;
- // Irregexp compiled code or bytecode for UC16. If compilation
- // fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
- // Maximal number of registers used by either Latin1 or UC16.
- // Only used to check that there is enough stack space
- static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
- // Number of captures in the compiled regexp.
- static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
- // Maps names of named capture groups (at indices 2i) to their corresponding
- // (1-based) capture group indices (at indices 2i + 1).
- static const int kIrregexpCaptureNameMapIndex = kDataIndex + 4;
-
- static const int kIrregexpDataSize = kIrregexpCaptureNameMapIndex + 1;
-
- // In-object fields.
- static const int kLastIndexFieldIndex = 0;
- static const int kInObjectFieldCount = 1;
-
- // The uninitialized value for a regexp code object.
- static const int kUninitializedValue = -1;
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
-
class TypeFeedbackInfo : public Tuple3 {
public:
inline int ic_total_count();
@@ -5886,8 +4905,6 @@ class JSProxy: public JSReceiver {
DECL_ACCESSORS(handler, Object)
// [target]: The target property.
DECL_ACCESSORS(target, JSReceiver)
- // [hash]: The hash code property (undefined if not initialized yet).
- DECL_ACCESSORS(hash, Object)
static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
@@ -5910,7 +4927,7 @@ class JSProxy: public JSReceiver {
// ES6, #sec-isarray. NOT to be confused with %_IsArray.
MUST_USE_RESULT static Maybe<bool> IsArray(Handle<JSProxy> proxy);
- // ES6 9.5.4 (when passed DONT_THROW)
+ // ES6 9.5.4 (when passed kDontThrow)
MUST_USE_RESULT static Maybe<bool> PreventExtensions(
Handle<JSProxy> proxy, ShouldThrow should_throw);
@@ -5955,7 +4972,7 @@ class JSProxy: public JSReceiver {
Handle<Object> receiver,
LanguageMode language_mode);
- // ES6 9.5.10 (when passed SLOPPY)
+ // ES6 9.5.10 (when passed LanguageMode::kSloppy)
MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
@@ -5976,8 +4993,7 @@ class JSProxy: public JSReceiver {
// Layout description.
static const int kTargetOffset = JSReceiver::kHeaderSize;
static const int kHandlerOffset = kTargetOffset + kPointerSize;
- static const int kHashOffset = kHandlerOffset + kPointerSize;
- static const int kSize = kHashOffset + kPointerSize;
+ static const int kSize = kHandlerOffset + kPointerSize;
// kTargetOffset aliases with the elements of JSObject. The fact that
// JSProxy::target is a Javascript value which cannot be confused with an
@@ -5990,10 +5006,6 @@ class JSProxy: public JSReceiver {
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
- Object* GetIdentityHash();
-
- Smi* GetOrCreateIdentityHash(Isolate* isolate);
-
static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
@@ -6054,35 +5066,6 @@ class JSMap : public JSCollection {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
};
-class JSArrayIterator : public JSObject {
- public:
- DECL_PRINTER(JSArrayIterator)
- DECL_VERIFIER(JSArrayIterator)
-
- DECL_CAST(JSArrayIterator)
-
- // [object]: the [[IteratedObject]] inobject property.
- DECL_ACCESSORS(object, Object)
-
- // [index]: The [[ArrayIteratorNextIndex]] inobject property.
- DECL_ACCESSORS(index, Object)
-
- // [map]: The Map of the [[IteratedObject]] field at the time the iterator is
- // allocated.
- DECL_ACCESSORS(object_map, Object)
-
- // Return the ElementsKind that a JSArrayIterator's [[IteratedObject]] is
- // expected to have, based on its instance type.
- static ElementsKind ElementsKindForInstanceType(InstanceType instance_type);
-
- static const int kIteratedObjectOffset = JSObject::kHeaderSize;
- static const int kNextIndexOffset = kIteratedObjectOffset + kPointerSize;
- static const int kIteratedObjectMapOffset = kNextIndexOffset + kPointerSize;
- static const int kSize = kIteratedObjectMapOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
-};
// The [Async-from-Sync Iterator] object
// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
@@ -6203,219 +5186,6 @@ class JSWeakSet: public JSWeakCollection {
};
-// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
-enum class SharedFlag { kNotShared, kShared };
-
-
-class JSArrayBuffer: public JSObject {
- public:
- // [backing_store]: backing memory for this array
- DECL_ACCESSORS(backing_store, void)
-
- // [byte_length]: length in bytes
- DECL_ACCESSORS(byte_length, Object)
-
- // [allocation_base]: the start of the memory allocation for this array,
- // normally equal to backing_store
- DECL_ACCESSORS(allocation_base, void)
-
- // [allocation_length]: the size of the memory allocation for this array,
- // normally equal to byte_length
- inline size_t allocation_length() const;
- inline void set_allocation_length(size_t value);
-
- inline uint32_t bit_field() const;
- inline void set_bit_field(uint32_t bits);
-
- // [is_external]: true indicates that the embedder is in charge of freeing the
- // backing_store, while is_external == false means that v8 will free the
- // memory block once all ArrayBuffers referencing it are collected by the GC.
- inline bool is_external();
- inline void set_is_external(bool value);
-
- inline bool is_neuterable();
- inline void set_is_neuterable(bool value);
-
- inline bool was_neutered();
- inline void set_was_neutered(bool value);
-
- inline bool is_shared();
- inline void set_is_shared(bool value);
-
- inline bool has_guard_region() const;
- inline void set_has_guard_region(bool value);
-
- inline bool is_growable();
- inline void set_is_growable(bool value);
-
- DECL_CAST(JSArrayBuffer)
-
- void Neuter();
-
- inline ArrayBuffer::Allocator::AllocationMode allocation_mode() const;
-
- void FreeBackingStore();
-
- V8_EXPORT_PRIVATE static void Setup(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
- void* data, size_t allocated_length,
- SharedFlag shared = SharedFlag::kNotShared);
-
- V8_EXPORT_PRIVATE static void Setup(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
- void* allocation_base, size_t allocation_length, void* data,
- size_t byte_length, SharedFlag shared = SharedFlag::kNotShared);
-
- // Returns false if array buffer contents could not be allocated.
- // In this case, |array_buffer| will not be set up.
- static bool SetupAllocatingData(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- size_t allocated_length, bool initialize = true,
- SharedFlag shared = SharedFlag::kNotShared) WARN_UNUSED_RESULT;
-
- // Dispatched behavior.
- DECL_PRINTER(JSArrayBuffer)
- DECL_VERIFIER(JSArrayBuffer)
-
- static const int kByteLengthOffset = JSObject::kHeaderSize;
- // The rest of the fields are not JSObjects, so they are not iterated over in
- // objects-body-descriptors-inl.h.
- static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
- static const int kAllocationBaseOffset = kBackingStoreOffset + kPointerSize;
- static const int kAllocationLengthOffset =
- kAllocationBaseOffset + kPointerSize;
- static const int kBitFieldSlot = kAllocationLengthOffset + kSizetSize;
-#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
- static const int kBitFieldOffset = kBitFieldSlot;
-#else
- static const int kBitFieldOffset = kBitFieldSlot + kIntSize;
-#endif
- static const int kSize = kBitFieldSlot + kPointerSize;
-
- static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBuffer::kEmbedderFieldCount * kPointerSize;
-
- // Iterates all fields in the object including internal ones except
- // kBackingStoreOffset and kBitFieldSlot.
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- class IsExternal : public BitField<bool, 1, 1> {};
- class IsNeuterable : public BitField<bool, 2, 1> {};
- class WasNeutered : public BitField<bool, 3, 1> {};
- class IsShared : public BitField<bool, 4, 1> {};
- class HasGuardRegion : public BitField<bool, 5, 1> {};
- class IsGrowable : public BitField<bool, 6, 1> {};
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
-};
-
-
-class JSArrayBufferView: public JSObject {
- public:
- // [buffer]: ArrayBuffer that this typed array views.
- DECL_ACCESSORS(buffer, Object)
-
- // [byte_offset]: offset of typed array in bytes.
- DECL_ACCESSORS(byte_offset, Object)
-
- // [byte_length]: length of typed array in bytes.
- DECL_ACCESSORS(byte_length, Object)
-
- DECL_CAST(JSArrayBufferView)
-
- DECL_VERIFIER(JSArrayBufferView)
-
- inline bool WasNeutered() const;
-
- static const int kBufferOffset = JSObject::kHeaderSize;
- static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
- static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
- static const int kViewSize = kByteLengthOffset + kPointerSize;
-
- private:
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_byte_offset, Object)
- DECL_ACCESSORS(raw_byte_length, Object)
-#endif
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
-};
-
-
-class JSTypedArray: public JSArrayBufferView {
- public:
- // [length]: length of typed array in elements.
- DECL_ACCESSORS(length, Object)
- inline uint32_t length_value() const;
-
- // ES6 9.4.5.3
- MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
- Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
-
- DECL_CAST(JSTypedArray)
-
- ExternalArrayType type();
- V8_EXPORT_PRIVATE size_t element_size();
-
- Handle<JSArrayBuffer> GetBuffer();
-
- static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
- Handle<Object> receiver,
- const char* method_name);
- // ES7 section 22.2.4.6 Create ( constructor, argumentList )
- static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
- Handle<Object> default_ctor, int argc,
- Handle<Object>* argv,
- const char* method_name);
- // ES7 section 22.2.4.7 TypedArraySpeciesCreate ( exemplar, argumentList )
- static MaybeHandle<JSTypedArray> SpeciesCreate(Isolate* isolate,
- Handle<JSTypedArray> exemplar,
- int argc, Handle<Object>* argv,
- const char* method_name);
-
- // Dispatched behavior.
- DECL_PRINTER(JSTypedArray)
- DECL_VERIFIER(JSTypedArray)
-
- static const int kLengthOffset = kViewSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
-
- private:
- static Handle<JSArrayBuffer> MaterializeArrayBuffer(
- Handle<JSTypedArray> typed_array);
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_length, Object)
-#endif
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
-};
-
-
-class JSDataView: public JSArrayBufferView {
- public:
- DECL_CAST(JSDataView)
-
- // Dispatched behavior.
- DECL_PRINTER(JSDataView)
- DECL_VERIFIER(JSDataView)
-
- static const int kSize = kViewSize;
-
- static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
-};
-
-
// Foreign describes objects pointing from JavaScript to C structures.
class Foreign: public HeapObject {
public:
@@ -6444,115 +5214,6 @@ class Foreign: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
};
-
-// The JSArray describes JavaScript Arrays
-// Such an array can be in one of two modes:
-// - fast, backing storage is a FixedArray and length <= elements.length();
-// Please note: push and pop can be used to grow and shrink the array.
-// - slow, backing storage is a HashTable with numbers as keys.
-class JSArray: public JSObject {
- public:
- // [length]: The length property.
- DECL_ACCESSORS(length, Object)
-
- // Overload the length setter to skip write barrier when the length
- // is set to a smi. This matches the set function on FixedArray.
- inline void set_length(Smi* length);
-
- static bool HasReadOnlyLength(Handle<JSArray> array);
- static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
-
- // Initialize the array with the given capacity. The function may
- // fail due to out-of-memory situations, but only if the requested
- // capacity is non-zero.
- static void Initialize(Handle<JSArray> array, int capacity, int length = 0);
-
- // If the JSArray has fast elements, and new_length would result in
- // normalization, returns true.
- bool SetLengthWouldNormalize(uint32_t new_length);
- static inline bool SetLengthWouldNormalize(Heap* heap, uint32_t new_length);
-
- // Initializes the array to a certain length.
- inline bool AllowsSetLength();
-
- static void SetLength(Handle<JSArray> array, uint32_t length);
-
- // Set the content of the array to the content of storage.
- static inline void SetContent(Handle<JSArray> array,
- Handle<FixedArrayBase> storage);
-
- // ES6 9.4.2.1
- MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
- Isolate* isolate, Handle<JSArray> o, Handle<Object> name,
- PropertyDescriptor* desc, ShouldThrow should_throw);
-
- static bool AnythingToArrayLength(Isolate* isolate,
- Handle<Object> length_object,
- uint32_t* output);
- MUST_USE_RESULT static Maybe<bool> ArraySetLength(Isolate* isolate,
- Handle<JSArray> a,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
-
- // Checks whether the Array has the current realm's Array.prototype as its
- // prototype. This function is best-effort and only gives a conservative
- // approximation, erring on the side of false, in particular with respect
- // to Proxies and objects with a hidden prototype.
- inline bool HasArrayPrototype(Isolate* isolate);
-
- DECL_CAST(JSArray)
-
- // Dispatched behavior.
- DECL_PRINTER(JSArray)
- DECL_VERIFIER(JSArray)
-
- // Number of element slots to pre-allocate for an empty array.
- static const int kPreallocatedArrayElements = 4;
-
- // Layout description.
- static const int kLengthOffset = JSObject::kHeaderSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- // Max. number of elements being copied in Array builtins.
- static const int kMaxCopyElements = 100;
-
- // This constant is somewhat arbitrary. Any large enough value would work.
- static const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
-
- static const int kInitialMaxFastElementArray =
- (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
- AllocationMemento::kSize) >>
- kDoubleSizeLog2;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
-};
-
-
-Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
- Handle<Map> initial_map);
-
-
-// JSRegExpResult is just a JSArray with a specific initial map.
-// This initial map adds in-object properties for "index" and "input"
-// properties, as assigned by RegExp.prototype.exec, which allows
-// faster creation of RegExp exec results.
-// This class just holds constants used when creating the result.
-// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResult: public JSArray {
- public:
- // Offsets of object fields.
- static const int kIndexOffset = JSArray::kSize;
- static const int kInputOffset = kIndexOffset + kPointerSize;
- static const int kSize = kInputOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kIndexIndex = 0;
- static const int kInputIndex = 1;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
-};
-
-
// An accessor must have a getter, but can have no setter.
//
// When setting a property, V8 searches accessors in prototypes.
@@ -6564,8 +5225,8 @@ class JSRegExpResult: public JSArray {
// This shadows the accessor in the prototype.
class AccessorInfo: public Struct {
public:
- DECL_ACCESSORS(name, Object)
- DECL_INT_ACCESSORS(flag)
+ DECL_ACCESSORS(name, Name)
+ DECL_INT_ACCESSORS(flags)
DECL_ACCESSORS(expected_receiver_type, Object)
// This directly points at a foreign C function to be used from the runtime.
DECL_ACCESSORS(getter, Object)
@@ -6582,23 +5243,17 @@ class AccessorInfo: public Struct {
// Dispatched behavior.
DECL_PRINTER(AccessorInfo)
- inline bool all_can_read();
- inline void set_all_can_read(bool value);
-
- inline bool all_can_write();
- inline void set_all_can_write(bool value);
-
- inline bool is_special_data_property();
- inline void set_is_special_data_property(bool value);
-
- inline bool replace_on_access();
- inline void set_replace_on_access(bool value);
-
- inline bool is_sloppy();
- inline void set_is_sloppy(bool value);
+ DECL_BOOLEAN_ACCESSORS(all_can_read)
+ DECL_BOOLEAN_ACCESSORS(all_can_write)
+ DECL_BOOLEAN_ACCESSORS(is_special_data_property)
+ DECL_BOOLEAN_ACCESSORS(replace_on_access)
+ DECL_BOOLEAN_ACCESSORS(is_sloppy)
- inline PropertyAttributes property_attributes();
- inline void set_property_attributes(PropertyAttributes attributes);
+ // The property attributes used when an API object template is instantiated
+ // for the first time. Changing of this value afterwards does not affect
+ // the actual attributes of a property.
+ inline PropertyAttributes initial_property_attributes() const;
+ inline void set_initial_property_attributes(PropertyAttributes attributes);
// Checks whether the given receiver is compatible with this accessor.
static bool IsCompatibleReceiverMap(Isolate* isolate,
@@ -6617,26 +5272,34 @@ class AccessorInfo: public Struct {
Handle<FixedArray> array,
int valid_descriptors);
- static const int kNameOffset = HeapObject::kHeaderSize;
- static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
- static const int kSetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
- static const int kGetterOffset = kSetterOffset + kPointerSize;
- static const int kJsGetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kJsGetterOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+// Layout description.
+#define ACCESSOR_INFO_FIELDS(V) \
+ V(kNameOffset, kPointerSize) \
+ V(kFlagsOffset, kPointerSize) \
+ V(kExpectedReceiverTypeOffset, kPointerSize) \
+ V(kSetterOffset, kPointerSize) \
+ V(kGetterOffset, kPointerSize) \
+ V(kJsGetterOffset, kPointerSize) \
+ V(kDataOffset, kPointerSize) \
+ V(kSize, 0)
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ACCESSOR_INFO_FIELDS)
+#undef ACCESSOR_INFO_FIELDS
private:
inline bool HasExpectedReceiverType();
- // Bit positions in flag.
- static const int kAllCanReadBit = 0;
- static const int kAllCanWriteBit = 1;
- static const int kSpecialDataProperty = 2;
- static const int kIsSloppy = 3;
- static const int kReplaceOnAccess = 4;
- class AttributesField : public BitField<PropertyAttributes, 5, 3> {};
+// Bit positions in |flags|.
+#define ACCESSOR_INFO_FLAGS_BIT_FIELDS(V, _) \
+ V(AllCanReadBit, bool, 1, _) \
+ V(AllCanWriteBit, bool, 1, _) \
+ V(IsSpecialDataPropertyBit, bool, 1, _) \
+ V(IsSloppyBit, bool, 1, _) \
+ V(ReplaceOnAccessBit, bool, 1, _) \
+ V(InitialAttributesBits, PropertyAttributes, 3, _)
+
+ DEFINE_BIT_FIELDS(ACCESSOR_INFO_FLAGS_BIT_FIELDS)
+#undef ACCESSOR_INFO_FLAGS_BIT_FIELDS
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
};
@@ -6760,15 +5423,19 @@ class InterceptorInfo: public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
};
-class CallHandlerInfo : public Tuple2 {
+class CallHandlerInfo : public Tuple3 {
public:
DECL_ACCESSORS(callback, Object)
+ DECL_ACCESSORS(js_callback, Object)
DECL_ACCESSORS(data, Object)
DECL_CAST(CallHandlerInfo)
+ Address redirected_callback() const;
+
static const int kCallbackOffset = kValue1Offset;
- static const int kDataOffset = kValue2Offset;
+ static const int kJsCallbackOffset = kValue2Offset;
+ static const int kDataOffset = kValue3Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
@@ -7021,7 +5688,7 @@ class StackFrameInfo : public Struct {
class SourcePositionTableWithFrameCache : public Tuple2 {
public:
DECL_ACCESSORS(source_position_table, ByteArray)
- DECL_ACCESSORS(stack_frame_cache, UnseededNumberDictionary)
+ DECL_ACCESSORS(stack_frame_cache, NumberDictionary)
DECL_CAST(SourcePositionTableWithFrameCache)
@@ -7034,51 +5701,6 @@ class SourcePositionTableWithFrameCache : public Tuple2 {
DISALLOW_IMPLICIT_CONSTRUCTORS(SourcePositionTableWithFrameCache);
};
-// Abstract base class for visiting, and optionally modifying, the
-// pointers contained in Objects. Used in GC and serialization/deserialization.
-// TODO(ulan): move to src/visitors.h
-class ObjectVisitor BASE_EMBEDDED {
- public:
- virtual ~ObjectVisitor() {}
-
- // Visits a contiguous arrays of pointers in the half-open range
- // [start, end). Any or all of the values may be modified on return.
- virtual void VisitPointers(HeapObject* host, Object** start,
- Object** end) = 0;
-
- // Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(HeapObject* host, Object** p) {
- VisitPointers(host, p, p + 1);
- }
-
- // Visit weak next_code_link in Code object.
- virtual void VisitNextCodeLink(Code* host, Object** p) {
- VisitPointers(host, p, p + 1);
- }
-
- // To allow lazy clearing of inline caches the visitor has
- // a rich interface for iterating over Code objects..
-
- // Visits a code target in the instruction stream.
- virtual void VisitCodeTarget(Code* host, RelocInfo* rinfo);
-
- // Visits a runtime entry in the instruction stream.
- virtual void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) {}
-
- // Visit pointer embedded into a code object.
- virtual void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo);
-
- // Visits an external reference embedded into a code object.
- virtual void VisitExternalReference(Code* host, RelocInfo* rinfo) {}
-
- // Visits an external reference.
- virtual void VisitExternalReference(Foreign* host, Address* p) {}
-
- // Visits an (encoded) internal reference.
- virtual void VisitInternalReference(Code* host, RelocInfo* rinfo) {}
-};
-
-
// BooleanBit is a helper class for setting and getting a bit in an integer.
class BooleanBit : public AllStatic {
public:
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 48ff8daec4..d759c7dab2 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -45,6 +45,33 @@ void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object* object) {
set(entry + kParameterMapStart, object);
}
+// TODO(danno): This shouldn't be inline here, but to defensively avoid
+// regressions associated with the fix for the bug 778574, it's staying that way
+// until the splice implementation in builtin-arrays.cc can be removed and this
+// function can be moved into runtime-arrays.cc near its other usage.
+bool JSSloppyArgumentsObject::GetSloppyArgumentsLength(Isolate* isolate,
+ Handle<JSObject> object,
+ int* out) {
+ Context* context = *isolate->native_context();
+ Map* map = object->map();
+ if (map != context->sloppy_arguments_map() &&
+ map != context->strict_arguments_map() &&
+ map != context->fast_aliased_arguments_map()) {
+ return false;
+ }
+ DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
+ Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
+ if (!len_obj->IsSmi()) return false;
+ *out = Max(0, Smi::ToInt(len_obj));
+
+ FixedArray* parameters = FixedArray::cast(object->elements());
+ if (object->HasSloppyArgumentsElements()) {
+ FixedArray* arguments = FixedArray::cast(parameters->get(1));
+ return *out <= arguments->length();
+ }
+ return *out <= parameters->length();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 64c34df993..83476d78be 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -42,6 +42,10 @@ class JSSloppyArgumentsObject : public JSArgumentsObject {
// Indices of in-object properties.
static const int kCalleeIndex = kLengthIndex + 1;
+ inline static bool GetSloppyArgumentsLength(Isolate* isolate,
+ Handle<JSObject> object,
+ int* out);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSSloppyArgumentsObject);
};
diff --git a/deps/v8/src/objects/bigint-inl.h b/deps/v8/src/objects/bigint-inl.h
deleted file mode 100644
index c22620176e..0000000000
--- a/deps/v8/src/objects/bigint-inl.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_BIGINT_INL_H_
-#define V8_OBJECTS_BIGINT_INL_H_
-
-#include "src/objects/bigint.h"
-
-#include "src/objects.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-int BigInt::length() const {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
- return LengthBits::decode(static_cast<uint32_t>(bitfield));
-}
-void BigInt::set_length(int new_length) {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
- bitfield = LengthBits::update(static_cast<uint32_t>(bitfield), new_length);
- WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
-}
-
-bool BigInt::sign() const {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
- return SignBits::decode(static_cast<uint32_t>(bitfield));
-}
-void BigInt::set_sign(bool new_sign) {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
- bitfield = SignBits::update(static_cast<uint32_t>(bitfield), new_sign);
- WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
-}
-
-BigInt::digit_t BigInt::digit(int n) const {
- SLOW_DCHECK(0 <= n && n < length());
- const byte* address = FIELD_ADDR_CONST(this, kDigitsOffset + n * kDigitSize);
- return *reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address));
-}
-void BigInt::set_digit(int n, digit_t value) {
- SLOW_DCHECK(0 <= n && n < length());
- byte* address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
- (*reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address))) = value;
-}
-
-TYPE_CHECKER(BigInt, BIGINT_TYPE)
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_BIGINT_INL_H_
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index e6fe89dbf1..85424600c0 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -19,23 +19,287 @@
#include "src/objects/bigint.h"
+#include "src/double.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
+// The MutableBigInt class is an implementation detail designed to prevent
+// accidental mutation of a BigInt after its construction. Step-by-step
+// construction of a BigInt must happen in terms of MutableBigInt, the
+// final result is then passed through MutableBigInt::MakeImmutable and not
+// modified further afterwards.
+// Many of the functions in this class use arguments of type {BigIntBase},
+// indicating that they will be used in a read-only capacity, and both
+// {BigInt} and {MutableBigInt} objects can be passed in.
+class MutableBigInt : public FreshlyAllocatedBigInt {
+ public:
+ // Bottleneck for converting MutableBigInts to BigInts.
+ static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
+ static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
+
+ // Allocation helpers.
+ static MaybeHandle<MutableBigInt> New(Isolate* isolate, int length);
+ static Handle<BigInt> NewFromInt(Isolate* isolate, int value);
+ static Handle<BigInt> NewFromSafeInteger(Isolate* isolate, double value);
+ void InitializeDigits(int length, byte value = 0);
+ static Handle<MutableBigInt> Copy(Handle<BigIntBase> source);
+ static Handle<BigInt> Zero(Isolate* isolate) {
+ // TODO(jkummerow): Consider caching a canonical zero-BigInt.
+ return MakeImmutable(New(isolate, 0)).ToHandleChecked();
+ }
+
+ static Handle<MutableBigInt> Cast(Handle<FreshlyAllocatedBigInt> bigint) {
+ SLOW_DCHECK(bigint->IsBigInt());
+ return Handle<MutableBigInt>::cast(bigint);
+ }
+
+ // Internal helpers.
+ static MaybeHandle<MutableBigInt> BitwiseAnd(Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<MutableBigInt> BitwiseXor(Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<MutableBigInt> BitwiseOr(Handle<BigInt> x,
+ Handle<BigInt> y);
+
+ static Handle<BigInt> TruncateToNBits(int n, Handle<BigInt> x);
+ static Handle<BigInt> TruncateAndSubFromPowerOfTwo(int n, Handle<BigInt> x,
+ bool result_sign);
+
+ static MaybeHandle<BigInt> AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign);
+ static Handle<BigInt> AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign);
+ static MaybeHandle<MutableBigInt> AbsoluteAddOne(
+ Handle<BigIntBase> x, bool sign, MutableBigInt* result_storage = nullptr);
+ static Handle<MutableBigInt> AbsoluteSubOne(Handle<BigIntBase> x);
+ static MaybeHandle<MutableBigInt> AbsoluteSubOne(Handle<BigIntBase> x,
+ int result_length);
+
+ enum ExtraDigitsHandling { kCopy, kSkip };
+ enum SymmetricOp { kSymmetric, kNotSymmetric };
+ static inline Handle<MutableBigInt> AbsoluteBitwiseOp(
+ Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage,
+ ExtraDigitsHandling extra_digits, SymmetricOp symmetric,
+ std::function<digit_t(digit_t, digit_t)> op);
+ static Handle<MutableBigInt> AbsoluteAnd(
+ Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage = nullptr);
+ static Handle<MutableBigInt> AbsoluteAndNot(
+ Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage = nullptr);
+ static Handle<MutableBigInt> AbsoluteOr(
+ Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage = nullptr);
+ static Handle<MutableBigInt> AbsoluteXor(
+ Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage = nullptr);
+
+ static int AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y);
+
+ static void MultiplyAccumulate(Handle<BigIntBase> multiplicand,
+ digit_t multiplier,
+ Handle<MutableBigInt> accumulator,
+ int accumulator_index);
+ static void InternalMultiplyAdd(BigIntBase* source, digit_t factor,
+ digit_t summand, int n,
+ MutableBigInt* result);
+ void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
+
+ // Specialized helpers for Divide/Remainder.
+ static void AbsoluteDivSmall(Handle<BigIntBase> x, digit_t divisor,
+ Handle<MutableBigInt>* quotient,
+ digit_t* remainder);
+ static bool AbsoluteDivLarge(Handle<BigIntBase> dividend,
+ Handle<BigIntBase> divisor,
+ Handle<MutableBigInt>* quotient,
+ Handle<MutableBigInt>* remainder);
+ static bool ProductGreaterThan(digit_t factor1, digit_t factor2, digit_t high,
+ digit_t low);
+ digit_t InplaceAdd(Handle<BigIntBase> summand, int start_index);
+ digit_t InplaceSub(Handle<BigIntBase> subtrahend, int start_index);
+ void InplaceRightShift(int shift);
+ enum SpecialLeftShiftMode {
+ kSameSizeResult,
+ kAlwaysAddOneDigit,
+ };
+ static MaybeHandle<MutableBigInt> SpecialLeftShift(Handle<BigIntBase> x,
+ int shift,
+ SpecialLeftShiftMode mode);
+
+ // Specialized helpers for shift operations.
+ static MaybeHandle<BigInt> LeftShiftByAbsolute(Handle<BigIntBase> x,
+ Handle<BigIntBase> y);
+ static Handle<BigInt> RightShiftByAbsolute(Handle<BigIntBase> x,
+ Handle<BigIntBase> y);
+ static Handle<BigInt> RightShiftByMaximum(Isolate* isolate, bool sign);
+ static Maybe<digit_t> ToShiftAmount(Handle<BigIntBase> x);
+
+ static MaybeHandle<String> ToStringBasePowerOfTwo(Handle<BigIntBase> x,
+ int radix);
+ static MaybeHandle<String> ToStringGeneric(Handle<BigIntBase> x, int radix);
+
+ static double ToDouble(Handle<BigIntBase> x);
+ enum Rounding { kRoundDown, kTie, kRoundUp };
+ static Rounding DecideRounding(Handle<BigIntBase> x, int mantissa_bits_unset,
+ int digit_index, uint64_t current_digit);
+
+ // Digit arithmetic helpers.
+ static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
+ static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
+ static inline digit_t digit_mul(digit_t a, digit_t b, digit_t* high);
+ static inline digit_t digit_div(digit_t high, digit_t low, digit_t divisor,
+ digit_t* remainder);
+ static digit_t digit_pow(digit_t base, digit_t exponent);
+ static inline bool digit_ismax(digit_t x) {
+ return static_cast<digit_t>(~x) == 0;
+ }
+
+// Internal field setters. Non-mutable BigInts don't have these.
+#include "src/objects/object-macros.h"
+ inline void set_sign(bool new_sign) {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ bitfield = SignBits::update(static_cast<uint32_t>(bitfield), new_sign);
+ WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+ }
+ inline void set_length(int new_length) {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ bitfield = LengthBits::update(static_cast<uint32_t>(bitfield), new_length);
+ WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+ }
+ inline void set_digit(int n, digit_t value) {
+ SLOW_DCHECK(0 <= n && n < length());
+ byte* address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
+ (*reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address))) = value;
+ }
+#include "src/objects/object-macros-undef.h"
+};
+
+MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length) {
+ if (length > BigInt::kMaxLength) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ MutableBigInt);
+ }
+ Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
+ result->set_length(length);
+ result->set_sign(false);
+#if DEBUG
+ result->InitializeDigits(length, 0xbf);
+#endif
+ return result;
+}
+
+Handle<BigInt> MutableBigInt::NewFromInt(Isolate* isolate, int value) {
+ if (value == 0) return Zero(isolate);
+ Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(1));
+ result->set_length(1);
+ if (value > 0) {
+ result->set_sign(false);
+ result->set_digit(0, value);
+ } else {
+ result->set_sign(true);
+ if (value == kMinInt) {
+ STATIC_ASSERT(kMinInt == -kMaxInt - 1);
+ result->set_digit(0, static_cast<BigInt::digit_t>(kMaxInt) + 1);
+ } else {
+ result->set_digit(0, -value);
+ }
+ }
+ return MakeImmutable(result);
+}
+
+Handle<BigInt> MutableBigInt::NewFromSafeInteger(Isolate* isolate,
+ double value) {
+ if (value == 0) return Zero(isolate);
+
+ uint64_t absolute = std::abs(value);
+ int length = 64 / kDigitBits;
+ Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
+ result->set_length(length);
+ result->set_sign(value < 0); // Treats -0 like 0.
+ if (kDigitBits == 64) {
+ result->set_digit(0, absolute);
+ } else {
+ DCHECK_EQ(kDigitBits, 32);
+ result->set_digit(0, absolute);
+ result->set_digit(1, absolute >> 32);
+ }
+ return MakeImmutable(result);
+}
+
+Handle<MutableBigInt> MutableBigInt::Copy(Handle<BigIntBase> source) {
+ int length = source->length();
+ // Allocating a BigInt of the same length as an existing BigInt cannot throw.
+ Handle<MutableBigInt> result =
+ New(source->GetIsolate(), length).ToHandleChecked();
+ memcpy(result->address() + BigIntBase::kHeaderSize,
+ source->address() + BigIntBase::kHeaderSize,
+ BigInt::SizeFor(length) - BigIntBase::kHeaderSize);
+ return result;
+}
+
+void MutableBigInt::InitializeDigits(int length, byte value) {
+ memset(reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
+ kDigitsOffset - kHeapObjectTag),
+ value, length * kDigitSize);
+}
+
+MaybeHandle<BigInt> MutableBigInt::MakeImmutable(
+ MaybeHandle<MutableBigInt> maybe) {
+ Handle<MutableBigInt> result;
+ if (!maybe.ToHandle(&result)) return MaybeHandle<BigInt>();
+ return MakeImmutable(result);
+}
+
+Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
+ // Check if we need to right-trim any leading zero-digits.
+ int old_length = result->length();
+ int new_length = old_length;
+ while (new_length > 0 && result->digit(new_length - 1) == 0) new_length--;
+ int to_trim = old_length - new_length;
+ if (to_trim != 0) {
+ int size_delta = to_trim * kDigitSize;
+ Address new_end = result->address() + BigInt::SizeFor(new_length);
+ Heap* heap = result->GetHeap();
+ heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
+ result->set_length(new_length);
+
+ // Canonicalize -0n.
+ if (new_length == 0) {
+ result->set_sign(false);
+ // TODO(jkummerow): If we cache a canonical 0n, return that here.
+ }
+ }
+ DCHECK_IMPLIES(result->length() > 0,
+ result->digit(result->length() - 1) != 0); // MSD is non-zero.
+ return Handle<BigInt>(reinterpret_cast<BigInt**>(result.location()));
+}
+
+Handle<BigInt> BigInt::Zero(Isolate* isolate) {
+ return MutableBigInt::Zero(isolate);
+}
+
Handle<BigInt> BigInt::UnaryMinus(Handle<BigInt> x) {
// Special case: There is no -0n.
if (x->is_zero()) {
return x;
}
- Handle<BigInt> result = BigInt::Copy(x);
+ Handle<MutableBigInt> result = MutableBigInt::Copy(x);
result->set_sign(!x->sign());
- return result;
+ return MutableBigInt::MakeImmutable(result);
}
-Handle<BigInt> BigInt::BitwiseNot(Handle<BigInt> x) {
- UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+MaybeHandle<BigInt> BigInt::BitwiseNot(Handle<BigInt> x) {
+ MaybeHandle<MutableBigInt> result;
+ if (x->sign()) {
+ // ~(-x) == ~(~(x-1)) == x-1
+ result = MutableBigInt::AbsoluteSubOne(x, x->length());
+ } else {
+ // ~x == -x-1 == -(x+1)
+ result = MutableBigInt::AbsoluteAddOne(x, true);
+ }
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
@@ -43,17 +307,20 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
UNIMPLEMENTED(); // TODO(jkummerow): Implement.
}
-Handle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
if (x->is_zero()) return x;
if (y->is_zero()) return y;
- Handle<BigInt> result =
- x->GetIsolate()->factory()->NewBigInt(x->length() + y->length());
+ int result_length = x->length() + y->length();
+ Handle<MutableBigInt> result;
+ if (!MutableBigInt::New(x->GetIsolate(), result_length).ToHandle(&result)) {
+ return MaybeHandle<BigInt>();
+ }
+ result->InitializeDigits(result_length);
for (int i = 0; i < x->length(); i++) {
- MultiplyAccumulate(y, x->digit(i), result, i);
+ MutableBigInt::MultiplyAccumulate(y, x->digit(i), result, i);
}
result->set_sign(x->sign() != y->sign());
- result->RightTrim();
- return result;
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::Divide(Handle<BigInt> x, Handle<BigInt> y) {
@@ -65,88 +332,98 @@ MaybeHandle<BigInt> BigInt::Divide(Handle<BigInt> x, Handle<BigInt> y) {
// 2. Let quotient be the mathematical value of x divided by y.
// 3. Return a BigInt representing quotient rounded towards 0 to the next
// integral value.
- if (AbsoluteCompare(x, y) < 0) {
- // TODO(jkummerow): Consider caching a canonical zero-BigInt.
- return x->GetIsolate()->factory()->NewBigIntFromInt(0);
+ if (MutableBigInt::AbsoluteCompare(x, y) < 0) {
+ return Zero(x->GetIsolate());
}
- Handle<BigInt> quotient;
+ Handle<MutableBigInt> quotient;
+ bool result_sign = x->sign() != y->sign();
if (y->length() == 1) {
+ digit_t divisor = y->digit(0);
+ if (divisor == 1) {
+ return result_sign == x->sign() ? x : UnaryMinus(x);
+ }
digit_t remainder;
- AbsoluteDivSmall(x, y->digit(0), &quotient, &remainder);
+ MutableBigInt::AbsoluteDivSmall(x, divisor, &quotient, &remainder);
} else {
- AbsoluteDivLarge(x, y, &quotient, nullptr);
+ if (!MutableBigInt::AbsoluteDivLarge(x, y, &quotient, nullptr)) {
+ return MaybeHandle<BigInt>();
+ }
}
quotient->set_sign(x->sign() != y->sign());
- quotient->RightTrim();
- return quotient;
+ return MutableBigInt::MakeImmutable(quotient);
}
MaybeHandle<BigInt> BigInt::Remainder(Handle<BigInt> x, Handle<BigInt> y) {
+ Isolate* isolate = x->GetIsolate();
// 1. If y is 0n, throw a RangeError exception.
if (y->is_zero()) {
- THROW_NEW_ERROR(y->GetIsolate(),
- NewRangeError(MessageTemplate::kBigIntDivZero), BigInt);
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntDivZero),
+ BigInt);
}
// 2. Return the BigInt representing x modulo y.
// See https://github.com/tc39/proposal-bigint/issues/84 though.
- if (AbsoluteCompare(x, y) < 0) return x;
- Handle<BigInt> remainder;
+ if (MutableBigInt::AbsoluteCompare(x, y) < 0) return x;
+ Handle<MutableBigInt> remainder;
if (y->length() == 1) {
+ digit_t divisor = y->digit(0);
+ if (divisor == 1) return Zero(isolate);
digit_t remainder_digit;
- AbsoluteDivSmall(x, y->digit(0), nullptr, &remainder_digit);
+ MutableBigInt::AbsoluteDivSmall(x, divisor, nullptr, &remainder_digit);
if (remainder_digit == 0) {
- return x->GetIsolate()->factory()->NewBigIntFromInt(0);
+ return Zero(isolate);
}
- remainder = x->GetIsolate()->factory()->NewBigIntRaw(1);
+ remainder = MutableBigInt::New(isolate, 1).ToHandleChecked();
remainder->set_digit(0, remainder_digit);
} else {
- AbsoluteDivLarge(x, y, nullptr, &remainder);
+ if (!MutableBigInt::AbsoluteDivLarge(x, y, nullptr, &remainder)) {
+ return MaybeHandle<BigInt>();
+ }
}
remainder->set_sign(x->sign());
- return remainder;
+ return MutableBigInt::MakeImmutable(remainder);
}
-Handle<BigInt> BigInt::Add(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Add(Handle<BigInt> x, Handle<BigInt> y) {
bool xsign = x->sign();
if (xsign == y->sign()) {
// x + y == x + y
// -x + -y == -(x + y)
- return AbsoluteAdd(x, y, xsign);
+ return MutableBigInt::AbsoluteAdd(x, y, xsign);
}
// x + -y == x - y == -(y - x)
// -x + y == y - x == -(x - y)
- if (AbsoluteCompare(x, y) >= 0) {
- return AbsoluteSub(x, y, xsign);
+ if (MutableBigInt::AbsoluteCompare(x, y) >= 0) {
+ return MutableBigInt::AbsoluteSub(x, y, xsign);
}
- return AbsoluteSub(y, x, !xsign);
+ return MutableBigInt::AbsoluteSub(y, x, !xsign);
}
-Handle<BigInt> BigInt::Subtract(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Subtract(Handle<BigInt> x, Handle<BigInt> y) {
bool xsign = x->sign();
if (xsign != y->sign()) {
// x - (-y) == x + y
// (-x) - y == -(x + y)
- return AbsoluteAdd(x, y, xsign);
+ return MutableBigInt::AbsoluteAdd(x, y, xsign);
}
// x - y == -(y - x)
// (-x) - (-y) == y - x == -(x - y)
- if (AbsoluteCompare(x, y) >= 0) {
- return AbsoluteSub(x, y, xsign);
+ if (MutableBigInt::AbsoluteCompare(x, y) >= 0) {
+ return MutableBigInt::AbsoluteSub(x, y, xsign);
}
- return AbsoluteSub(y, x, !xsign);
+ return MutableBigInt::AbsoluteSub(y, x, !xsign);
}
MaybeHandle<BigInt> BigInt::LeftShift(Handle<BigInt> x, Handle<BigInt> y) {
if (y->is_zero() || x->is_zero()) return x;
- if (y->sign()) return RightShiftByAbsolute(x, y);
- return LeftShiftByAbsolute(x, y);
+ if (y->sign()) return MutableBigInt::RightShiftByAbsolute(x, y);
+ return MutableBigInt::LeftShiftByAbsolute(x, y);
}
MaybeHandle<BigInt> BigInt::SignedRightShift(Handle<BigInt> x,
Handle<BigInt> y) {
if (y->is_zero() || x->is_zero()) return x;
- if (y->sign()) return LeftShiftByAbsolute(x, y);
- return RightShiftByAbsolute(x, y);
+ if (y->sign()) return MutableBigInt::LeftShiftByAbsolute(x, y);
+ return MutableBigInt::RightShiftByAbsolute(x, y);
}
MaybeHandle<BigInt> BigInt::UnsignedRightShift(Handle<BigInt> x,
@@ -155,11 +432,40 @@ MaybeHandle<BigInt> BigInt::UnsignedRightShift(Handle<BigInt> x,
BigInt);
}
-bool BigInt::LessThan(Handle<BigInt> x, Handle<BigInt> y) {
- UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+namespace {
+
+// Produces comparison result for {left_negative} == sign(x) != sign(y).
+ComparisonResult UnequalSign(bool left_negative) {
+ return left_negative ? ComparisonResult::kLessThan
+ : ComparisonResult::kGreaterThan;
+}
+
+// Produces result for |x| > |y|, with {both_negative} == sign(x) == sign(y);
+ComparisonResult AbsoluteGreater(bool both_negative) {
+ return both_negative ? ComparisonResult::kLessThan
+ : ComparisonResult::kGreaterThan;
+}
+
+// Produces result for |x| < |y|, with {both_negative} == sign(x) == sign(y).
+ComparisonResult AbsoluteLess(bool both_negative) {
+ return both_negative ? ComparisonResult::kGreaterThan
+ : ComparisonResult::kLessThan;
+}
+
+} // namespace
+
+// (Never returns kUndefined.)
+ComparisonResult BigInt::CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y) {
+ bool x_sign = x->sign();
+ if (x_sign != y->sign()) return UnequalSign(x_sign);
+
+ int result = MutableBigInt::AbsoluteCompare(x, y);
+ if (result > 0) return AbsoluteGreater(x_sign);
+ if (result < 0) return AbsoluteLess(x_sign);
+ return ComparisonResult::kEqual;
}
-bool BigInt::Equal(BigInt* x, BigInt* y) {
+bool BigInt::EqualToBigInt(BigInt* x, BigInt* y) {
if (x->sign() != y->sign()) return false;
if (x->length() != y->length()) return false;
for (int i = 0; i < x->length(); i++) {
@@ -168,73 +474,278 @@ bool BigInt::Equal(BigInt* x, BigInt* y) {
return true;
}
-Handle<BigInt> BigInt::BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y) {
- Handle<BigInt> result;
+MaybeHandle<BigInt> BigInt::BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y) {
+ return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseAnd(x, y));
+}
+
+MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Handle<BigInt> x,
+ Handle<BigInt> y) {
if (!x->sign() && !y->sign()) {
- result = AbsoluteAnd(x, y);
+ return AbsoluteAnd(x, y);
} else if (x->sign() && y->sign()) {
int result_length = Max(x->length(), y->length()) + 1;
// (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
// == -(((x-1) | (y-1)) + 1)
- result = AbsoluteSubOne(x, result_length);
- result = AbsoluteOr(result, AbsoluteSubOne(y, y->length()), *result);
- result = AbsoluteAddOne(result, true, *result);
+ Handle<MutableBigInt> result;
+ if (!AbsoluteSubOne(x, result_length).ToHandle(&result)) {
+ return MaybeHandle<MutableBigInt>();
+ }
+ result = AbsoluteOr(result, AbsoluteSubOne(y), *result);
+ return AbsoluteAddOne(result, true, *result);
} else {
DCHECK(x->sign() != y->sign());
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x & (-y) == x & ~(y-1) == x &~ (y-1)
- result = AbsoluteAndNot(x, AbsoluteSubOne(y, y->length()));
+ return AbsoluteAndNot(x, AbsoluteSubOne(y));
}
- result->RightTrim();
- return result;
}
-Handle<BigInt> BigInt::BitwiseXor(Handle<BigInt> x, Handle<BigInt> y) {
- Handle<BigInt> result;
+MaybeHandle<BigInt> BigInt::BitwiseXor(Handle<BigInt> x, Handle<BigInt> y) {
+ return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseXor(x, y));
+}
+
+MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Handle<BigInt> x,
+ Handle<BigInt> y) {
if (!x->sign() && !y->sign()) {
- result = AbsoluteXor(x, y);
+ return AbsoluteXor(x, y);
} else if (x->sign() && y->sign()) {
int result_length = Max(x->length(), y->length());
// (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
- result = AbsoluteSubOne(x, result_length);
- result = AbsoluteXor(result, AbsoluteSubOne(y, y->length()), *result);
+ Handle<MutableBigInt> result =
+ AbsoluteSubOne(x, result_length).ToHandleChecked();
+ return AbsoluteXor(result, AbsoluteSubOne(y), *result);
} else {
DCHECK(x->sign() != y->sign());
int result_length = Max(x->length(), y->length()) + 1;
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
- result = AbsoluteSubOne(y, result_length);
+ Handle<MutableBigInt> result;
+ if (!AbsoluteSubOne(y, result_length).ToHandle(&result)) {
+ return MaybeHandle<MutableBigInt>();
+ }
result = AbsoluteXor(result, x, *result);
- result = AbsoluteAddOne(result, true, *result);
+ return AbsoluteAddOne(result, true, *result);
}
- result->RightTrim();
- return result;
}
-Handle<BigInt> BigInt::BitwiseOr(Handle<BigInt> x, Handle<BigInt> y) {
- Handle<BigInt> result;
+MaybeHandle<BigInt> BigInt::BitwiseOr(Handle<BigInt> x, Handle<BigInt> y) {
+ return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseOr(x, y));
+}
+
+MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Handle<BigInt> x,
+ Handle<BigInt> y) {
int result_length = Max(x->length(), y->length());
if (!x->sign() && !y->sign()) {
- result = AbsoluteOr(x, y);
+ return AbsoluteOr(x, y);
} else if (x->sign() && y->sign()) {
// (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1))
// == -(((x-1) & (y-1)) + 1)
- result = AbsoluteSubOne(x, result_length);
- result = AbsoluteAnd(result, AbsoluteSubOne(y, y->length()), *result);
- result = AbsoluteAddOne(result, true, *result);
+ Handle<MutableBigInt> result =
+ AbsoluteSubOne(x, result_length).ToHandleChecked();
+ result = AbsoluteAnd(result, AbsoluteSubOne(y), *result);
+ return AbsoluteAddOne(result, true, *result);
} else {
DCHECK(x->sign() != y->sign());
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
- result = AbsoluteSubOne(y, result_length);
+ Handle<MutableBigInt> result =
+ AbsoluteSubOne(y, result_length).ToHandleChecked();
result = AbsoluteAndNot(result, x, *result);
- result = AbsoluteAddOne(result, true, *result);
+ return AbsoluteAddOne(result, true, *result);
}
- result->RightTrim();
- return result;
+}
+
+MaybeHandle<BigInt> BigInt::Increment(Handle<BigInt> x) {
+ if (x->sign()) {
+ Handle<MutableBigInt> result = MutableBigInt::AbsoluteSubOne(x);
+ result->set_sign(true);
+ return MutableBigInt::MakeImmutable(result);
+ } else {
+ return MutableBigInt::MakeImmutable(
+ MutableBigInt::AbsoluteAddOne(x, false));
+ }
+}
+
+MaybeHandle<BigInt> BigInt::Decrement(Handle<BigInt> x) {
+ MaybeHandle<MutableBigInt> result;
+ if (x->sign()) {
+ result = MutableBigInt::AbsoluteAddOne(x, true);
+ } else if (x->is_zero()) {
+ // TODO(jkummerow): Consider caching a canonical -1n BigInt.
+ return MutableBigInt::NewFromInt(x->GetIsolate(), -1);
+ } else {
+ result = MutableBigInt::AbsoluteSubOne(x);
+ }
+ return MutableBigInt::MakeImmutable(result);
+}
+
+bool BigInt::EqualToString(Handle<BigInt> x, Handle<String> y) {
+ Isolate* isolate = x->GetIsolate();
+ // a. Let n be StringToBigInt(y).
+ MaybeHandle<BigInt> maybe_n = StringToBigInt(isolate, y);
+ // b. If n is NaN, return false.
+ Handle<BigInt> n;
+ if (!maybe_n.ToHandle(&n)) {
+ DCHECK(!isolate->has_pending_exception());
+ return false;
+ }
+ // c. Return the result of x == n.
+ return EqualToBigInt(*x, *n);
+}
+
+bool BigInt::EqualToNumber(Handle<BigInt> x, Handle<Object> y) {
+ DCHECK(y->IsNumber());
+ // a. If x or y are any of NaN, +∞, or -∞, return false.
+ // b. If the mathematical value of x is equal to the mathematical value of y,
+ // return true, otherwise return false.
+ if (y->IsSmi()) {
+ int value = Smi::ToInt(*y);
+ if (value == 0) return x->is_zero();
+ // Any multi-digit BigInt is bigger than a Smi.
+ STATIC_ASSERT(sizeof(digit_t) >= sizeof(value));
+ return (x->length() == 1) && (x->sign() == (value < 0)) &&
+ (x->digit(0) ==
+ static_cast<digit_t>(std::abs(static_cast<int64_t>(value))));
+ }
+ DCHECK(y->IsHeapNumber());
+ double value = Handle<HeapNumber>::cast(y)->value();
+ return CompareToDouble(x, value) == ComparisonResult::kEqual;
+}
+
+ComparisonResult BigInt::CompareToNumber(Handle<BigInt> x, Handle<Object> y) {
+ DCHECK(y->IsNumber());
+ if (y->IsSmi()) {
+ bool x_sign = x->sign();
+ int y_value = Smi::ToInt(*y);
+ bool y_sign = (y_value < 0);
+ if (x_sign != y_sign) return UnequalSign(x_sign);
+
+ if (x->is_zero()) {
+ DCHECK(!y_sign);
+ return y_value == 0 ? ComparisonResult::kEqual
+ : ComparisonResult::kLessThan;
+ }
+ // Any multi-digit BigInt is bigger than a Smi.
+ STATIC_ASSERT(sizeof(digit_t) >= sizeof(y_value));
+ if (x->length() > 1) return AbsoluteGreater(x_sign);
+
+ digit_t abs_value = std::abs(static_cast<int64_t>(y_value));
+ digit_t x_digit = x->digit(0);
+ if (x_digit > abs_value) return AbsoluteGreater(x_sign);
+ if (x_digit < abs_value) return AbsoluteLess(x_sign);
+ return ComparisonResult::kEqual;
+ }
+ DCHECK(y->IsHeapNumber());
+ double value = Handle<HeapNumber>::cast(y)->value();
+ return CompareToDouble(x, value);
+}
+
+ComparisonResult BigInt::CompareToDouble(Handle<BigInt> x, double y) {
+ if (std::isnan(y)) return ComparisonResult::kUndefined;
+ if (y == V8_INFINITY) return ComparisonResult::kLessThan;
+ if (y == -V8_INFINITY) return ComparisonResult::kGreaterThan;
+ bool x_sign = x->sign();
+ // Note that this is different from the double's sign bit for -0. That's
+ // intentional because -0 must be treated like 0.
+ bool y_sign = (y < 0);
+ if (x_sign != y_sign) return UnequalSign(x_sign);
+ if (y == 0) {
+ DCHECK(!x_sign);
+ return x->is_zero() ? ComparisonResult::kEqual
+ : ComparisonResult::kGreaterThan;
+ }
+ if (x->is_zero()) {
+ DCHECK(!y_sign);
+ return ComparisonResult::kLessThan;
+ }
+ uint64_t double_bits = bit_cast<uint64_t>(y);
+ int raw_exponent =
+ static_cast<int>(double_bits >> Double::kPhysicalSignificandSize) & 0x7FF;
+ uint64_t mantissa = double_bits & Double::kSignificandMask;
+ // Non-finite doubles are handled above.
+ DCHECK_NE(raw_exponent, 0x7FF);
+ int exponent = raw_exponent - 0x3FF;
+ if (exponent < 0) {
+ // The absolute value of the double is less than 1. Only 0n has an
+ // absolute value smaller than that, but we've already covered that case.
+ DCHECK(!x->is_zero());
+ return AbsoluteGreater(x_sign);
+ }
+ int x_length = x->length();
+ digit_t x_msd = x->digit(x_length - 1);
+ int msd_leading_zeros = base::bits::CountLeadingZeros(x_msd);
+ int x_bitlength = x_length * kDigitBits - msd_leading_zeros;
+ int y_bitlength = exponent + 1;
+ if (x_bitlength < y_bitlength) return AbsoluteLess(x_sign);
+ if (x_bitlength > y_bitlength) return AbsoluteGreater(x_sign);
+
+ // At this point, we know that signs and bit lengths (i.e. position of
+ // the most significant bit in exponent-free representation) are identical.
+ // {x} is not zero, {y} is finite and not denormal.
+ // Now we virtually convert the double to an integer by shifting its
+ // mantissa according to its exponent, so it will align with the BigInt {x},
+ // and then we compare them bit for bit until we find a difference or the
+ // least significant bit.
+ // <----- 52 ------> <-- virtual trailing zeroes -->
+ // y / mantissa: 1yyyyyyyyyyyyyyyyy 0000000000000000000000000000000
+ // x / digits: 0001xxxx xxxxxxxx xxxxxxxx ...
+ // <--> <------>
+ // msd_topbit kDigitBits
+ //
+ mantissa |= Double::kHiddenBit;
+ const int kMantissaTopBit = 52; // 0-indexed.
+ // 0-indexed position of {x}'s most significant bit within the {msd}.
+ int msd_topbit = kDigitBits - 1 - msd_leading_zeros;
+ DCHECK_EQ(msd_topbit, (x_bitlength - 1) % kDigitBits);
+ // Shifted chunk of {mantissa} for comparing with {digit}.
+ digit_t compare_mantissa;
+ // Number of unprocessed bits in {mantissa}. We'll keep them shifted to
+ // the left (i.e. most significant part) of the underlying uint64_t.
+ int remaining_mantissa_bits = 0;
+
+ // First, compare the most significant digit against the beginning of
+ // the mantissa.
+ if (msd_topbit < kMantissaTopBit) {
+ remaining_mantissa_bits = (kMantissaTopBit - msd_topbit);
+ compare_mantissa = mantissa >> remaining_mantissa_bits;
+ mantissa = mantissa << (64 - remaining_mantissa_bits);
+ } else {
+ DCHECK_GE(msd_topbit, kMantissaTopBit);
+ compare_mantissa = mantissa << (msd_topbit - kMantissaTopBit);
+ mantissa = 0;
+ }
+ if (x_msd > compare_mantissa) return AbsoluteGreater(x_sign);
+ if (x_msd < compare_mantissa) return AbsoluteLess(x_sign);
+
+ // Then, compare additional digits against any remaining mantissa bits.
+ for (int digit_index = x_length - 2; digit_index >= 0; digit_index--) {
+ if (remaining_mantissa_bits > 0) {
+ remaining_mantissa_bits -= kDigitBits;
+ if (sizeof(mantissa) != sizeof(x_msd)) {
+ compare_mantissa = mantissa >> (64 - kDigitBits);
+ // "& 63" to appease compilers. kDigitBits is 32 here anyway.
+ mantissa = mantissa << (kDigitBits & 63);
+ } else {
+ compare_mantissa = mantissa;
+ mantissa = 0;
+ }
+ } else {
+ compare_mantissa = 0;
+ }
+ digit_t digit = x->digit(digit_index);
+ if (digit > compare_mantissa) return AbsoluteGreater(x_sign);
+ if (digit < compare_mantissa) return AbsoluteLess(x_sign);
+ }
+
+ // Integer parts are equal; check whether {y} has a fractional part.
+ if (mantissa != 0) {
+ DCHECK_GT(remaining_mantissa_bits, 0);
+ return AbsoluteLess(x_sign);
+ }
+ return ComparisonResult::kEqual;
}
MaybeHandle<String> BigInt::ToString(Handle<BigInt> bigint, int radix) {
@@ -243,25 +754,167 @@ MaybeHandle<String> BigInt::ToString(Handle<BigInt> bigint, int radix) {
return isolate->factory()->NewStringFromStaticChars("0");
}
if (base::bits::IsPowerOfTwo(radix)) {
- return ToStringBasePowerOfTwo(bigint, radix);
+ return MutableBigInt::ToStringBasePowerOfTwo(bigint, radix);
}
- return ToStringGeneric(bigint, radix);
+ return MutableBigInt::ToStringGeneric(bigint, radix);
}
-void BigInt::Initialize(int length, bool zero_initialize) {
- set_length(length);
- set_sign(false);
- if (zero_initialize) {
- memset(reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
- kDigitsOffset - kHeapObjectTag),
- 0, length * kDigitSize);
-#if DEBUG
- } else {
- memset(reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
- kDigitsOffset - kHeapObjectTag),
- 0xbf, length * kDigitSize);
-#endif
+namespace {
+
+bool IsSafeInteger(double value) {
+ if (std::isnan(value) || std::isinf(value)) return false;
+
+ // Let integer be ! ToInteger(value).
+ // If ! SameValueZero(integer, value) is false, return false.
+ if (DoubleToInteger(value) != value) return false;
+
+ return std::abs(value) <= kMaxSafeInteger;
+}
+
+} // anonymous namespace
+
+MaybeHandle<BigInt> BigInt::FromNumber(Isolate* isolate,
+ Handle<Object> number) {
+ DCHECK(number->IsNumber());
+ if (number->IsSmi()) {
+ return MutableBigInt::NewFromInt(isolate, Smi::ToInt(*number));
+ }
+ if (!IsSafeInteger(Handle<HeapNumber>::cast(number)->value())) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kBigIntFromNumber, number),
+ BigInt);
+ }
+ return MutableBigInt::NewFromSafeInteger(
+ isolate, Handle<HeapNumber>::cast(number)->value());
+}
+
+MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
+ if (obj->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, obj,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(obj),
+ ToPrimitiveHint::kNumber),
+ BigInt);
+ }
+
+ if (obj->IsBoolean()) {
+ return MutableBigInt::NewFromInt(isolate, obj->BooleanValue());
+ }
+ if (obj->IsBigInt()) {
+ return Handle<BigInt>::cast(obj);
+ }
+ if (obj->IsString()) {
+ Handle<BigInt> n;
+ if (!StringToBigInt(isolate, Handle<String>::cast(obj)).ToHandle(&n)) {
+ THROW_NEW_ERROR(isolate,
+ NewSyntaxError(MessageTemplate::kBigIntFromObject, obj),
+ BigInt);
+ }
+ return n;
+ }
+
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kBigIntFromObject, obj), BigInt);
+}
+
+Handle<Object> BigInt::ToNumber(Handle<BigInt> x) {
+ Isolate* isolate = x->GetIsolate();
+ if (x->is_zero()) return Handle<Smi>(Smi::kZero, isolate);
+ if (x->length() == 1 && x->digit(0) < Smi::kMaxValue) {
+ int value = static_cast<int>(x->digit(0));
+ if (x->sign()) value = -value;
+ return Handle<Smi>(Smi::FromInt(value), isolate);
+ }
+ double result = MutableBigInt::ToDouble(x);
+ return isolate->factory()->NewHeapNumber(result);
+}
+
+double MutableBigInt::ToDouble(Handle<BigIntBase> x) {
+ if (x->is_zero()) return 0.0;
+ int x_length = x->length();
+ digit_t x_msd = x->digit(x_length - 1);
+ int msd_leading_zeros = base::bits::CountLeadingZeros(x_msd);
+ int x_bitlength = x_length * kDigitBits - msd_leading_zeros;
+ if (x_bitlength > 1024) return x->sign() ? -V8_INFINITY : V8_INFINITY;
+ uint64_t exponent = x_bitlength - 1;
+ // We need the most significant bit shifted to the position of a double's
+ // "hidden bit". We also need to hide that MSB, so we shift it out.
+ uint64_t current_digit = x_msd;
+ int digit_index = x_length - 1;
+ int shift = msd_leading_zeros + 1 + (64 - kDigitBits);
+ DCHECK_LE(1, shift);
+ DCHECK_LE(shift, 64);
+ uint64_t mantissa = (shift == 64) ? 0 : current_digit << shift;
+ mantissa >>= 12;
+ int mantissa_bits_unset = shift - 12;
+ // If not all mantissa bits are defined yet, get more digits as needed.
+ if (mantissa_bits_unset >= kDigitBits && digit_index > 0) {
+ digit_index--;
+ current_digit = static_cast<uint64_t>(x->digit(digit_index));
+ mantissa |= (current_digit << (mantissa_bits_unset - kDigitBits));
+ mantissa_bits_unset -= kDigitBits;
+ }
+ if (mantissa_bits_unset > 0 && digit_index > 0) {
+ DCHECK_LT(mantissa_bits_unset, kDigitBits);
+ digit_index--;
+ current_digit = static_cast<uint64_t>(x->digit(digit_index));
+ mantissa |= (current_digit >> (kDigitBits - mantissa_bits_unset));
+ mantissa_bits_unset -= kDigitBits;
+ }
+ // If there are unconsumed digits left, we may have to round.
+ Rounding rounding =
+ DecideRounding(x, mantissa_bits_unset, digit_index, current_digit);
+ if (rounding == kRoundUp || (rounding == kTie && (mantissa & 1) == 1)) {
+ mantissa++;
+ // Incrementing the mantissa can overflow the mantissa bits. In that case
+ // the new mantissa will be all zero (plus hidden bit).
+ if ((mantissa >> Double::kPhysicalSignificandSize) != 0) {
+ mantissa = 0;
+ exponent++;
+ // Incrementing the exponent can overflow too.
+ if (exponent > 1023) {
+ return x->sign() ? -V8_INFINITY : V8_INFINITY;
+ }
+ }
}
+ // Assemble the result.
+ uint64_t sign_bit = x->sign() ? (static_cast<uint64_t>(1) << 63) : 0;
+ exponent = (exponent + 0x3FF) << Double::kPhysicalSignificandSize;
+ uint64_t double_bits = sign_bit | exponent | mantissa;
+ return bit_cast<double>(double_bits);
+}
+
+// This is its own function to keep control flow sane. The meaning of the
+// parameters is defined by {ToDouble}'s local variable usage.
+MutableBigInt::Rounding MutableBigInt::DecideRounding(Handle<BigIntBase> x,
+ int mantissa_bits_unset,
+ int digit_index,
+ uint64_t current_digit) {
+ if (mantissa_bits_unset > 0) return kRoundDown;
+ int top_unconsumed_bit;
+ if (mantissa_bits_unset < 0) {
+ // There are unconsumed bits in {current_digit}.
+ top_unconsumed_bit = -mantissa_bits_unset - 1;
+ } else {
+ DCHECK_EQ(mantissa_bits_unset, 0);
+ // {current_digit} fit the mantissa exactly; look at the next digit.
+ if (digit_index == 0) return kRoundDown;
+ digit_index--;
+ current_digit = static_cast<uint64_t>(x->digit(digit_index));
+ top_unconsumed_bit = kDigitBits - 1;
+ }
+ // If the most significant remaining bit is 0, round down.
+ uint64_t bitmask = static_cast<uint64_t>(1) << top_unconsumed_bit;
+ if ((current_digit & bitmask) == 0) {
+ return kRoundDown;
+ }
+ // If any other remaining bit is set, round up.
+ bitmask -= 1;
+ if ((current_digit & bitmask) != 0) return kRoundUp;
+ for (; digit_index >= 0; digit_index--) {
+ if (x->digit(digit_index) != 0) return kRoundUp;
+ }
+ return kTie;
}
void BigInt::BigIntShortPrint(std::ostream& os) {
@@ -275,20 +928,23 @@ void BigInt::BigIntShortPrint(std::ostream& os) {
os << digit(0);
}
-// Private helpers for public methods.
+// Internal helpers.
-Handle<BigInt> BigInt::AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign) {
+MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Handle<BigInt> x,
+ Handle<BigInt> y,
+ bool result_sign) {
if (x->length() < y->length()) return AbsoluteAdd(y, x, result_sign);
if (x->is_zero()) {
DCHECK(y->is_zero());
return x;
}
if (y->is_zero()) {
- return result_sign == x->sign() ? x : UnaryMinus(x);
+ return result_sign == x->sign() ? x : BigInt::UnaryMinus(x);
+ }
+ Handle<MutableBigInt> result;
+ if (!New(x->GetIsolate(), x->length() + 1).ToHandle(&result)) {
+ return MaybeHandle<BigInt>();
}
- Handle<BigInt> result =
- x->GetIsolate()->factory()->NewBigIntRaw(x->length() + 1);
digit_t carry = 0;
int i = 0;
for (; i < y->length(); i++) {
@@ -306,12 +962,11 @@ Handle<BigInt> BigInt::AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
}
result->set_digit(i, carry);
result->set_sign(result_sign);
- result->RightTrim();
- return result;
+ return MakeImmutable(result);
}
-Handle<BigInt> BigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign) {
+Handle<BigInt> MutableBigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign) {
DCHECK(x->length() >= y->length());
SLOW_DCHECK(AbsoluteCompare(x, y) >= 0);
if (x->is_zero()) {
@@ -319,9 +974,10 @@ Handle<BigInt> BigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
return x;
}
if (y->is_zero()) {
- return result_sign == x->sign() ? x : UnaryMinus(x);
+ return result_sign == x->sign() ? x : BigInt::UnaryMinus(x);
}
- Handle<BigInt> result = x->GetIsolate()->factory()->NewBigIntRaw(x->length());
+ Handle<MutableBigInt> result =
+ New(x->GetIsolate(), x->length()).ToHandleChecked();
digit_t borrow = 0;
int i = 0;
for (; i < y->length(); i++) {
@@ -339,21 +995,36 @@ Handle<BigInt> BigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
}
DCHECK_EQ(0, borrow);
result->set_sign(result_sign);
- result->RightTrim();
- return result;
+ return MakeImmutable(result);
}
-// Adds 1 to the absolute value of {x}, stores the result in {result_storage}
-// and sets its sign to {sign}.
+// Adds 1 to the absolute value of {x} and sets the result's sign to {sign}.
+// {result_storage} is optional; if present, it will be used to store the
+// result, otherwise a new BigInt will be allocated for the result.
// {result_storage} and {x} may refer to the same BigInt for in-place
// modification.
-Handle<BigInt> BigInt::AbsoluteAddOne(Handle<BigInt> x, bool sign,
- BigInt* result_storage) {
- DCHECK(result_storage != nullptr);
+MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
+ Handle<BigIntBase> x, bool sign, MutableBigInt* result_storage) {
int input_length = x->length();
- int result_length = result_storage->length();
+ // The addition will overflow into a new digit if all existing digits are
+ // at maximum.
+ bool will_overflow = true;
+ for (int i = 0; i < input_length; i++) {
+ if (!digit_ismax(x->digit(i))) {
+ will_overflow = false;
+ break;
+ }
+ }
+ int result_length = input_length + will_overflow;
Isolate* isolate = x->GetIsolate();
- Handle<BigInt> result(result_storage, isolate);
+ Handle<MutableBigInt> result(result_storage, isolate);
+ if (result_storage == nullptr) {
+ if (!New(isolate, result_length).ToHandle(&result)) {
+ return MaybeHandle<MutableBigInt>();
+ }
+ } else {
+ DCHECK(result->length() == result_length);
+ }
digit_t carry = 1;
for (int i = 0; i < input_length; i++) {
digit_t new_carry = 0;
@@ -363,20 +1034,30 @@ Handle<BigInt> BigInt::AbsoluteAddOne(Handle<BigInt> x, bool sign,
if (result_length > input_length) {
result->set_digit(input_length, carry);
} else {
- DCHECK(carry == 0);
+ DCHECK_EQ(carry, 0);
}
result->set_sign(sign);
return result;
}
// Subtracts 1 from the absolute value of {x}. {x} must not be zero.
-// Allocates a new BigInt of length {result_length} for the result;
-// {result_length} must be at least as large as {x->length()}.
-Handle<BigInt> BigInt::AbsoluteSubOne(Handle<BigInt> x, int result_length) {
+Handle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Handle<BigIntBase> x) {
+ DCHECK(!x->is_zero());
+ // Requesting a result length identical to an existing BigInt's length
+ // cannot overflow the limit.
+ return AbsoluteSubOne(x, x->length()).ToHandleChecked();
+}
+
+// Like the above, but you can specify that the allocated result should have
+// length {result_length}, which must be at least as large as {x->length()}.
+MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Handle<BigIntBase> x,
+ int result_length) {
DCHECK(!x->is_zero());
DCHECK(result_length >= x->length());
- Handle<BigInt> result =
- x->GetIsolate()->factory()->NewBigIntRaw(result_length);
+ Handle<MutableBigInt> result;
+ if (!New(x->GetIsolate(), result_length).ToHandle(&result)) {
+ return MaybeHandle<MutableBigInt>();
+ }
int length = x->length();
digit_t borrow = 1;
for (int i = 0; i < length; i++) {
@@ -384,7 +1065,7 @@ Handle<BigInt> BigInt::AbsoluteSubOne(Handle<BigInt> x, int result_length) {
result->set_digit(i, digit_sub(x->digit(i), borrow, &new_borrow));
borrow = new_borrow;
}
- DCHECK(borrow == 0);
+ DCHECK_EQ(borrow, 0);
for (int i = length; i < result_length; i++) {
result->set_digit(i, borrow);
}
@@ -394,8 +1075,8 @@ Handle<BigInt> BigInt::AbsoluteSubOne(Handle<BigInt> x, int result_length) {
// Helper for Absolute{And,AndNot,Or,Xor}.
// Performs the given binary {op} on digit pairs of {x} and {y}; when the
// end of the shorter of the two is reached, {extra_digits} configures how
-// remaining digits in the longer input are handled: copied to the result
-// or ignored.
+// remaining digits in the longer input (if {symmetric} == kSymmetric, in
+// {x} otherwise) are handled: copied to the result or ignored.
// If {result_storage} is non-nullptr, it will be used for the result and
// any extra digits in it will be zeroed out, otherwise a new BigInt (with
// the same length as the longer input) will be allocated.
@@ -408,26 +1089,32 @@ Handle<BigInt> BigInt::AbsoluteSubOne(Handle<BigInt> x, int result_length) {
// | | | |
// v v v v
// result_storage: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
-inline Handle<BigInt> BigInt::AbsoluteBitwiseOp(
- Handle<BigInt> x, Handle<BigInt> y, BigInt* result_storage,
- ExtraDigitsHandling extra_digits,
+inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
+ Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage,
+ ExtraDigitsHandling extra_digits, SymmetricOp symmetric,
std::function<digit_t(digit_t, digit_t)> op) {
int x_length = x->length();
int y_length = y->length();
+ int num_pairs = y_length;
if (x_length < y_length) {
- return AbsoluteBitwiseOp(y, x, result_storage, extra_digits, op);
+ num_pairs = x_length;
+ if (symmetric == kSymmetric) {
+ std::swap(x, y);
+ std::swap(x_length, y_length);
+ }
}
+ DCHECK(num_pairs == Min(x_length, y_length));
Isolate* isolate = x->GetIsolate();
- Handle<BigInt> result(result_storage, isolate);
- int result_length = extra_digits == kCopy ? x_length : y_length;
+ Handle<MutableBigInt> result(result_storage, isolate);
+ int result_length = extra_digits == kCopy ? x_length : num_pairs;
if (result_storage == nullptr) {
- result = isolate->factory()->NewBigIntRaw(result_length);
+ result = New(isolate, result_length).ToHandleChecked();
} else {
DCHECK(result_storage->length() >= result_length);
result_length = result_storage->length();
}
int i = 0;
- for (; i < y_length; i++) {
+ for (; i < num_pairs; i++) {
result->set_digit(i, op(x->digit(i), y->digit(i)));
}
if (extra_digits == kCopy) {
@@ -444,42 +1131,43 @@ inline Handle<BigInt> BigInt::AbsoluteBitwiseOp(
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<BigInt> BigInt::AbsoluteAnd(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kSkip,
+Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(
+ Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kSkip, kSymmetric,
[](digit_t a, digit_t b) { return a & b; });
}
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<BigInt> BigInt::AbsoluteAndNot(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kCopy,
+Handle<MutableBigInt> MutableBigInt::AbsoluteAndNot(
+ Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kCopy, kNotSymmetric,
[](digit_t a, digit_t b) { return a & ~b; });
}
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<BigInt> BigInt::AbsoluteOr(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kCopy,
+Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Handle<BigIntBase> x,
+ Handle<BigIntBase> y,
+ MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kCopy, kSymmetric,
[](digit_t a, digit_t b) { return a | b; });
}
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<BigInt> BigInt::AbsoluteXor(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kCopy,
+Handle<MutableBigInt> MutableBigInt::AbsoluteXor(
+ Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kCopy, kSymmetric,
[](digit_t a, digit_t b) { return a ^ b; });
}
// Returns a positive value if abs(x) > abs(y), a negative value if
// abs(x) < abs(y), or zero if abs(x) == abs(y).
-int BigInt::AbsoluteCompare(Handle<BigInt> x, Handle<BigInt> y) {
+int MutableBigInt::AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y) {
int diff = x->length() - y->length();
if (diff != 0) return diff;
int i = x->length() - 1;
@@ -492,9 +1180,10 @@ int BigInt::AbsoluteCompare(Handle<BigInt> x, Handle<BigInt> y) {
// {accumulator}, starting at {accumulator_index} for the least-significant
// digit.
// Callers must ensure that {accumulator} is big enough to hold the result.
-void BigInt::MultiplyAccumulate(Handle<BigInt> multiplicand, digit_t multiplier,
- Handle<BigInt> accumulator,
- int accumulator_index) {
+void MutableBigInt::MultiplyAccumulate(Handle<BigIntBase> multiplicand,
+ digit_t multiplier,
+ Handle<MutableBigInt> accumulator,
+ int accumulator_index) {
// This is a minimum requirement; the DCHECK in the second loop below
// will enforce more as needed.
DCHECK(accumulator->length() > multiplicand->length() + accumulator_index);
@@ -529,8 +1218,9 @@ void BigInt::MultiplyAccumulate(Handle<BigInt> multiplicand, digit_t multiplier,
// Multiplies {source} with {factor} and adds {summand} to the result.
// {result} and {source} may be the same BigInt for inplace modification.
-void BigInt::InternalMultiplyAdd(BigInt* source, digit_t factor,
- digit_t summand, int n, BigInt* result) {
+void MutableBigInt::InternalMultiplyAdd(BigIntBase* source, digit_t factor,
+ digit_t summand, int n,
+ MutableBigInt* result) {
DCHECK(source->length() >= n);
DCHECK(result->length() >= n);
digit_t carry = summand;
@@ -556,15 +1246,18 @@ void BigInt::InternalMultiplyAdd(BigInt* source, digit_t factor,
result->set_digit(n++, 0);
}
} else {
- CHECK((carry + high) == 0);
+ CHECK_EQ(carry + high, 0);
}
}
-// Multiplies {this} with {factor} and adds {summand} to the result.
-void BigInt::InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand) {
+// Multiplies {x} with {factor} and then adds {summand} to it.
+void BigInt::InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
+ uintptr_t factor, uintptr_t summand) {
STATIC_ASSERT(sizeof(factor) == sizeof(digit_t));
STATIC_ASSERT(sizeof(summand) == sizeof(digit_t));
- InternalMultiplyAdd(this, factor, summand, length(), this);
+ Handle<MutableBigInt> bigint = MutableBigInt::Cast(x);
+ MutableBigInt::InternalMultiplyAdd(*bigint, factor, summand, bigint->length(),
+ *bigint);
}
// Divides {x} by {divisor}, returning the result in {quotient} and {remainder}.
@@ -574,20 +1267,16 @@ void BigInt::InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand) {
// allocated for it; otherwise the caller must ensure that it is big enough.
// {quotient} can be the same as {x} for an in-place division. {quotient} can
// also be nullptr if the caller is only interested in the remainder.
-void BigInt::AbsoluteDivSmall(Handle<BigInt> x, digit_t divisor,
- Handle<BigInt>* quotient, digit_t* remainder) {
- DCHECK(divisor != 0);
+void MutableBigInt::AbsoluteDivSmall(Handle<BigIntBase> x, digit_t divisor,
+ Handle<MutableBigInt>* quotient,
+ digit_t* remainder) {
+ DCHECK_NE(divisor, 0);
DCHECK(!x->is_zero()); // Callers check anyway, no need to handle this.
*remainder = 0;
- if (divisor == 1) {
- if (quotient != nullptr) *quotient = x;
- return;
- }
-
int length = x->length();
if (quotient != nullptr) {
if ((*quotient).is_null()) {
- *quotient = x->GetIsolate()->factory()->NewBigIntRaw(length);
+ *quotient = New(x->GetIsolate(), length).ToHandleChecked();
}
for (int i = length - 1; i >= 0; i--) {
digit_t q = digit_div(*remainder, x->digit(i), divisor, remainder);
@@ -606,12 +1295,13 @@ void BigInt::AbsoluteDivSmall(Handle<BigInt> x, digit_t divisor,
// Both {quotient} and {remainder} are optional, for callers that are only
// interested in one of them.
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
-void BigInt::AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
- Handle<BigInt>* quotient,
- Handle<BigInt>* remainder) {
- DCHECK(divisor->length() >= 2);
+bool MutableBigInt::AbsoluteDivLarge(Handle<BigIntBase> dividend,
+ Handle<BigIntBase> divisor,
+ Handle<MutableBigInt>* quotient,
+ Handle<MutableBigInt>* remainder) {
+ DCHECK_GE(divisor->length(), 2);
DCHECK(dividend->length() >= divisor->length());
- Factory* factory = dividend->GetIsolate()->factory();
+ Isolate* isolate = dividend->GetIsolate();
// The unusual variable names inside this function are consistent with
// Knuth's book, as well as with Go's implementation of this algorithm.
// Maintaining this consistency is probably more useful than trying to
@@ -620,11 +1310,12 @@ void BigInt::AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
int m = dividend->length() - n;
// The quotient to be computed.
- Handle<BigInt> q;
- if (quotient != nullptr) q = factory->NewBigIntRaw(m + 1);
+ Handle<MutableBigInt> q;
+ if (quotient != nullptr) q = New(isolate, m + 1).ToHandleChecked();
// In each iteration, {qhatv} holds {divisor} * {current quotient digit}.
// "v" is the book's name for {divisor}, "qhat" the current quotient digit.
- Handle<BigInt> qhatv = factory->NewBigIntRaw(n + 1);
+ Handle<MutableBigInt> qhatv;
+ if (!New(isolate, n + 1).ToHandle(&qhatv)) return false;
// D1.
// Left-shift inputs so that the divisor's MSB is set. This is necessary
@@ -633,11 +1324,15 @@ void BigInt::AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
// result).
int shift = base::bits::CountLeadingZeros(divisor->digit(n - 1));
if (shift > 0) {
- divisor = SpecialLeftShift(divisor, shift, kSameSizeResult);
+ divisor =
+ SpecialLeftShift(divisor, shift, kSameSizeResult).ToHandleChecked();
}
// Holds the (continuously updated) remaining part of the dividend, which
// eventually becomes the remainder.
- Handle<BigInt> u = SpecialLeftShift(dividend, shift, kAlwaysAddOneDigit);
+ Handle<MutableBigInt> u;
+ if (!SpecialLeftShift(dividend, shift, kAlwaysAddOneDigit).ToHandle(&u)) {
+ return false;
+ }
// D2.
// Iterate over the dividend's digit (like the "grad school" algorithm).
@@ -678,9 +1373,9 @@ void BigInt::AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
// was one too high, so we must correct it and undo one subtraction of
// the (shifted) divisor.
InternalMultiplyAdd(*divisor, qhat, 0, n, *qhatv);
- digit_t c = u->InplaceSub(*qhatv, j);
+ digit_t c = u->InplaceSub(qhatv, j);
if (c != 0) {
- c = u->InplaceAdd(*divisor, j);
+ c = u->InplaceAdd(divisor, j);
u->set_digit(j + n, u->digit(j + n) + c);
qhat--;
}
@@ -694,11 +1389,12 @@ void BigInt::AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
u->InplaceRightShift(shift);
*remainder = u;
}
+ return true;
}
// Returns whether (factor1 * factor2) > (high << kDigitBits) + low.
-bool BigInt::ProductGreaterThan(digit_t factor1, digit_t factor2, digit_t high,
- digit_t low) {
+bool MutableBigInt::ProductGreaterThan(digit_t factor1, digit_t factor2,
+ digit_t high, digit_t low) {
digit_t result_high;
digit_t result_low = digit_mul(factor1, factor2, &result_high);
return result_high > high || (result_high == high && result_low > low);
@@ -706,7 +1402,8 @@ bool BigInt::ProductGreaterThan(digit_t factor1, digit_t factor2, digit_t high,
// Adds {summand} onto {this}, starting with {summand}'s 0th digit
// at {this}'s {start_index}'th digit. Returns the "carry" (0 or 1).
-BigInt::digit_t BigInt::InplaceAdd(BigInt* summand, int start_index) {
+BigInt::digit_t MutableBigInt::InplaceAdd(Handle<BigIntBase> summand,
+ int start_index) {
digit_t carry = 0;
int n = summand->length();
DCHECK(length() >= start_index + n);
@@ -723,7 +1420,8 @@ BigInt::digit_t BigInt::InplaceAdd(BigInt* summand, int start_index) {
// Subtracts {subtrahend} from {this}, starting with {subtrahend}'s 0th digit
// at {this}'s {start_index}-th digit. Returns the "borrow" (0 or 1).
-BigInt::digit_t BigInt::InplaceSub(BigInt* subtrahend, int start_index) {
+BigInt::digit_t MutableBigInt::InplaceSub(Handle<BigIntBase> subtrahend,
+ int start_index) {
digit_t borrow = 0;
int n = subtrahend->length();
DCHECK(length() >= start_index + n);
@@ -738,11 +1436,11 @@ BigInt::digit_t BigInt::InplaceSub(BigInt* subtrahend, int start_index) {
return borrow;
}
-void BigInt::InplaceRightShift(int shift) {
- DCHECK(shift >= 0);
- DCHECK(shift < kDigitBits);
- DCHECK(length() > 0);
- DCHECK((digit(0) & ((1 << shift) - 1)) == 0);
+void MutableBigInt::InplaceRightShift(int shift) {
+ DCHECK_GE(shift, 0);
+ DCHECK_LT(shift, kDigitBits);
+ DCHECK_GT(length(), 0);
+ DCHECK_EQ(digit(0) & ((static_cast<digit_t>(1) << shift) - 1), 0);
if (shift == 0) return;
digit_t carry = digit(0) >> shift;
int last = length() - 1;
@@ -752,20 +1450,27 @@ void BigInt::InplaceRightShift(int shift) {
carry = d >> shift;
}
set_digit(last, carry);
- RightTrim();
}
// Always copies the input, even when {shift} == 0.
// {shift} must be less than kDigitBits, {x} must be non-zero.
-Handle<BigInt> BigInt::SpecialLeftShift(Handle<BigInt> x, int shift,
- SpecialLeftShiftMode mode) {
- DCHECK(shift >= 0);
- DCHECK(shift < kDigitBits);
- DCHECK(x->length() > 0);
+MaybeHandle<MutableBigInt> MutableBigInt::SpecialLeftShift(
+ Handle<BigIntBase> x, int shift, SpecialLeftShiftMode mode) {
+ DCHECK_GE(shift, 0);
+ DCHECK_LT(shift, kDigitBits);
+ DCHECK_GT(x->length(), 0);
int n = x->length();
int result_length = mode == kAlwaysAddOneDigit ? n + 1 : n;
- Handle<BigInt> result =
- x->GetIsolate()->factory()->NewBigIntRaw(result_length);
+ Handle<MutableBigInt> result;
+ if (!New(x->GetIsolate(), result_length).ToHandle(&result)) {
+ return MaybeHandle<MutableBigInt>();
+ }
+ if (shift == 0) {
+ for (int i = 0; i < n; i++) result->set_digit(i, x->digit(i));
+ if (mode == kAlwaysAddOneDigit) result->set_digit(n, 0);
+ return result;
+ }
+ DCHECK_GT(shift, 0);
digit_t carry = 0;
for (int i = 0; i < n; i++) {
digit_t d = x->digit(i);
@@ -775,14 +1480,14 @@ Handle<BigInt> BigInt::SpecialLeftShift(Handle<BigInt> x, int shift,
if (mode == kAlwaysAddOneDigit) {
result->set_digit(n, carry);
} else {
- DCHECK(mode == kSameSizeResult);
- DCHECK(carry == 0);
+ DCHECK_EQ(mode, kSameSizeResult);
+ DCHECK_EQ(carry, 0);
}
return result;
}
-MaybeHandle<BigInt> BigInt::LeftShiftByAbsolute(Handle<BigInt> x,
- Handle<BigInt> y) {
+MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Handle<BigIntBase> x,
+ Handle<BigIntBase> y) {
Isolate* isolate = x->GetIsolate();
Maybe<digit_t> maybe_shift = ToShiftAmount(y);
if (maybe_shift.IsNothing()) {
@@ -800,7 +1505,10 @@ MaybeHandle<BigInt> BigInt::LeftShiftByAbsolute(Handle<BigInt> x,
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
BigInt);
}
- Handle<BigInt> result = isolate->factory()->NewBigIntRaw(result_length);
+ Handle<MutableBigInt> result;
+ if (!New(isolate, result_length).ToHandle(&result)) {
+ return MaybeHandle<BigInt>();
+ }
if (bits_shift == 0) {
int i = 0;
for (; i < digit_shift; i++) result->set_digit(i, 0ul);
@@ -818,16 +1526,15 @@ MaybeHandle<BigInt> BigInt::LeftShiftByAbsolute(Handle<BigInt> x,
if (grow) {
result->set_digit(length + digit_shift, carry);
} else {
- DCHECK(carry == 0);
+ DCHECK_EQ(carry, 0);
}
}
result->set_sign(x->sign());
- result->RightTrim();
- return result;
+ return MakeImmutable(result);
}
-Handle<BigInt> BigInt::RightShiftByAbsolute(Handle<BigInt> x,
- Handle<BigInt> y) {
+Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Handle<BigIntBase> x,
+ Handle<BigIntBase> y) {
Isolate* isolate = x->GetIsolate();
int length = x->length();
bool sign = x->sign();
@@ -848,7 +1555,8 @@ Handle<BigInt> BigInt::RightShiftByAbsolute(Handle<BigInt> x,
// large enough up front, it avoids having to do a second allocation later.
bool must_round_down = false;
if (sign) {
- if ((x->digit(digit_shift) & ((1 << bits_shift) - 1)) != 0) {
+ const digit_t mask = (static_cast<digit_t>(1) << bits_shift) - 1;
+ if ((x->digit(digit_shift) & mask) != 0) {
must_round_down = true;
} else {
for (int i = 0; i < digit_shift; i++) {
@@ -867,7 +1575,8 @@ Handle<BigInt> BigInt::RightShiftByAbsolute(Handle<BigInt> x,
if (rounding_can_overflow) result_length++;
}
- Handle<BigInt> result = isolate->factory()->NewBigIntRaw(result_length);
+ DCHECK_LE(result_length, length);
+ Handle<MutableBigInt> result = New(isolate, result_length).ToHandleChecked();
if (bits_shift == 0) {
for (int i = digit_shift; i < length; i++) {
result->set_digit(i - digit_shift, x->digit(i));
@@ -887,27 +1596,25 @@ Handle<BigInt> BigInt::RightShiftByAbsolute(Handle<BigInt> x,
result->set_sign(true);
if (must_round_down) {
// Since the result is negative, rounding down means adding one to
- // its absolute value.
- result = AbsoluteAddOne(result, true, *result);
+ // its absolute value. This cannot overflow.
+ result = AbsoluteAddOne(result, true, *result).ToHandleChecked();
}
}
- result->RightTrim();
- return result;
+ return MakeImmutable(result);
}
-Handle<BigInt> BigInt::RightShiftByMaximum(Isolate* isolate, bool sign) {
+Handle<BigInt> MutableBigInt::RightShiftByMaximum(Isolate* isolate, bool sign) {
if (sign) {
// TODO(jkummerow): Consider caching a canonical -1n BigInt.
- return isolate->factory()->NewBigIntFromInt(-1);
+ return NewFromInt(isolate, -1);
} else {
- // TODO(jkummerow): Consider caching a canonical zero BigInt.
- return isolate->factory()->NewBigIntFromInt(0);
+ return Zero(isolate);
}
}
// Returns the value of {x} if it is less than the maximum bit length of
// a BigInt, or Nothing otherwise.
-Maybe<BigInt::digit_t> BigInt::ToShiftAmount(Handle<BigInt> x) {
+Maybe<BigInt::digit_t> MutableBigInt::ToShiftAmount(Handle<BigIntBase> x) {
if (x->length() > 1) return Nothing<digit_t>();
digit_t value = x->digit(0);
STATIC_ASSERT(kMaxLength * kDigitBits < std::numeric_limits<digit_t>::max());
@@ -915,15 +1622,6 @@ Maybe<BigInt::digit_t> BigInt::ToShiftAmount(Handle<BigInt> x) {
return Just(value);
}
-Handle<BigInt> BigInt::Copy(Handle<BigInt> source) {
- int length = source->length();
- Handle<BigInt> result = source->GetIsolate()->factory()->NewBigIntRaw(length);
- memcpy(result->address() + HeapObject::kHeaderSize,
- source->address() + HeapObject::kHeaderSize,
- SizeFor(length) - HeapObject::kHeaderSize);
- return result;
-}
-
// Lookup table for the maximum number of bits required per character of a
// base-N string representation of a number. To increase accuracy, the array
// value is the actual value multiplied by 32. To generate this table:
@@ -939,52 +1637,47 @@ constexpr uint8_t kMaxBitsPerChar[] = {
static const int kBitsPerCharTableShift = 5;
static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
-MaybeHandle<BigInt> BigInt::AllocateFor(Isolate* isolate, int radix,
- int charcount) {
+MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw) {
DCHECK(2 <= radix && radix <= 36);
- DCHECK(charcount >= 0);
+ DCHECK_GE(charcount, 0);
size_t bits_per_char = kMaxBitsPerChar[radix];
size_t chars = static_cast<size_t>(charcount);
const int roundup = kBitsPerCharTableMultiplier - 1;
- if ((std::numeric_limits<size_t>::max() - roundup) / bits_per_char < chars) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
- }
- size_t bits_min = bits_per_char * chars;
- // Divide by 32 (see table), rounding up.
- bits_min = (bits_min + roundup) >> kBitsPerCharTableShift;
- if (bits_min > static_cast<size_t>(kMaxInt)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ if (chars <= (std::numeric_limits<size_t>::max() - roundup) / bits_per_char) {
+ size_t bits_min = bits_per_char * chars;
+ // Divide by 32 (see table), rounding up.
+ bits_min = (bits_min + roundup) >> kBitsPerCharTableShift;
+ if (bits_min <= static_cast<size_t>(kMaxInt)) {
+ // Divide by kDigitsBits, rounding up.
+ int length = (static_cast<int>(bits_min) + kDigitBits - 1) / kDigitBits;
+ if (length <= kMaxLength) {
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, length).ToHandleChecked();
+ result->InitializeDigits(length);
+ return result;
+ }
+ }
}
- // Divide by kDigitsBits, rounding up.
- int length = (static_cast<int>(bits_min) + kDigitBits - 1) / kDigitBits;
- if (length > BigInt::kMaxLength) {
+ // All the overflow/maximum checks above fall through to here.
+ if (should_throw == kThrowOnError) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ FreshlyAllocatedBigInt);
+ } else {
+ return MaybeHandle<FreshlyAllocatedBigInt>();
}
- return isolate->factory()->NewBigInt(length);
}
-void BigInt::RightTrim() {
- int old_length = length();
- int new_length = old_length;
- while (new_length > 0 && digit(new_length - 1) == 0) new_length--;
- int to_trim = old_length - new_length;
- if (to_trim == 0) return;
- int size_delta = to_trim * kDigitSize;
- Address new_end = this->address() + SizeFor(new_length);
- Heap* heap = GetHeap();
- heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
- // Canonicalize -0n.
- if (new_length == 0) set_sign(false);
- set_length(new_length);
+Handle<BigInt> BigInt::Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign) {
+ Handle<MutableBigInt> bigint = MutableBigInt::Cast(x);
+ bigint->set_sign(sign);
+ return MutableBigInt::MakeImmutable(bigint);
}
static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
-MaybeHandle<String> BigInt::ToStringBasePowerOfTwo(Handle<BigInt> x,
- int radix) {
+MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Handle<BigIntBase> x,
+ int radix) {
STATIC_ASSERT(base::bits::IsPowerOfTwo(kDigitBits));
DCHECK(base::bits::IsPowerOfTwo(radix));
DCHECK(radix >= 2 && radix <= 32);
@@ -993,7 +1686,7 @@ MaybeHandle<String> BigInt::ToStringBasePowerOfTwo(Handle<BigInt> x,
const int length = x->length();
const bool sign = x->sign();
- const int bits_per_char = base::bits::CountTrailingZeros32(radix);
+ const int bits_per_char = base::bits::CountTrailingZeros(radix);
const int char_mask = radix - 1;
// Compute the length of the resulting string: divide the bit length of the
// BigInt by the number of bits representable per character (rounding up).
@@ -1041,11 +1734,12 @@ MaybeHandle<String> BigInt::ToStringBasePowerOfTwo(Handle<BigInt> x,
digit >>= bits_per_char;
}
if (sign) buffer[pos--] = '-';
- DCHECK(pos == -1);
+ DCHECK_EQ(pos, -1);
return result;
}
-MaybeHandle<String> BigInt::ToStringGeneric(Handle<BigInt> x, int radix) {
+MaybeHandle<String> MutableBigInt::ToStringGeneric(Handle<BigIntBase> x,
+ int radix) {
DCHECK(radix >= 2 && radix <= 36);
DCHECK(!x->is_zero());
Heap* heap = x->GetHeap();
@@ -1101,31 +1795,31 @@ MaybeHandle<String> BigInt::ToStringGeneric(Handle<BigInt> x, int radix) {
kDigitBits * kBitsPerCharTableMultiplier / max_bits_per_char;
digit_t chunk_divisor = digit_pow(radix, chunk_chars);
// By construction of chunk_chars, there can't have been overflow.
- DCHECK(chunk_divisor != 0);
+ DCHECK_NE(chunk_divisor, 0);
int nonzero_digit = length - 1;
- DCHECK(x->digit(nonzero_digit) != 0);
+ DCHECK_NE(x->digit(nonzero_digit), 0);
// {rest} holds the part of the BigInt that we haven't looked at yet.
// Not to be confused with "remainder"!
- Handle<BigInt> rest;
+ Handle<MutableBigInt> rest;
// In the first round, divide the input, allocating a new BigInt for
// the result == rest; from then on divide the rest in-place.
- Handle<BigInt>* dividend = &x;
+ Handle<BigIntBase>* dividend = &x;
do {
digit_t chunk;
AbsoluteDivSmall(*dividend, chunk_divisor, &rest, &chunk);
DCHECK(!rest.is_null());
- dividend = &rest;
+ dividend = reinterpret_cast<Handle<BigIntBase>*>(&rest);
DisallowHeapAllocation no_gc;
uint8_t* chars = result->GetChars();
for (int i = 0; i < chunk_chars; i++) {
chars[pos++] = kConversionChars[chunk % radix];
chunk /= radix;
}
- DCHECK(chunk == 0);
+ DCHECK_EQ(chunk, 0);
if (rest->digit(nonzero_digit) == 0) nonzero_digit--;
// We can never clear more than one digit per iteration, because
// chunk_divisor is smaller than max digit value.
- DCHECK(rest->digit(nonzero_digit) > 0);
+ DCHECK_GT(rest->digit(nonzero_digit), 0);
} while (nonzero_digit > 0);
last_digit = rest->digit(0);
}
@@ -1135,7 +1829,7 @@ MaybeHandle<String> BigInt::ToStringGeneric(Handle<BigInt> x, int radix) {
chars[pos++] = kConversionChars[last_digit % radix];
last_digit /= radix;
} while (last_digit > 0);
- DCHECK(pos >= 1);
+ DCHECK_GE(pos, 1);
DCHECK(pos <= static_cast<int>(chars_required));
// Remove leading zeroes.
while (pos > 1 && chars[pos - 1] == '0') pos--;
@@ -1161,11 +1855,162 @@ MaybeHandle<String> BigInt::ToStringGeneric(Handle<BigInt> x, int radix) {
#if DEBUG
// Verify that all characters have been written.
DCHECK(result->length() == pos);
- for (int i = 0; i < pos; i++) DCHECK(chars[i] != '?');
+ for (int i = 0; i < pos; i++) DCHECK_NE(chars[i], '?');
#endif
return result;
}
+Handle<BigInt> BigInt::AsIntN(uint64_t n, Handle<BigInt> x) {
+ if (x->is_zero()) return x;
+ if (n == 0) return MutableBigInt::Zero(x->GetIsolate());
+ uint64_t needed_length = (n + kDigitBits - 1) / kDigitBits;
+ // If {x} has less than {n} bits, return it directly.
+ if (static_cast<uint64_t>(x->length()) < needed_length) return x;
+ DCHECK_LE(needed_length, kMaxInt);
+ digit_t top_digit = x->digit(static_cast<int>(needed_length) - 1);
+ digit_t compare_digit = static_cast<digit_t>(1) << ((n - 1) % kDigitBits);
+ if (top_digit < compare_digit) return x;
+ // Otherwise we have to truncate (which is a no-op in the special case
+ // of x == -2^(n-1)), and determine the right sign. We also might have
+ // to subtract from 2^n to simulate having two's complement representation.
+ // In most cases, the result's sign is x->sign() xor "(n-1)th bit present".
+ // The only exception is when x is negative, has the (n-1)th bit, and all
+ // its bits below (n-1) are zero. In that case, the result is the minimum
+ // n-bit integer (example: asIntN(3, -12n) => -4n).
+ bool has_bit = (top_digit & compare_digit) == compare_digit;
+ DCHECK_LE(n, kMaxInt);
+ int N = static_cast<int>(n);
+ if (!has_bit) {
+ return MutableBigInt::TruncateToNBits(N, x);
+ }
+ if (!x->sign()) {
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(N, x, true);
+ }
+ // Negative numbers must subtract from 2^n, except for the special case
+ // described above.
+ if ((top_digit & (compare_digit - 1)) == 0) {
+ for (int i = static_cast<int>(needed_length) - 2; i >= 0; i--) {
+ if (x->digit(i) != 0) {
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(N, x, false);
+ }
+ }
+ return MutableBigInt::TruncateToNBits(N, x);
+ }
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(N, x, false);
+}
+
+MaybeHandle<BigInt> BigInt::AsUintN(uint64_t n, Handle<BigInt> x) {
+ if (x->is_zero()) return x;
+ if (n == 0) return MutableBigInt::Zero(x->GetIsolate());
+ // If {x} is negative, simulate two's complement representation.
+ if (x->sign()) {
+ if (n > kMaxLengthBits) {
+ THROW_NEW_ERROR(x->GetIsolate(),
+ NewRangeError(MessageTemplate::kBigIntTooBig), BigInt);
+ }
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(static_cast<int>(n), x,
+ false);
+ }
+ // If {x} is positive and has up to {n} bits, return it directly.
+ if (n >= kMaxLengthBits) return x;
+ STATIC_ASSERT(kMaxLengthBits < kMaxInt - kDigitBits);
+ int needed_length = static_cast<int>((n + kDigitBits - 1) / kDigitBits);
+ if (x->length() < needed_length) return x;
+ int bits_in_top_digit = n % kDigitBits;
+ if (bits_in_top_digit == 0) {
+ if (x->length() == needed_length) return x;
+ } else {
+ digit_t top_digit = x->digit(needed_length - 1);
+ if ((top_digit >> bits_in_top_digit) == 0) return x;
+ }
+ // Otherwise, truncate.
+ DCHECK_LE(n, kMaxInt);
+ return MutableBigInt::TruncateToNBits(static_cast<int>(n), x);
+}
+
+Handle<BigInt> MutableBigInt::TruncateToNBits(int n, Handle<BigInt> x) {
+ // Only call this when there's something to do.
+ DCHECK_NE(n, 0);
+ DCHECK_GT(x->length(), n / kDigitBits);
+ Isolate* isolate = x->GetIsolate();
+
+ int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
+ DCHECK_LE(needed_digits, x->length());
+ Handle<MutableBigInt> result = New(isolate, needed_digits).ToHandleChecked();
+
+ // Copy all digits except the MSD.
+ int last = needed_digits - 1;
+ for (int i = 0; i < last; i++) {
+ result->set_digit(i, x->digit(i));
+ }
+
+ // The MSD might contain extra bits that we don't want.
+ digit_t msd = x->digit(last);
+ int drop = kDigitBits - (n % kDigitBits);
+ result->set_digit(last, (msd << drop) >> drop);
+ result->set_sign(x->sign());
+ return MakeImmutable(result);
+}
+
+// Subtracts the least significant n bits of abs(x) from 2^n.
+Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(int n,
+ Handle<BigInt> x,
+ bool result_sign) {
+ DCHECK_NE(n, 0);
+ DCHECK_LE(n, kMaxLengthBits);
+ Isolate* isolate = x->GetIsolate();
+
+ int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
+ DCHECK_LE(needed_digits, kMaxLength); // Follows from n <= kMaxLengthBits.
+ Handle<MutableBigInt> result = New(isolate, needed_digits).ToHandleChecked();
+
+ // Process all digits except the MSD.
+ int i = 0;
+ int last = needed_digits - 1;
+ int x_length = x->length();
+ digit_t borrow = 0;
+ // Take digits from {x} unless its length is exhausted.
+ int limit = Min(last, x_length);
+ for (; i < limit; i++) {
+ digit_t new_borrow = 0;
+ digit_t difference = digit_sub(0, x->digit(i), &new_borrow);
+ difference = digit_sub(difference, borrow, &new_borrow);
+ result->set_digit(i, difference);
+ borrow = new_borrow;
+ }
+ // Then simulate leading zeroes in {x} as needed.
+ for (; i < last; i++) {
+ digit_t new_borrow = 0;
+ digit_t difference = digit_sub(0, borrow, &new_borrow);
+ result->set_digit(i, difference);
+ borrow = new_borrow;
+ }
+
+ // The MSD might contain extra bits that we don't want.
+ digit_t msd = last < x_length ? x->digit(last) : 0;
+ int msd_bits_consumed = n % kDigitBits;
+ digit_t result_msd;
+ if (msd_bits_consumed == 0) {
+ digit_t new_borrow = 0;
+ result_msd = digit_sub(0, msd, &new_borrow);
+ result_msd = digit_sub(result_msd, borrow, &new_borrow);
+ } else {
+ int drop = kDigitBits - msd_bits_consumed;
+ msd = (msd << drop) >> drop;
+ digit_t minuend_msd = static_cast<digit_t>(1) << (kDigitBits - drop);
+ digit_t new_borrow = 0;
+ result_msd = digit_sub(minuend_msd, msd, &new_borrow);
+ result_msd = digit_sub(result_msd, borrow, &new_borrow);
+ DCHECK_EQ(new_borrow, 0); // result < 2^n.
+ // If all subtracted bits were zero, we have to get rid of the
+ // materialized minuend_msd again.
+ result_msd &= (minuend_msd - 1);
+ }
+ result->set_digit(last, result_msd);
+ result->set_sign(result_sign);
+ return MakeImmutable(result);
+}
+
// Digit arithmetic helpers.
#if V8_TARGET_ARCH_32_BIT
@@ -1179,7 +2024,8 @@ typedef __uint128_t twodigit_t;
// {carry} must point to an initialized digit_t and will either be incremented
// by one or left alone.
-inline BigInt::digit_t BigInt::digit_add(digit_t a, digit_t b, digit_t* carry) {
+inline BigInt::digit_t MutableBigInt::digit_add(digit_t a, digit_t b,
+ digit_t* carry) {
#if HAVE_TWODIGIT_T
twodigit_t result = static_cast<twodigit_t>(a) + static_cast<twodigit_t>(b);
*carry += result >> kDigitBits;
@@ -1193,8 +2039,8 @@ inline BigInt::digit_t BigInt::digit_add(digit_t a, digit_t b, digit_t* carry) {
// {borrow} must point to an initialized digit_t and will either be incremented
// by one or left alone.
-inline BigInt::digit_t BigInt::digit_sub(digit_t a, digit_t b,
- digit_t* borrow) {
+inline BigInt::digit_t MutableBigInt::digit_sub(digit_t a, digit_t b,
+ digit_t* borrow) {
#if HAVE_TWODIGIT_T
twodigit_t result = static_cast<twodigit_t>(a) - static_cast<twodigit_t>(b);
*borrow += (result >> kDigitBits) & 1;
@@ -1207,7 +2053,8 @@ inline BigInt::digit_t BigInt::digit_sub(digit_t a, digit_t b,
}
// Returns the low half of the result. High half is in {high}.
-inline BigInt::digit_t BigInt::digit_mul(digit_t a, digit_t b, digit_t* high) {
+inline BigInt::digit_t MutableBigInt::digit_mul(digit_t a, digit_t b,
+ digit_t* high) {
#if HAVE_TWODIGIT_T
twodigit_t result = static_cast<twodigit_t>(a) * static_cast<twodigit_t>(b);
*high = result >> kDigitBits;
@@ -1244,8 +2091,8 @@ inline BigInt::digit_t BigInt::digit_mul(digit_t a, digit_t b, digit_t* high) {
// Returns the quotient.
// quotient = (high << kDigitBits + low - remainder) / divisor
-BigInt::digit_t BigInt::digit_div(digit_t high, digit_t low, digit_t divisor,
- digit_t* remainder) {
+BigInt::digit_t MutableBigInt::digit_div(digit_t high, digit_t low,
+ digit_t divisor, digit_t* remainder) {
DCHECK(high < divisor);
#if V8_TARGET_ARCH_X64 && (__GNUC__ || __clang__)
digit_t quotient;
@@ -1311,7 +2158,7 @@ BigInt::digit_t BigInt::digit_div(digit_t high, digit_t low, digit_t divisor,
}
// Raises {base} to the power of {exponent}. Does not check for overflow.
-BigInt::digit_t BigInt::digit_pow(digit_t base, digit_t exponent) {
+BigInt::digit_t MutableBigInt::digit_pow(digit_t base, digit_t exponent) {
digit_t result = 1ull;
while (exponent > 0) {
if (exponent & 1) {
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 43a91b5133..de0daf495e 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -15,167 +15,166 @@
namespace v8 {
namespace internal {
+// BigIntBase is just the raw data object underlying a BigInt. Use with care!
+// Most code should be using BigInts instead.
+class BigIntBase : public HeapObject {
+ public:
+ inline int length() const {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ return LengthBits::decode(static_cast<uint32_t>(bitfield));
+ }
+
+ // The maximum length that the current implementation supports would be
+ // kMaxInt / kDigitBits. However, we use a lower limit for now, because
+ // raising it later is easier than lowering it.
+ // Support up to 1 million bits.
+ static const int kMaxLengthBits = 1024 * 1024;
+ static const int kMaxLength = kMaxLengthBits / (kPointerSize * kBitsPerByte);
+
+ private:
+ friend class BigInt;
+ friend class MutableBigInt;
+
+ typedef uintptr_t digit_t;
+ static const int kDigitSize = sizeof(digit_t);
+ // kMaxLength definition assumes this:
+ STATIC_ASSERT(kDigitSize == kPointerSize);
+
+ static const int kDigitBits = kDigitSize * kBitsPerByte;
+ static const int kHalfDigitBits = kDigitBits / 2;
+ static const digit_t kHalfDigitMask = (1ull << kHalfDigitBits) - 1;
+
+ static const int kBitfieldOffset = HeapObject::kHeaderSize;
+ static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
+ static const int kHeaderSize = kDigitsOffset;
+
+ static const int kLengthFieldBits = 20;
+ STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
+ class LengthBits : public BitField<int, 0, kLengthFieldBits> {};
+ class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
+
+ // sign() == true means negative.
+ inline bool sign() const {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ return SignBits::decode(static_cast<uint32_t>(bitfield));
+ }
+
+ inline digit_t digit(int n) const {
+ SLOW_DCHECK(0 <= n && n < length());
+ const byte* address =
+ FIELD_ADDR_CONST(this, kDigitsOffset + n * kDigitSize);
+ return *reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address));
+ }
+
+ bool is_zero() const { return length() == 0; }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BigIntBase);
+};
+
+class FreshlyAllocatedBigInt : public BigIntBase {
+ // This class is essentially the publicly accessible abstract version of
+ // MutableBigInt (which is a hidden implementation detail). It serves as
+ // the return type of Factory::NewBigInt, and makes it possible to enforce
+ // casting restrictions:
+ // - FreshlyAllocatedBigInt can be cast explicitly to MutableBigInt
+ // (with MutableBigInt::Cast) for initialization.
+ // - MutableBigInt can be cast/converted explicitly to BigInt
+ // (with MutableBigInt::MakeImmutable); is afterwards treated as readonly.
+ // - No accidental implicit casting is possible from BigInt to MutableBigInt
+ // (and no explicit operator is provided either).
+
+ public:
+ inline static FreshlyAllocatedBigInt* cast(Object* object) {
+ SLOW_DCHECK(object->IsBigInt());
+ return reinterpret_cast<FreshlyAllocatedBigInt*>(object);
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreshlyAllocatedBigInt);
+};
+
// UNDER CONSTRUCTION!
// Arbitrary precision integers in JavaScript.
-class BigInt : public HeapObject {
+class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
public:
// Implementation of the Spec methods, see:
// https://tc39.github.io/proposal-bigint/#sec-numeric-types
// Sections 1.1.1 through 1.1.19.
static Handle<BigInt> UnaryMinus(Handle<BigInt> x);
- static Handle<BigInt> BitwiseNot(Handle<BigInt> x);
+ static MaybeHandle<BigInt> BitwiseNot(Handle<BigInt> x);
static MaybeHandle<BigInt> Exponentiate(Handle<BigInt> base,
Handle<BigInt> exponent);
- static Handle<BigInt> Multiply(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> Multiply(Handle<BigInt> x, Handle<BigInt> y);
static MaybeHandle<BigInt> Divide(Handle<BigInt> x, Handle<BigInt> y);
static MaybeHandle<BigInt> Remainder(Handle<BigInt> x, Handle<BigInt> y);
- static Handle<BigInt> Add(Handle<BigInt> x, Handle<BigInt> y);
- static Handle<BigInt> Subtract(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> Add(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> Subtract(Handle<BigInt> x, Handle<BigInt> y);
static MaybeHandle<BigInt> LeftShift(Handle<BigInt> x, Handle<BigInt> y);
static MaybeHandle<BigInt> SignedRightShift(Handle<BigInt> x,
Handle<BigInt> y);
static MaybeHandle<BigInt> UnsignedRightShift(Handle<BigInt> x,
Handle<BigInt> y);
- static bool LessThan(Handle<BigInt> x, Handle<BigInt> y);
- static bool Equal(BigInt* x, BigInt* y);
- static Handle<BigInt> BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y);
- static Handle<BigInt> BitwiseXor(Handle<BigInt> x, Handle<BigInt> y);
- static Handle<BigInt> BitwiseOr(Handle<BigInt> x, Handle<BigInt> y);
+ // More convenient version of "bool LessThan(x, y)".
+ static ComparisonResult CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y);
+ static bool EqualToBigInt(BigInt* x, BigInt* y);
+ static MaybeHandle<BigInt> BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> BitwiseXor(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> BitwiseOr(Handle<BigInt> x, Handle<BigInt> y);
// Other parts of the public interface.
+ static MaybeHandle<BigInt> Increment(Handle<BigInt> x);
+ static MaybeHandle<BigInt> Decrement(Handle<BigInt> x);
+
bool ToBoolean() { return !is_zero(); }
uint32_t Hash() {
// TODO(jkummerow): Improve this. At least use length and sign.
return is_zero() ? 0 : ComputeIntegerHash(static_cast<uint32_t>(digit(0)));
}
+ static bool EqualToString(Handle<BigInt> x, Handle<String> y);
+ static bool EqualToNumber(Handle<BigInt> x, Handle<Object> y);
+ static ComparisonResult CompareToNumber(Handle<BigInt> x, Handle<Object> y);
+ // Exposed for tests, do not call directly. Use CompareToNumber() instead.
+ static ComparisonResult CompareToDouble(Handle<BigInt> x, double y);
+
+ static Handle<BigInt> AsIntN(uint64_t n, Handle<BigInt> x);
+ static MaybeHandle<BigInt> AsUintN(uint64_t n, Handle<BigInt> x);
+
DECL_CAST(BigInt)
DECL_VERIFIER(BigInt)
DECL_PRINTER(BigInt)
void BigIntShortPrint(std::ostream& os);
- // TODO(jkummerow): Do we need {synchronized_length} for GC purposes?
- DECL_INT_ACCESSORS(length)
-
inline static int SizeFor(int length) {
return kHeaderSize + length * kDigitSize;
}
- void Initialize(int length, bool zero_initialize);
static MaybeHandle<String> ToString(Handle<BigInt> bigint, int radix = 10);
+ // "The Number value for x", see:
+ // https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type
+ // Returns a Smi or HeapNumber.
+ static Handle<Object> ToNumber(Handle<BigInt> x);
- // The maximum length that the current implementation supports would be
- // kMaxInt / kDigitBits. However, we use a lower limit for now, because
- // raising it later is easier than lowering it.
- static const int kMaxLengthBits = 20;
- static const int kMaxLength = (1 << kMaxLengthBits) - 1;
+ // ECMAScript's NumberToBigInt
+ static MaybeHandle<BigInt> FromNumber(Isolate* isolate,
+ Handle<Object> number);
+
+ // ECMAScript's ToBigInt (throws for Number input)
+ static MaybeHandle<BigInt> FromObject(Isolate* isolate, Handle<Object> obj);
class BodyDescriptor;
private:
- friend class Factory;
friend class BigIntParseIntHelper;
- typedef uintptr_t digit_t;
- static const int kDigitSize = sizeof(digit_t);
- static const int kDigitBits = kDigitSize * kBitsPerByte;
- static const int kHalfDigitBits = kDigitBits / 2;
- static const digit_t kHalfDigitMask = (1ull << kHalfDigitBits) - 1;
-
- // Private helpers for public methods.
- static Handle<BigInt> Copy(Handle<BigInt> source);
- static MaybeHandle<BigInt> AllocateFor(Isolate* isolate, int radix,
- int charcount);
- void RightTrim();
-
- static Handle<BigInt> AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign);
- static Handle<BigInt> AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign);
- static Handle<BigInt> AbsoluteAddOne(Handle<BigInt> x, bool sign,
- BigInt* result_storage);
- static Handle<BigInt> AbsoluteSubOne(Handle<BigInt> x, int result_length);
-
- enum ExtraDigitsHandling { kCopy, kSkip };
- static inline Handle<BigInt> AbsoluteBitwiseOp(
- Handle<BigInt> x, Handle<BigInt> y, BigInt* result_storage,
- ExtraDigitsHandling extra_digits,
- std::function<digit_t(digit_t, digit_t)> op);
- static Handle<BigInt> AbsoluteAnd(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage = nullptr);
- static Handle<BigInt> AbsoluteAndNot(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage = nullptr);
- static Handle<BigInt> AbsoluteOr(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage = nullptr);
- static Handle<BigInt> AbsoluteXor(Handle<BigInt> x, Handle<BigInt> y,
- BigInt* result_storage = nullptr);
-
- static int AbsoluteCompare(Handle<BigInt> x, Handle<BigInt> y);
-
- static void MultiplyAccumulate(Handle<BigInt> multiplicand,
- digit_t multiplier, Handle<BigInt> accumulator,
- int accumulator_index);
- static void InternalMultiplyAdd(BigInt* source, digit_t factor,
- digit_t summand, int n, BigInt* result);
- void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
-
- // Specialized helpers for Divide/Remainder.
- static void AbsoluteDivSmall(Handle<BigInt> x, digit_t divisor,
- Handle<BigInt>* quotient, digit_t* remainder);
- static void AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
- Handle<BigInt>* quotient,
- Handle<BigInt>* remainder);
- static bool ProductGreaterThan(digit_t factor1, digit_t factor2, digit_t high,
- digit_t low);
- digit_t InplaceAdd(BigInt* summand, int start_index);
- digit_t InplaceSub(BigInt* subtrahend, int start_index);
- void InplaceRightShift(int shift);
- enum SpecialLeftShiftMode {
- kSameSizeResult,
- kAlwaysAddOneDigit,
- };
- static Handle<BigInt> SpecialLeftShift(Handle<BigInt> x, int shift,
- SpecialLeftShiftMode mode);
-
- // Specialized helpers for shift operations.
- static MaybeHandle<BigInt> LeftShiftByAbsolute(Handle<BigInt> x,
- Handle<BigInt> y);
- static Handle<BigInt> RightShiftByAbsolute(Handle<BigInt> x,
- Handle<BigInt> y);
- static Handle<BigInt> RightShiftByMaximum(Isolate* isolate, bool sign);
- static Maybe<digit_t> ToShiftAmount(Handle<BigInt> x);
-
- static MaybeHandle<String> ToStringBasePowerOfTwo(Handle<BigInt> x,
- int radix);
- static MaybeHandle<String> ToStringGeneric(Handle<BigInt> x, int radix);
-
- // Digit arithmetic helpers.
- static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
- static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
- static inline digit_t digit_mul(digit_t a, digit_t b, digit_t* high);
- static inline digit_t digit_div(digit_t high, digit_t low, digit_t divisor,
- digit_t* remainder);
- static digit_t digit_pow(digit_t base, digit_t exponent);
- static inline bool digit_ismax(digit_t x) {
- return static_cast<digit_t>(~x) == 0;
- }
+ // Special functions for BigIntParseIntHelper:
+ static Handle<BigInt> Zero(Isolate* isolate);
+ static MaybeHandle<FreshlyAllocatedBigInt> AllocateFor(
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw);
+ static void InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
+ uintptr_t factor, uintptr_t summand);
+ static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
- class LengthBits : public BitField<int, 0, kMaxLengthBits> {};
- class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
-
- // Low-level accessors.
- // sign() == true means negative.
- DECL_BOOLEAN_ACCESSORS(sign)
- inline digit_t digit(int n) const;
- inline void set_digit(int n, digit_t value);
-
- bool is_zero() {
- DCHECK(length() > 0 || !sign()); // There is no -0n.
- return length() == 0;
- }
- static const int kBitfieldOffset = HeapObject::kHeaderSize;
- static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
- static const int kHeaderSize = kDigitsOffset;
DISALLOW_IMPLICIT_CONSTRUCTORS(BigInt);
};
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
new file mode 100644
index 0000000000..17cfa4f67b
--- /dev/null
+++ b/deps/v8/src/objects/code-inl.h
@@ -0,0 +1,714 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_CODE_INL_H_
+#define V8_OBJECTS_CODE_INL_H_
+
+#include "src/objects/code.h"
+
+#include "src/objects/dictionary.h"
+#include "src/v8memory.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(CodeDataContainer, CODE_DATA_CONTAINER_TYPE)
+
+CAST_ACCESSOR(AbstractCode)
+CAST_ACCESSOR(BytecodeArray)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(CodeDataContainer)
+CAST_ACCESSOR(DependentCode)
+CAST_ACCESSOR(DeoptimizationData)
+CAST_ACCESSOR(HandlerTable)
+
+int AbstractCode::instruction_size() {
+ if (IsCode()) {
+ return GetCode()->instruction_size();
+ } else {
+ return GetBytecodeArray()->length();
+ }
+}
+
+ByteArray* AbstractCode::source_position_table() {
+ if (IsCode()) {
+ return GetCode()->SourcePositionTable();
+ } else {
+ return GetBytecodeArray()->SourcePositionTable();
+ }
+}
+
+Object* AbstractCode::stack_frame_cache() {
+ Object* maybe_table;
+ if (IsCode()) {
+ maybe_table = GetCode()->source_position_table();
+ } else {
+ maybe_table = GetBytecodeArray()->source_position_table();
+ }
+ if (maybe_table->IsSourcePositionTableWithFrameCache()) {
+ return SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->stack_frame_cache();
+ }
+ return Smi::kZero;
+}
+
+int AbstractCode::SizeIncludingMetadata() {
+ if (IsCode()) {
+ return GetCode()->SizeIncludingMetadata();
+ } else {
+ return GetBytecodeArray()->SizeIncludingMetadata();
+ }
+}
+int AbstractCode::ExecutableSize() {
+ if (IsCode()) {
+ return GetCode()->ExecutableSize();
+ } else {
+ return GetBytecodeArray()->BytecodeArraySize();
+ }
+}
+
+Address AbstractCode::instruction_start() {
+ if (IsCode()) {
+ return GetCode()->instruction_start();
+ } else {
+ return GetBytecodeArray()->GetFirstBytecodeAddress();
+ }
+}
+
+Address AbstractCode::instruction_end() {
+ if (IsCode()) {
+ return GetCode()->instruction_end();
+ } else {
+ return GetBytecodeArray()->GetFirstBytecodeAddress() +
+ GetBytecodeArray()->length();
+ }
+}
+
+bool AbstractCode::contains(byte* inner_pointer) {
+ return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+}
+
+AbstractCode::Kind AbstractCode::kind() {
+ if (IsCode()) {
+ return static_cast<AbstractCode::Kind>(GetCode()->kind());
+ } else {
+ return INTERPRETED_FUNCTION;
+ }
+}
+
+Code* AbstractCode::GetCode() { return Code::cast(this); }
+
+BytecodeArray* AbstractCode::GetBytecodeArray() {
+ return BytecodeArray::cast(this);
+}
+
+DependentCode* DependentCode::next_link() {
+ return DependentCode::cast(get(kNextLinkIndex));
+}
+
+void DependentCode::set_next_link(DependentCode* next) {
+ set(kNextLinkIndex, next);
+}
+
+int DependentCode::flags() { return Smi::ToInt(get(kFlagsIndex)); }
+
+void DependentCode::set_flags(int flags) {
+ set(kFlagsIndex, Smi::FromInt(flags));
+}
+
+int DependentCode::count() { return CountField::decode(flags()); }
+
+void DependentCode::set_count(int value) {
+ set_flags(CountField::update(flags(), value));
+}
+
+DependentCode::DependencyGroup DependentCode::group() {
+ return static_cast<DependencyGroup>(GroupField::decode(flags()));
+}
+
+void DependentCode::set_group(DependentCode::DependencyGroup group) {
+ set_flags(GroupField::update(flags(), static_cast<int>(group)));
+}
+
+void DependentCode::set_object_at(int i, Object* object) {
+ set(kCodesStartIndex + i, object);
+}
+
+Object* DependentCode::object_at(int i) { return get(kCodesStartIndex + i); }
+
+void DependentCode::clear_at(int i) { set_undefined(kCodesStartIndex + i); }
+
+void DependentCode::copy(int from, int to) {
+ set(kCodesStartIndex + to, get(kCodesStartIndex + from));
+}
+
+INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
+#define CODE_ACCESSORS(name, type, offset) \
+ ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+ !GetHeap()->InNewSpace(value))
+CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
+CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
+CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
+CODE_ACCESSORS(protected_instructions, FixedArray, kProtectedInstructionsOffset)
+CODE_ACCESSORS(code_data_container, CodeDataContainer, kCodeDataContainerOffset)
+CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
+#undef CODE_ACCESSORS
+
+void Code::WipeOutHeader() {
+ WRITE_FIELD(this, kRelocationInfoOffset, nullptr);
+ WRITE_FIELD(this, kHandlerTableOffset, nullptr);
+ WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
+ WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
+ WRITE_FIELD(this, kProtectedInstructionsOffset, nullptr);
+ WRITE_FIELD(this, kCodeDataContainerOffset, nullptr);
+}
+
+void Code::clear_padding() {
+ memset(address() + kHeaderPaddingStart, 0, kHeaderSize - kHeaderPaddingStart);
+ Address data_end =
+ has_unwinding_info() ? unwinding_info_end() : instruction_end();
+ memset(data_end, 0, CodeSize() - (data_end - address()));
+}
+
+ByteArray* Code::SourcePositionTable() const {
+ Object* maybe_table = source_position_table();
+ if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ return SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table();
+}
+
+uint32_t Code::stub_key() const {
+ DCHECK(is_stub());
+ return READ_UINT32_FIELD(this, kStubKeyOffset);
+}
+
+void Code::set_stub_key(uint32_t key) {
+ DCHECK(is_stub() || key == 0); // Allow zero initialization.
+ WRITE_UINT32_FIELD(this, kStubKeyOffset, key);
+}
+
+Object* Code::next_code_link() const {
+ return code_data_container()->next_code_link();
+}
+
+void Code::set_next_code_link(Object* value) {
+ code_data_container()->set_next_code_link(value);
+}
+
+byte* Code::instruction_start() const {
+ return const_cast<byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
+}
+
+byte* Code::instruction_end() const {
+ return instruction_start() + instruction_size();
+}
+
+int Code::GetUnwindingInfoSizeOffset() const {
+ DCHECK(has_unwinding_info());
+ return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
+}
+
+int Code::unwinding_info_size() const {
+ DCHECK(has_unwinding_info());
+ return static_cast<int>(
+ READ_UINT64_FIELD(this, GetUnwindingInfoSizeOffset()));
+}
+
+void Code::set_unwinding_info_size(int value) {
+ DCHECK(has_unwinding_info());
+ WRITE_UINT64_FIELD(this, GetUnwindingInfoSizeOffset(), value);
+}
+
+byte* Code::unwinding_info_start() const {
+ DCHECK(has_unwinding_info());
+ return const_cast<byte*>(
+ FIELD_ADDR_CONST(this, GetUnwindingInfoSizeOffset())) +
+ kInt64Size;
+}
+
+byte* Code::unwinding_info_end() const {
+ DCHECK(has_unwinding_info());
+ return unwinding_info_start() + unwinding_info_size();
+}
+
+int Code::body_size() const {
+ int unpadded_body_size =
+ has_unwinding_info()
+ ? static_cast<int>(unwinding_info_end() - instruction_start())
+ : instruction_size();
+ return RoundUp(unpadded_body_size, kObjectAlignment);
+}
+
+int Code::SizeIncludingMetadata() const {
+ int size = CodeSize();
+ size += relocation_info()->Size();
+ size += deoptimization_data()->Size();
+ size += handler_table()->Size();
+ size += protected_instructions()->Size();
+ return size;
+}
+
+ByteArray* Code::unchecked_relocation_info() const {
+ return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
+}
+
+byte* Code::relocation_start() const {
+ return unchecked_relocation_info()->GetDataStartAddress();
+}
+
+int Code::relocation_size() const {
+ return unchecked_relocation_info()->length();
+}
+
+byte* Code::entry() const { return instruction_start(); }
+
+bool Code::contains(byte* inner_pointer) {
+ return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+}
+
+int Code::ExecutableSize() const {
+ // Check that the assumptions about the layout of the code object holds.
+ DCHECK_EQ(static_cast<int>(instruction_start() - address()),
+ Code::kHeaderSize);
+ return instruction_size() + Code::kHeaderSize;
+}
+
+int Code::CodeSize() const { return SizeFor(body_size()); }
+
+Code::Kind Code::kind() const {
+ return KindField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+}
+
+void Code::initialize_flags(Kind kind, bool has_unwinding_info,
+ bool is_turbofanned, int stack_slots) {
+ CHECK_LE(stack_slots, StackSlotsField::kMax);
+ DCHECK_IMPLIES(stack_slots != 0, is_turbofanned);
+ static_assert(Code::NUMBER_OF_KINDS <= KindField::kMax + 1, "field overflow");
+ uint32_t flags = HasUnwindingInfoField::encode(has_unwinding_info) |
+ KindField::encode(kind) |
+ IsTurbofannedField::encode(is_turbofanned) |
+ StackSlotsField::encode(stack_slots);
+ WRITE_UINT32_FIELD(this, kFlagsOffset, flags);
+}
+
+inline bool Code::is_interpreter_trampoline_builtin() const {
+ Builtins* builtins = GetIsolate()->builtins();
+ bool is_interpreter_trampoline =
+ (this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
+ this == builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
+ this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch));
+ DCHECK_IMPLIES(is_interpreter_trampoline, !Builtins::IsLazy(builtin_index()));
+ return is_interpreter_trampoline;
+}
+
+inline bool Code::checks_optimization_marker() const {
+ Builtins* builtins = GetIsolate()->builtins();
+ bool checks_marker =
+ (this == builtins->builtin(Builtins::kCompileLazy) ||
+ this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
+ this == builtins->builtin(Builtins::kCheckOptimizationMarker));
+ DCHECK_IMPLIES(checks_marker, !Builtins::IsLazy(builtin_index()));
+ return checks_marker ||
+ (kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());
+}
+
+inline bool Code::has_unwinding_info() const {
+ return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+}
+
+inline bool Code::has_tagged_params() const {
+ int flags = READ_UINT32_FIELD(this, kFlagsOffset);
+ return HasTaggedStackField::decode(flags);
+}
+
+inline void Code::set_has_tagged_params(bool value) {
+ int previous = READ_UINT32_FIELD(this, kFlagsOffset);
+ int updated = HasTaggedStackField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kFlagsOffset, updated);
+}
+
+inline bool Code::is_turbofanned() const {
+ return IsTurbofannedField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+}
+
+inline bool Code::can_have_weak_objects() const {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int flags = code_data_container()->kind_specific_flags();
+ return CanHaveWeakObjectsField::decode(flags);
+}
+
+inline void Code::set_can_have_weak_objects(bool value) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int previous = code_data_container()->kind_specific_flags();
+ int updated = CanHaveWeakObjectsField::update(previous, value);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+inline bool Code::is_construct_stub() const {
+ DCHECK(kind() == BUILTIN);
+ int flags = code_data_container()->kind_specific_flags();
+ return IsConstructStubField::decode(flags);
+}
+
+inline void Code::set_is_construct_stub(bool value) {
+ DCHECK(kind() == BUILTIN);
+ int previous = code_data_container()->kind_specific_flags();
+ int updated = IsConstructStubField::update(previous, value);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+inline bool Code::is_promise_rejection() const {
+ DCHECK(kind() == BUILTIN);
+ int flags = code_data_container()->kind_specific_flags();
+ return IsPromiseRejectionField::decode(flags);
+}
+
+inline void Code::set_is_promise_rejection(bool value) {
+ DCHECK(kind() == BUILTIN);
+ int previous = code_data_container()->kind_specific_flags();
+ int updated = IsPromiseRejectionField::update(previous, value);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+inline bool Code::is_exception_caught() const {
+ DCHECK(kind() == BUILTIN);
+ int flags = code_data_container()->kind_specific_flags();
+ return IsExceptionCaughtField::decode(flags);
+}
+
+inline void Code::set_is_exception_caught(bool value) {
+ DCHECK(kind() == BUILTIN);
+ int previous = code_data_container()->kind_specific_flags();
+ int updated = IsExceptionCaughtField::update(previous, value);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
+ if (is_promise_rejection()) return HandlerTable::PROMISE;
+ if (is_exception_caught()) return HandlerTable::CAUGHT;
+ return HandlerTable::UNCAUGHT;
+}
+
+int Code::builtin_index() const {
+ int index = READ_INT_FIELD(this, kBuiltinIndexOffset);
+ DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ return index;
+}
+
+void Code::set_builtin_index(int index) {
+ DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
+}
+
+bool Code::is_builtin() const { return builtin_index() != -1; }
+
+unsigned Code::stack_slots() const {
+ DCHECK(is_turbofanned());
+ return StackSlotsField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+}
+
+unsigned Code::safepoint_table_offset() const {
+ DCHECK(is_turbofanned());
+ return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
+}
+
+void Code::set_safepoint_table_offset(unsigned offset) {
+ CHECK(offset <= std::numeric_limits<uint32_t>::max());
+ DCHECK(is_turbofanned() || offset == 0); // Allow zero initialization.
+ DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+ WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+}
+
+bool Code::marked_for_deoptimization() const {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int flags = code_data_container()->kind_specific_flags();
+ return MarkedForDeoptimizationField::decode(flags);
+}
+
+void Code::set_marked_for_deoptimization(bool flag) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
+ int previous = code_data_container()->kind_specific_flags();
+ int updated = MarkedForDeoptimizationField::update(previous, flag);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+bool Code::deopt_already_counted() const {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int flags = code_data_container()->kind_specific_flags();
+ return DeoptAlreadyCountedField::decode(flags);
+}
+
+void Code::set_deopt_already_counted(bool flag) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
+ int previous = code_data_container()->kind_specific_flags();
+ int updated = DeoptAlreadyCountedField::update(previous, flag);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+bool Code::is_stub() const { return kind() == STUB; }
+bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
+bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
+
+Address Code::constant_pool() {
+ Address constant_pool = nullptr;
+ if (FLAG_enable_embedded_constant_pool) {
+ int offset = constant_pool_offset();
+ if (offset < instruction_size()) {
+ constant_pool = FIELD_ADDR(this, kHeaderSize + offset);
+ }
+ }
+ return constant_pool;
+}
+
+Code* Code::GetCodeFromTargetAddress(Address address) {
+ HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
+ // GetCodeFromTargetAddress might be called when marking objects during mark
+ // sweep. reinterpret_cast is therefore used instead of the more appropriate
+ // Code::cast. Code::cast does not work when the object's map is
+ // marked.
+ Code* result = reinterpret_cast<Code*>(code);
+ return result;
+}
+
+Object* Code::GetObjectFromCodeEntry(Address code_entry) {
+ return HeapObject::FromAddress(code_entry - Code::kHeaderSize);
+}
+
+Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
+ return GetObjectFromCodeEntry(Memory::Address_at(location_of_address));
+}
+
+bool Code::CanContainWeakObjects() {
+ return is_optimized_code() && can_have_weak_objects();
+}
+
+bool Code::IsWeakObject(Object* object) {
+ return (CanContainWeakObjects() && IsWeakObjectInOptimizedCode(object));
+}
+
+bool Code::IsWeakObjectInOptimizedCode(Object* object) {
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition();
+ }
+ if (object->IsCell()) {
+ object = Cell::cast(object)->value();
+ } else if (object->IsPropertyCell()) {
+ object = PropertyCell::cast(object)->value();
+ }
+ if (object->IsJSReceiver() || object->IsContext()) {
+ return true;
+ }
+ return false;
+}
+
+INT_ACCESSORS(CodeDataContainer, kind_specific_flags, kKindSpecificFlagsOffset)
+ACCESSORS(CodeDataContainer, next_code_link, Object, kNextCodeLinkOffset)
+
+void CodeDataContainer::clear_padding() {
+ memset(address() + kUnalignedSize, 0, kSize - kUnalignedSize);
+}
+
+byte BytecodeArray::get(int index) {
+ DCHECK(index >= 0 && index < this->length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+void BytecodeArray::set(int index, byte value) {
+ DCHECK(index >= 0 && index < this->length());
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+}
+
+void BytecodeArray::set_frame_size(int frame_size) {
+ DCHECK_GE(frame_size, 0);
+ DCHECK(IsAligned(frame_size, static_cast<unsigned>(kPointerSize)));
+ WRITE_INT_FIELD(this, kFrameSizeOffset, frame_size);
+}
+
+int BytecodeArray::frame_size() const {
+ return READ_INT_FIELD(this, kFrameSizeOffset);
+}
+
+int BytecodeArray::register_count() const {
+ return frame_size() / kPointerSize;
+}
+
+void BytecodeArray::set_parameter_count(int number_of_parameters) {
+ DCHECK_GE(number_of_parameters, 0);
+ // Parameter count is stored as the size on stack of the parameters to allow
+ // it to be used directly by generated code.
+ WRITE_INT_FIELD(this, kParameterSizeOffset,
+ (number_of_parameters << kPointerSizeLog2));
+}
+
+// Returns the register holding the incoming new.target or generator object,
+// or Register::invalid_value() if none was recorded. Operand value 0 is
+// reserved as the "no register" sentinel.
+interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
+ const {
+ int register_operand =
+ READ_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset);
+ if (register_operand == 0) {
+ return interpreter::Register::invalid_value();
+ } else {
+ return interpreter::Register::FromOperand(register_operand);
+ }
+}
+
+void BytecodeArray::set_incoming_new_target_or_generator_register(
+ interpreter::Register incoming_new_target_or_generator_register) {
+ if (!incoming_new_target_or_generator_register.is_valid()) {
+ // Store 0 as the "no register" sentinel (see getter above).
+ WRITE_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+ } else {
+ DCHECK(incoming_new_target_or_generator_register.index() <
+ register_count());
+ // A valid register must not collide with the sentinel encoding.
+ DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
+ WRITE_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset,
+ incoming_new_target_or_generator_register.ToOperand());
+ }
+}
+
+// Accessors for the (non-negative) interrupt budget counter.
+int BytecodeArray::interrupt_budget() const {
+ return READ_INT_FIELD(this, kInterruptBudgetOffset);
+}
+
+void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
+ DCHECK_GE(interrupt_budget, 0);
+ WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
+}
+
+// OSR loop-nesting level is stored in a single int8 field.
+int BytecodeArray::osr_loop_nesting_level() const {
+ return READ_INT8_FIELD(this, kOSRNestingLevelOffset);
+}
+
+void BytecodeArray::set_osr_loop_nesting_level(int depth) {
+ DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
+ STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
+ WRITE_INT8_FIELD(this, kOSRNestingLevelOffset, depth);
+}
+
+BytecodeArray::Age BytecodeArray::bytecode_age() const {
+ // Bytecode is aged by the concurrent marker.
+ return static_cast<Age>(RELAXED_READ_INT8_FIELD(this, kBytecodeAgeOffset));
+}
+
+void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
+ DCHECK_GE(age, kFirstBytecodeAge);
+ DCHECK_LE(age, kLastBytecodeAge);
+ STATIC_ASSERT(kLastBytecodeAge <= kMaxInt8);
+ // Bytecode is aged by the concurrent marker.
+ RELAXED_WRITE_INT8_FIELD(this, kBytecodeAgeOffset, static_cast<int8_t>(age));
+}
+
+int BytecodeArray::parameter_count() const {
+ // Parameter count is stored as the size on stack of the parameters to allow
+ // it to be used directly by generated code.
+ return READ_INT_FIELD(this, kParameterSizeOffset) >> kPointerSizeLog2;
+}
+
+ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
+ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
+// Either a raw ByteArray or a SourcePositionTableWithFrameCache wrapper;
+// use SourcePositionTable() below to get the unwrapped table.
+ACCESSORS(BytecodeArray, source_position_table, Object,
+ kSourcePositionTableOffset)
+
+// Zeroes the trailing alignment padding for deterministic snapshots.
+void BytecodeArray::clear_padding() {
+ int data_size = kHeaderSize + length();
+ memset(address() + data_size, 0, SizeFor(length()) - data_size);
+}
+
+// Untagged address of the first bytecode, directly after the header.
+Address BytecodeArray::GetFirstBytecodeAddress() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
+}
+
+// Returns the source position table, looking through the optional
+// frame-cache wrapper.
+ByteArray* BytecodeArray::SourcePositionTable() {
+ Object* maybe_table = source_position_table();
+ if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ return SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table();
+}
+
+int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
+
+// Bytecode size plus the sizes of the dependent metadata arrays.
+int BytecodeArray::SizeIncludingMetadata() {
+ int size = BytecodeArraySize();
+ size += constant_pool()->Size();
+ size += handler_table()->Size();
+ size += SourcePositionTable()->Size();
+ return size;
+}
+
+// All handler-table fields are stored as Smis in the underlying FixedArray.
+int HandlerTable::GetRangeStart(int index) const {
+ return Smi::ToInt(get(index * kRangeEntrySize + kRangeStartIndex));
+}
+
+int HandlerTable::GetRangeEnd(int index) const {
+ return Smi::ToInt(get(index * kRangeEntrySize + kRangeEndIndex));
+}
+
+// The handler slot packs the handler offset together with the catch
+// prediction (HandlerOffsetField / HandlerPredictionField in code.h).
+int HandlerTable::GetRangeHandler(int index) const {
+ return HandlerOffsetField::decode(
+ Smi::ToInt(get(index * kRangeEntrySize + kRangeHandlerIndex)));
+}
+
+int HandlerTable::GetRangeData(int index) const {
+ return Smi::ToInt(get(index * kRangeEntrySize + kRangeDataIndex));
+}
+
+void HandlerTable::SetRangeStart(int index, int value) {
+ set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
+}
+
+void HandlerTable::SetRangeEnd(int index, int value) {
+ set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
+}
+
+void HandlerTable::SetRangeHandler(int index, int offset,
+ CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(offset) |
+ HandlerPredictionField::encode(prediction);
+ set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
+}
+
+void HandlerTable::SetRangeData(int index, int value) {
+ set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
+}
+
+// Setters for the return-address-based flavor of the table.
+void HandlerTable::SetReturnOffset(int index, int value) {
+ set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
+}
+
+void HandlerTable::SetReturnHandler(int index, int offset) {
+ int value = HandlerOffsetField::encode(offset);
+ set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
+}
+
+int HandlerTable::NumberOfRangeEntries() const {
+ return length() / kRangeEntrySize;
+}
+
+// Bytecode offsets are stored as Smis in the raw entry slots.
+BailoutId DeoptimizationData::BytecodeOffset(int i) {
+ return BailoutId(BytecodeOffsetRaw(i)->value());
+}
+
+void DeoptimizationData::SetBytecodeOffset(int i, BailoutId value) {
+ SetBytecodeOffsetRaw(i, Smi::FromInt(value.ToInt()));
+}
+
+// Number of deopt entries, excluding the fixed header slots.
+int DeoptimizationData::DeoptCount() {
+ return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_CODE_INL_H_
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
new file mode 100644
index 0000000000..bb447ce2dd
--- /dev/null
+++ b/deps/v8/src/objects/code.h
@@ -0,0 +1,947 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_CODE_H_
+#define V8_OBJECTS_CODE_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class ByteArray;
+class BytecodeArray;
+class CodeDataContainer;
+
+// HandlerTable is a fixed array containing entries for exception handlers in
+// the code object it is associated with. The table comes in two flavors:
+// 1) Based on ranges: Used for unoptimized code. Contains one entry per
+// exception handler and a range representing the try-block covered by that
+// handler. Layout looks as follows:
+// [ range-start , range-end , handler-offset , handler-data ]
+// 2) Based on return addresses: Used for turbofanned code. Contains one entry
+// per call-site that could throw an exception. Layout looks as follows:
+// [ return-address-offset , handler-offset ]
+class HandlerTable : public FixedArray {
+ public:
+ // Conservative prediction whether a given handler will locally catch an
+ // exception or cause a re-throw to outside the code boundary. Since this is
+ // undecidable it is merely an approximation (e.g. useful for debugger).
+ enum CatchPrediction {
+ UNCAUGHT, // The handler will (likely) rethrow the exception.
+ CAUGHT, // The exception will be caught by the handler.
+ PROMISE, // The exception will be caught and cause a promise rejection.
+ DESUGARING, // The exception will be caught, but both the exception and the
+ // catching are part of a desugaring and should therefore not
+ // be visible to the user (we won't notify the debugger of such
+ // exceptions).
+ ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
+ // in the desugaring of an async function, so special
+ // async/await handling in the debugger can take place.
+ };
+
+ // Getters for handler table based on ranges.
+ inline int GetRangeStart(int index) const;
+ inline int GetRangeEnd(int index) const;
+ inline int GetRangeHandler(int index) const;
+ inline int GetRangeData(int index) const;
+
+ // Setters for handler table based on ranges.
+ inline void SetRangeStart(int index, int value);
+ inline void SetRangeEnd(int index, int value);
+ inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
+ inline void SetRangeData(int index, int value);
+
+ // Setters for handler table based on return addresses.
+ inline void SetReturnOffset(int index, int value);
+ inline void SetReturnHandler(int index, int offset);
+
+ // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
+ // the start of the potentially throwing instruction (using return addresses
+ // for this value would be invalid).
+ int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
+
+ // Lookup handler in a table based on return addresses.
+ int LookupReturn(int pc_offset);
+
+ // Returns the number of entries in the table.
+ inline int NumberOfRangeEntries() const;
+
+ // Returns the required length of the underlying fixed array.
+ static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
+ static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
+
+ // Returns an empty handler table.
+ static Handle<HandlerTable> Empty(Isolate* isolate);
+
+ DECL_CAST(HandlerTable)
+
+#ifdef ENABLE_DISASSEMBLER
+ void HandlerTableRangePrint(std::ostream& os); // NOLINT
+ void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+#endif
+
+ private:
+ // Layout description for handler table based on ranges.
+ // Slot indices within one entry, and the entry size in fixed-array slots.
+ static const int kRangeStartIndex = 0;
+ static const int kRangeEndIndex = 1;
+ static const int kRangeHandlerIndex = 2;
+ static const int kRangeDataIndex = 3;
+ static const int kRangeEntrySize = 4;
+
+ // Layout description for handler table based on return addresses.
+ static const int kReturnOffsetIndex = 0;
+ static const int kReturnHandlerIndex = 1;
+ static const int kReturnEntrySize = 2;
+
+ // Encoding of the {handler} field: prediction in the low 3 bits, handler
+ // offset in the remaining 29.
+ class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
+ class HandlerOffsetField : public BitField<int, 3, 29> {};
+};
+
+// Code describes objects with on-the-fly generated machine code.
+class Code : public HeapObject {
+ public:
+ // Opaque data type for encapsulating code flags like kind, inline
+ // cache state, and arguments count.
+ typedef uint32_t Flags;
+
+#define CODE_KIND_LIST(V) \
+ V(OPTIMIZED_FUNCTION) \
+ V(BYTECODE_HANDLER) \
+ V(STUB) \
+ V(BUILTIN) \
+ V(REGEXP) \
+ V(WASM_FUNCTION) \
+ V(WASM_TO_JS_FUNCTION) \
+ V(WASM_TO_WASM_FUNCTION) \
+ V(JS_TO_WASM_FUNCTION) \
+ V(WASM_INTERPRETER_ENTRY) \
+ V(C_WASM_ENTRY)
+
+ enum Kind {
+#define DEFINE_CODE_KIND_ENUM(name) name,
+ CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
+#undef DEFINE_CODE_KIND_ENUM
+ NUMBER_OF_KINDS
+ };
+
+ // Returns a human-readable name for the given code kind.
+ static const char* Kind2String(Kind kind);
+
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+ // Printing
+ static const char* ICState2String(InlineCacheState state);
+#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
+#ifdef ENABLE_DISASSEMBLER
+ void Disassemble(const char* name, std::ostream& os); // NOLINT
+#endif
+
+ // [instruction_size]: Size of the native instructions
+ inline int instruction_size() const;
+ inline void set_instruction_size(int value);
+
+ // [relocation_info]: Code relocation information
+ DECL_ACCESSORS(relocation_info, ByteArray)
+ void InvalidateEmbeddedObjects();
+
+ // [handler_table]: Fixed array containing offsets of exception handlers.
+ DECL_ACCESSORS(handler_table, FixedArray)
+
+ // [deoptimization_data]: Array containing data for deopt.
+ DECL_ACCESSORS(deoptimization_data, FixedArray)
+
+ // [source_position_table]: ByteArray for the source positions table or
+ // SourcePositionTableWithFrameCache.
+ DECL_ACCESSORS(source_position_table, Object)
+ inline ByteArray* SourcePositionTable() const;
+
+ // TODO(mtrofin): remove when we don't need FLAG_wasm_jit_to_native
+ // [protected instructions]: Array containing list of protected
+ // instructions and corresponding landing pad offset.
+ DECL_ACCESSORS(protected_instructions, FixedArray)
+
+ // [code_data_container]: A container indirection for all mutable fields.
+ DECL_ACCESSORS(code_data_container, CodeDataContainer)
+
+ // [trap_handler_index]: An index into the trap handler's master list of code
+ // objects.
+ DECL_ACCESSORS(trap_handler_index, Smi)
+
+ // [stub_key]: The major/minor key of a code stub.
+ inline uint32_t stub_key() const;
+ inline void set_stub_key(uint32_t key);
+
+ // [next_code_link]: Link for lists of optimized or deoptimized code.
+ // Note that this field is stored in the {CodeDataContainer} to be mutable.
+ inline Object* next_code_link() const;
+ inline void set_next_code_link(Object* value);
+
+ // [constant_pool offset]: Offset of the constant pool.
+ // Valid for FLAG_enable_embedded_constant_pool only
+ inline int constant_pool_offset() const;
+ inline void set_constant_pool_offset(int offset);
+
+ // Unchecked accessors to be used during GC.
+ inline ByteArray* unchecked_relocation_info() const;
+
+ inline int relocation_size() const;
+
+ // [kind]: Access to specific code kind.
+ inline Kind kind() const;
+
+ inline bool is_stub() const;
+ inline bool is_optimized_code() const;
+ inline bool is_wasm_code() const;
+
+ // Testers for interpreter builtins.
+ inline bool is_interpreter_trampoline_builtin() const;
+
+ // Tells whether the code checks the optimization marker in the function's
+ // feedback vector.
+ inline bool checks_optimization_marker() const;
+
+ // [has_tagged_params]: For compiled code or builtins: Tells whether the
+ // outgoing parameters of this code are tagged pointers. True for other kinds.
+ inline bool has_tagged_params() const;
+ inline void set_has_tagged_params(bool value);
+
+ // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
+ // code object was generated by the TurboFan optimizing compiler.
+ inline bool is_turbofanned() const;
+
+ // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
+ // embedded objects in code should be treated weakly.
+ inline bool can_have_weak_objects() const;
+ inline void set_can_have_weak_objects(bool value);
+
+ // [is_construct_stub]: For kind BUILTIN, tells whether the code object
+ // represents a hand-written construct stub
+ // (e.g., NumberConstructor_ConstructStub).
+ inline bool is_construct_stub() const;
+ inline void set_is_construct_stub(bool value);
+
+ // [builtin_index]: For builtins, tells which builtin index the code object
+ // has. The builtin index is a non-negative integer for builtins, and -1
+ // otherwise.
+ inline int builtin_index() const;
+ inline void set_builtin_index(int id);
+ inline bool is_builtin() const;
+
+ // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
+ // reserved in the code prologue.
+ inline unsigned stack_slots() const;
+
+ // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
+ // the instruction stream where the safepoint table starts.
+ inline unsigned safepoint_table_offset() const;
+ inline void set_safepoint_table_offset(unsigned offset);
+
+ // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
+ // the code is going to be deoptimized because of dead embedded maps.
+ inline bool marked_for_deoptimization() const;
+ inline void set_marked_for_deoptimization(bool flag);
+
+ // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
+ // the code was already deoptimized.
+ inline bool deopt_already_counted() const;
+ inline void set_deopt_already_counted(bool flag);
+
+ // [is_promise_rejection]: For kind BUILTIN tells whether the
+ // exception thrown by the code will lead to promise rejection or
+ // uncaught if both this and is_exception_caught is set.
+ // Use GetBuiltinCatchPrediction to access this.
+ inline void set_is_promise_rejection(bool flag);
+
+ // [is_exception_caught]: For kind BUILTIN tells whether the
+ // exception thrown by the code will be caught internally or
+ // uncaught if both this and is_promise_rejection is set.
+ // Use GetBuiltinCatchPrediction to access this.
+ inline void set_is_exception_caught(bool flag);
+
+ // [constant_pool]: The constant pool for this function.
+ inline Address constant_pool();
+
+ // Get the safepoint entry for the given pc.
+ SafepointEntry GetSafepointEntry(Address pc);
+
+ // The entire code object including its header is copied verbatim to the
+ // snapshot so that it can be written in one, fast, memcpy during
+ // deserialization. The deserializer will overwrite some pointers, rather
+ // like a runtime linker, but the random allocation addresses used in the
+ // mksnapshot process would still be present in the unlinked snapshot data,
+ // which would make snapshot production non-reproducible. This method wipes
+ // out the to-be-overwritten header data for reproducible snapshots.
+ inline void WipeOutHeader();
+
+ // Clear uninitialized padding space. This ensures that the snapshot content
+ // is deterministic.
+ inline void clear_padding();
+ // Initialize the flags field. Similar to clear_padding above this ensure that
+ // the snapshot content is deterministic.
+ inline void initialize_flags(Kind kind, bool has_unwinding_info,
+ bool is_turbofanned, int stack_slots);
+
+ // Convert a target address into a code object.
+ static inline Code* GetCodeFromTargetAddress(Address address);
+
+ // Convert an entry address into an object.
+ static inline Object* GetObjectFromEntryAddress(Address location_of_address);
+
+ // Convert a code entry into an object.
+ static inline Object* GetObjectFromCodeEntry(Address code_entry);
+
+ // Returns the address of the first instruction.
+ inline byte* instruction_start() const;
+
+ // Returns the address right after the last instruction.
+ inline byte* instruction_end() const;
+
+ // Returns the size of the instructions, padding, relocation and unwinding
+ // information.
+ inline int body_size() const;
+
+ // Returns the size of code and its metadata. This includes the size of code
+ // relocation information, deoptimization data and handler table.
+ inline int SizeIncludingMetadata() const;
+
+ // Returns the address of the first relocation info (read backwards!).
+ inline byte* relocation_start() const;
+
+ // [has_unwinding_info]: Whether this code object has unwinding information.
+ // If it doesn't, unwinding_information_start() will point to invalid data.
+ //
+ // The body of all code objects has the following layout.
+ //
+ // +--------------------------+ <-- instruction_start()
+ // | instructions |
+ // | ... |
+ // +--------------------------+
+ // | relocation info |
+ // | ... |
+ // +--------------------------+ <-- instruction_end()
+ //
+ // If has_unwinding_info() is false, instruction_end() points to the first
+ // memory location after the end of the code object. Otherwise, the body
+ // continues as follows:
+ //
+ // +--------------------------+
+ // | padding to the next |
+ // | 8-byte aligned address |
+ // +--------------------------+ <-- instruction_end()
+ // | [unwinding_info_size] |
+ // | as uint64_t |
+ // +--------------------------+ <-- unwinding_info_start()
+ // | unwinding info |
+ // | ... |
+ // +--------------------------+ <-- unwinding_info_end()
+ //
+ // and unwinding_info_end() points to the first memory location after the end
+ // of the code object.
+ //
+ inline bool has_unwinding_info() const;
+
+ // [unwinding_info_size]: Size of the unwinding information.
+ inline int unwinding_info_size() const;
+ inline void set_unwinding_info_size(int value);
+
+ // Returns the address of the unwinding information, if any.
+ inline byte* unwinding_info_start() const;
+
+ // Returns the address right after the end of the unwinding information.
+ inline byte* unwinding_info_end() const;
+
+ // Code entry point.
+ inline byte* entry() const;
+
+ // Returns true if pc is inside this object's instructions.
+ inline bool contains(byte* pc);
+
+ // Relocate the code by delta bytes. Called to signal that this code
+ // object has been moved by delta bytes.
+ void Relocate(intptr_t delta);
+
+ // Migrate code described by desc.
+ void CopyFrom(const CodeDesc& desc);
+
+ // Returns the object size for a given body (used for allocation).
+ static int SizeFor(int body_size) {
+ DCHECK_SIZE_TAG_ALIGNED(body_size);
+ return RoundUp(kHeaderSize + body_size, kCodeAlignment);
+ }
+
+ // Calculate the size of the code object to report for log events. This takes
+ // the layout of the code object into account.
+ inline int ExecutableSize() const;
+
+ DECL_CAST(Code)
+
+ // Dispatched behavior.
+ inline int CodeSize() const;
+
+ DECL_PRINTER(Code)
+ DECL_VERIFIER(Code)
+
+ void PrintDeoptLocation(FILE* out, Address pc);
+ bool CanDeoptAt(Address pc);
+
+ inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
+#ifdef VERIFY_HEAP
+ void VerifyEmbeddedObjectsDependency();
+#endif
+
+#ifdef DEBUG
+ enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
+ void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
+#endif // DEBUG
+
+ inline bool CanContainWeakObjects();
+
+ inline bool IsWeakObject(Object* object);
+
+ static inline bool IsWeakObjectInOptimizedCode(Object* object);
+
+ static Handle<WeakCell> WeakCellFor(Handle<Code> code);
+ WeakCell* CachedWeakCell();
+
+ // Return true if the function is inlined in the code.
+ bool Inlines(SharedFunctionInfo* sfi);
+
+ // Iterates optimized Code objects; walks per-context lists (see
+ // next_context_ below).
+ class OptimizedCodeIterator {
+ public:
+ explicit OptimizedCodeIterator(Isolate* isolate);
+ Code* Next();
+
+ private:
+ Context* next_context_;
+ Code* current_code_;
+ Isolate* isolate_;
+
+ DisallowHeapAllocation no_gc;
+ DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator)
+ };
+
+ // Size of the constant-pool slot; nonzero only when embedded constant
+ // pools are enabled.
+ static const int kConstantPoolSize =
+ FLAG_enable_embedded_constant_pool ? kIntSize : 0;
+
+ // Layout description.
+ static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
+ static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
+ static const int kDeoptimizationDataOffset =
+ kHandlerTableOffset + kPointerSize;
+ static const int kSourcePositionTableOffset =
+ kDeoptimizationDataOffset + kPointerSize;
+ static const int kProtectedInstructionsOffset =
+ kSourcePositionTableOffset + kPointerSize;
+ static const int kCodeDataContainerOffset =
+ kProtectedInstructionsOffset + kPointerSize;
+ static const int kInstructionSizeOffset =
+ kCodeDataContainerOffset + kPointerSize;
+ static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
+ static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize;
+ static const int kStubKeyOffset = kSafepointTableOffsetOffset + kIntSize;
+ static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
+ static const int kBuiltinIndexOffset =
+ kConstantPoolOffset + kConstantPoolSize;
+ static const int kTrapHandlerIndex = kBuiltinIndexOffset + kIntSize;
+ static const int kHeaderPaddingStart = kTrapHandlerIndex + kPointerSize;
+
+ // Add padding to align the instruction start following right after
+ // the Code object header.
+ static const int kHeaderSize =
+ (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
+
+ // Data or code not directly visited by GC directly starts here.
+ // The serializer needs to copy bytes starting from here verbatim.
+ // Objects embedded into code is visited via reloc info.
+ static const int kDataStart = kInstructionSizeOffset;
+
+ enum TrapFields { kTrapCodeOffset, kTrapLandingOffset, kTrapDataSize };
+
+ inline int GetUnwindingInfoSizeOffset() const;
+
+ class BodyDescriptor;
+
+ // Flags layout. BitField<type, shift, size>.
+#define CODE_FLAGS_BIT_FIELDS(V, _) \
+ V(HasUnwindingInfoField, bool, 1, _) \
+ V(KindField, Kind, 5, _) \
+ V(HasTaggedStackField, bool, 1, _) \
+ V(IsTurbofannedField, bool, 1, _) \
+ V(StackSlotsField, int, 24, _)
+ DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
+#undef CODE_FLAGS_BIT_FIELDS
+ static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size");
+ static_assert(StackSlotsField::kNext <= 32, "Code::flags field exhausted");
+
+ // KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
+#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
+ V(MarkedForDeoptimizationField, bool, 1, _) \
+ V(DeoptAlreadyCountedField, bool, 1, _) \
+ V(CanHaveWeakObjectsField, bool, 1, _) \
+ V(IsConstructStubField, bool, 1, _) \
+ V(IsPromiseRejectionField, bool, 1, _) \
+ V(IsExceptionCaughtField, bool, 1, _)
+ DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
+#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
+ static_assert(IsExceptionCaughtField::kNext <= 32, "KindSpecificFlags full");
+
+ // The {marked_for_deoptimization} field is accessed from generated code.
+ static const int kMarkedForDeoptimizationBit =
+ MarkedForDeoptimizationField::kShift;
+
+ static const int kArgumentsBits = 16;
+ static const int kMaxArguments = (1 << kArgumentsBits) - 1;
+
+ private:
+ friend class RelocIterator;
+
+ bool is_promise_rejection() const;
+ bool is_exception_caught() const;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
+};
+
+// CodeDataContainer is a container for all mutable fields associated with its
+// referencing {Code} object. Since {Code} objects reside on write-protected
+// pages within the heap, its header fields need to be immutable. There always
+// is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing
+// field {Code::code_data_container} itself is immutable.
+class CodeDataContainer : public HeapObject {
+ public:
+ DECL_ACCESSORS(next_code_link, Object)
+ DECL_INT_ACCESSORS(kind_specific_flags)
+
+ // Clear uninitialized padding space. This ensures that the snapshot content
+ // is deterministic.
+ inline void clear_padding();
+
+ DECL_CAST(CodeDataContainer)
+
+ // Dispatched behavior.
+ DECL_PRINTER(CodeDataContainer)
+ DECL_VERIFIER(CodeDataContainer)
+
+ static const int kNextCodeLinkOffset = HeapObject::kHeaderSize;
+ static const int kKindSpecificFlagsOffset =
+ kNextCodeLinkOffset + kPointerSize;
+ static const int kUnalignedSize = kKindSpecificFlagsOffset + kIntSize;
+ static const int kSize = OBJECT_POINTER_ALIGN(kUnalignedSize);
+
+ // During mark compact we need to take special care for weak fields.
+ // Note: the strong end coincides with kNextCodeLinkOffset, i.e.
+ // {next_code_link} is the weakly-held pointer field.
+ static const int kPointerFieldsStrongEndOffset = kNextCodeLinkOffset;
+ static const int kPointerFieldsWeakEndOffset = kKindSpecificFlagsOffset;
+
+ // Ignores weakness.
+ typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
+ kPointerFieldsWeakEndOffset, kSize>
+ BodyDescriptor;
+
+ // Respects weakness.
+ typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
+ kPointerFieldsStrongEndOffset, kSize>
+ BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer);
+};
+
+// AbstractCode is a helper wrapper around either a Code object or a
+// BytecodeArray, unifying the accessors the two share.
+class AbstractCode : public HeapObject {
+ public:
+ // All code kinds and INTERPRETED_FUNCTION.
+ enum Kind {
+#define DEFINE_CODE_KIND_ENUM(name) name,
+ CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
+#undef DEFINE_CODE_KIND_ENUM
+ INTERPRETED_FUNCTION,
+ NUMBER_OF_KINDS
+ };
+
+ static const char* Kind2String(Kind kind);
+
+ int SourcePosition(int offset);
+ int SourceStatementPosition(int offset);
+
+ // Returns the address of the first instruction.
+ inline Address instruction_start();
+
+ // Returns the address right after the last instruction.
+ inline Address instruction_end();
+
+ // Returns the size of the code instructions.
+ inline int instruction_size();
+
+ // Return the source position table.
+ inline ByteArray* source_position_table();
+
+ inline Object* stack_frame_cache();
+ static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
+ Handle<NumberDictionary> cache);
+ void DropStackFrameCache();
+
+ // Returns the size of instructions and the metadata.
+ inline int SizeIncludingMetadata();
+
+ // Returns true if pc is inside this object's instructions.
+ inline bool contains(byte* pc);
+
+ // Returns the AbstractCode::Kind of the code.
+ inline Kind kind();
+
+ // Calculate the size of the code object to report for log events. This takes
+ // the layout of the code object into account.
+ inline int ExecutableSize();
+
+ DECL_CAST(AbstractCode)
+ inline Code* GetCode();
+ inline BytecodeArray* GetBytecodeArray();
+
+ // Max loop nesting marker used to postpone OSR. We don't take loop
+ // nesting that is deeper than 5 levels into account.
+ static const int kMaxLoopNestingMarker = 6;
+};
+
+// Dependent code is a singly linked list of fixed arrays. Each array contains
+// code objects in weak cells for one dependent group. The suffix of the array
+// can be filled with the undefined value if the number of codes is less than
+// the length of the array.
+//
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// |
+// V
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// |
+// V
+// empty_fixed_array()
+//
+// The list of fixed arrays is ordered by dependency groups.
+
+class DependentCode : public FixedArray {
+ public:
+ enum DependencyGroup {
+ // Group of code that weakly embed this map and depend on being
+ // deoptimized when the map is garbage collected.
+ kWeakCodeGroup,
+ // Group of code that embed a transition to this map, and depend on being
+ // deoptimized when the transition is replaced by a new version.
+ kTransitionGroup,
+ // Group of code that omit run-time prototype checks for prototypes
+ // described by this map. The group is deoptimized whenever an object
+ // described by this map changes shape (and transitions to a new map),
+ // possibly invalidating the assumptions embedded in the code.
+ kPrototypeCheckGroup,
+ // Group of code that depends on global property values in property cells
+ // not being changed.
+ kPropertyCellChangedGroup,
+ // Group of code that omit run-time checks for field(s) introduced by
+ // this map, i.e. for the field type.
+ kFieldOwnerGroup,
+ // Group of code that omit run-time type checks for initial maps of
+ // constructors.
+ kInitialMapChangedGroup,
+ // Group of code that depends on tenuring information in AllocationSites
+ // not being changed.
+ kAllocationSiteTenuringChangedGroup,
+ // Group of code that depends on element transition information in
+ // AllocationSites not being changed.
+ kAllocationSiteTransitionChangedGroup
+ };
+
+ static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
+ // Fixed-array slot indices of the header fields.
+ static const int kNextLinkIndex = 0;
+ static const int kFlagsIndex = 1;
+ static const int kCodesStartIndex = 2;
+
+ bool Contains(DependencyGroup group, WeakCell* code_cell);
+ bool IsEmpty(DependencyGroup group);
+
+ static Handle<DependentCode> InsertCompilationDependencies(
+ Handle<DependentCode> entries, DependencyGroup group,
+ Handle<Foreign> info);
+
+ static Handle<DependentCode> InsertWeakCode(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<WeakCell> code_cell);
+
+ void UpdateToFinishedCode(DependencyGroup group, Foreign* info,
+ WeakCell* code_cell);
+
+ void RemoveCompilationDependencies(DependentCode::DependencyGroup group,
+ Foreign* info);
+
+ void DeoptimizeDependentCodeGroup(Isolate* isolate,
+ DependentCode::DependencyGroup group);
+
+ bool MarkCodeForDeoptimization(Isolate* isolate,
+ DependentCode::DependencyGroup group);
+
+ // The following low-level accessors should only be used by this class
+ // and the mark compact collector.
+ inline DependentCode* next_link();
+ inline void set_next_link(DependentCode* next);
+ inline int count();
+ inline void set_count(int value);
+ inline DependencyGroup group();
+ inline void set_group(DependencyGroup group);
+ inline Object* object_at(int i);
+ inline void set_object_at(int i, Object* object);
+ inline void clear_at(int i);
+ inline void copy(int from, int to);
+ DECL_CAST(DependentCode)
+
+ static const char* DependencyGroupName(DependencyGroup group);
+ static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
+
+ private:
+ static Handle<DependentCode> Insert(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<Object> object);
+ static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
+ Handle<DependentCode> next);
+ static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
+ // Compact by removing cleared weak cells and return true if there was
+ // any cleared weak cell.
+ bool Compact();
+ // Growth policy: grow by one while small, then by a factor of 1.25.
+ static int Grow(int number_of_entries) {
+ if (number_of_entries < 5) return number_of_entries + 1;
+ return number_of_entries * 5 / 4;
+ }
+ // The flags slot packs the group id with the entry count.
+ inline int flags();
+ inline void set_flags(int flags);
+ class GroupField : public BitField<int, 0, 3> {};
+ class CountField : public BitField<int, 3, 27> {};
+ STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
+};
+
+// BytecodeArray represents a sequence of interpreter bytecodes.
+class BytecodeArray : public FixedArrayBase {
+ public:
+ enum Age {
+ kNoAgeBytecodeAge = 0,
+ kQuadragenarianBytecodeAge,
+ kQuinquagenarianBytecodeAge,
+ kSexagenarianBytecodeAge,
+ kSeptuagenarianBytecodeAge,
+ kOctogenarianBytecodeAge,
+ kAfterLastBytecodeAge,
+ kFirstBytecodeAge = kNoAgeBytecodeAge,
+ kLastBytecodeAge = kAfterLastBytecodeAge - 1,
+ kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
+ kIsOldBytecodeAge = kSexagenarianBytecodeAge
+ };
+
+ static int SizeFor(int length) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length);
+ }
+
+ // Setter and getter
+ inline byte get(int index);
+ inline void set(int index, byte value);
+
+ // Returns data start address.
+ inline Address GetFirstBytecodeAddress();
+
+ // Accessors for frame size.
+ inline int frame_size() const;
+ inline void set_frame_size(int frame_size);
+
+ // Accessor for register count (derived from frame_size).
+ inline int register_count() const;
+
+ // Accessors for parameter count (including implicit 'this' receiver).
+ inline int parameter_count() const;
+ inline void set_parameter_count(int number_of_parameters);
+
+ // Register used to pass the incoming new.target or generator object from the
+ // function call.
+ inline interpreter::Register incoming_new_target_or_generator_register()
+ const;
+ inline void set_incoming_new_target_or_generator_register(
+ interpreter::Register incoming_new_target_or_generator_register);
+
+ // Accessors for profiling count.
+ inline int interrupt_budget() const;
+ inline void set_interrupt_budget(int interrupt_budget);
+
+ // Accessors for OSR loop nesting level.
+ inline int osr_loop_nesting_level() const;
+ inline void set_osr_loop_nesting_level(int depth);
+
+ // Accessors for bytecode's code age.
+ inline Age bytecode_age() const;
+ inline void set_bytecode_age(Age age);
+
+ // Accessors for the constant pool.
+ DECL_ACCESSORS(constant_pool, FixedArray)
+
+ // Accessors for handler table containing offsets of exception handlers.
+ DECL_ACCESSORS(handler_table, FixedArray)
+
+ // Accessors for source position table containing mappings between byte code
+ // offset and source position or SourcePositionTableWithFrameCache.
+ DECL_ACCESSORS(source_position_table, Object)
+
+ inline ByteArray* SourcePositionTable();
+
+ DECL_CAST(BytecodeArray)
+
+ // Dispatched behavior.
+ inline int BytecodeArraySize();
+
+ inline int instruction_size();
+
+ // Returns the size of bytecode and its metadata. This includes the size of
+ // bytecode, constant pool, source position table, and handler table.
+ inline int SizeIncludingMetadata();
+
+ int SourcePosition(int offset);
+ int SourceStatementPosition(int offset);
+
+ DECL_PRINTER(BytecodeArray)
+ DECL_VERIFIER(BytecodeArray)
+
+ void Disassemble(std::ostream& os);
+
+ void CopyBytecodesTo(BytecodeArray* to);
+
+ // Bytecode aging
+ bool IsOld() const;
+ void MakeOlder();
+
+ // Clear uninitialized padding space. This ensures that the snapshot content
+ // is deterministic.
+ inline void clear_padding();
+
+// Layout description.
+#define BYTECODE_ARRAY_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kConstantPoolOffset, kPointerSize) \
+ V(kHandlerTableOffset, kPointerSize) \
+ V(kSourcePositionTableOffset, kPointerSize) \
+ V(kFrameSizeOffset, kIntSize) \
+ V(kParameterSizeOffset, kIntSize) \
+ V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
+ V(kInterruptBudgetOffset, kIntSize) \
+ V(kOSRNestingLevelOffset, kCharSize) \
+ V(kBytecodeAgeOffset, kCharSize) \
+ /* Total size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
+ BYTECODE_ARRAY_FIELDS)
+#undef BYTECODE_ARRAY_FIELDS
+
+ // Maximal memory consumption for a single BytecodeArray.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single BytecodeArray.
+ static const int kMaxLength = kMaxSize - kHeaderSize;
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
+};
+
+// DeoptimizationData is a fixed array used to hold the deoptimization data for
+// optimized code. It also contains information about functions that were
+// inlined. If N different functions were inlined then the first N elements of
+// the literal array will contain these functions.
+//
+// It can be empty.
+class DeoptimizationData : public FixedArray {
+ public:
+ // Layout description. Indices in the array.
+ static const int kTranslationByteArrayIndex = 0;
+ static const int kInlinedFunctionCountIndex = 1;
+ static const int kLiteralArrayIndex = 2;
+ static const int kOsrBytecodeOffsetIndex = 3;
+ static const int kOsrPcOffsetIndex = 4;
+ static const int kOptimizationIdIndex = 5;
+ static const int kSharedFunctionInfoIndex = 6;
+ static const int kWeakCellCacheIndex = 7;
+ static const int kInliningPositionsIndex = 8;
+ static const int kFirstDeoptEntryIndex = 9;
+
+ // Offsets of deopt entry elements relative to the start of the entry.
+ static const int kBytecodeOffsetRawOffset = 0;
+ static const int kTranslationIndexOffset = 1;
+ static const int kPcOffset = 2;
+ static const int kDeoptEntrySize = 3;
+
+// Simple element accessors.
+#define DECL_ELEMENT_ACCESSORS(name, type) \
+ inline type* name(); \
+ inline void Set##name(type* value);
+
+ DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+ DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+ DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+ DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
+ DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
+ DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+ DECL_ELEMENT_ACCESSORS(WeakCellCache, Object)
+ DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
+
+#undef DECL_ELEMENT_ACCESSORS
+
+// Accessors for elements of the ith deoptimization entry.
+#define DECL_ENTRY_ACCESSORS(name, type) \
+ inline type* name(int i); \
+ inline void Set##name(int i, type* value);
+
+ DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
+ DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
+ DECL_ENTRY_ACCESSORS(Pc, Smi)
+
+#undef DECL_ENTRY_ACCESSORS
+
+ inline BailoutId BytecodeOffset(int i);
+
+ inline void SetBytecodeOffset(int i, BailoutId value);
+
+ inline int DeoptCount();
+
+ static const int kNotInlinedIndex = -1;
+
+ // Returns the inlined function at the given position in LiteralArray, or the
+ // outer function if index == kNotInlinedIndex.
+ class SharedFunctionInfo* GetInlinedFunction(int index);
+
+ // Allocates a DeoptimizationData.
+ static Handle<DeoptimizationData> New(Isolate* isolate, int deopt_entry_count,
+ PretenureFlag pretenure);
+
+ // Return an empty DeoptimizationData.
+ static Handle<DeoptimizationData> Empty(Isolate* isolate);
+
+ DECL_CAST(DeoptimizationData)
+
+#ifdef ENABLE_DISASSEMBLER
+ void DeoptimizationDataPrint(std::ostream& os); // NOLINT
+#endif
+
+ private:
+ static int IndexForEntry(int i) {
+ return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
+ }
+
+ static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_CODE_H_
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index 42798ed5a0..99edc50c96 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -32,7 +32,7 @@ uint32_t CompilationCacheShape::StringSharedHash(String* source,
// collection.
Script* script(Script::cast(shared->script()));
hash ^= String::cast(script->source())->Hash();
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
if (is_strict(language_mode)) hash ^= 0x8000;
hash += position;
}
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index a2358671f5..16bced9998 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_COMPILATION_CACHE_H_
#include "src/objects/hash-table.h"
+#include "src/objects/js-regexp.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 1efe39c1d9..c0425fca8a 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -114,7 +114,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
}
index = old_break_points->length();
}
- DCHECK(index != kNoBreakPointInfo);
+ DCHECK_NE(index, kNoBreakPointInfo);
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info =
@@ -221,7 +221,7 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
int found_count = 0;
for (int i = 0; i < old_array->length(); i++) {
if (IsEqual(old_array->get(i), *break_point_object)) {
- DCHECK(found_count == 0);
+ DCHECK_EQ(found_count, 0);
found_count++;
} else {
new_array->set(i - found_count, old_array->get(i));
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index e75043f8d2..9ee2765897 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+class BytecodeArray;
+
// The DebugInfo class holds additional information for a function being
// debugged.
class DebugInfo : public Struct {
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 71537d3d38..f0b985337b 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -46,12 +46,12 @@ class EnumCache : public Tuple2 {
class DescriptorArray : public FixedArray {
public:
// Returns the number of descriptors in the array.
- inline int number_of_descriptors();
- inline int number_of_descriptors_storage();
- inline int NumberOfSlackDescriptors();
+ inline int number_of_descriptors() const;
+ inline int number_of_descriptors_storage() const;
+ inline int NumberOfSlackDescriptors() const;
inline void SetNumberOfDescriptors(int number_of_descriptors);
- inline int number_of_entries();
+ inline int number_of_entries() const;
inline EnumCache* GetEnumCache();
@@ -141,17 +141,18 @@ class DescriptorArray : public FixedArray {
static const int kEntryValueIndex = 2;
static const int kEntrySize = 3;
-#if defined(DEBUG) || defined(OBJECT_PRINT)
- // For our gdb macros, we should perhaps change these in the future.
- void Print();
-
// Print all the descriptors.
void PrintDescriptors(std::ostream& os); // NOLINT
-
void PrintDescriptorDetails(std::ostream& os, int descriptor,
PropertyDetails::PrintMode mode);
+
+#if defined(DEBUG) || defined(OBJECT_PRINT)
+ // For our gdb macros, we should perhaps change these in the future.
+ void Print();
#endif
+ DECL_VERIFIER(DescriptorArray)
+
#ifdef DEBUG
// Is the descriptor array sorted and without duplicates?
bool IsSortedNoDuplicates(int valid_descriptors = -1);
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 11cf8b1163..5cf6bfb67d 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -92,7 +92,7 @@ class BaseDictionaryShape : public BaseShape<Key> {
template <typename Dictionary>
static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
- DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
+ DCHECK_GE(entry, 0); // Not found is -1, which is not caught by get().
return PropertyDetails(Smi::cast(dict->get(
Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex)));
}
@@ -112,6 +112,7 @@ class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
static inline uint32_t Hash(Isolate* isolate, Handle<Name> key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
+ static inline int GetMapRootIndex();
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const int kEntryValueIndex = 1;
@@ -171,6 +172,10 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
// Ensure enough space for n additional elements.
static Handle<Derived> EnsureCapacity(Handle<Derived> dictionary, int n);
+ MUST_USE_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
+ Handle<Derived> dictionary, Key key, Handle<Object> value,
+ PropertyDetails details, int* entry_out = nullptr);
+
MUST_USE_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
Key key, Handle<Object> value,
PropertyDetails details,
@@ -207,6 +212,7 @@ class GlobalDictionaryShape : public NameDictionaryShape {
static inline Object* Unwrap(Object* key);
static inline bool IsKey(Isolate* isolate, Object* k);
static inline bool IsLive(Isolate* isolate, Object* key);
+ static inline int GetMapRootIndex();
};
class GlobalDictionary
@@ -224,57 +230,33 @@ class GlobalDictionary
class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
public:
- static inline bool IsMatch(uint32_t key, Object* other);
- static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
-};
-
-class SeededNumberDictionaryShape : public NumberDictionaryShape {
- public:
static const int kPrefixSize = 1;
static const int kEntrySize = 3;
- static inline uint32_t Hash(Isolate* isolate, uint32_t key);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
-};
-
-class UnseededNumberDictionaryShape : public NumberDictionaryShape {
- public:
- static const bool kHasDetails = false;
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
+ static inline bool IsMatch(uint32_t key, Object* other);
+ static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
static inline uint32_t Hash(Isolate* isolate, uint32_t key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
- template <typename Dictionary>
- static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
- UNREACHABLE();
- }
-
- template <typename Dictionary>
- static inline void DetailsAtPut(Dictionary* dict, int entry,
- PropertyDetails value) {
- UNREACHABLE();
- }
-
- static inline Map* GetMap(Isolate* isolate);
+ static inline int GetMapRootIndex();
};
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- HashTable<SeededNumberDictionary, SeededNumberDictionaryShape>;
+extern template class EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>;
+ Dictionary<NumberDictionary, NumberDictionaryShape>;
-class SeededNumberDictionary
- : public Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape> {
+class NumberDictionary
+ : public Dictionary<NumberDictionary, NumberDictionaryShape> {
public:
- DECL_CAST(SeededNumberDictionary)
+ DECL_CAST(NumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
- Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, Handle<JSObject> dictionary_holder,
+ MUST_USE_RESULT static Handle<NumberDictionary> Set(
+ Handle<NumberDictionary> dictionary, uint32_t key, Handle<Object> value,
+ Handle<JSObject> dictionary_holder = Handle<JSObject>::null(),
PropertyDetails details = PropertyDetails::Empty());
static const int kMaxNumberKeyIndex = kPrefixStartIndex;
@@ -313,20 +295,6 @@ class SeededNumberDictionary
static const uint32_t kPreferFastElementsSizeFactor = 3;
};
-class UnseededNumberDictionary
- : public Dictionary<UnseededNumberDictionary,
- UnseededNumberDictionaryShape> {
- public:
- DECL_CAST(UnseededNumberDictionary)
-
- // Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
- Handle<UnseededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value);
-
- static const int kEntryValueIndex = 1;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 8bc188cf6a..59bbfc2b63 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -20,6 +20,7 @@ class Handle;
#define FRAME_ARRAY_FIELD_LIST(V) \
V(WasmInstance, WasmInstanceObject) \
V(WasmFunctionIndex, Smi) \
+ V(IsWasmInterpreterFrame, Smi) \
V(Receiver, Object) \
V(Function, JSFunction) \
V(Code, AbstractCode) \
@@ -59,8 +60,7 @@ class FrameArray : public FixedArray {
int flags);
static Handle<FrameArray> AppendWasmFrame(
Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
- int wasm_function_index, Handle<AbstractCode> code, int offset,
- int flags);
+ int wasm_function_index, WasmCodeWrapper code, int offset, int flags);
DECL_CAST(FrameArray)
@@ -74,6 +74,7 @@ class FrameArray : public FixedArray {
static const int kWasmInstanceOffset = 0;
static const int kWasmFunctionIndexOffset = 1;
+ static const int kIsWasmInterpreterFrameOffset = 2;
static const int kReceiverOffset = 0;
static const int kFunctionOffset = 1;
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 1f1014e230..baff7c03b4 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_HASH_TABLE_INL_H_
#define V8_OBJECTS_HASH_TABLE_INL_H_
+#include "src/heap/heap.h"
#include "src/objects/hash-table.h"
namespace v8 {
@@ -16,6 +17,14 @@ bool BaseShape<KeyT>::IsLive(Isolate* isolate, Object* k) {
return k != heap->the_hole_value() && k != heap->undefined_value();
}
+int OrderedHashSet::GetMapRootIndex() {
+ return Heap::kOrderedHashSetMapRootIndex;
+}
+
+int OrderedHashMap::GetMapRootIndex() {
+ return Heap::kOrderedHashMapMapRootIndex;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 6b5682535a..9b7ac5deb3 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -56,7 +56,7 @@ template <typename KeyT>
class BaseShape {
public:
typedef KeyT Key;
- static inline Map* GetMap(Isolate* isolate);
+ static inline int GetMapRootIndex();
static const bool kNeedsHoleCheck = true;
static Object* Unwrap(Object* key) { return key; }
static bool IsKey(Isolate* isolate, Object* key) {
@@ -225,8 +225,8 @@ class HashTable : public HashTableBase {
// To scale a computed hash code to fit within the hash table, we
// use bit-wise AND with a mask, so the capacity must be positive
// and non-zero.
- DCHECK(capacity > 0);
- DCHECK(capacity <= kMaxCapacity);
+ DCHECK_GT(capacity, 0);
+ DCHECK_LE(capacity, kMaxCapacity);
set(kCapacityIndex, Smi::FromInt(capacity));
}
@@ -437,17 +437,21 @@ class OrderedHashTable : public OrderedHashTableBase {
// the key has been deleted. This does not shrink the table.
static bool Delete(Isolate* isolate, Derived* table, Object* key);
- int NumberOfElements() { return Smi::ToInt(get(kNumberOfElementsIndex)); }
+ int NumberOfElements() const {
+ return Smi::ToInt(get(kNumberOfElementsIndex));
+ }
- int NumberOfDeletedElements() {
+ int NumberOfDeletedElements() const {
return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
}
// Returns the number of contiguous entries in the data table, starting at 0,
// that either are real entries or have been deleted.
- int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); }
+ int UsedCapacity() const {
+ return NumberOfElements() + NumberOfDeletedElements();
+ }
- int NumberOfBuckets() { return Smi::ToInt(get(kNumberOfBucketsIndex)); }
+ int NumberOfBuckets() const { return Smi::ToInt(get(kNumberOfBucketsIndex)); }
// Returns an index into |this| for the given entry.
int EntryToIndex(int entry) {
@@ -549,6 +553,8 @@ class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
Handle<Object> value);
static Handle<FixedArray> ConvertToKeysArray(Handle<OrderedHashSet> table,
GetKeysConversion convert);
+ static HeapObject* GetEmpty(Isolate* isolate);
+ static inline int GetMapRootIndex();
};
class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
@@ -563,26 +569,29 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
static Object* GetHash(Isolate* isolate, Object* key);
+ static HeapObject* GetEmpty(Isolate* isolate);
+ static inline int GetMapRootIndex();
+
static const int kValueOffset = 1;
};
-template <int entrysize>
class WeakHashTableShape : public BaseShape<Handle<Object>> {
public:
static inline bool IsMatch(Handle<Object> key, Object* other);
static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
+ static inline int GetMapRootIndex();
static const int kPrefixSize = 0;
- static const int kEntrySize = entrysize;
+ static const int kEntrySize = 2;
static const bool kNeedsHoleCheck = false;
};
// WeakHashTable maps keys that are arbitrary heap objects to heap object
// values. The table wraps the keys in weak cells and store values directly.
// Thus it references keys weakly and values strongly.
-class WeakHashTable : public HashTable<WeakHashTable, WeakHashTableShape<2>> {
- typedef HashTable<WeakHashTable, WeakHashTableShape<2>> DerivedHashTable;
+class WeakHashTable : public HashTable<WeakHashTable, WeakHashTableShape> {
+ typedef HashTable<WeakHashTable, WeakHashTableShape> DerivedHashTable;
public:
DECL_CAST(WeakHashTable)
@@ -597,8 +606,6 @@ class WeakHashTable : public HashTable<WeakHashTable, WeakHashTableShape<2>> {
Handle<HeapObject> key,
Handle<HeapObject> value);
- static Handle<FixedArray> GetValues(Handle<WeakHashTable> table);
-
private:
friend class MarkCompactCollector;
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index f889e6899b..9688717e76 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -93,7 +93,7 @@ icu::SimpleDateFormat* CreateICUDateFormat(Isolate* isolate,
Handle<JSObject> options) {
// Create time zone as specified by the user. We have to re-create time zone
// since calendar takes ownership.
- icu::TimeZone* tz = NULL;
+ icu::TimeZone* tz = nullptr;
icu::UnicodeString timezone;
if (ExtractStringSetting(isolate, options, "timeZone", &timezone)) {
tz = icu::TimeZone::createTimeZone(timezone);
@@ -118,7 +118,7 @@ icu::SimpleDateFormat* CreateICUDateFormat(Isolate* isolate,
// Make formatter from skeleton. Calendar and numbering system are added
// to the locale as Unicode extension (if they were specified at all).
- icu::SimpleDateFormat* date_format = NULL;
+ icu::SimpleDateFormat* date_format = nullptr;
icu::UnicodeString skeleton;
if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
std::unique_ptr<icu::DateTimePatternGenerator> generator(
@@ -156,7 +156,7 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
pattern.length()))
.ToHandleChecked(),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
// Set time zone and calendar.
@@ -167,7 +167,7 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
const char* calendar_name = calendar->getType();
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("calendar"),
factory->NewStringFromAsciiChecked(calendar_name),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
const icu::TimeZone& tz = calendar->getTimeZone();
@@ -186,9 +186,9 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
// DCHECK(canonical_time_zone != UNICODE_STRING_SIMPLE("Etc/GMT")) .
if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("timeZone"),
- factory->NewStringFromStaticChars("UTC"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("timeZone"),
+ factory->NewStringFromStaticChars("UTC"), LanguageMode::kSloppy)
.Assert();
} else {
JSObject::SetProperty(resolved,
@@ -199,7 +199,7 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
canonical_time_zone.getBuffer()),
canonical_time_zone.length()))
.ToHandleChecked(),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
}
}
@@ -212,14 +212,14 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
icu::NumberingSystem::createInstance(icu_locale, status);
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns), LanguageMode::kSloppy)
.Assert();
} else {
JSObject::SetProperty(resolved,
factory->NewStringFromStaticChars("numberingSystem"),
- factory->undefined_value(), SLOPPY)
+ factory->undefined_value(), LanguageMode::kSloppy)
.Assert();
}
delete numbering_system;
@@ -231,12 +231,14 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), SLOPPY)
+ factory->NewStringFromAsciiChecked(result),
+ LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), SLOPPY)
+ factory->NewStringFromStaticChars("und"),
+ LanguageMode::kSloppy)
.Assert();
}
}
@@ -283,7 +285,7 @@ icu::DecimalFormat* CreateICUNumberFormat(Isolate* isolate,
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
- icu::DecimalFormat* number_format = NULL;
+ icu::DecimalFormat* number_format = nullptr;
icu::UnicodeString style;
icu::UnicodeString currency;
if (ExtractStringSetting(isolate, options, "style", &style)) {
@@ -317,14 +319,14 @@ icu::DecimalFormat* CreateICUNumberFormat(Isolate* isolate,
if (U_FAILURE(status)) {
delete number_format;
- return NULL;
+ return nullptr;
}
} else if (style == UNICODE_STRING_SIMPLE("percent")) {
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createPercentInstance(icu_locale, status));
if (U_FAILURE(status)) {
delete number_format;
- return NULL;
+ return nullptr;
}
// Make sure 1.1% doesn't go into 2%.
number_format->setMinimumFractionDigits(1);
@@ -337,7 +339,7 @@ icu::DecimalFormat* CreateICUNumberFormat(Isolate* isolate,
if (U_FAILURE(status)) {
delete number_format;
- return NULL;
+ return nullptr;
}
// Set all options.
@@ -363,19 +365,19 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumIntegerDigits"),
factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumFractionDigits"),
factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("maximumFractionDigits"),
factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
Handle<String> key =
@@ -386,7 +388,7 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
}
@@ -397,7 +399,7 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("maximumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
}
@@ -408,12 +410,14 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), SLOPPY)
+ factory->NewStringFromAsciiChecked(result),
+ LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), SLOPPY)
+ factory->NewStringFromStaticChars("und"),
+ LanguageMode::kSloppy)
.Assert();
}
}
@@ -433,7 +437,7 @@ void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
currency.length()))
.ToHandleChecked(),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
}
@@ -445,21 +449,22 @@ void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
icu::NumberingSystem::createInstance(icu_locale, status);
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns), LanguageMode::kSloppy)
.Assert();
} else {
JSObject::SetProperty(resolved,
factory->NewStringFromStaticChars("numberingSystem"),
- factory->undefined_value(), SLOPPY)
+ factory->undefined_value(), LanguageMode::kSloppy)
.Assert();
}
delete numbering_system;
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("useGrouping"),
- factory->ToBoolean(number_format->isGroupingUsed()), SLOPPY)
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("useGrouping"),
+ factory->ToBoolean(number_format->isGroupingUsed()),
+ LanguageMode::kSloppy)
.Assert();
SetResolvedNumericSettings(isolate, icu_locale, number_format, resolved);
@@ -469,13 +474,13 @@ icu::Collator* CreateICUCollator(Isolate* isolate,
const icu::Locale& icu_locale,
Handle<JSObject> options) {
// Make collator from options.
- icu::Collator* collator = NULL;
+ icu::Collator* collator = nullptr;
UErrorCode status = U_ZERO_ERROR;
collator = icu::Collator::createInstance(icu_locale, status);
if (U_FAILURE(status)) {
delete collator;
- return NULL;
+ return nullptr;
}
// Set flags first, and then override them with sensitivity if necessary.
@@ -538,26 +543,26 @@ void SetResolvedCollatorSettings(Isolate* isolate,
resolved, factory->NewStringFromStaticChars("numeric"),
factory->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("lower"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("lower"), LanguageMode::kSloppy)
.Assert();
break;
case UCOL_UPPER_FIRST:
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("upper"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("upper"), LanguageMode::kSloppy)
.Assert();
break;
default:
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("false"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("false"), LanguageMode::kSloppy)
.Assert();
}
@@ -565,19 +570,19 @@ void SetResolvedCollatorSettings(Isolate* isolate,
case UCOL_PRIMARY: {
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("primary"), SLOPPY)
+ factory->NewStringFromStaticChars("primary"), LanguageMode::kSloppy)
.Assert();
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("case"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("case"), LanguageMode::kSloppy)
.Assert();
} else {
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("base"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("base"), LanguageMode::kSloppy)
.Assert();
}
break;
@@ -585,43 +590,44 @@ void SetResolvedCollatorSettings(Isolate* isolate,
case UCOL_SECONDARY:
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("secondary"), SLOPPY)
+ factory->NewStringFromStaticChars("secondary"), LanguageMode::kSloppy)
.Assert();
- JSObject::SetProperty(resolved,
- factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("accent"), SLOPPY)
+ JSObject::SetProperty(
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("accent"), LanguageMode::kSloppy)
.Assert();
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("tertiary"), SLOPPY)
+ factory->NewStringFromStaticChars("tertiary"), LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), SLOPPY)
+ factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
.Assert();
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
// put them into variant.
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("quaternary"), SLOPPY)
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("strength"),
+ factory->NewStringFromStaticChars("quaternary"),
+ LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), SLOPPY)
+ factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
.Assert();
break;
default:
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("identical"), SLOPPY)
+ factory->NewStringFromStaticChars("identical"), LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), SLOPPY)
+ factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
.Assert();
}
@@ -629,7 +635,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
factory->ToBoolean(collator->getAttribute(UCOL_ALTERNATE_HANDLING,
status) == UCOL_SHIFTED),
- SLOPPY)
+ LanguageMode::kSloppy)
.Assert();
// Set the locale
@@ -639,12 +645,14 @@ void SetResolvedCollatorSettings(Isolate* isolate,
FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), SLOPPY)
+ factory->NewStringFromAsciiChecked(result),
+ LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), SLOPPY)
+ factory->NewStringFromStaticChars("und"),
+ LanguageMode::kSloppy)
.Assert();
}
}
@@ -718,7 +726,7 @@ bool SetResolvedPluralRulesSettings(Isolate* isolate,
for (int32_t i = 0;; i++) {
const icu::UnicodeString* category = categories->snext(status);
if (U_FAILURE(status)) return false;
- if (category == NULL) return true;
+ if (category == nullptr) return true;
std::string keyword;
Handle<String> value = factory->NewStringFromAsciiChecked(
@@ -735,9 +743,9 @@ icu::BreakIterator* CreateICUBreakIterator(Isolate* isolate,
const icu::Locale& icu_locale,
Handle<JSObject> options) {
UErrorCode status = U_ZERO_ERROR;
- icu::BreakIterator* break_iterator = NULL;
+ icu::BreakIterator* break_iterator = nullptr;
icu::UnicodeString type;
- if (!ExtractStringSetting(isolate, options, "type", &type)) return NULL;
+ if (!ExtractStringSetting(isolate, options, "type", &type)) return nullptr;
if (type == UNICODE_STRING_SIMPLE("character")) {
break_iterator =
@@ -754,7 +762,7 @@ icu::BreakIterator* CreateICUBreakIterator(Isolate* isolate,
if (U_FAILURE(status)) {
delete break_iterator;
- return NULL;
+ return nullptr;
}
isolate->CountUsage(v8::Isolate::UseCounterFeature::kBreakIterator);
@@ -776,12 +784,14 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), SLOPPY)
+ factory->NewStringFromAsciiChecked(result),
+ LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), SLOPPY)
+ factory->NewStringFromStaticChars("und"),
+ LanguageMode::kSloppy)
.Assert();
}
}
@@ -802,7 +812,7 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
+ return nullptr;
}
icu_locale = icu::Locale(icu_result);
}
@@ -853,7 +863,7 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
+ return nullptr;
}
icu_locale = icu::Locale(icu_result);
}
@@ -905,7 +915,7 @@ icu::Collator* Collator::InitializeCollator(Isolate* isolate,
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
+ return nullptr;
}
icu_locale = icu::Locale(icu_result);
}
@@ -1014,7 +1024,7 @@ icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
+ return nullptr;
}
icu_locale = icu::Locale(icu_result);
}
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
new file mode 100644
index 0000000000..6bba2f0054
--- /dev/null
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -0,0 +1,243 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_ARRAY_INL_H_
+#define V8_OBJECTS_JS_ARRAY_INL_H_
+
+#include "src/objects/js-array.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
+TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
+
+CAST_ACCESSOR(JSArray)
+CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSArrayIterator)
+CAST_ACCESSOR(JSTypedArray)
+
+ACCESSORS(JSArray, length, Object, kLengthOffset)
+
+template <>
+inline bool Is<JSArray>(Object* obj) {
+ return obj->IsJSArray();
+}
+
+void JSArray::set_length(Smi* length) {
+ // Don't need a write barrier for a Smi.
+ set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
+}
+
+bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
+ return new_length > kMaxFastArrayLength;
+}
+
+bool JSArray::AllowsSetLength() {
+ bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
+ DCHECK(result == !HasFixedTypedArrayElements());
+ return result;
+}
+
+void JSArray::SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage) {
+ EnsureCanContainElements(array, storage, storage->length(),
+ ALLOW_COPIED_DOUBLE_ELEMENTS);
+
+ DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() &&
+ IsDoubleElementsKind(array->GetElementsKind())) ||
+ ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
+ (IsObjectElementsKind(array->GetElementsKind()) ||
+ (IsSmiElementsKind(array->GetElementsKind()) &&
+ Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
+ array->set_elements(*storage);
+ array->set_length(Smi::FromInt(storage->length()));
+}
+
+bool JSArray::HasArrayPrototype(Isolate* isolate) {
+ return map()->prototype() == *isolate->initial_array_prototype();
+}
+
+void* JSArrayBuffer::backing_store() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
+}
+
+ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
+
+void* JSArrayBuffer::allocation_base() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kAllocationBaseOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+void JSArrayBuffer::set_allocation_base(void* value, WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kAllocationBaseOffset, ptr);
+}
+
+size_t JSArrayBuffer::allocation_length() const {
+ return *reinterpret_cast<const size_t*>(
+ FIELD_ADDR_CONST(this, kAllocationLengthOffset));
+}
+
+void JSArrayBuffer::set_allocation_length(size_t value) {
+ (*reinterpret_cast<size_t*>(FIELD_ADDR(this, kAllocationLengthOffset))) =
+ value;
+}
+
+ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
+ using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
+ return has_guard_region() ? AllocationMode::kReservation
+ : AllocationMode::kNormal;
+}
+
+void JSArrayBuffer::set_bit_field(uint32_t bits) {
+ if (kInt32Size != kPointerSize) {
+#if V8_TARGET_LITTLE_ENDIAN
+ WRITE_UINT32_FIELD(this, kBitFieldSlot + kInt32Size, 0);
+#else
+ WRITE_UINT32_FIELD(this, kBitFieldSlot, 0);
+#endif
+ }
+ WRITE_UINT32_FIELD(this, kBitFieldOffset, bits);
+}
+
+uint32_t JSArrayBuffer::bit_field() const {
+ return READ_UINT32_FIELD(this, kBitFieldOffset);
+}
+
+bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
+
+void JSArrayBuffer::set_is_external(bool value) {
+ set_bit_field(IsExternal::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::is_neuterable() {
+ return IsNeuterable::decode(bit_field());
+}
+
+void JSArrayBuffer::set_is_neuterable(bool value) {
+ set_bit_field(IsNeuterable::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::was_neutered() { return WasNeutered::decode(bit_field()); }
+
+void JSArrayBuffer::set_was_neutered(bool value) {
+ set_bit_field(WasNeutered::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::is_shared() { return IsShared::decode(bit_field()); }
+
+void JSArrayBuffer::set_is_shared(bool value) {
+ set_bit_field(IsShared::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::has_guard_region() const {
+ return HasGuardRegion::decode(bit_field());
+}
+
+void JSArrayBuffer::set_has_guard_region(bool value) {
+ set_bit_field(HasGuardRegion::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::is_growable() { return IsGrowable::decode(bit_field()); }
+
+void JSArrayBuffer::set_is_growable(bool value) {
+ set_bit_field(IsGrowable::update(bit_field(), value));
+}
+
+Object* JSArrayBufferView::byte_offset() const {
+ if (WasNeutered()) return Smi::kZero;
+ return Object::cast(READ_FIELD(this, kByteOffsetOffset));
+}
+
+void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kByteOffsetOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteOffsetOffset, value, mode);
+}
+
+Object* JSArrayBufferView::byte_length() const {
+ if (WasNeutered()) return Smi::kZero;
+ return Object::cast(READ_FIELD(this, kByteLengthOffset));
+}
+
+void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kByteLengthOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteLengthOffset, value, mode);
+}
+
+ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
+#ifdef VERIFY_HEAP
+ACCESSORS(JSArrayBufferView, raw_byte_offset, Object, kByteOffsetOffset)
+ACCESSORS(JSArrayBufferView, raw_byte_length, Object, kByteLengthOffset)
+#endif
+
+bool JSArrayBufferView::WasNeutered() const {
+ return JSArrayBuffer::cast(buffer())->was_neutered();
+}
+
+Object* JSTypedArray::length() const {
+ if (WasNeutered()) return Smi::kZero;
+ return Object::cast(READ_FIELD(this, kLengthOffset));
+}
+
+uint32_t JSTypedArray::length_value() const {
+ if (WasNeutered()) return 0;
+ uint32_t index = 0;
+ CHECK(Object::cast(READ_FIELD(this, kLengthOffset))->ToArrayLength(&index));
+ return index;
+}
+
+void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kLengthOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
+}
+
+// static
+MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
+ Handle<Object> receiver,
+ const char* method_name) {
+ if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
+ const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
+ THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
+ }
+
+ Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
+ if (V8_UNLIKELY(array->WasNeutered())) {
+ const MessageTemplate::Template message =
+ MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
+ }
+
+ // spec describes to return `buffer`, but it may disrupt current
+ // implementations, and it's much useful to return array for now.
+ return array;
+}
+
+#ifdef VERIFY_HEAP
+ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
+#endif
+
+ACCESSORS(JSArrayIterator, object, Object, kIteratedObjectOffset)
+ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
+ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_ARRAY_INL_H_
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
new file mode 100644
index 0000000000..a2d13a766d
--- /dev/null
+++ b/deps/v8/src/objects/js-array.h
@@ -0,0 +1,357 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_ARRAY_H_
+#define V8_OBJECTS_JS_ARRAY_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The JSArray describes JavaScript Arrays
+// Such an array can be in one of two modes:
+// - fast, backing storage is a FixedArray and length <= elements.length();
+// Please note: push and pop can be used to grow and shrink the array.
+// - slow, backing storage is a HashTable with numbers as keys.
+class JSArray : public JSObject {
+ public:
+ // [length]: The length property.
+ DECL_ACCESSORS(length, Object)
+
+ // Overload the length setter to skip write barrier when the length
+ // is set to a smi. This matches the set function on FixedArray.
+ inline void set_length(Smi* length);
+
+ static bool HasReadOnlyLength(Handle<JSArray> array);
+ static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
+
+ // Initialize the array with the given capacity. The function may
+ // fail due to out-of-memory situations, but only if the requested
+ // capacity is non-zero.
+ static void Initialize(Handle<JSArray> array, int capacity, int length = 0);
+
+ // If the JSArray has fast elements, and new_length would result in
+ // normalization, returns true.
+ bool SetLengthWouldNormalize(uint32_t new_length);
+ static inline bool SetLengthWouldNormalize(Heap* heap, uint32_t new_length);
+
+ // Initializes the array to a certain length.
+ inline bool AllowsSetLength();
+
+ static void SetLength(Handle<JSArray> array, uint32_t length);
+
+ // Set the content of the array to the content of storage.
+ static inline void SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage);
+
+ // ES6 9.4.2.1
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSArray> o, Handle<Object> name,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ static bool AnythingToArrayLength(Isolate* isolate,
+ Handle<Object> length_object,
+ uint32_t* output);
+ MUST_USE_RESULT static Maybe<bool> ArraySetLength(Isolate* isolate,
+ Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ // Checks whether the Array has the current realm's Array.prototype as its
+ // prototype. This function is best-effort and only gives a conservative
+ // approximation, erring on the side of false, in particular with respect
+ // to Proxies and objects with a hidden prototype.
+ inline bool HasArrayPrototype(Isolate* isolate);
+
+ DECL_CAST(JSArray)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSArray)
+ DECL_VERIFIER(JSArray)
+
+ // Number of element slots to pre-allocate for an empty array.
+ static const int kPreallocatedArrayElements = 4;
+
+ // Layout description.
+ static const int kLengthOffset = JSObject::kHeaderSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ // Max. number of elements being copied in Array builtins.
+ static const int kMaxCopyElements = 100;
+
+ // This constant is somewhat arbitrary. Any large enough value would work.
+ static const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
+
+ static const int kInitialMaxFastElementArray =
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
+ AllocationMemento::kSize) >>
+ kDoubleSizeLog2;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
+};
+
+Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+ Handle<Map> initial_map);
+
+class JSArrayIterator : public JSObject {
+ public:
+ DECL_PRINTER(JSArrayIterator)
+ DECL_VERIFIER(JSArrayIterator)
+
+ DECL_CAST(JSArrayIterator)
+
+ // [object]: the [[IteratedObject]] inobject property.
+ DECL_ACCESSORS(object, Object)
+
+ // [index]: The [[ArrayIteratorNextIndex]] inobject property.
+ DECL_ACCESSORS(index, Object)
+
+ // [map]: The Map of the [[IteratedObject]] field at the time the iterator is
+ // allocated.
+ DECL_ACCESSORS(object_map, Object)
+
+ // Return the ElementsKind that a JSArrayIterator's [[IteratedObject]] is
+ // expected to have, based on its instance type.
+ static ElementsKind ElementsKindForInstanceType(InstanceType instance_type);
+
+ static const int kIteratedObjectOffset = JSObject::kHeaderSize;
+ static const int kNextIndexOffset = kIteratedObjectOffset + kPointerSize;
+ static const int kIteratedObjectMapOffset = kNextIndexOffset + kPointerSize;
+ static const int kSize = kIteratedObjectMapOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
+};
+
+// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
+enum class SharedFlag { kNotShared, kShared };
+
+class JSArrayBuffer : public JSObject {
+ public:
+ // [byte_length]: length in bytes
+ DECL_ACCESSORS(byte_length, Object)
+
+ // [backing_store]: backing memory for this array
+ DECL_ACCESSORS(backing_store, void)
+
+ // [allocation_base]: the start of the memory allocation for this array,
+ // normally equal to backing_store
+ DECL_ACCESSORS(allocation_base, void)
+
+ // [allocation_length]: the size of the memory allocation for this array,
+ // normally equal to byte_length
+ inline size_t allocation_length() const;
+ inline void set_allocation_length(size_t value);
+
+ inline uint32_t bit_field() const;
+ inline void set_bit_field(uint32_t bits);
+
+ // [is_external]: true indicates that the embedder is in charge of freeing the
+ // backing_store, while is_external == false means that v8 will free the
+ // memory block once all ArrayBuffers referencing it are collected by the GC.
+ inline bool is_external();
+ inline void set_is_external(bool value);
+
+ inline bool is_neuterable();
+ inline void set_is_neuterable(bool value);
+
+ inline bool was_neutered();
+ inline void set_was_neutered(bool value);
+
+ inline bool is_shared();
+ inline void set_is_shared(bool value);
+
+ inline bool has_guard_region() const;
+ inline void set_has_guard_region(bool value);
+
+ inline bool is_growable();
+ inline void set_is_growable(bool value);
+
+ DECL_CAST(JSArrayBuffer)
+
+ void Neuter();
+
+ inline ArrayBuffer::Allocator::AllocationMode allocation_mode() const;
+
+ struct Allocation {
+ using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
+
+ Allocation(void* allocation_base, size_t length, AllocationMode mode)
+ : allocation_base(allocation_base), length(length), mode(mode) {}
+
+ void* allocation_base;
+ size_t length;
+ AllocationMode mode;
+ };
+
+ void FreeBackingStore();
+ static void FreeBackingStore(Isolate* isolate, Allocation allocation);
+
+ V8_EXPORT_PRIVATE static void Setup(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
+ void* data, size_t allocated_length,
+ SharedFlag shared = SharedFlag::kNotShared);
+
+ V8_EXPORT_PRIVATE static void Setup(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
+ void* allocation_base, size_t allocation_length, void* data,
+ size_t byte_length, SharedFlag shared = SharedFlag::kNotShared);
+
+ // Returns false if array buffer contents could not be allocated.
+ // In this case, |array_buffer| will not be set up.
+ static bool SetupAllocatingData(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ size_t allocated_length, bool initialize = true,
+ SharedFlag shared = SharedFlag::kNotShared) WARN_UNUSED_RESULT;
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSArrayBuffer)
+ DECL_VERIFIER(JSArrayBuffer)
+
+ static const int kByteLengthOffset = JSObject::kHeaderSize;
+ // The rest of the fields are not JSObjects, so they are not iterated over in
+ // objects-body-descriptors-inl.h.
+ static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
+ static const int kAllocationBaseOffset = kBackingStoreOffset + kPointerSize;
+ static const int kAllocationLengthOffset =
+ kAllocationBaseOffset + kPointerSize;
+ static const int kBitFieldSlot = kAllocationLengthOffset + kSizetSize;
+#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
+ static const int kBitFieldOffset = kBitFieldSlot;
+#else
+ static const int kBitFieldOffset = kBitFieldSlot + kInt32Size;
+#endif
+ static const int kSize = kBitFieldSlot + kPointerSize;
+
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::ArrayBuffer::kEmbedderFieldCount * kPointerSize;
+
+ // Iterates all fields in the object including internal ones except
+ // kBackingStoreOffset and kBitFieldSlot.
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ class IsExternal : public BitField<bool, 1, 1> {};
+ class IsNeuterable : public BitField<bool, 2, 1> {};
+ class WasNeutered : public BitField<bool, 3, 1> {};
+ class IsShared : public BitField<bool, 4, 1> {};
+ class HasGuardRegion : public BitField<bool, 5, 1> {};
+ class IsGrowable : public BitField<bool, 6, 1> {};
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
+};
+
+class JSArrayBufferView : public JSObject {
+ public:
+ // [buffer]: ArrayBuffer that this typed array views.
+ DECL_ACCESSORS(buffer, Object)
+
+ // [byte_offset]: offset of typed array in bytes.
+ DECL_ACCESSORS(byte_offset, Object)
+
+ // [byte_length]: length of typed array in bytes.
+ DECL_ACCESSORS(byte_length, Object)
+
+ DECL_CAST(JSArrayBufferView)
+
+ DECL_VERIFIER(JSArrayBufferView)
+
+ inline bool WasNeutered() const;
+
+ static const int kBufferOffset = JSObject::kHeaderSize;
+ static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
+ static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
+ static const int kViewSize = kByteLengthOffset + kPointerSize;
+
+ private:
+#ifdef VERIFY_HEAP
+ DECL_ACCESSORS(raw_byte_offset, Object)
+ DECL_ACCESSORS(raw_byte_length, Object)
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
+};
+
+class JSTypedArray : public JSArrayBufferView {
+ public:
+ // [length]: length of typed array in elements.
+ DECL_ACCESSORS(length, Object)
+ inline uint32_t length_value() const;
+
+ // ES6 9.4.5.3
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ DECL_CAST(JSTypedArray)
+
+ ExternalArrayType type();
+ V8_EXPORT_PRIVATE size_t element_size();
+
+ Handle<JSArrayBuffer> GetBuffer();
+
+ static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
+ Handle<Object> receiver,
+ const char* method_name);
+ // ES7 section 22.2.4.6 Create ( constructor, argumentList )
+ static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
+ Handle<Object> default_ctor, int argc,
+ Handle<Object>* argv,
+ const char* method_name);
+ // ES7 section 22.2.4.7 TypedArraySpeciesCreate ( exemplar, argumentList )
+ static MaybeHandle<JSTypedArray> SpeciesCreate(Isolate* isolate,
+ Handle<JSTypedArray> exemplar,
+ int argc, Handle<Object>* argv,
+ const char* method_name);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSTypedArray)
+ DECL_VERIFIER(JSTypedArray)
+
+ static const int kLengthOffset = kViewSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
+
+ private:
+ static Handle<JSArrayBuffer> MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array);
+#ifdef VERIFY_HEAP
+ DECL_ACCESSORS(raw_length, Object)
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
+};
+
+class JSDataView : public JSArrayBufferView {
+ public:
+ DECL_CAST(JSDataView)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSDataView)
+ DECL_VERIFIER(JSDataView)
+
+ static const int kSize = kViewSize;
+
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_ARRAY_H_
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
new file mode 100644
index 0000000000..697c81eb42
--- /dev/null
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -0,0 +1,84 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_REGEXP_INL_H_
+#define V8_OBJECTS_JS_REGEXP_INL_H_
+
+#include "src/objects/js-regexp.h"
+
+#include "src/objects/string.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
+
+CAST_ACCESSOR(JSRegExp)
+
+ACCESSORS(JSRegExp, data, Object, kDataOffset)
+ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
+ACCESSORS(JSRegExp, source, Object, kSourceOffset)
+ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
+
+JSRegExp::Type JSRegExp::TypeTag() {
+ Object* data = this->data();
+ if (data->IsUndefined(GetIsolate())) return JSRegExp::NOT_COMPILED;
+ Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
+ return static_cast<JSRegExp::Type>(smi->value());
+}
+
+int JSRegExp::CaptureCount() {
+ switch (TypeTag()) {
+ case ATOM:
+ return 0;
+ case IRREGEXP:
+ return Smi::ToInt(DataAt(kIrregexpCaptureCountIndex));
+ default:
+ UNREACHABLE();
+ }
+}
+
+JSRegExp::Flags JSRegExp::GetFlags() {
+ DCHECK(this->data()->IsFixedArray());
+ Object* data = this->data();
+ Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
+ return Flags(smi->value());
+}
+
+String* JSRegExp::Pattern() {
+ DCHECK(this->data()->IsFixedArray());
+ Object* data = this->data();
+ String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
+ return pattern;
+}
+
+Object* JSRegExp::CaptureNameMap() {
+ DCHECK(this->data()->IsFixedArray());
+ DCHECK_EQ(TypeTag(), IRREGEXP);
+ Object* value = DataAt(kIrregexpCaptureNameMapIndex);
+ DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
+ return value;
+}
+
+Object* JSRegExp::DataAt(int index) {
+ DCHECK(TypeTag() != NOT_COMPILED);
+ return FixedArray::cast(data())->get(index);
+}
+
+void JSRegExp::SetDataAt(int index, Object* value) {
+ DCHECK(TypeTag() != NOT_COMPILED);
+ DCHECK_GE(index,
+ kDataIndex); // Only implementation data can be set this way.
+ FixedArray::cast(data())->set(index, value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_REGEXP_INL_H_
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
new file mode 100644
index 0000000000..32c07e879e
--- /dev/null
+++ b/deps/v8/src/objects/js-regexp.h
@@ -0,0 +1,164 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_REGEXP_H_
+#define V8_OBJECTS_JS_REGEXP_H_
+
+#include "src/objects/js-array.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Regular expressions
+// The regular expression holds a single reference to a FixedArray in
+// the kDataOffset field.
+// The FixedArray contains the following data:
+// - tag : type of regexp implementation (not compiled yet, atom or irregexp)
+// - reference to the original source string
+// - reference to the original flag string
+// If it is an atom regexp
+// - a reference to a literal string to search for
+// If it is an irregexp regexp:
+// - a reference to code for Latin1 inputs (bytecode or compiled), or a smi
+// used for tracking the last usage (used for regexp code flushing).
+// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
+// used for tracking the last usage (used for regexp code flushing).
+// - max number of registers used by irregexp implementations.
+// - number of capture registers (output values) of the regexp.
+class JSRegExp : public JSObject {
+ public:
+ // Meaning of Type:
+ // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
+ // ATOM: A simple string to match against using an indexOf operation.
+ // IRREGEXP: Compiled with Irregexp.
+ enum Type { NOT_COMPILED, ATOM, IRREGEXP };
+ enum Flag {
+ kNone = 0,
+ kGlobal = 1 << 0,
+ kIgnoreCase = 1 << 1,
+ kMultiline = 1 << 2,
+ kSticky = 1 << 3,
+ kUnicode = 1 << 4,
+ kDotAll = 1 << 5,
+ // Update FlagCount when adding new flags.
+ };
+ typedef base::Flags<Flag> Flags;
+
+ static int FlagCount() { return 6; }
+
+ DECL_ACCESSORS(data, Object)
+ DECL_ACCESSORS(flags, Object)
+ DECL_ACCESSORS(last_index, Object)
+ DECL_ACCESSORS(source, Object)
+
+ V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Handle<String> source,
+ Flags flags);
+ static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
+
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source, Flags flags);
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string);
+
+ inline Type TypeTag();
+ // Number of captures (without the match itself).
+ inline int CaptureCount();
+ inline Flags GetFlags();
+ inline String* Pattern();
+ inline Object* CaptureNameMap();
+ inline Object* DataAt(int index);
+ // Set implementation data after the object has been prepared.
+ inline void SetDataAt(int index, Object* value);
+
+ static int code_index(bool is_latin1) {
+ if (is_latin1) {
+ return kIrregexpLatin1CodeIndex;
+ } else {
+ return kIrregexpUC16CodeIndex;
+ }
+ }
+
+ DECL_CAST(JSRegExp)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSRegExp)
+ DECL_VERIFIER(JSRegExp)
+
+ static const int kDataOffset = JSObject::kHeaderSize;
+ static const int kSourceOffset = kDataOffset + kPointerSize;
+ static const int kFlagsOffset = kSourceOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+ static const int kLastIndexOffset = kSize; // In-object field.
+
+ // Indices in the data array.
+ static const int kTagIndex = 0;
+ static const int kSourceIndex = kTagIndex + 1;
+ static const int kFlagsIndex = kSourceIndex + 1;
+ static const int kDataIndex = kFlagsIndex + 1;
+ // The data fields are used in different ways depending on the
+ // value of the tag.
+ // Atom regexps (literal strings).
+ static const int kAtomPatternIndex = kDataIndex;
+
+ static const int kAtomDataSize = kAtomPatternIndex + 1;
+
+ // Irregexp compiled code or bytecode for Latin1. If compilation
+  // fails, this field holds an exception object that should be
+ // thrown if the regexp is used again.
+ static const int kIrregexpLatin1CodeIndex = kDataIndex;
+ // Irregexp compiled code or bytecode for UC16. If compilation
+  // fails, this field holds an exception object that should be
+ // thrown if the regexp is used again.
+ static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
+ // Maximal number of registers used by either Latin1 or UC16.
+ // Only used to check that there is enough stack space
+ static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
+ // Number of captures in the compiled regexp.
+ static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
+ // Maps names of named capture groups (at indices 2i) to their corresponding
+ // (1-based) capture group indices (at indices 2i + 1).
+ static const int kIrregexpCaptureNameMapIndex = kDataIndex + 4;
+
+ static const int kIrregexpDataSize = kIrregexpCaptureNameMapIndex + 1;
+
+ // In-object fields.
+ static const int kLastIndexFieldIndex = 0;
+ static const int kInObjectFieldCount = 1;
+
+ // The uninitialized value for a regexp code object.
+ static const int kUninitializedValue = -1;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
+
+// JSRegExpResult is just a JSArray with a specific initial map.
+// This initial map adds in-object properties for "index" and "input"
+// properties, as assigned by RegExp.prototype.exec, which allows
+// faster creation of RegExp exec results.
+// This class just holds constants used when creating the result.
+// After creation the result must be treated as a JSArray in all regards.
+class JSRegExpResult : public JSArray {
+ public:
+ // Offsets of object fields.
+ static const int kIndexOffset = JSArray::kSize;
+ static const int kInputOffset = kIndexOffset + kPointerSize;
+ static const int kSize = kInputOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kIndexIndex = 0;
+ static const int kInputIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_REGEXP_H_
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
new file mode 100644
index 0000000000..34a427c67b
--- /dev/null
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LITERAL_OBJECTS_INL_H_
+#define V8_LITERAL_OBJECTS_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(ClassBoilerplate)
+
+BIT_FIELD_ACCESSORS(ClassBoilerplate, flags, install_class_name_accessor,
+ ClassBoilerplate::Flags::InstallClassNameAccessorBit)
+
+BIT_FIELD_ACCESSORS(ClassBoilerplate, flags, arguments_count,
+ ClassBoilerplate::Flags::ArgumentsCountBits)
+
+SMI_ACCESSORS(ClassBoilerplate, flags,
+ FixedArray::OffsetOfElementAt(kFlagsIndex));
+
+ACCESSORS(ClassBoilerplate, static_properties_template, Object,
+ FixedArray::OffsetOfElementAt(kClassPropertiesTemplateIndex));
+
+ACCESSORS(ClassBoilerplate, static_elements_template, Object,
+ FixedArray::OffsetOfElementAt(kClassElementsTemplateIndex));
+
+ACCESSORS(ClassBoilerplate, static_computed_properties, FixedArray,
+ FixedArray::OffsetOfElementAt(kClassComputedPropertiesIndex));
+
+ACCESSORS(ClassBoilerplate, instance_properties_template, Object,
+ FixedArray::OffsetOfElementAt(kPrototypePropertiesTemplateIndex));
+
+ACCESSORS(ClassBoilerplate, instance_elements_template, Object,
+ FixedArray::OffsetOfElementAt(kPrototypeElementsTemplateIndex));
+
+ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
+ FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex));
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_LITERAL_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 22d2119645..13f8b00878 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -4,9 +4,12 @@
#include "src/objects/literal-objects.h"
+#include "src/accessors.h"
+#include "src/ast/ast.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
@@ -51,5 +54,534 @@ bool BoilerplateDescription::has_number_of_properties() const {
return length() % 2 != 0;
}
+namespace {
+
+inline int EncodeComputedEntry(ClassBoilerplate::ValueKind value_kind,
+ unsigned key_index) {
+ typedef ClassBoilerplate::ComputedEntryFlags Flags;
+ int flags = Flags::ValueKindBits::encode(value_kind) |
+ Flags::KeyIndexBits::encode(key_index);
+ return flags;
+}
+
+void AddToDescriptorArrayTemplate(
+ Isolate* isolate, Handle<DescriptorArray> descriptor_array_template,
+ Handle<Name> name, ClassBoilerplate::ValueKind value_kind,
+ Handle<Object> value) {
+ int entry = descriptor_array_template->Search(
+ *name, descriptor_array_template->number_of_descriptors());
+ // TODO(ishell): deduplicate properties at AST level, this will allow us to
+ // avoid creation of closures that will be overwritten anyway.
+ if (entry == DescriptorArray::kNotFound) {
+ // Entry not found, add new one.
+ Descriptor d;
+ if (value_kind == ClassBoilerplate::kData) {
+ d = Descriptor::DataConstant(name, value, DONT_ENUM);
+ } else {
+ DCHECK(value_kind == ClassBoilerplate::kGetter ||
+ value_kind == ClassBoilerplate::kSetter);
+ Handle<AccessorPair> pair = isolate->factory()->NewAccessorPair();
+ pair->set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER,
+ *value);
+ d = Descriptor::AccessorConstant(name, pair, DONT_ENUM);
+ }
+ descriptor_array_template->Append(&d);
+
+ } else {
+ // Entry found, update it.
+ int sorted_index = descriptor_array_template->GetDetails(entry).pointer();
+ if (value_kind == ClassBoilerplate::kData) {
+ Descriptor d = Descriptor::DataConstant(name, value, DONT_ENUM);
+ d.SetSortedKeyIndex(sorted_index);
+ descriptor_array_template->Set(entry, &d);
+ } else {
+ DCHECK(value_kind == ClassBoilerplate::kGetter ||
+ value_kind == ClassBoilerplate::kSetter);
+ Object* raw_accessor = descriptor_array_template->GetValue(entry);
+ AccessorPair* pair;
+ if (raw_accessor->IsAccessorPair()) {
+ pair = AccessorPair::cast(raw_accessor);
+ } else {
+ Handle<AccessorPair> new_pair = isolate->factory()->NewAccessorPair();
+ Descriptor d = Descriptor::AccessorConstant(name, new_pair, DONT_ENUM);
+ d.SetSortedKeyIndex(sorted_index);
+ descriptor_array_template->Set(entry, &d);
+ pair = *new_pair;
+ }
+ pair->set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER,
+ *value);
+ }
+ }
+}
+
+Handle<NameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
+ Handle<NameDictionary> dictionary, Handle<Name> name, Handle<Object> value,
+ PropertyDetails details, int* entry_out = nullptr) {
+ return NameDictionary::AddNoUpdateNextEnumerationIndex(
+ dictionary, name, value, details, entry_out);
+}
+
+Handle<NumberDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
+ Handle<NumberDictionary> dictionary, uint32_t element, Handle<Object> value,
+ PropertyDetails details, int* entry_out = nullptr) {
+ // NumberDictionary does not maintain the enumeration order, so it's
+ // a normal Add().
+ return NumberDictionary::Add(dictionary, element, value, details, entry_out);
+}
+
+void DictionaryUpdateMaxNumberKey(Handle<NameDictionary> dictionary,
+ Handle<Name> name) {
+ // No-op for name dictionaries.
+}
+
+void DictionaryUpdateMaxNumberKey(Handle<NumberDictionary> dictionary,
+ uint32_t element) {
+ dictionary->UpdateMaxNumberKey(element, Handle<JSObject>());
+ dictionary->set_requires_slow_elements();
+}
+
+constexpr int ComputeEnumerationIndex(int value_index) {
+ // We "shift" value indices to ensure that the enumeration index for the value
+ // will not overlap with minimum properties set for both class and prototype
+ // objects.
+ return value_index + Max(ClassBoilerplate::kMinimumClassPropertiesCount,
+ ClassBoilerplate::kMinimumPrototypePropertiesCount);
+}
+
+inline int GetExistingValueIndex(Object* value) {
+ return value->IsSmi() ? Smi::ToInt(value) : -1;
+}
+
+template <typename Dictionary, typename Key>
+void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
+ Key key, int key_index,
+ ClassBoilerplate::ValueKind value_kind,
+ Object* value) {
+ int entry = dictionary->FindEntry(isolate, key);
+
+ if (entry == kNotFound) {
+ // Entry not found, add new one.
+ const bool is_elements_dictionary =
+ std::is_same<Dictionary, NumberDictionary>::value;
+ STATIC_ASSERT(is_elements_dictionary !=
+ (std::is_same<Dictionary, NameDictionary>::value));
+ int enum_order =
+ is_elements_dictionary ? 0 : ComputeEnumerationIndex(key_index);
+ Handle<Object> value_handle;
+ PropertyDetails details(
+ value_kind != ClassBoilerplate::kData ? kAccessor : kData, DONT_ENUM,
+ PropertyCellType::kNoCell, enum_order);
+
+ if (value_kind == ClassBoilerplate::kData) {
+ value_handle = handle(value, isolate);
+ } else {
+ AccessorComponent component = value_kind == ClassBoilerplate::kGetter
+ ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER;
+ Handle<AccessorPair> pair(isolate->factory()->NewAccessorPair());
+ pair->set(component, value);
+ value_handle = pair;
+ }
+
+ // Add value to the dictionary without updating next enumeration index.
+ Handle<Dictionary> dict = DictionaryAddNoUpdateNextEnumerationIndex(
+ dictionary, key, value_handle, details, &entry);
+ // It is crucial to avoid dictionary reallocations because it may remove
+ // potential gaps in enumeration indices values that are necessary for
+ // inserting computed properties into right places in the enumeration order.
+ CHECK_EQ(*dict, *dictionary);
+
+ DictionaryUpdateMaxNumberKey(dictionary, key);
+
+ } else {
+ // Entry found, update it.
+ int enum_order = dictionary->DetailsAt(entry).dictionary_index();
+ Object* existing_value = dictionary->ValueAt(entry);
+ if (value_kind == ClassBoilerplate::kData) {
+ // Computed value is a normal method.
+ if (existing_value->IsAccessorPair()) {
+ AccessorPair* current_pair = AccessorPair::cast(existing_value);
+
+ int existing_getter_index =
+ GetExistingValueIndex(current_pair->getter());
+ int existing_setter_index =
+ GetExistingValueIndex(current_pair->setter());
+ if (existing_getter_index < key_index &&
+ existing_setter_index < key_index) {
+ // Both getter and setter were defined before the computed method,
+ // so overwrite both.
+ PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
+ enum_order);
+ dictionary->DetailsAtPut(entry, details);
+ dictionary->ValueAtPut(entry, value);
+
+ } else {
+ if (existing_getter_index < key_index) {
+ DCHECK_LT(existing_setter_index, key_index);
+ // Getter was defined before the computed method and then it was
+ // overwritten by the current computed method which in turn was
+ // later overwritten by the setter method. So we clear the getter.
+ current_pair->set_getter(*isolate->factory()->null_value());
+
+ } else if (existing_setter_index < key_index) {
+ DCHECK_LT(existing_getter_index, key_index);
+ // Setter was defined before the computed method and then it was
+ // overwritten by the current computed method which in turn was
+ // later overwritten by the getter method. So we clear the setter.
+ current_pair->set_setter(*isolate->factory()->null_value());
+ }
+ }
+ } else {
+ // Overwrite existing value if it was defined before the computed one.
+ int existing_value_index = Smi::ToInt(existing_value);
+ if (existing_value_index < key_index) {
+ PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
+ enum_order);
+ dictionary->DetailsAtPut(entry, details);
+ dictionary->ValueAtPut(entry, value);
+ }
+ }
+ } else {
+ AccessorComponent component = value_kind == ClassBoilerplate::kGetter
+ ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER;
+ if (existing_value->IsAccessorPair()) {
+ AccessorPair* current_pair = AccessorPair::cast(existing_value);
+
+ int existing_component_index =
+ GetExistingValueIndex(current_pair->get(component));
+ if (existing_component_index < key_index) {
+ current_pair->set(component, value);
+ }
+
+ } else {
+ Handle<AccessorPair> pair(isolate->factory()->NewAccessorPair());
+ pair->set(component, value);
+ PropertyDetails details(kAccessor, DONT_ENUM,
+ PropertyCellType::kNoCell);
+ dictionary->DetailsAtPut(entry, details);
+ dictionary->ValueAtPut(entry, *pair);
+ }
+ }
+ }
+}
+
+} // namespace
+
+// Helper class that eases building of a properties, elements and computed
+// properties templates.
+class ObjectDescriptor {
+ public:
+ void IncComputedCount() { ++computed_count_; }
+ void IncPropertiesCount() { ++property_count_; }
+ void IncElementsCount() { ++element_count_; }
+
+ bool HasDictionaryProperties() const {
+ return computed_count_ > 0 || property_count_ > kMaxNumberOfDescriptors;
+ }
+
+ Handle<Object> properties_template() const {
+ return HasDictionaryProperties()
+ ? Handle<Object>::cast(properties_dictionary_template_)
+ : Handle<Object>::cast(descriptor_array_template_);
+ }
+
+ Handle<NumberDictionary> elements_template() const {
+ return elements_dictionary_template_;
+ }
+
+ Handle<FixedArray> computed_properties() const {
+ return computed_properties_;
+ }
+
+ void CreateTemplates(Isolate* isolate, int slack) {
+ Factory* factory = isolate->factory();
+ descriptor_array_template_ = factory->empty_descriptor_array();
+ properties_dictionary_template_ = factory->empty_property_dictionary();
+ if (property_count_ || HasDictionaryProperties() || slack) {
+ if (HasDictionaryProperties()) {
+ properties_dictionary_template_ = NameDictionary::New(
+ isolate, property_count_ + computed_count_ + slack);
+ } else {
+ descriptor_array_template_ =
+ DescriptorArray::Allocate(isolate, 0, property_count_ + slack);
+ }
+ }
+ elements_dictionary_template_ =
+ element_count_ || computed_count_
+ ? NumberDictionary::New(isolate, element_count_ + computed_count_)
+ : factory->empty_slow_element_dictionary();
+
+ computed_properties_ =
+ computed_count_
+ ? factory->NewFixedArray(computed_count_ *
+ ClassBoilerplate::kFullComputedEntrySize)
+ : factory->empty_fixed_array();
+
+ temp_handle_ = handle(Smi::kZero, isolate);
+ }
+
+ void AddConstant(Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attribs) {
+ bool is_accessor = value->IsAccessorInfo();
+ DCHECK(!value->IsAccessorPair());
+ if (HasDictionaryProperties()) {
+ PropertyKind kind = is_accessor ? i::kAccessor : i::kData;
+ PropertyDetails details(kind, attribs, PropertyCellType::kNoCell,
+ next_enumeration_index_++);
+ properties_dictionary_template_ =
+ DictionaryAddNoUpdateNextEnumerationIndex(
+ properties_dictionary_template_, name, value, details);
+ } else {
+ Descriptor d = is_accessor
+ ? Descriptor::AccessorConstant(name, value, attribs)
+ : Descriptor::DataConstant(name, value, attribs);
+ descriptor_array_template_->Append(&d);
+ }
+ }
+
+ void AddNamedProperty(Isolate* isolate, Handle<Name> name,
+ ClassBoilerplate::ValueKind value_kind,
+ int value_index) {
+ Smi* value = Smi::FromInt(value_index);
+ if (HasDictionaryProperties()) {
+ UpdateNextEnumerationIndex(value_index);
+ AddToDictionaryTemplate(isolate, properties_dictionary_template_, name,
+ value_index, value_kind, value);
+ } else {
+ *temp_handle_.location() = value;
+ AddToDescriptorArrayTemplate(isolate, descriptor_array_template_, name,
+ value_kind, temp_handle_);
+ }
+ }
+
+ void AddIndexedProperty(Isolate* isolate, uint32_t element,
+ ClassBoilerplate::ValueKind value_kind,
+ int value_index) {
+ Smi* value = Smi::FromInt(value_index);
+ AddToDictionaryTemplate(isolate, elements_dictionary_template_, element,
+ value_index, value_kind, value);
+ }
+
+ void AddComputed(ClassBoilerplate::ValueKind value_kind, int key_index) {
+ int value_index = key_index + 1;
+ UpdateNextEnumerationIndex(value_index);
+
+ int flags = EncodeComputedEntry(value_kind, key_index);
+ computed_properties_->set(current_computed_index_++, Smi::FromInt(flags));
+ }
+
+ void UpdateNextEnumerationIndex(int value_index) {
+ int next_index = ComputeEnumerationIndex(value_index);
+ DCHECK_LT(next_enumeration_index_, next_index);
+ next_enumeration_index_ = next_index;
+ }
+
+ void Finalize(Isolate* isolate) {
+ if (HasDictionaryProperties()) {
+ properties_dictionary_template_->SetNextEnumerationIndex(
+ next_enumeration_index_);
+
+ isolate->heap()->RightTrimFixedArray(
+ *computed_properties_,
+ computed_properties_->length() - current_computed_index_);
+ } else {
+ DCHECK(descriptor_array_template_->IsSortedNoDuplicates());
+ }
+ }
+
+ private:
+ int property_count_ = 0;
+ int next_enumeration_index_ = PropertyDetails::kInitialIndex;
+ int element_count_ = 0;
+ int computed_count_ = 0;
+ int current_computed_index_ = 0;
+
+ Handle<DescriptorArray> descriptor_array_template_;
+ Handle<NameDictionary> properties_dictionary_template_;
+ Handle<NumberDictionary> elements_dictionary_template_;
+ Handle<FixedArray> computed_properties_;
+ // This temporary handle is used for storing to descriptor array.
+ Handle<Object> temp_handle_;
+};
+
+void ClassBoilerplate::AddToPropertiesTemplate(
+ Isolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
+ int key_index, ClassBoilerplate::ValueKind value_kind, Object* value) {
+ AddToDictionaryTemplate(isolate, dictionary, name, key_index, value_kind,
+ value);
+}
+
+void ClassBoilerplate::AddToElementsTemplate(
+ Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ int key_index, ClassBoilerplate::ValueKind value_kind, Object* value) {
+ AddToDictionaryTemplate(isolate, dictionary, key, key_index, value_kind,
+ value);
+}
+
+Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
+ Isolate* isolate, ClassLiteral* expr) {
+ Factory* factory = isolate->factory();
+ ObjectDescriptor static_desc;
+ ObjectDescriptor instance_desc;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ClassLiteral::Property* property = expr->properties()->at(i);
+ ObjectDescriptor& desc =
+ property->is_static() ? static_desc : instance_desc;
+ if (property->is_computed_name()) {
+ desc.IncComputedCount();
+ } else {
+ if (property->key()->AsLiteral()->IsPropertyName()) {
+ desc.IncPropertiesCount();
+ } else {
+ desc.IncElementsCount();
+ }
+ }
+ }
+
+ //
+ // Initialize class object template.
+ //
+ static_desc.CreateTemplates(isolate, kMinimumClassPropertiesCount);
+ Handle<DescriptorArray> class_function_descriptors(
+ isolate->native_context()->class_function_map()->instance_descriptors(),
+ isolate);
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ {
+ // Add length_accessor.
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ static_desc.AddConstant(factory->length_string(),
+ factory->function_length_accessor(), attribs);
+ }
+ {
+ // Add prototype_accessor.
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ static_desc.AddConstant(factory->prototype_string(),
+ factory->function_prototype_accessor(), attribs);
+ }
+ if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ Handle<Object> value(
+ Smi::FromInt(ClassBoilerplate::kPrototypeArgumentIndex), isolate);
+ static_desc.AddConstant(factory->home_object_symbol(), value, attribs);
+ }
+ {
+ Handle<Smi> start_position(Smi::FromInt(expr->start_position()), isolate);
+ Handle<Smi> end_position(Smi::FromInt(expr->end_position()), isolate);
+ Handle<Tuple2> class_positions =
+ factory->NewTuple2(start_position, end_position, NOT_TENURED);
+ static_desc.AddConstant(factory->class_positions_symbol(), class_positions,
+ DONT_ENUM);
+ }
+
+ //
+ // Initialize prototype object template.
+ //
+ instance_desc.CreateTemplates(isolate, kMinimumPrototypePropertiesCount);
+ {
+ Handle<Object> value(
+ Smi::FromInt(ClassBoilerplate::kConstructorArgumentIndex), isolate);
+ instance_desc.AddConstant(factory->constructor_string(), value, DONT_ENUM);
+ }
+
+ //
+ // Fill in class boilerplate.
+ //
+ int dynamic_argument_index = ClassBoilerplate::kFirstDynamicArgumentIndex;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ClassLiteral::Property* property = expr->properties()->at(i);
+
+ ClassBoilerplate::ValueKind value_kind;
+ switch (property->kind()) {
+ case ClassLiteral::Property::METHOD:
+ value_kind = ClassBoilerplate::kData;
+ break;
+ case ClassLiteral::Property::GETTER:
+ value_kind = ClassBoilerplate::kGetter;
+ break;
+ case ClassLiteral::Property::SETTER:
+ value_kind = ClassBoilerplate::kSetter;
+ break;
+ case ClassLiteral::Property::FIELD:
+ if (property->is_computed_name()) {
+ ++dynamic_argument_index;
+ }
+ continue;
+ }
+
+ ObjectDescriptor& desc =
+ property->is_static() ? static_desc : instance_desc;
+ if (property->is_computed_name()) {
+ int computed_name_index = dynamic_argument_index;
+ dynamic_argument_index += 2; // Computed name and value indices.
+ desc.AddComputed(value_kind, computed_name_index);
+ continue;
+ }
+ int value_index = dynamic_argument_index++;
+
+ Literal* key_literal = property->key()->AsLiteral();
+ uint32_t index;
+ if (key_literal->AsArrayIndex(&index)) {
+ desc.AddIndexedProperty(isolate, index, value_kind, value_index);
+
+ } else {
+ Handle<String> name = key_literal->AsRawPropertyName()->string();
+ DCHECK(name->IsInternalizedString());
+ desc.AddNamedProperty(isolate, name, value_kind, value_index);
+ }
+ }
+
+ // Add name accessor to the class object if necessary.
+ bool install_class_name_accessor = false;
+ if (!expr->has_name_static_property() &&
+ expr->constructor()->has_shared_name()) {
+ if (static_desc.HasDictionaryProperties()) {
+ // Install class name accessor if necessary during class literal
+ // instantiation.
+ install_class_name_accessor = true;
+ } else {
+ // Set class name accessor if the "name" method was not added yet.
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ static_desc.AddConstant(factory->name_string(),
+ factory->function_name_accessor(), attribs);
+ }
+ }
+
+ static_desc.Finalize(isolate);
+ instance_desc.Finalize(isolate);
+
+ Handle<ClassBoilerplate> class_boilerplate =
+ Handle<ClassBoilerplate>::cast(factory->NewFixedArray(kBoileplateLength));
+
+ class_boilerplate->set_flags(0);
+ class_boilerplate->set_install_class_name_accessor(
+ install_class_name_accessor);
+ class_boilerplate->set_arguments_count(dynamic_argument_index);
+
+ class_boilerplate->set_static_properties_template(
+ *static_desc.properties_template());
+ class_boilerplate->set_static_elements_template(
+ *static_desc.elements_template());
+ class_boilerplate->set_static_computed_properties(
+ *static_desc.computed_properties());
+
+ class_boilerplate->set_instance_properties_template(
+ *instance_desc.properties_template());
+ class_boilerplate->set_instance_elements_template(
+ *instance_desc.elements_template());
+ class_boilerplate->set_instance_computed_properties(
+ *instance_desc.computed_properties());
+
+ return class_boilerplate;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 74a5afde42..6fe34ffa8a 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+class ClassLiteral;
+
// BoilerplateDescription is a list of properties consisting of name value
// pairs. In addition to the properties, it provides the projected number
// of properties in the backing store. This number includes properties with
@@ -56,6 +58,79 @@ class ConstantElementsPair : public Tuple2 {
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
};
+class ClassBoilerplate : public FixedArray {
+ public:
+ enum ValueKind { kData, kGetter, kSetter };
+
+ struct Flags {
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(InstallClassNameAccessorBit, bool, 1, _) \
+ V(ArgumentsCountBits, int, 30, _)
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+ };
+
+ struct ComputedEntryFlags {
+#define COMPUTED_ENTRY_BIT_FIELDS(V, _) \
+ V(ValueKindBits, ValueKind, 2, _) \
+ V(KeyIndexBits, unsigned, 29, _)
+ DEFINE_BIT_FIELDS(COMPUTED_ENTRY_BIT_FIELDS)
+#undef COMPUTED_ENTRY_BIT_FIELDS
+ };
+
+ enum DefineClassArgumentsIndices {
+ kConstructorArgumentIndex = 1,
+ kPrototypeArgumentIndex = 2,
+ // The index of a first dynamic argument passed to Runtime::kDefineClass
+    // function. The dynamic arguments consist of method closures and
+ // computed property names.
+ kFirstDynamicArgumentIndex = 3,
+ };
+
+ static const int kMinimumClassPropertiesCount = 6;
+ static const int kMinimumPrototypePropertiesCount = 1;
+
+ DECL_CAST(ClassBoilerplate)
+
+ DECL_BOOLEAN_ACCESSORS(install_class_name_accessor)
+ DECL_INT_ACCESSORS(arguments_count)
+ DECL_ACCESSORS(static_properties_template, Object)
+ DECL_ACCESSORS(static_elements_template, Object)
+ DECL_ACCESSORS(static_computed_properties, FixedArray)
+ DECL_ACCESSORS(instance_properties_template, Object)
+ DECL_ACCESSORS(instance_elements_template, Object)
+ DECL_ACCESSORS(instance_computed_properties, FixedArray)
+
+ static void AddToPropertiesTemplate(Isolate* isolate,
+ Handle<NameDictionary> dictionary,
+ Handle<Name> name, int key_index,
+ ValueKind value_kind, Object* value);
+
+ static void AddToElementsTemplate(Isolate* isolate,
+ Handle<NumberDictionary> dictionary,
+ uint32_t key, int key_index,
+ ValueKind value_kind, Object* value);
+
+ static Handle<ClassBoilerplate> BuildClassBoilerplate(Isolate* isolate,
+ ClassLiteral* expr);
+
+ enum {
+ kFlagsIndex,
+ kClassPropertiesTemplateIndex,
+ kClassElementsTemplateIndex,
+ kClassComputedPropertiesIndex,
+ kPrototypePropertiesTemplateIndex,
+ kPrototypeElementsTemplateIndex,
+ kPrototypeComputedPropertiesIndex,
+ kBoileplateLength // last element
+ };
+
+ static const int kFullComputedEntrySize = 2;
+
+ private:
+ DECL_INT_ACCESSORS(flags)
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 5806a24ae0..d9a0a73158 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_MAP_H_
#include "src/objects.h"
+#include "src/objects/code.h"
#include "src/globals.h"
@@ -22,6 +23,7 @@ namespace internal {
V(BytecodeArray) \
V(Cell) \
V(Code) \
+ V(CodeDataContainer) \
V(ConsString) \
V(DataObject) \
V(FeedbackVector) \
@@ -84,23 +86,28 @@ typedef std::vector<Handle<Map>> MapHandles;
// +---------------+---------------------------------------------+
// | TaggedPointer | map - Always a pointer to the MetaMap root |
// +---------------+---------------------------------------------+
-// | Int | instance_sizes (the first int field) |
+// | Int | The first int field |
// `---+----------+---------------------------------------------+
// | Byte | [instance_size] |
// +----------+---------------------------------------------+
// | Byte | If Map for a primitive type: |
// | | native context index for constructor fn |
// | | If Map for an Object type: |
-// | | number of in-object properties |
+// | | inobject properties start offset in words |
// +----------+---------------------------------------------+
-// | Byte | unused |
+// | Byte | [used_or_unused_instance_size_in_words] |
+// | | For JSObject in fast mode this byte encodes |
+// | | the size of the object that includes only |
+// | | the used property fields or the slack size |
+// | | in properties backing store. |
// +----------+---------------------------------------------+
// | Byte | [visitor_id] |
// +----+----------+---------------------------------------------+
-// | Int | instance_attributes (second int field) |
+// | Int | The second int field |
// `---+----------+---------------------------------------------+
-// | Word16 | [instance_type] in low byte |
-// | | [bit_field] in high byte |
+// | Short | [instance_type] |
+// +----------+---------------------------------------------+
+// | Byte | [bit_field] |
// | | - has_non_instance_prototype (bit 0) |
// | | - is_callable (bit 1) |
// | | - has_named_interceptor (bit 2) |
@@ -114,11 +121,8 @@ typedef std::vector<Handle<Map>> MapHandles;
// | | - is_extensible (bit 0) |
// | | - is_prototype_map (bit 2) |
// | | - elements_kind (bits 3..7) |
-// +----------+---------------------------------------------+
-// | Byte | [unused_property_fields] number of unused |
-// | | property fields in JSObject (for fast-mode) |
// +----+----------+---------------------------------------------+
-// | Word | [bit_field3] |
+// | Int | [bit_field3] |
// | | - number_of_own_descriptors (bit 0..19) |
// | | - is_dictionary_map (bit 20) |
// | | - owns_descriptors (bit 21) |
@@ -131,9 +135,10 @@ typedef std::vector<Handle<Map>> MapHandles;
// | | - may_have_interesting_symbols (bit 28) |
// | | - construction_counter (bit 29..31) |
// | | |
-// | | On systems with 64bit pointer types, there |
+// +*************************************************************+
+// | Int | On systems with 64bit pointer types, there |
// | | is an unused 32bits after bit_field3 |
-// +---------------+---------------------------------------------+
+// +*************************************************************+
// | TaggedPointer | [prototype] |
// +---------------+---------------------------------------------+
// | TaggedPointer | [constructor_or_backpointer] |
@@ -146,7 +151,9 @@ typedef std::vector<Handle<Map>> MapHandles;
// | TaggedPointer | [instance_descriptors] |
// +*************************************************************+
// ! TaggedPointer ! [layout_descriptors] !
-// ! ! Field is only present on 64 bit arch !
+// ! ! Field is only present if compile-time flag !
+// ! ! FLAG_unbox_double_fields is enabled !
+// ! ! (basically on 64 bit architectures) !
// +*************************************************************+
// | TaggedPointer | [dependent_code] |
// +---------------+---------------------------------------------+
@@ -158,20 +165,21 @@ class Map : public HeapObject {
// Instance size.
// Size in bytes or kVariableSizeSentinel if instances do not have
// a fixed size.
- inline int instance_size() const;
- inline void set_instance_size(int value);
+ DECL_INT_ACCESSORS(instance_size)
+ // Size in words or kVariableSizeSentinel if instances do not have
+ // a fixed size.
+ DECL_INT_ACCESSORS(instance_size_in_words)
- // Only to clear an unused byte, remove once byte is used.
- inline void clear_unused();
+ // [inobject_properties_start_or_constructor_function_index]:
+ // Provides access to the inobject properties start offset in words in case of
+ // JSObject maps, or the constructor function index in case of primitive maps.
+ DECL_INT_ACCESSORS(inobject_properties_start_or_constructor_function_index)
- // [inobject_properties_or_constructor_function_index]: Provides access
- // to the inobject properties in case of JSObject maps, or the constructor
- // function index in case of primitive maps.
- inline int inobject_properties_or_constructor_function_index() const;
- inline void set_inobject_properties_or_constructor_function_index(int value);
+ // Get/set the in-object property area start offset in words in the object.
+ inline int GetInObjectPropertiesStartInWords() const;
+ inline void SetInObjectPropertiesStartInWords(int value);
// Count of properties allocated in the object (JSObject only).
inline int GetInObjectProperties() const;
- inline void SetInObjectProperties(int value);
// Index of the constructor function in the native context (primitives only),
// or the special sentinel value to indicate that there is no object wrapper
// for the primitive (i.e. in case of null or undefined).
@@ -189,10 +197,22 @@ class Map : public HeapObject {
inline InstanceType instance_type() const;
inline void set_instance_type(InstanceType value);
- // Tells how many unused property fields are available in the
- // instance (only used for JSObject in fast mode).
- inline int unused_property_fields() const;
- inline void set_unused_property_fields(int value);
+ // Returns the size of the used in-object area including object header
+ // (only used for JSObject in fast mode, for the other kinds of objects it
+ // is equal to the instance size).
+ inline int UsedInstanceSize() const;
+
+ // Tells how many unused property fields (in-object or out-of object) are
+ // available in the instance (only used for JSObject in fast mode).
+ inline int UnusedPropertyFields() const;
+ // Updates the counters tracking unused fields in the object.
+ inline void SetInObjectUnusedPropertyFields(int unused_property_fields);
+ // Updates the counters tracking unused fields in the property array.
+ inline void SetOutOfObjectUnusedPropertyFields(int unused_property_fields);
+ inline void CopyUnusedPropertyFields(Map* map);
+ inline void AccountAddedPropertyField();
+ inline void AccountAddedOutOfObjectPropertyField(
+ int unused_in_property_array);
// Bit field.
inline byte bit_field() const;
@@ -222,6 +242,8 @@ class Map : public HeapObject {
class NewTargetIsBase : public BitField<bool, 27, 1> {};
class MayHaveInterestingSymbols : public BitField<bool, 28, 1> {};
+ STATIC_ASSERT(NumberOfOwnDescriptorsBits::kMax >= kMaxNumberOfDescriptors);
+
// Keep this bit field at the very end for better code in
// Builtins::kJSConstructStubGeneric stub.
// This counter is used for in-object slack tracking.
@@ -293,19 +315,18 @@ class Map : public HeapObject {
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
- inline void set_is_constructor(bool value);
- inline bool is_constructor() const;
+ DECL_BOOLEAN_ACCESSORS(is_constructor)
// Tells whether the instance with this map may have properties for
// interesting symbols on it.
// An "interesting symbol" is one for which Name::IsInterestingSymbol()
// returns true, i.e. a well-known symbol like @@toStringTag.
- inline void set_may_have_interesting_symbols(bool value);
- inline bool may_have_interesting_symbols() const;
+ DECL_BOOLEAN_ACCESSORS(may_have_interesting_symbols)
+
+ DECL_BOOLEAN_ACCESSORS(has_prototype_slot)
// Tells whether the instance with this map has a hidden prototype.
- inline void set_has_hidden_prototype(bool value);
- inline bool has_hidden_prototype() const;
+ DECL_BOOLEAN_ACCESSORS(has_hidden_prototype)
// Records and queries whether the instance has a named interceptor.
inline void set_has_named_interceptor();
@@ -329,12 +350,9 @@ class Map : public HeapObject {
inline void set_is_callable();
inline bool is_callable() const;
- inline void set_new_target_is_base(bool value);
- inline bool new_target_is_base() const;
- inline void set_is_extensible(bool value);
- inline bool is_extensible() const;
- inline void set_is_prototype_map(bool value);
- inline bool is_prototype_map() const;
+ DECL_BOOLEAN_ACCESSORS(new_target_is_base)
+ DECL_BOOLEAN_ACCESSORS(is_extensible)
+ DECL_BOOLEAN_ACCESSORS(is_prototype_map)
inline bool is_abandoned_prototype_map() const;
inline void set_elements_kind(ElementsKind elements_kind);
@@ -482,7 +500,8 @@ class Map : public HeapObject {
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
// TODO(jkummerow): make set_prototype private.
- static void SetPrototype(Handle<Map> map, Handle<Object> prototype);
+ static void SetPrototype(Handle<Map> map, Handle<Object> prototype,
+ bool enable_prototype_setup_mode = true);
// [constructor]: points back to the function or FunctionTemplateInfo
// responsible for this map.
@@ -543,16 +562,14 @@ class Map : public HeapObject {
inline int EnumLength() const;
inline void SetEnumLength(int length);
- inline bool owns_descriptors() const;
- inline void set_owns_descriptors(bool owns_descriptors);
+ DECL_BOOLEAN_ACCESSORS(owns_descriptors)
inline void mark_unstable();
inline bool is_stable() const;
inline void set_migration_target(bool value);
inline bool is_migration_target() const;
inline void set_immutable_proto(bool value);
inline bool is_immutable_proto() const;
- inline void set_construction_counter(int value);
- inline int construction_counter() const;
+ DECL_INT_ACCESSORS(construction_counter)
inline void deprecate();
inline bool is_deprecated() const;
inline bool CanBeDeprecated() const;
@@ -649,17 +666,18 @@ class Map : public HeapObject {
DECL_CAST(Map)
- // Extend the descriptor array of the map with the list of descriptors.
- // In case of duplicates, the latest descriptor is used.
- static void AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors);
-
static inline int SlackForArraySize(int old_size, int size_limit);
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
+ // Returns the map to be used for instances when the given {prototype} is
+ // passed to an Object.create call. Might transition the given {prototype}.
static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
+ // Similar to {GetObjectCreateMap} but does not transition {prototype} and
+ // fails gracefully by returning an empty handle instead.
+ static MaybeHandle<Map> TryGetObjectCreateMap(Handle<HeapObject> prototype);
+
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
@@ -702,8 +720,7 @@ class Map : public HeapObject {
void DictionaryMapVerify();
#endif
- inline int visitor_id() const;
- inline void set_visitor_id(int visitor_id);
+ DECL_PRIMITIVE_ACCESSORS(visitor_id, VisitorId)
static Handle<Map> TransitionToPrototype(Handle<Map> map,
Handle<Object> prototype);
@@ -713,65 +730,34 @@ class Map : public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
- static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
- static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
- static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize;
- static const int kPrototypeOffset = kBitField3Offset + kPointerSize;
- static const int kConstructorOrBackPointerOffset =
- kPrototypeOffset + kPointerSize;
- // When there is only one transition, it is stored directly in this field;
- // otherwise a transition array is used.
- // For prototype maps, this slot is used to store this map's PrototypeInfo
- // struct.
- static const int kTransitionsOrPrototypeInfoOffset =
- kConstructorOrBackPointerOffset + kPointerSize;
- static const int kDescriptorsOffset =
- kTransitionsOrPrototypeInfoOffset + kPointerSize;
-#if V8_DOUBLE_FIELDS_UNBOXING
- static const int kLayoutDescriptorOffset = kDescriptorsOffset + kPointerSize;
- static const int kDependentCodeOffset =
- kLayoutDescriptorOffset + kPointerSize;
-#else
- static const int kLayoutDescriptorOffset = 1; // Must not be ever accessed.
- static const int kDependentCodeOffset = kDescriptorsOffset + kPointerSize;
-#endif
- static const int kWeakCellCacheOffset = kDependentCodeOffset + kPointerSize;
- static const int kSize = kWeakCellCacheOffset + kPointerSize;
-
- // Layout of pointer fields. Heap iteration code relies on them
- // being continuously allocated.
- static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset = kSize;
-
- // Byte offsets within kInstanceSizesOffset.
- static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
- static const int kInObjectPropertiesOrConstructorFunctionIndexByte = 1;
- static const int kInObjectPropertiesOrConstructorFunctionIndexOffset =
- kInstanceSizesOffset + kInObjectPropertiesOrConstructorFunctionIndexByte;
- // Note there is one byte available for use here.
- static const int kUnusedByte = 2;
- static const int kUnusedOffset = kInstanceSizesOffset + kUnusedByte;
- static const int kVisitorIdByte = 3;
- static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
-
-// Byte offsets within kInstanceAttributesOffset attributes.
-#if V8_TARGET_LITTLE_ENDIAN
- // Order instance type and bit field together such that they can be loaded
- // together as a 16-bit word with instance type in the lower 8 bits regardless
- // of endianess. Also provide endian-independent offset to that 16-bit word.
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
- static const int kBitFieldOffset = kInstanceAttributesOffset + 1;
-#else
- static const int kBitFieldOffset = kInstanceAttributesOffset + 0;
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
-#endif
- static const int kInstanceTypeAndBitFieldOffset =
- kInstanceAttributesOffset + 0;
- static const int kBitField2Offset = kInstanceAttributesOffset + 2;
- static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
-
- STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
- Internals::kMapInstanceTypeAndBitFieldOffset);
+#define MAP_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kInstanceSizeInWordsOffset, kUInt8Size) \
+ V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
+ V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
+ V(kVisitorIdOffset, kUInt8Size) \
+ V(kInstanceTypeOffset, kUInt16Size) \
+ V(kBitFieldOffset, kUInt8Size) \
+ V(kBitField2Offset, kUInt8Size) \
+ V(kBitField3Offset, kUInt32Size) \
+ V(k64BitArchPaddingOffset, kPointerSize == kUInt32Size ? 0 : kUInt32Size) \
+ /* Pointer fields. */ \
+ V(kPointerFieldsBeginOffset, 0) \
+ V(kPrototypeOffset, kPointerSize) \
+ V(kConstructorOrBackPointerOffset, kPointerSize) \
+ V(kTransitionsOrPrototypeInfoOffset, kPointerSize) \
+ V(kDescriptorsOffset, kPointerSize) \
+ V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kPointerSize : 0) \
+ V(kDependentCodeOffset, kPointerSize) \
+ V(kWeakCellCacheOffset, kPointerSize) \
+ V(kPointerFieldsEndOffset, 0) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
+#undef MAP_FIELDS
+
+ STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
// Bit positions for bit field.
static const int kHasNonInstancePrototype = 0;
@@ -781,7 +767,7 @@ class Map : public HeapObject {
static const int kIsUndetectable = 4;
static const int kIsAccessCheckNeeded = 5;
static const int kIsConstructor = 6;
- // Bit 7 is free.
+ static const int kHasPrototypeSlot = 7;
// Bit positions for bit field 2
static const int kIsExtensible = 0;
@@ -789,24 +775,6 @@ class Map : public HeapObject {
class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
class ElementsKindBits : public BitField<ElementsKind, 3, 5> {};
- // Derived values from bit field 2
- static const int8_t kMaximumBitField2FastElementValue =
- static_cast<int8_t>((PACKED_ELEMENTS + 1)
- << Map::ElementsKindBits::kShift) -
- 1;
- static const int8_t kMaximumBitField2FastSmiElementValue =
- static_cast<int8_t>((PACKED_SMI_ELEMENTS + 1)
- << Map::ElementsKindBits::kShift) -
- 1;
- static const int8_t kMaximumBitField2FastHoleyElementValue =
- static_cast<int8_t>((HOLEY_ELEMENTS + 1)
- << Map::ElementsKindBits::kShift) -
- 1;
- static const int8_t kMaximumBitField2FastHoleySmiElementValue =
- static_cast<int8_t>((HOLEY_SMI_ELEMENTS + 1)
- << Map::ElementsKindBits::kShift) -
- 1;
-
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset, kSize>
BodyDescriptor;
@@ -821,10 +789,7 @@ class Map : public HeapObject {
// Returns true if given field is unboxed double.
inline bool IsUnboxedDoubleField(FieldIndex index) const;
-#if V8_TRACE_MAPS
- static void TraceTransition(const char* what, Map* from, Map* to, Name* name);
- static void TraceAllTransitions(Map* map);
-#endif
+ void PrintMapDetails(std::ostream& os, JSObject* holder = nullptr);
static inline Handle<Map> AddMissingTransitionsForTesting(
Handle<Map> split_map, Handle<DescriptorArray> descriptors,
@@ -848,6 +813,19 @@ class Map : public HeapObject {
inline bool CanHaveFastTransitionableElementsKind() const;
private:
+ // This byte encodes either the instance size without the in-object slack or
+ // the slack size in properties backing store.
+ // Let H be JSObject::kHeaderSize / kPointerSize.
+ // If value >= H then:
+ // - all field properties are stored in the object.
+ // - there is no property array.
+ // - value * kPointerSize is the actual object size without the slack.
+ // Otherwise:
+ // - there is no slack in the object.
+ // - the property array has value slack slots.
+ // Note that this encoding requires that H = JSObject::kFieldsAdded.
+ DECL_INT_ACCESSORS(used_or_unused_instance_size_in_words)
+
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
Map* LookupElementsTransitionMap(ElementsKind elements_kind);
@@ -864,7 +842,8 @@ class Map : public HeapObject {
bool EquivalentToForTransition(const Map* other) const;
bool EquivalentToForElementsKindTransition(const Map* other) const;
- static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
+ static Handle<Map> RawCopy(Handle<Map> map, int instance_size,
+ int inobject_properties);
static Handle<Map> ShareDescriptor(Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor);
@@ -941,7 +920,8 @@ class NormalizedMapCache : public FixedArray {
MUST_USE_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
PropertyNormalizationMode mode);
- void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
+ void Set(Handle<Map> fast_map, Handle<Map> normalized_map,
+ Handle<WeakCell> normalized_map_weak_cell);
void Clear();
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 7e5310ff3b..7b166598a1 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -22,6 +22,7 @@ ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
ACCESSORS(Module, script, Script, kScriptOffset)
ACCESSORS(Module, exception, Object, kExceptionOffset)
+ACCESSORS(Module, import_meta, Object, kImportMetaOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
SMI_ACCESSORS(Module, dfs_index, kDfsIndexOffset)
SMI_ACCESSORS(Module, dfs_ancestor_index, kDfsAncestorIndexOffset)
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 7d1bf42cfd..4040d05bca 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -848,10 +848,10 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
"JSModuleNamespace");
for (const auto& name : names) {
JSObject::SetNormalizedProperty(
- ns, name, Accessors::ModuleNamespaceEntryInfo(isolate, name, attr),
+ ns, name, Accessors::MakeModuleNamespaceEntryInfo(isolate, name),
PropertyDetails(kAccessor, attr, PropertyCellType::kMutable));
}
- JSObject::PreventExtensions(ns, THROW_ON_ERROR).ToChecked();
+ JSObject::PreventExtensions(ns, kThrowOnError).ToChecked();
// Optimize the namespace object as a prototype, for two reasons:
// - The object's map is guaranteed not to be shared. ICs rely on this.
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index ee59d97bd1..7680f55313 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -78,6 +78,11 @@ class Module : public Struct {
// [script]: Script from which the module originates.
DECL_ACCESSORS(script, Script)
+ // The value of import.meta inside of this module.
+ // Lazily initialized on first access. It's the hole before first access and
+ // a JSObject afterwards.
+ DECL_ACCESSORS(import_meta, Object)
+
// Get the ModuleInfo associated with the code.
inline ModuleInfo* info() const;
@@ -119,7 +124,8 @@ class Module : public Struct {
static const int kDfsAncestorIndexOffset = kDfsIndexOffset + kPointerSize;
static const int kExceptionOffset = kDfsAncestorIndexOffset + kPointerSize;
static const int kScriptOffset = kExceptionOffset + kPointerSize;
- static const int kSize = kScriptOffset + kPointerSize;
+ static const int kImportMetaOffset = kScriptOffset + kPointerSize;
+ static const int kSize = kImportMetaOffset + kPointerSize;
private:
friend class Factory;
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index 610f930116..d59a3f54a3 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -41,7 +41,7 @@ void Name::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
#if V8_TARGET_LITTLE_ENDIAN
- WRITE_UINT32_FIELD(this, kHashFieldSlot + kIntSize, 0);
+ WRITE_UINT32_FIELD(this, kHashFieldSlot + kInt32Size, 0);
#else
WRITE_UINT32_FIELD(this, kHashFieldSlot, 0);
#endif
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index ede1d14fb0..dd5b3692f9 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -57,17 +57,15 @@ class Name : public HeapObject {
DECL_CAST(Name)
DECL_PRINTER(Name)
-#if V8_TRACE_MAPS
void NameShortPrint();
int NameShortPrint(Vector<char> str);
-#endif
// Layout description.
static const int kHashFieldSlot = HeapObject::kHeaderSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
static const int kHashFieldOffset = kHashFieldSlot;
#else
- static const int kHashFieldOffset = kHashFieldSlot + kIntSize;
+ static const int kHashFieldOffset = kHashFieldSlot + kInt32Size;
#endif
static const int kSize = kHashFieldSlot + kPointerSize;
@@ -188,9 +186,8 @@ class Symbol : public Name {
private:
const char* PrivateSymbolToName() const;
-#if V8_TRACE_MAPS
+ // TODO(cbruni): remove once the new maptracer is in place.
friend class Name; // For PrivateSymbolToName.
-#endif
DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
};
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index 4ad2cd2f71..f81dc29504 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#undef DECL_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
#undef DECL_ACCESSORS
@@ -17,6 +18,7 @@
#undef RELAXED_SMI_ACCESSORS
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
+#undef BIT_FIELD_ACCESSORS
#undef TYPE_CHECKER
#undef FIELD_ADDR
#undef FIELD_ADDR_CONST
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index eb192bcd8c..5d367d351f 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -12,17 +12,15 @@
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering sematics.
-#define DECL_BOOLEAN_ACCESSORS(name) \
- inline bool name() const; \
- inline void set_##name(bool value);
+#define DECL_PRIMITIVE_ACCESSORS(name, type) \
+ inline type name() const; \
+ inline void set_##name(type value);
-#define DECL_INT_ACCESSORS(name) \
- inline int name() const; \
- inline void set_##name(int value);
+#define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)
-#define DECL_INT32_ACCESSORS(name) \
- inline int32_t name() const; \
- inline void set_##name(int32_t value);
+#define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)
+
+#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
#define DECL_ACCESSORS(name, type) \
inline type* name() const; \
@@ -285,18 +283,16 @@
#define DECL_VERIFIER(Name)
#endif
-#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
- type* DeoptimizationInputData::name() { \
- return type::cast(get(k##name##Index)); \
- } \
- void DeoptimizationInputData::Set##name(type* value) { \
- set(k##name##Index, value); \
+#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
+ type* DeoptimizationData::name() { return type::cast(get(k##name##Index)); } \
+ void DeoptimizationData::Set##name(type* value) { \
+ set(k##name##Index, value); \
}
#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
- type* DeoptimizationInputData::name(int i) { \
+ type* DeoptimizationData::name(int i) { \
return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
} \
- void DeoptimizationInputData::Set##name(int i, type* value) { \
+ void DeoptimizationData::Set##name(int i, type* value) { \
set(IndexForEntry(i) + k##name##Offset, value); \
}
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 59baa0c5c0..82f835ff0c 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -299,7 +299,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
// Encode the flags.
int flags =
ScopeTypeField::encode(WITH_SCOPE) | CallsSloppyEvalField::encode(false) |
- LanguageModeField::encode(SLOPPY) | DeclarationScopeField::encode(false) |
+ LanguageModeField::encode(LanguageMode::kSloppy) |
+ DeclarationScopeField::encode(false) |
ReceiverVariableField::encode(NONE) | HasNewTargetField::encode(false) |
FunctionVariableField::encode(NONE) | AsmModuleField::encode(false) |
HasSimpleParametersField::encode(true) |
@@ -352,7 +353,7 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
// Encode the flags.
int flags = ScopeTypeField::encode(SCRIPT_SCOPE) |
CallsSloppyEvalField::encode(false) |
- LanguageModeField::encode(SLOPPY) |
+ LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(true) |
ReceiverVariableField::encode(receiver_info) |
FunctionVariableField::encode(function_name_info) |
@@ -413,7 +414,8 @@ bool ScopeInfo::CallsSloppyEval() {
}
LanguageMode ScopeInfo::language_mode() {
- return length() > 0 ? LanguageModeField::decode(Flags()) : SLOPPY;
+ return length() > 0 ? LanguageModeField::decode(Flags())
+ : LanguageMode::kSloppy;
}
bool ScopeInfo::is_declaration_scope() {
@@ -852,8 +854,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
}
// Serialize special exports.
- Handle<FixedArray> special_exports =
- isolate->factory()->NewFixedArray(descr->special_exports().length());
+ Handle<FixedArray> special_exports = isolate->factory()->NewFixedArray(
+ static_cast<int>(descr->special_exports().size()));
{
int i = 0;
for (auto entry : descr->special_exports()) {
@@ -863,8 +865,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
}
// Serialize namespace imports.
- Handle<FixedArray> namespace_imports =
- isolate->factory()->NewFixedArray(descr->namespace_imports().length());
+ Handle<FixedArray> namespace_imports = isolate->factory()->NewFixedArray(
+ static_cast<int>(descr->namespace_imports().size()));
{
int i = 0;
for (auto entry : descr->namespace_imports()) {
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 27f6b83f0d..b03b1e831e 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -129,7 +129,7 @@ class ScopeInfo : public FixedArray {
// Lookup support for serialized scope info. Returns the local context slot
// index for a given slot name if the slot is present; otherwise
// returns a value < 0. The name must be an internalized string.
- // If the slot is present and mode != NULL, sets *mode to the corresponding
+ // If the slot is present and mode != nullptr, sets *mode to the corresponding
// mode for that variable.
static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
VariableMode* mode, InitializationFlag* init_flag,
@@ -289,7 +289,7 @@ class ScopeInfo : public FixedArray {
class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
class CallsSloppyEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {
};
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
class LanguageModeField
: public BitField<LanguageMode, CallsSloppyEvalField::kNext, 1> {};
class DeclarationScopeField
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 6ea98f409e..2544b4e20e 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -71,9 +71,9 @@ bool Script::HasValidSource() {
String* src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
if (src_str->IsOneByteRepresentation()) {
- return ExternalOneByteString::cast(src)->resource() != NULL;
+ return ExternalOneByteString::cast(src)->resource() != nullptr;
} else if (src_str->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(src)->resource() != NULL;
+ return ExternalTwoByteString::cast(src)->resource() != nullptr;
}
return true;
}
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 248f160ea9..0c35933950 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -27,15 +27,13 @@ ACCESSORS(SharedFunctionInfo, raw_name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
kFeedbackMetadataOffset)
-ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
+ACCESSORS(SharedFunctionInfo, instance_class_name, String,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, function_identifier, Object,
kFunctionIdentifierOffset)
-ACCESSORS(SharedFunctionInfo, preparsed_scope_data, Object,
- kPreParsedScopeDataOffset)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type,
is_named_expression,
@@ -84,8 +82,6 @@ AbstractCode* SharedFunctionInfo::abstract_code() {
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_arguments,
- SharedFunctionInfo::UsesArgumentsBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
has_duplicate_parameters,
SharedFunctionInfo::HasDuplicateParametersBit)
@@ -96,6 +92,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, native,
SharedFunctionInfo::IsNativeBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
+ requires_instance_fields_initializer,
+ SharedFunctionInfo::RequiresInstanceFieldsInitializer)
bool SharedFunctionInfo::optimization_disabled() const {
return disable_optimization_reason() != BailoutReason::kNoReason;
@@ -106,12 +105,12 @@ BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
}
LanguageMode SharedFunctionInfo::language_mode() {
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
return construct_language_mode(IsStrictBit::decode(compiler_hints()));
}
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
// We only allow language mode transitions that set the same language mode
// again or go up in the chain:
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
@@ -167,7 +166,7 @@ void SharedFunctionInfo::clear_padding() {
void SharedFunctionInfo::UpdateFunctionMapIndex() {
int map_index = Context::FunctionMapIndex(
- language_mode(), kind(), has_shared_name(), needs_home_object());
+ language_mode(), kind(), true, has_shared_name(), needs_home_object());
set_function_map_index(map_index);
}
@@ -322,12 +321,24 @@ int SharedFunctionInfo::lazy_deserialization_builtin_id() const {
return id;
}
-void SharedFunctionInfo::set_lazy_deserialization_builtin_id(int builtin_id) {
- DCHECK(function_data()->IsUndefined(GetIsolate()) ||
- HasLazyDeserializationBuiltinId());
- DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK(Builtins::IsLazy(builtin_id));
- set_function_data(Smi::FromInt(builtin_id));
+bool SharedFunctionInfo::HasPreParsedScopeData() const {
+ return function_data()->IsPreParsedScopeData();
+}
+
+PreParsedScopeData* SharedFunctionInfo::preparsed_scope_data() const {
+ DCHECK(HasPreParsedScopeData());
+ return PreParsedScopeData::cast(function_data());
+}
+
+void SharedFunctionInfo::set_preparsed_scope_data(
+ PreParsedScopeData* preparsed_scope_data) {
+ DCHECK(function_data()->IsUndefined(GetIsolate()));
+ set_function_data(preparsed_scope_data);
+}
+
+void SharedFunctionInfo::ClearPreParsedScopeData() {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasPreParsedScopeData());
+ set_function_data(GetHeap()->undefined_value());
}
bool SharedFunctionInfo::HasBuiltinFunctionId() {
@@ -351,9 +362,9 @@ String* SharedFunctionInfo::inferred_name() {
if (HasInferredName()) {
return String::cast(function_identifier());
}
- Isolate* isolate = GetIsolate();
- DCHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId());
- return isolate->heap()->empty_string();
+ DCHECK(function_identifier()->IsUndefined(GetIsolate()) ||
+ HasBuiltinFunctionId());
+ return GetHeap()->empty_string();
}
void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
@@ -372,10 +383,6 @@ bool SharedFunctionInfo::IsSubjectToDebugging() {
return IsUserJavaScript() && !HasAsmWasmData();
}
-bool SharedFunctionInfo::HasPreParsedScopeData() const {
- return preparsed_scope_data()->IsPreParsedScopeData();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 57c07f9b13..a43c2a12b7 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -14,6 +14,7 @@
namespace v8 {
namespace internal {
+class BytecodeArray;
class CoverageInfo;
class DebugInfo;
@@ -136,7 +137,7 @@ class SharedFunctionInfo : public HeapObject {
#endif
// [instance class name]: class name for instances.
- DECL_ACCESSORS(instance_class_name, Object)
+ DECL_ACCESSORS(instance_class_name, String)
// [function data]: This field holds some additional data for function.
// Currently it has one of:
@@ -144,6 +145,7 @@ class SharedFunctionInfo : public HeapObject {
// - a BytecodeArray for the interpreter [HasBytecodeArray()].
// - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
// - a Smi containing the builtin id [HasLazyDeserializationBuiltinId()]
+ // - a PreParsedScopeData for the parser [HasPreParsedScopeData()]
DECL_ACCESSORS(function_data, Object)
inline bool IsApiFunction();
@@ -164,7 +166,10 @@ class SharedFunctionInfo : public HeapObject {
// mainly used during optimization).
inline bool HasLazyDeserializationBuiltinId() const;
inline int lazy_deserialization_builtin_id() const;
- inline void set_lazy_deserialization_builtin_id(int builtin_id);
+ inline bool HasPreParsedScopeData() const;
+ inline PreParsedScopeData* preparsed_scope_data() const;
+ inline void set_preparsed_scope_data(PreParsedScopeData* data);
+ inline void ClearPreParsedScopeData();
// [function identifier]: This field holds an additional identifier for the
// function.
@@ -210,11 +215,6 @@ class SharedFunctionInfo : public HeapObject {
// [debug info]: Debug information.
DECL_ACCESSORS(debug_info, Object)
- // PreParsedScopeData or null.
- DECL_ACCESSORS(preparsed_scope_data, Object)
-
- inline bool HasPreParsedScopeData() const;
-
// Bit field containing various information collected for debugging.
// This field is either stored on the kDebugInfo slot or inside the
// debug info struct.
@@ -288,9 +288,6 @@ class SharedFunctionInfo : public HeapObject {
inline LanguageMode language_mode();
inline void set_language_mode(LanguageMode language_mode);
- // False if the function definitely does not allocate an arguments object.
- DECL_BOOLEAN_ACCESSORS(uses_arguments)
-
// True if the function has any duplicated parameter names.
DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
@@ -331,6 +328,12 @@ class SharedFunctionInfo : public HeapObject {
// shared function info.
void DisableOptimization(BailoutReason reason);
+ // This class constructor needs to call out to an instance fields
+ // initializer. This flag is set when creating the
+ // SharedFunctionInfo as a reminder to emit the initializer call
+ // when generating code later.
+ DECL_BOOLEAN_ACCESSORS(requires_instance_fields_initializer)
+
// [source code]: Source code for the function.
bool HasSourceCode() const;
Handle<Object> GetSourceCode();
@@ -424,7 +427,6 @@ class SharedFunctionInfo : public HeapObject {
V(kDebugInfoOffset, kPointerSize) \
V(kFunctionIdentifierOffset, kPointerSize) \
V(kFeedbackMetadataOffset, kPointerSize) \
- V(kPreParsedScopeDataOffset, kPointerSize) \
V(kEndOfPointerFieldsOffset, 0) \
/* Raw data fields. */ \
V(kFunctionLiteralIdOffset, kInt32Size) \
@@ -460,18 +462,18 @@ class SharedFunctionInfo : public HeapObject {
#undef START_POSITION_AND_TYPE_BIT_FIELDS
// Bit positions in |compiler_hints|.
-#define COMPILER_HINTS_BIT_FIELDS(V, _) \
- V(IsNativeBit, bool, 1, _) \
- V(IsStrictBit, bool, 1, _) \
- V(FunctionKindBits, FunctionKind, 10, _) \
- V(HasDuplicateParametersBit, bool, 1, _) \
- V(AllowLazyCompilationBit, bool, 1, _) \
- V(UsesArgumentsBit, bool, 1, _) \
- V(NeedsHomeObjectBit, bool, 1, _) \
- V(IsDeclarationBit, bool, 1, _) \
- V(IsAsmWasmBrokenBit, bool, 1, _) \
- V(FunctionMapIndexBits, int, 5, _) \
- V(DisabledOptimizationReasonBits, BailoutReason, 7, _)
+#define COMPILER_HINTS_BIT_FIELDS(V, _) \
+ V(IsNativeBit, bool, 1, _) \
+ V(IsStrictBit, bool, 1, _) \
+ V(FunctionKindBits, FunctionKind, 10, _) \
+ V(HasDuplicateParametersBit, bool, 1, _) \
+ V(AllowLazyCompilationBit, bool, 1, _) \
+ V(NeedsHomeObjectBit, bool, 1, _) \
+ V(IsDeclarationBit, bool, 1, _) \
+ V(IsAsmWasmBrokenBit, bool, 1, _) \
+ V(FunctionMapIndexBits, int, 5, _) \
+ V(DisabledOptimizationReasonBits, BailoutReason, 7, _) \
+ V(RequiresInstanceFieldsInitializer, bool, 1, _)
DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
#undef COMPILER_HINTS_BIT_FIELDS
@@ -500,16 +502,18 @@ class SharedFunctionInfo : public HeapObject {
DEFINE_BIT_FIELDS(DEBUGGER_HINTS_BIT_FIELDS)
#undef DEBUGGER_HINTS_BIT_FIELDS
+ // Indicates that this function uses a super property (or an eval that may
+ // use a super property).
+ // This is needed to set up the [[HomeObject]] on the function instance.
+ inline bool needs_home_object() const;
+
private:
// [raw_name]: Function name string or kNoSharedNameSentinel.
DECL_ACCESSORS(raw_name, Object)
inline void set_kind(FunctionKind kind);
- // Indicates that this function uses a super property (or an eval that may
- // use a super property).
- // This is needed to set up the [[HomeObject]] on the function instance.
- DECL_BOOLEAN_ACCESSORS(needs_home_object)
+ inline void set_needs_home_object(bool value);
friend class Factory;
friend class V8HeapExplorer;
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index a30b65da95..dd75210a54 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -35,17 +35,17 @@ CAST_ACCESSOR(ThinString)
StringShape::StringShape(const String* str)
: type_(str->map()->instance_type()) {
set_valid();
- DCHECK((type_ & kIsNotStringMask) == kStringTag);
+ DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
StringShape::StringShape(Map* map) : type_(map->instance_type()) {
set_valid();
- DCHECK((type_ & kIsNotStringMask) == kStringTag);
+ DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
set_valid();
- DCHECK((type_ & kIsNotStringMask) == kStringTag);
+ DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
bool StringShape::IsInternalized() {
@@ -378,25 +378,25 @@ ConsString* String::VisitFlat(Visitor* visitor, String* string,
visitor->VisitOneByteString(
SeqOneByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return NULL;
+ return nullptr;
case kSeqStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
SeqTwoByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return NULL;
+ return nullptr;
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
ExternalOneByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return NULL;
+ return nullptr;
case kExternalStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return NULL;
+ return nullptr;
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
@@ -546,7 +546,7 @@ void ExternalOneByteString::set_resource(
DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
*reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
resource;
- if (resource != NULL) update_data_cache();
+ if (resource != nullptr) update_data_cache();
}
const uint8_t* ExternalOneByteString::GetChars() {
@@ -573,7 +573,7 @@ void ExternalTwoByteString::set_resource(
const ExternalTwoByteString::Resource* resource) {
*reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
resource;
- if (resource != NULL) update_data_cache();
+ if (resource != nullptr) update_data_cache();
}
const uint16_t* ExternalTwoByteString::GetChars() { return resource()->data(); }
@@ -604,13 +604,13 @@ void ConsStringIterator::AdjustMaximumDepth() {
}
void ConsStringIterator::Pop() {
- DCHECK(depth_ > 0);
+ DCHECK_GT(depth_, 0);
DCHECK(depth_ <= maximum_depth_);
depth_--;
}
uint16_t StringCharacterStream::GetNext() {
- DCHECK(buffer8_ != NULL && end_ != NULL);
+ DCHECK(buffer8_ != nullptr && end_ != nullptr);
// Advance cursor if needed.
if (buffer8_ == end_) HasMore();
DCHECK(buffer8_ < end_);
@@ -623,13 +623,13 @@ StringCharacterStream::StringCharacterStream(String* string, int offset)
}
void StringCharacterStream::Reset(String* string, int offset) {
- buffer8_ = NULL;
- end_ = NULL;
+ buffer8_ = nullptr;
+ end_ = nullptr;
ConsString* cons_string = String::VisitFlat(this, string, offset);
iter_.Reset(cons_string, offset);
- if (cons_string != NULL) {
+ if (cons_string != nullptr) {
string = iter_.Next(&offset);
- if (string != NULL) String::VisitFlat(this, string, offset);
+ if (string != nullptr) String::VisitFlat(this, string, offset);
}
}
@@ -638,7 +638,7 @@ bool StringCharacterStream::HasMore() {
int offset;
String* string = iter_.Next(&offset);
DCHECK_EQ(offset, 0);
- if (string == NULL) return false;
+ if (string == nullptr) return false;
String::VisitFlat(this, string);
DCHECK(buffer8_ != end_);
return true;
@@ -666,30 +666,6 @@ bool String::AsArrayIndex(uint32_t* index) {
return SlowAsArrayIndex(index);
}
-void String::SetForwardedInternalizedString(String* canonical) {
- DCHECK(IsInternalizedString());
- DCHECK(HasHashCode());
- if (canonical == this) return; // No need to forward.
- DCHECK(SlowEquals(canonical));
- DCHECK(canonical->IsInternalizedString());
- DCHECK(canonical->HasHashCode());
- WRITE_FIELD(this, kHashFieldSlot, canonical);
- // Setting the hash field to a tagged value sets the LSB, causing the hash
- // code to be interpreted as uninitialized. We use this fact to recognize
- // that we have a forwarded string.
- DCHECK(!HasHashCode());
-}
-
-String* String::GetForwardedInternalizedString() {
- DCHECK(IsInternalizedString());
- if (HasHashCode()) return this;
- String* canonical = String::cast(READ_FIELD(this, kHashFieldSlot));
- DCHECK(canonical->IsInternalizedString());
- DCHECK(SlowEquals(canonical));
- DCHECK(canonical->HasHashCode());
- return canonical;
-}
-
String::SubStringRange::SubStringRange(String* string, int first, int length)
: string_(string),
first_(first),
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 88a86dfcdf..382fe06bf4 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -42,6 +42,8 @@ class StringTableShape : public BaseShape<StringTableKey*> {
static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
+ static inline int GetMapRootIndex();
+
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
};
@@ -59,7 +61,8 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
Handle<String> key);
static Handle<String> LookupKey(Isolate* isolate, StringTableKey* key);
- static String* LookupKeyIfExists(Isolate* isolate, StringTableKey* key);
+ static String* ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
+ String* string);
// Looks up a string that is equal to the given string and returns
// string handle if it is found, or an empty handle otherwise.
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 1a9baa61b7..f21171d62f 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+class BigInt;
+
enum AllowNullsFlag { ALLOW_NULLS, DISALLOW_NULLS };
enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
@@ -138,7 +140,7 @@ class String : public Name {
: onebyte_start(start), length_(length), state_(ONE_BYTE) {}
explicit FlatContent(const uc16* start, int length)
: twobyte_start(start), length_(length), state_(TWO_BYTE) {}
- FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) {}
+ FlatContent() : onebyte_start(nullptr), length_(0), state_(NON_FLAT) {}
union {
const uint8_t* onebyte_start;
@@ -385,7 +387,7 @@ class String : public Name {
++chars;
}
// Check aligned words.
- DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
+ DCHECK_EQ(unibrow::Utf8::kMaxOneByteChar, 0x7F);
const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
@@ -435,11 +437,6 @@ class String : public Name {
static Handle<FixedArray> CalculateLineEnds(Handle<String> string,
bool include_ending_line);
- // Use the hash field to forward to the canonical internalized string
- // when deserializing an internalized string.
- inline void SetForwardedInternalizedString(String* string);
- inline String* GetForwardedInternalizedString();
-
private:
friend class Name;
friend class StringTableInsertionKey;
@@ -513,7 +510,8 @@ class SeqOneByteString : public SeqString {
}
// Maximal memory usage for a single sequential one-byte string.
- static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxLength + kHeaderSize);
+ static const int kMaxCharsSize = kMaxLength;
+ static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
class BodyDescriptor;
@@ -559,8 +557,8 @@ class SeqTwoByteString : public SeqString {
}
// Maximal memory usage for a single sequential two-byte string.
- static const int kMaxSize =
- OBJECT_POINTER_ALIGN(kMaxLength * 2 + kHeaderSize);
+ static const int kMaxCharsSize = kMaxLength * 2;
+ static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize) / sizeof(uint16_t)) >=
String::kMaxLength);
@@ -826,14 +824,14 @@ class ConsStringIterator {
}
inline void Reset(ConsString* cons_string, int offset = 0) {
depth_ = 0;
- // Next will always return NULL.
- if (cons_string == NULL) return;
+ // Next will always return nullptr.
+ if (cons_string == nullptr) return;
Initialize(cons_string, offset);
}
- // Returns NULL when complete.
+ // Returns nullptr when complete.
inline String* Next(int* offset_out) {
*offset_out = 0;
- if (depth_ == 0) return NULL;
+ if (depth_ == 0) return nullptr;
return Continue(offset_out);
}
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 5e374b4722..24f306aa68 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -54,8 +54,7 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
cooked_strings, PACKED_ELEMENTS, cooked_strings->length(), TENURED);
// Freeze the {raw_object}.
- JSObject::SetIntegrityLevel(raw_object, FROZEN, Object::THROW_ON_ERROR)
- .ToChecked();
+ JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
// Install a "raw" data property for {raw_object} on {template_object}.
PropertyDescriptor raw_desc;
@@ -65,11 +64,11 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
raw_desc.set_writable(false);
JSArray::DefineOwnProperty(isolate, template_object,
isolate->factory()->raw_string(), &raw_desc,
- Object::THROW_ON_ERROR)
+ kThrowOnError)
.ToChecked();
// Freeze the {template_object} as well.
- JSObject::SetIntegrityLevel(template_object, FROZEN, Object::THROW_ON_ERROR)
+ JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
.ToChecked();
// Remember the {template_object} in the {template_map}.
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/parsing/background-parsing-task.cc
index 9d155d5af8..387cd3a1c6 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/parsing/background-parsing-task.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/background-parsing-task.h"
+#include "src/parsing/background-parsing-task.h"
+#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
@@ -20,7 +21,10 @@ void StreamedSource::Release() {
BackgroundParsingTask::BackgroundParsingTask(
StreamedSource* source, ScriptCompiler::CompileOptions options,
int stack_size, Isolate* isolate)
- : source_(source), stack_size_(stack_size), script_data_(nullptr) {
+ : source_(source),
+ stack_size_(stack_size),
+ script_data_(nullptr),
+ timer_(isolate->counters()->compile_script_on_background()) {
// We don't set the context to the CompilationInfo yet, because the background
// thread cannot do anything with it anyway. We set it just before compilation
// on the foreground thread.
@@ -37,6 +41,8 @@ BackgroundParsingTask::BackgroundParsingTask(
info->InitFromIsolate(isolate);
if (V8_UNLIKELY(FLAG_runtime_stats)) {
info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
+ } else {
+ info->set_runtime_call_stats(nullptr);
}
info->set_toplevel();
std::unique_ptr<Utf16CharacterStream> stream(
@@ -50,8 +56,12 @@ BackgroundParsingTask::BackgroundParsingTask(
info->AllocateSourceRangeMap();
}
info->set_cached_data(&script_data_);
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+ info->set_language_mode(
+ stricter_language_mode(info->language_mode(), language_mode));
source->info.reset(info);
+ allocator_ = isolate->allocator();
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
@@ -61,16 +71,26 @@ BackgroundParsingTask::BackgroundParsingTask(
}
void BackgroundParsingTask::Run() {
+ TimedHistogramScope timer(timer_);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
+ source_->info->set_on_background_thread(true);
+
// Reset the stack limit of the parser to reflect correctly that we're on a
// background thread.
+ uintptr_t old_stack_limit = source_->info->stack_limit();
uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
+ source_->info->set_stack_limit(stack_limit);
source_->parser->set_stack_limit(stack_limit);
source_->parser->ParseOnBackground(source_->info.get());
+ if (FLAG_background_compile && source_->info->literal() != nullptr) {
+ // Parsing has succeeded, compile.
+ source_->outer_function_job = Compiler::CompileTopLevelOnBackgroundThread(
+ source_->info.get(), allocator_, &source_->inner_function_jobs);
+ }
if (script_data_ != nullptr) {
source_->cached_data.reset(new ScriptCompiler::CachedData(
@@ -80,6 +100,11 @@ void BackgroundParsingTask::Run() {
delete script_data_;
script_data_ = nullptr;
}
+
+ source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
+
+ source_->info->set_on_background_thread(false);
+ source_->info->set_stack_limit(old_stack_limit);
}
} // namespace internal
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/parsing/background-parsing-task.h
index 061e36595d..eb3ed61e2e 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/parsing/background-parsing-task.h
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BACKGROUND_PARSING_TASK_H_
-#define V8_BACKGROUND_PARSING_TASK_H_
+#ifndef V8_PARSING_BACKGROUND_PARSING_TASK_H_
+#define V8_PARSING_BACKGROUND_PARSING_TASK_H_
#include <memory>
#include "include/v8.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
+#include "src/compiler.h"
#include "src/parsing/parse-info.h"
#include "src/unicode-cache.h"
@@ -18,6 +19,7 @@ namespace internal {
class Parser;
class ScriptData;
+class TimedHistogram;
// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
// data which needs to be transmitted between threads for background parsing,
@@ -41,12 +43,15 @@ struct StreamedSource {
std::unique_ptr<ParseInfo> info;
std::unique_ptr<Parser> parser;
+ // Data needed for finalizing compilation after background compilation.
+ std::unique_ptr<CompilationJob> outer_function_job;
+ CompilationJobList inner_function_jobs;
+
// Prevent copying.
StreamedSource(const StreamedSource&) = delete;
StreamedSource& operator=(const StreamedSource&) = delete;
};
-
class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
public:
BackgroundParsingTask(StreamedSource* source,
@@ -59,8 +64,11 @@ class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
StreamedSource* source_; // Not owned.
int stack_size_;
ScriptData* script_data_;
+ AccountingAllocator* allocator_;
+ TimedHistogram* timer_;
};
+
} // namespace internal
} // namespace v8
-#endif // V8_BACKGROUND_PARSING_TASK_H_
+#endif // V8_PARSING_BACKGROUND_PARSING_TASK_H_
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index d2dc6fa170..6c6c813b3e 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -424,7 +424,7 @@ class ExpressionClassifier {
typename Types::Base* base_;
ExpressionClassifier* previous_;
Zone* zone_;
- ZoneList<typename Types::Expression>* non_patterns_to_rewrite_;
+ ZoneList<typename Types::RewritableExpression>* non_patterns_to_rewrite_;
ZoneList<Error>* reported_errors_;
DuplicateFinder* duplicate_finder_;
// The uint16_t for non_pattern_begin_ will not be enough in the case,
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index 6ca29611c1..18c52add11 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -27,6 +27,7 @@ class Reparenter final : public AstTraversalVisitor<Reparenter> {
void VisitFunctionLiteral(FunctionLiteral* expr);
void VisitClassLiteral(ClassLiteral* expr);
void VisitVariableProxy(VariableProxy* expr);
+ void VisitRewritableExpression(RewritableExpression* expr);
void VisitBlock(Block* stmt);
void VisitTryCatchStatement(TryCatchStatement* stmt);
@@ -45,6 +46,12 @@ void Reparenter::VisitClassLiteral(ClassLiteral* class_literal) {
// scope on its scope chain.
DCHECK_EQ(class_literal->constructor()->scope()->outer_scope(),
class_literal->scope());
+
+ if (class_literal->static_fields_initializer() != nullptr) {
+ DCHECK_EQ(
+ class_literal->static_fields_initializer()->scope()->outer_scope(),
+ class_literal->scope());
+ }
#if DEBUG
// The same goes for the rest of the class, but we do some
// sanity checking in debug mode.
@@ -72,6 +79,11 @@ void Reparenter::VisitVariableProxy(VariableProxy* proxy) {
}
}
+void Reparenter::VisitRewritableExpression(RewritableExpression* expr) {
+ Visit(expr->expression());
+ expr->set_scope(scope_);
+}
+
void Reparenter::VisitBlock(Block* stmt) {
if (stmt->scope() != nullptr)
stmt->scope()->ReplaceOuterScope(scope_);
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index a6cc179b82..b55c5ddd5d 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -46,7 +46,7 @@ void FuncNameInferrer::PushVariableName(const AstRawString* name) {
void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
if (IsOpen()) {
- CHECK(names_stack_.length() > 0);
+ CHECK_GT(names_stack_.length(), 0);
CHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
names_stack_.RemoveLast();
}
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index f17916ccb6..1c9d648a1e 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -53,13 +53,16 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
set_end_position(shared->end_position());
function_literal_id_ = shared->function_literal_id();
set_language_mode(shared->language_mode());
- set_module(shared->kind() == FunctionKind::kModule);
set_asm_wasm_broken(shared->is_asm_wasm_broken());
+ set_requires_instance_fields_initializer(
+ shared->requires_instance_fields_initializer());
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
set_native(script->type() == Script::TYPE_NATIVE);
set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ set_module(script->origin_options().IsModule());
+ DCHECK(!(is_eval() && is_module()));
Handle<HeapObject> scope_info(shared->outer_scope_info());
if (!scope_info->IsTheHole(isolate) &&
@@ -90,6 +93,8 @@ ParseInfo::ParseInfo(Handle<Script> script)
set_native(script->type() == Script::TYPE_NATIVE);
set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ set_module(script->origin_options().IsModule());
+ DCHECK(!(is_eval() && is_module()));
set_collect_type_profile(script->GetIsolate()->is_collecting_type_profile() &&
script->IsUserJavaScript());
@@ -114,7 +119,6 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
p->set_end_position(shared->end_position());
p->function_literal_id_ = shared->function_literal_id();
p->set_language_mode(shared->language_mode());
- p->set_module(shared->kind() == FunctionKind::kModule);
// BUG(5946): This function exists as a workaround until we can
// get rid of %SetCode in our native functions. The ParseInfo
@@ -126,6 +130,8 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
// We tolerate a ParseInfo without a Script in this case.
p->set_native(true);
p->set_eval(false);
+ p->set_module(false);
+ DCHECK_NE(shared->kind(), FunctionKind::kModule);
Handle<HeapObject> scope_info(shared->outer_scope_info());
if (!scope_info->IsTheHole(isolate) &&
@@ -151,12 +157,27 @@ void ParseInfo::InitFromIsolate(Isolate* isolate) {
set_stack_limit(isolate->stack_guard()->real_climit());
set_unicode_cache(isolate->unicode_cache());
set_runtime_call_stats(isolate->counters()->runtime_call_stats());
+ set_logger(isolate->logger());
set_ast_string_constants(isolate->ast_string_constants());
if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
if (isolate->is_collecting_type_profile()) set_collect_type_profile();
}
-void ParseInfo::UpdateStatisticsAfterBackgroundParse(Isolate* isolate) {
+void ParseInfo::EmitBackgroundParseStatisticsOnBackgroundThread() {
+ // If runtime call stats was enabled by tracing, emit a trace event at the
+ // end of background parsing on the background thread.
+ if (runtime_call_stats_ &&
+ (FLAG_runtime_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ auto value = v8::tracing::TracedValue::Create();
+ runtime_call_stats_->Dump(value.get());
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
+ "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
+ "runtime-call-stats", std::move(value));
+ }
+}
+
+void ParseInfo::UpdateBackgroundParseStatisticsOnMainThread(Isolate* isolate) {
// Copy over the counters from the background thread to the main counters on
// the isolate.
RuntimeCallStats* main_call_stats = isolate->counters()->runtime_call_stats();
@@ -197,7 +218,7 @@ void ParseInfo::ResetCharacterStream() { character_stream_.reset(); }
void ParseInfo::set_character_stream(
std::unique_ptr<Utf16CharacterStream> character_stream) {
- DCHECK(character_stream_.get() == nullptr);
+ DCHECK_NULL(character_stream_);
character_stream_.swap(character_stream);
}
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 1426f94bbf..9deea1ecac 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -13,6 +13,7 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/parsing/preparsed-scope-data.h"
+#include "src/pending-compilation-error-handler.h"
namespace v8 {
@@ -27,6 +28,7 @@ class AstValueFactory;
class DeclarationScope;
class FunctionLiteral;
class RuntimeCallStats;
+class Logger;
class ScriptData;
class SourceRangeMap;
class UnicodeCache;
@@ -73,13 +75,17 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
set_is_named_expression)
- FLAG_ACCESSOR(kSerializing, will_serialize, set_will_serialize)
FLAG_ACCESSOR(kLazyCompile, lazy_compile, set_lazy_compile)
FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
set_collect_type_profile)
FLAG_ACCESSOR(kIsAsmWasmBroken, is_asm_wasm_broken, set_asm_wasm_broken)
+ FLAG_ACCESSOR(kRequiresInstanceFieldsInitializer,
+ requires_instance_fields_initializer,
+ set_requires_instance_fields_initializer)
FLAG_ACCESSOR(kBlockCoverageEnabled, block_coverage_enabled,
set_block_coverage_enabled)
+ FLAG_ACCESSOR(kOnBackgroundThread, on_background_thread,
+ set_on_background_thread)
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -186,6 +192,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
void set_runtime_call_stats(RuntimeCallStats* runtime_call_stats) {
runtime_call_stats_ = runtime_call_stats;
}
+ Logger* logger() const { return logger_; }
+ void set_logger(Logger* logger) { logger_ = logger; }
void AllocateSourceRangeMap();
SourceRangeMap* source_range_map() const { return source_range_map_; }
@@ -193,6 +201,10 @@ class V8_EXPORT_PRIVATE ParseInfo {
source_range_map_ = source_range_map;
}
+ PendingCompilationErrorHandler* pending_error_handler() {
+ return &pending_error_handler_;
+ }
+
// Getters for individual compiler hints.
bool is_declaration() const;
FunctionKind function_kind() const;
@@ -215,7 +227,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
return construct_language_mode(is_strict_mode());
}
void set_language_mode(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 2);
+ STATIC_ASSERT(LanguageModeSize == 2);
set_strict_mode(is_strict(language_mode));
}
@@ -229,7 +241,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
}
}
- void UpdateStatisticsAfterBackgroundParse(Isolate* isolate);
+ void EmitBackgroundParseStatisticsOnBackgroundThread();
+ void UpdateBackgroundParseStatisticsOnMainThread(Isolate* isolate);
private:
// Various configuration flags for parsing.
@@ -244,11 +257,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
kModule = 1 << 6,
kAllowLazyParsing = 1 << 7,
kIsNamedExpression = 1 << 8,
- kSerializing = 1 << 9,
- kLazyCompile = 1 << 10,
- kCollectTypeProfile = 1 << 11,
- kBlockCoverageEnabled = 1 << 12,
- kIsAsmWasmBroken = 1 << 13,
+ kLazyCompile = 1 << 9,
+ kCollectTypeProfile = 1 << 10,
+ kBlockCoverageEnabled = 1 << 11,
+ kIsAsmWasmBroken = 1 << 12,
+ kRequiresInstanceFieldsInitializer = 1 << 13,
+ kOnBackgroundThread = 1 << 14,
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -279,11 +293,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
const class AstStringConstants* ast_string_constants_;
const AstRawString* function_name_;
RuntimeCallStats* runtime_call_stats_;
+ Logger* logger_;
SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
std::shared_ptr<DeferredHandles> deferred_handles_;
+ PendingCompilationErrorHandler pending_error_handler_;
void SetFlag(Flag f) { flags_ |= f; }
void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index b211b85d2a..c393bc5ec2 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -14,6 +14,7 @@
#include "src/base/hashmap.h"
#include "src/counters.h"
#include "src/globals.h"
+#include "src/log.h"
#include "src/messages.h"
#include "src/parsing/expression-classifier.h"
#include "src/parsing/func-name-inferrer.h"
@@ -236,6 +237,7 @@ class ParserBase {
typedef typename Types::ObjectLiteralProperty ObjectLiteralPropertyT;
typedef typename Types::ClassLiteralProperty ClassLiteralPropertyT;
typedef typename Types::Suspend SuspendExpressionT;
+ typedef typename Types::RewritableExpression RewritableExpressionT;
typedef typename Types::ExpressionList ExpressionListT;
typedef typename Types::FormalParameters FormalParametersT;
typedef typename Types::Statement StatementT;
@@ -251,8 +253,9 @@ class ParserBase {
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
- RuntimeCallStats* runtime_call_stats,
- bool parsing_on_main_thread = true)
+ PendingCompilationErrorHandler* pending_error_handler,
+ RuntimeCallStats* runtime_call_stats, Logger* logger,
+ int script_id, bool parsing_module, bool parsing_on_main_thread)
: scope_(nullptr),
original_scope_(nullptr),
function_state_(nullptr),
@@ -261,25 +264,24 @@ class ParserBase {
ast_value_factory_(ast_value_factory),
ast_node_factory_(ast_value_factory, zone),
runtime_call_stats_(runtime_call_stats),
+ logger_(logger),
parsing_on_main_thread_(parsing_on_main_thread),
- parsing_module_(false),
+ parsing_module_(parsing_module),
stack_limit_(stack_limit),
+ pending_error_handler_(pending_error_handler),
zone_(zone),
classifier_(nullptr),
scanner_(scanner),
- stack_overflow_(false),
default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
function_literal_id_(0),
+ script_id_(script_id),
allow_natives_(false),
allow_harmony_do_expressions_(false),
allow_harmony_function_sent_(false),
- allow_harmony_restrictive_generators_(false),
- allow_harmony_class_fields_(false),
- allow_harmony_object_rest_spread_(false),
+ allow_harmony_public_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
- allow_harmony_async_iteration_(false),
- allow_harmony_template_escapes_(false) {}
+ allow_harmony_async_iteration_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -288,16 +290,20 @@ class ParserBase {
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_sent);
- ALLOW_ACCESSORS(harmony_restrictive_generators);
- ALLOW_ACCESSORS(harmony_class_fields);
- ALLOW_ACCESSORS(harmony_object_rest_spread);
+ ALLOW_ACCESSORS(harmony_public_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
ALLOW_ACCESSORS(harmony_async_iteration);
- ALLOW_ACCESSORS(harmony_template_escapes);
#undef ALLOW_ACCESSORS
+ bool allow_harmony_bigint() const {
+ return scanner()->allow_harmony_bigint();
+ }
+ void set_allow_harmony_bigint(bool allow) {
+ scanner()->set_allow_harmony_bigint(allow);
+ }
+
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
@@ -366,15 +372,6 @@ class ParserBase {
Scope* const outer_scope_;
};
- struct DestructuringAssignment {
- public:
- DestructuringAssignment(ExpressionT expression, Scope* scope)
- : assignment(expression), scope(scope) {}
-
- ExpressionT assignment;
- Scope* scope;
- };
-
class FunctionState final : public BlockState {
public:
FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
@@ -396,12 +393,12 @@ class ParserBase {
void SetDestructuringAssignmentsScope(int pos, Scope* scope) {
for (int i = pos; i < destructuring_assignments_to_rewrite_.length();
++i) {
- destructuring_assignments_to_rewrite_[i].scope = scope;
+ destructuring_assignments_to_rewrite_[i]->set_scope(scope);
}
}
- const ZoneList<DestructuringAssignment>&
- destructuring_assignments_to_rewrite() const {
+ const ZoneList<RewritableExpressionT>&
+ destructuring_assignments_to_rewrite() const {
return destructuring_assignments_to_rewrite_;
}
@@ -409,7 +406,7 @@ class ParserBase {
return &reported_errors_;
}
- ZoneList<ExpressionT>* non_patterns_to_rewrite() {
+ ZoneList<RewritableExpressionT>* non_patterns_to_rewrite() {
return &non_patterns_to_rewrite_;
}
@@ -450,15 +447,16 @@ class ParserBase {
};
private:
- void AddDestructuringAssignment(DestructuringAssignment pair) {
- destructuring_assignments_to_rewrite_.Add(pair, scope_->zone());
+ void AddDestructuringAssignment(RewritableExpressionT expr) {
+ destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
}
- void AddNonPatternForRewriting(ExpressionT expr, bool* ok) {
+ void AddNonPatternForRewriting(RewritableExpressionT expr, bool* ok) {
non_patterns_to_rewrite_.Add(expr, scope_->zone());
if (non_patterns_to_rewrite_.length() >=
- std::numeric_limits<uint16_t>::max())
+ std::numeric_limits<uint16_t>::max()) {
*ok = false;
+ }
}
// Properties count estimation.
@@ -468,8 +466,8 @@ class ParserBase {
FunctionState* outer_function_state_;
DeclarationScope* scope_;
- ZoneList<DestructuringAssignment> destructuring_assignments_to_rewrite_;
- ZoneList<ExpressionT> non_patterns_to_rewrite_;
+ ZoneList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
+ ZoneList<RewritableExpressionT> non_patterns_to_rewrite_;
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
@@ -556,21 +554,43 @@ class ParserBase {
: variable(nullptr),
extends(parser->impl()->NullExpression()),
properties(parser->impl()->NewClassPropertyList(4)),
+ static_fields(parser->impl()->NewClassPropertyList(4)),
+ instance_fields(parser->impl()->NewClassPropertyList(4)),
constructor(parser->impl()->NullExpression()),
has_seen_constructor(false),
has_name_static_property(false),
has_static_computed_names(false),
- is_anonymous(false) {}
+ has_static_class_fields(false),
+ has_instance_class_fields(false),
+ is_anonymous(false),
+ static_fields_scope(nullptr),
+ instance_fields_scope(nullptr),
+ computed_field_count(0) {}
Variable* variable;
ExpressionT extends;
typename Types::ClassPropertyList properties;
+ typename Types::ClassPropertyList static_fields;
+ typename Types::ClassPropertyList instance_fields;
FunctionLiteralT constructor;
+
+ // TODO(gsathya): Use a bitfield store all the booleans.
bool has_seen_constructor;
bool has_name_static_property;
bool has_static_computed_names;
+ bool has_static_class_fields;
+ bool has_instance_class_fields;
bool is_anonymous;
+ DeclarationScope* static_fields_scope;
+ DeclarationScope* instance_fields_scope;
+ int computed_field_count;
};
+ const AstRawString* ClassFieldVariableName(AstValueFactory* ast_value_factory,
+ int index) {
+ std::string name = ".class-field-" + std::to_string(index);
+ return ast_value_factory->GetOneByteString(name.c_str());
+ }
+
DeclarationScope* NewScriptScope() const {
return new (zone()) DeclarationScope(zone(), ast_value_factory());
}
@@ -635,11 +655,15 @@ class ParserBase {
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
int position() const { return scanner_->location().beg_pos; }
int peek_position() const { return scanner_->peek_location().beg_pos; }
- bool stack_overflow() const { return stack_overflow_; }
- void set_stack_overflow() { stack_overflow_ = true; }
+ bool stack_overflow() const {
+ return pending_error_handler()->stack_overflow();
+ }
+ void set_stack_overflow() { pending_error_handler()->set_stack_overflow(); }
+ int script_id() { return script_id_; }
+ void set_script_id(int id) { script_id_ = id; }
INLINE(Token::Value peek()) {
- if (stack_overflow_) return Token::ILLEGAL;
+ if (stack_overflow()) return Token::ILLEGAL;
return scanner()->peek();
}
@@ -651,18 +675,18 @@ class ParserBase {
}
INLINE(Token::Value PeekAhead()) {
- if (stack_overflow_) return Token::ILLEGAL;
+ if (stack_overflow()) return Token::ILLEGAL;
return scanner()->PeekAhead();
}
INLINE(Token::Value Next()) {
- if (stack_overflow_) return Token::ILLEGAL;
+ if (stack_overflow()) return Token::ILLEGAL;
{
if (GetCurrentStackPosition() < stack_limit_) {
// Any further calls to Next or peek will return the illegal token.
// The current call must return the next token, which might already
// have been peek'ed.
- stack_overflow_ = true;
+ set_stack_overflow();
}
}
return scanner()->Next();
@@ -874,6 +898,13 @@ class ParserBase {
return IsResumableFunction(function_state_->kind());
}
+ const PendingCompilationErrorHandler* pending_error_handler() const {
+ return pending_error_handler_;
+ }
+ PendingCompilationErrorHandler* pending_error_handler() {
+ return pending_error_handler_;
+ }
+
// Report syntax errors.
void ReportMessage(MessageTemplate::Template message) {
Scanner::Location source_location = scanner()->location();
@@ -1049,8 +1080,8 @@ class ParserBase {
// This method wraps the parsing of the expression inside a new expression
// classifier and calls RewriteNonPattern if parsing is successful.
- // It should be used whenever we're parsing an expression that will be
- // used as a non-pattern (i.e., in most cases).
+ // It should be used whenever we're parsing an expression that is known
+ // to not be a pattern or part of a pattern.
V8_INLINE ExpressionT ParseExpression(bool accept_IN, bool* ok);
// This method does not wrap the parsing of the expression inside a
@@ -1081,11 +1112,12 @@ class ParserBase {
bool* ok);
ExpressionT ParseObjectLiteral(bool* ok);
ClassLiteralPropertyT ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
- bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
- bool* is_static, bool* has_name_static_property, bool* ok);
- FunctionLiteralT ParseClassFieldForInitializer(bool has_initializer,
- bool* ok);
+ ClassLiteralChecker* checker, ClassInfo* class_info, bool has_extends,
+ bool* is_computed_name, bool* has_seen_constructor,
+ ClassLiteralProperty::Kind* property_kind, bool* is_static,
+ bool* has_name_static_property, bool* ok);
+ ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, bool is_static,
+ bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
ObjectLiteralChecker* checker, bool* is_computed_name,
bool* is_rest_property, bool* ok);
@@ -1357,7 +1389,7 @@ class ParserBase {
inline StatementT BuildReturnStatement(ExpressionT expr, int pos,
int end_pos = kNoSourcePosition) {
if (impl()->IsNull(expr)) {
- expr = impl()->GetLiteralUndefined(kNoSourcePosition);
+ expr = factory()->NewUndefinedLiteral(kNoSourcePosition);
} else if (is_async_generator()) {
// In async generators, if there is an explicit operand to the return
// statement, await the operand.
@@ -1399,6 +1431,7 @@ class ParserBase {
void CheckClassMethodName(Token::Value property, PropertyKind type,
bool is_generator, bool is_async, bool is_static,
bool* ok);
+ void CheckClassFieldName(bool is_static, bool* ok);
private:
bool IsConstructor() {
@@ -1478,9 +1511,11 @@ class ParserBase {
AstValueFactory* ast_value_factory_; // Not owned.
typename Types::Factory ast_node_factory_;
RuntimeCallStats* runtime_call_stats_;
+ internal::Logger* logger_;
bool parsing_on_main_thread_;
- bool parsing_module_;
+ const bool parsing_module_;
uintptr_t stack_limit_;
+ PendingCompilationErrorHandler* pending_error_handler_;
// Parser base's private field members.
@@ -1489,22 +1524,19 @@ class ParserBase {
ExpressionClassifier* classifier_;
Scanner* scanner_;
- bool stack_overflow_;
FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
int function_literal_id_;
+ int script_id_;
bool allow_natives_;
bool allow_harmony_do_expressions_;
bool allow_harmony_function_sent_;
- bool allow_harmony_restrictive_generators_;
- bool allow_harmony_class_fields_;
- bool allow_harmony_object_rest_spread_;
+ bool allow_harmony_public_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
bool allow_harmony_async_iteration_;
- bool allow_harmony_template_escapes_;
friend class DiscardableZoneScope;
};
@@ -1549,6 +1581,7 @@ void ParserBase<Impl>::GetUnexpectedTokenMessage(
break;
case Token::SMI:
case Token::NUMBER:
+ case Token::BIGINT:
*message = MessageTemplate::kUnexpectedTokenNumber;
break;
case Token::STRING:
@@ -1590,7 +1623,7 @@ void ParserBase<Impl>::GetUnexpectedTokenMessage(
break;
default:
const char* name = Token::String(token);
- DCHECK(name != NULL);
+ DCHECK_NOT_NULL(name);
*arg = name;
break;
}
@@ -1780,6 +1813,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::FALSE_LITERAL:
case Token::SMI:
case Token::NUMBER:
+ case Token::BIGINT:
BindingPatternUnexpectedToken();
return impl()->ExpressionFromLiteral(Next(), beg_pos);
@@ -1881,7 +1915,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
return ParseTemplateLiteral(impl()->NullExpression(), beg_pos, false, ok);
case Token::MOD:
- if (allow_natives() || extension_ != NULL) {
+ if (allow_natives() || extension_ != nullptr) {
BindingPatternUnexpectedToken();
return ParseV8Intrinsic(ok);
}
@@ -1951,6 +1985,10 @@ ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
if (impl()->IsNull(result)) {
// First time through the loop.
result = right;
+ } else if (impl()->CollapseNaryExpression(&result, right, Token::COMMA,
+ comma_pos,
+ SourceRange::Empty())) {
+ // Do nothing, "result" is already updated.
} else {
result =
factory()->NewBinaryOperation(Token::COMMA, result, right, comma_pos);
@@ -1992,7 +2030,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
while (peek() != Token::RBRACK) {
ExpressionT elem;
if (peek() == Token::COMMA) {
- elem = impl()->GetLiteralTheHole(peek_position());
+ elem = factory()->NewTheHoleLiteral();
} else if (peek() == Token::ELLIPSIS) {
int start_pos = peek_position();
Consume(Token::ELLIPSIS);
@@ -2033,8 +2071,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
ExpressionT result =
factory()->NewArrayLiteral(values, first_spread_index, pos);
if (first_spread_index >= 0) {
- result = factory()->NewRewritableExpression(result);
- impl()->QueueNonPatternForRewriting(result, ok);
+ auto rewritable = factory()->NewRewritableExpression(result, scope());
+ impl()->QueueNonPatternForRewriting(rewritable, ok);
if (!*ok) {
// If the non-pattern rewriting mechanism is used in the future for
// rewriting other things than spreads, this error message will have
@@ -2043,6 +2081,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
ReportMessage(MessageTemplate::kTooManySpreads);
return impl()->NullExpression();
}
+ result = rewritable;
}
return result;
}
@@ -2079,7 +2118,7 @@ template <class Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
IdentifierT* name, PropertyKind* kind, bool* is_generator, bool* is_get,
bool* is_set, bool* is_async, bool* is_computed_name, bool* ok) {
- DCHECK(*kind == PropertyKind::kNotSet);
+ DCHECK_EQ(*kind, PropertyKind::kNotSet);
DCHECK(!*is_generator);
DCHECK(!*is_get);
DCHECK(!*is_set);
@@ -2166,8 +2205,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
}
case Token::ELLIPSIS:
- if (allow_harmony_object_rest_spread() && !*is_generator && !*is_async &&
- !*is_get && !*is_set) {
+ if (!*is_generator && !*is_async && !*is_get && !*is_set) {
*name = impl()->NullIdentifier();
Consume(Token::ELLIPSIS);
expression = ParseAssignmentExpression(true, CHECK_OK);
@@ -2217,9 +2255,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
template <typename Impl>
typename ParserBase<Impl>::ClassLiteralPropertyT
ParserBase<Impl>::ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
- bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
- bool* is_static, bool* has_name_static_property, bool* ok) {
+ ClassLiteralChecker* checker, ClassInfo* class_info, bool has_extends,
+ bool* is_computed_name, bool* has_seen_constructor,
+ ClassLiteralProperty::Kind* property_kind, bool* is_static,
+ bool* has_name_static_property, bool* ok) {
DCHECK_NOT_NULL(has_seen_constructor);
DCHECK_NOT_NULL(has_name_static_property);
bool is_get = false;
@@ -2273,14 +2312,17 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// as an uninitialized field.
case PropertyKind::kShorthandProperty:
case PropertyKind::kValueProperty:
- if (allow_harmony_class_fields()) {
- bool has_initializer = Check(Token::ASSIGN);
- ExpressionT function_literal = ParseClassFieldForInitializer(
- has_initializer, CHECK_OK_CUSTOM(NullLiteralProperty));
- ExpectSemicolon(CHECK_OK_CUSTOM(NullLiteralProperty));
+ if (allow_harmony_public_fields()) {
*property_kind = ClassLiteralProperty::FIELD;
+ if (!*is_computed_name) {
+ checker->CheckClassFieldName(*is_static,
+ CHECK_OK_CUSTOM(NullLiteralProperty));
+ }
+ ExpressionT initializer = ParseClassFieldInitializer(
+ class_info, *is_static, CHECK_OK_CUSTOM(NullLiteralProperty));
+ ExpectSemicolon(CHECK_OK_CUSTOM(NullLiteralProperty));
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
- name_expression, function_literal, *property_kind, *is_static,
+ name_expression, initializer, *property_kind, *is_static,
*is_computed_name);
impl()->SetFunctionNameFromPropertyName(result, name);
return result;
@@ -2377,36 +2419,43 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
}
template <typename Impl>
-typename ParserBase<Impl>::FunctionLiteralT
-ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
- bool* ok) {
- // Makes a concise method which evaluates and returns the initialized value
- // (or undefined if absent).
- FunctionKind kind = FunctionKind::kConciseMethod;
- DeclarationScope* initializer_scope = NewFunctionScope(kind);
- initializer_scope->set_start_position(scanner()->location().end_pos);
- FunctionState initializer_state(&function_state_, &scope_, initializer_scope);
- DCHECK_EQ(initializer_scope, scope());
- scope()->SetLanguageMode(STRICT);
- ExpressionClassifier expression_classifier(this);
- ExpressionT value;
- if (has_initializer) {
- value =
- this->ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpression));
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
+ bool is_static, bool* ok) {
+ DeclarationScope* initializer_scope = is_static
+ ? class_info->static_fields_scope
+ : class_info->instance_fields_scope;
+
+ if (initializer_scope == nullptr) {
+ initializer_scope = NewFunctionScope(FunctionKind::kConciseMethod);
+ // TODO(gsathya): Make scopes be non contiguous.
+ initializer_scope->set_start_position(scanner()->location().end_pos);
+ initializer_scope->SetLanguageMode(LanguageMode::kStrict);
+ }
+
+ ExpressionT initializer;
+ if (Check(Token::ASSIGN)) {
+ FunctionState initializer_state(&function_state_, &scope_,
+ initializer_scope);
+ ExpressionClassifier expression_classifier(this);
+
+ initializer =
+ ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpression));
impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpression));
} else {
- value = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ initializer = factory()->NewUndefinedLiteral(kNoSourcePosition);
}
+
initializer_scope->set_end_position(scanner()->location().end_pos);
- typename Types::StatementList body = impl()->NewStatementList(1);
- body->Add(factory()->NewReturnStatement(value, kNoSourcePosition), zone());
- FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
- impl()->EmptyIdentifierString(), initializer_scope, body,
- initializer_state.expected_property_count(), 0, 0,
- FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression, default_eager_compile_hint_,
- initializer_scope->start_position(), true, GetNextFunctionLiteralId());
- return function_literal;
+ if (is_static) {
+ class_info->static_fields_scope = initializer_scope;
+ class_info->has_static_class_fields = true;
+ } else {
+ class_info->instance_fields_scope = initializer_scope;
+ class_info->has_instance_class_fields = true;
+ }
+
+ return initializer;
}
template <typename Impl>
@@ -2432,7 +2481,6 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
switch (kind) {
case PropertyKind::kSpreadProperty:
- DCHECK(allow_harmony_object_rest_spread());
DCHECK(!is_get && !is_set && !is_generator && !is_async &&
!*is_computed_name);
DCHECK(name_token == Token::ELLIPSIS);
@@ -2441,7 +2489,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
*is_rest_property = true;
return factory()->NewObjectLiteralProperty(
- impl()->GetLiteralTheHole(kNoSourcePosition), name_expression,
+ factory()->NewTheHoleLiteral(), name_expression,
ObjectLiteralProperty::SPREAD, true);
case PropertyKind::kValueProperty: {
@@ -2910,7 +2958,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
- if (fni_ != NULL) {
+ if (fni_ != nullptr) {
// Check if the right hand side is a call to avoid inferring a
// name if we're dealing with "a = function(){...}();"-like
// expression.
@@ -2925,17 +2973,14 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
impl()->SetFunctionNameFromIdentifierRef(right, expression);
}
- if (op == Token::ASSIGN_EXP) {
- DCHECK(!is_destructuring_assignment);
- return impl()->RewriteAssignExponentiation(expression, right, pos);
- }
-
DCHECK_NE(op, Token::INIT);
ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
if (is_destructuring_assignment) {
- result = factory()->NewRewritableExpression(result);
- impl()->QueueDestructuringAssignmentForRewriting(result);
+ DCHECK_NE(op, Token::ASSIGN_EXP);
+ auto rewritable = factory()->NewRewritableExpression(result, scope());
+ impl()->QueueDestructuringAssignmentForRewriting(rewritable);
+ result = rewritable;
}
return result;
@@ -3010,11 +3055,11 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
impl()->RewriteNonPattern(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
- Consume(Token::CONDITIONAL);
ExpressionT left;
{
SourceRangeScope range_scope(scanner(), &then_range);
+ Consume(Token::CONDITIONAL);
ExpressionClassifier classifier(this);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
@@ -3023,10 +3068,10 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
AccumulateNonBindingPatternErrors();
}
impl()->RewriteNonPattern(CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
ExpressionT right;
{
SourceRangeScope range_scope(scanner(), &else_range);
+ Expect(Token::COLON, CHECK_OK);
ExpressionClassifier classifier(this);
right = ParseAssignmentExpression(accept_IN, CHECK_OK);
AccumulateNonBindingPatternErrors();
@@ -3042,7 +3087,8 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
int prec, bool accept_IN, bool* ok) {
- DCHECK(prec >= 4);
+ DCHECK_GE(prec, 4);
+ SourceRange right_range;
ExpressionT x = ParseUnaryExpression(CHECK_OK);
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
@@ -3050,12 +3096,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
impl()->RewriteNonPattern(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
+
+ SourceRangeScope right_range_scope(scanner(), &right_range);
Token::Value op = Next();
int pos = position();
const bool is_right_associative = op == Token::EXP;
const int next_prec = is_right_associative ? prec1 : prec1 + 1;
ExpressionT y = ParseBinaryExpression(next_prec, accept_IN, CHECK_OK);
+ right_range_scope.Finalize();
impl()->RewriteNonPattern(CHECK_OK);
if (impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos)) {
@@ -3078,11 +3127,14 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
// The comparison was negated - add a NOT.
x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
- } else if (op == Token::EXP) {
- x = impl()->RewriteExponentiation(x, y, pos);
+ } else if (impl()->CollapseNaryExpression(&x, y, op, pos, right_range)) {
+ continue;
} else {
// We have a "normal" binary operation.
x = factory()->NewBinaryOperation(op, x, y, pos);
+ if (op == Token::OR || op == Token::AND) {
+ impl()->RecordBinaryOperationSourceRange(x, right_range);
+ }
}
}
}
@@ -3316,7 +3368,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
factory()->NewAssignment(Token::INIT, this_expr, result, pos);
}
- if (fni_ != NULL) fni_->RemoveLastFunction();
+ if (fni_ != nullptr) fni_->RemoveLastFunction();
break;
}
@@ -3518,9 +3570,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseImportExpressions(
return impl()->NullExpression();
}
- return impl()->ExpressionFromLiteral(Token::NULL_LITERAL, pos);
+ return impl()->ImportMetaExpression(pos);
}
Expect(Token::LPAREN, CHECK_OK);
+ if (peek() == Token::RPAREN) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kImportMissingSpecifier);
+ *ok = false;
+ return impl()->NullExpression();
+ }
ExpressionT arg = ParseAssignmentExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
return factory()->NewImportCallExpression(arg, pos);
@@ -3666,6 +3724,7 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
+ FuncNameInferrer::State fni_state(fni_);
ExpressionT pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(Void));
ValidateBindingPattern(CHECK_OK_CUSTOM(Void));
@@ -3775,12 +3834,12 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
break;
case Token::CONST:
Consume(Token::CONST);
- DCHECK(var_context != kStatement);
+ DCHECK_NE(var_context, kStatement);
parsing_result->descriptor.mode = CONST;
break;
case Token::LET:
Consume(Token::LET);
- DCHECK(var_context != kStatement);
+ DCHECK_NE(var_context, kStatement);
parsing_result->descriptor.mode = LET;
break;
default:
@@ -3857,7 +3916,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
}
// 'let x' initializes 'x' to undefined.
if (parsing_result->descriptor.mode == LET) {
- value = impl()->GetLiteralUndefined(position());
+ value = factory()->NewUndefinedLiteral(position());
}
}
@@ -3979,9 +4038,9 @@ ParserBase<Impl>::ParseHoistableDeclaration(
// sloppy_block_function_map. Don't add them to the map for async functions.
// Generators are also supposed to be prohibited; currently doing this behind
// a flag and UseCounting violations to assess web compatibility.
- bool is_sloppy_block_function =
- is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
- !is_async && !(allow_harmony_restrictive_generators() && is_generator);
+ bool is_sloppy_block_function = is_sloppy(language_mode()) &&
+ !scope()->is_declaration_scope() &&
+ !is_async && !is_generator;
return impl()->DeclareFunction(variable_name, function, mode, pos,
is_sloppy_block_function, names, ok);
@@ -4078,18 +4137,6 @@ void ParserBase<Impl>::ParseFunctionBody(
typename ParserBase<Impl>::StatementListT result, IdentifierT function_name,
int pos, const FormalParametersT& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok) {
- static const int kFunctionNameAssignmentIndex = 0;
- if (function_type == FunctionLiteral::kNamedExpression) {
- DCHECK(!impl()->IsNull(function_name));
- // If we have a named function expression, we add a local variable
- // declaration to the body of the function with the name of the
- // function and let it refer to the function itself (closure).
- // Not having parsed the function body, the language mode may still change,
- // so we reserve a spot and create the actual const assignment later.
- DCHECK_EQ(kFunctionNameAssignmentIndex, result->length());
- result->Add(impl()->NullStatement(), zone());
- }
-
DeclarationScope* function_scope = scope()->AsDeclarationScope();
DeclarationScope* inner_scope = function_scope;
BlockT inner_block = impl()->NullStatement();
@@ -4170,9 +4217,7 @@ void ParserBase<Impl>::ParseFunctionBody(
function_scope->DeclareArguments(ast_value_factory());
}
- impl()->CreateFunctionNameAssignment(function_name, pos, function_type,
- function_scope, result,
- kFunctionNameAssignmentIndex);
+ impl()->DeclareFunctionNameVar(function_name, function_type, function_scope);
}
template <typename Impl>
@@ -4233,9 +4278,10 @@ template <typename Impl>
bool ParserBase<Impl>::IsTrivialExpression() {
Token::Value peek_token = peek();
if (peek_token == Token::SMI || peek_token == Token::NUMBER ||
- peek_token == Token::NULL_LITERAL || peek_token == Token::TRUE_LITERAL ||
- peek_token == Token::FALSE_LITERAL || peek_token == Token::STRING ||
- peek_token == Token::IDENTIFIER || peek_token == Token::THIS) {
+ peek_token == Token::BIGINT || peek_token == Token::NULL_LITERAL ||
+ peek_token == Token::TRUE_LITERAL || peek_token == Token::FALSE_LITERAL ||
+ peek_token == Token::STRING || peek_token == Token::IDENTIFIER ||
+ peek_token == Token::THIS) {
// PeekAhead() is expensive & may not always be called, so we only call it
// after checking peek().
Token::Value peek_ahead = PeekAhead();
@@ -4260,6 +4306,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
counters[Impl::IsPreParser()][parsing_on_main_thread_]);
+ base::ElapsedTimer timer;
+ if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
@@ -4302,7 +4350,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// For arrow functions, we don't need to retrieve data about function
// parameters.
int dummy_num_parameters = -1;
- DCHECK((kind & FunctionKind::kArrowFunction) != 0);
+ DCHECK_NE(kind & FunctionKind::kArrowFunction, 0);
LazyParsingResult result = impl()->SkipFunction(
nullptr, kind, FunctionLiteral::kAnonymousExpression,
formal_parameters.scope, &dummy_num_parameters,
@@ -4313,6 +4361,13 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
formal_parameters.scope->ResetAfterPreparsing(ast_value_factory_,
false);
+ // Discard any queued destructuring assignments which appeared
+ // in this function's parameter list.
+ FunctionState* parent_state = function_state.outer();
+ DCHECK_NOT_NULL(parent_state);
+ DCHECK_GE(parent_state->destructuring_assignments_to_rewrite().length(),
+ rewritable_length);
+ parent_state->RewindDestructuringAssignments(rewritable_length);
} else {
Consume(Token::LBRACE);
body = impl()->NewStatementList(8);
@@ -4349,23 +4404,9 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
- if (is_lazy_top_level_function) {
- FunctionState* parent_state = function_state.outer();
- DCHECK_NOT_NULL(parent_state);
- DCHECK_GE(parent_state->destructuring_assignments_to_rewrite().length(),
- rewritable_length);
- parent_state->RewindDestructuringAssignments(rewritable_length);
- }
-
impl()->RewriteDestructuringAssignments();
}
- if (FLAG_trace_preparse) {
- Scope* scope = formal_parameters.scope;
- PrintF(" [%s]: %i-%i (arrow function)\n",
- is_lazy_top_level_function ? "Preparse no-resolution" : "Full parse",
- scope->start_position(), scope->end_position());
- }
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
impl()->EmptyIdentifierString(), formal_parameters.scope, body,
expected_property_count, formal_parameters.num_parameters(),
@@ -4380,6 +4421,17 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
impl()->AddFunctionForNameInference(function_literal);
+ if (V8_UNLIKELY((FLAG_log_function_events))) {
+ Scope* scope = formal_parameters.scope;
+ double ms = timer.Elapsed().InMillisecondsF();
+ const char* event_name =
+ is_lazy_top_level_function ? "preparse-no-resolution" : "parse";
+ const char* name = "arrow function";
+ logger_->FunctionEvent(event_name, nullptr, script_id(), ms,
+ scope->start_position(), scope->end_position(), name,
+ strlen(name));
+ }
+
return function_literal;
}
@@ -4407,7 +4459,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
Scope* block_scope = NewScope(BLOCK_SCOPE);
BlockState block_state(&scope_, block_scope);
- RaiseLanguageMode(STRICT);
+ RaiseLanguageMode(LanguageMode::kStrict);
ClassInfo class_info(this);
class_info.is_anonymous = is_anonymous;
@@ -4438,19 +4490,23 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
// property.
bool is_constructor = !class_info.has_seen_constructor;
ClassLiteralPropertyT property = ParseClassPropertyDefinition(
- &checker, has_extends, &is_computed_name,
+ &checker, &class_info, has_extends, &is_computed_name,
&class_info.has_seen_constructor, &property_kind, &is_static,
&class_info.has_name_static_property, CHECK_OK);
if (!class_info.has_static_computed_names && is_static &&
is_computed_name) {
class_info.has_static_computed_names = true;
}
+ if (is_computed_name && property_kind == ClassLiteralProperty::FIELD) {
+ class_info.computed_field_count++;
+ }
is_constructor &= class_info.has_seen_constructor;
impl()->RewriteNonPattern(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
impl()->DeclareClassProperty(name, property, property_kind, is_static,
- is_constructor, &class_info, CHECK_OK);
+ is_constructor, is_computed_name, &class_info,
+ CHECK_OK);
impl()->InferFunctionName();
}
@@ -4550,9 +4606,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
//
// When parsing a TemplateLiteral, we must have scanned either an initial
// TEMPLATE_SPAN, or a TEMPLATE_TAIL.
- CHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
+ DCHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
- bool forbid_illegal_escapes = !allow_harmony_template_escapes() || !tagged;
+ bool forbid_illegal_escapes = !tagged;
// If we reach a TEMPLATE_TAIL first, we are parsing a NoSubstitutionTemplate.
// In this case we may simply consume the token and build a template with a
@@ -4656,7 +4712,7 @@ ParserBase<Impl>::CheckAndRewriteReferenceExpression(
if (expression->IsValidReferenceExpression()) {
return expression;
}
- if (expression->IsCall()) {
+ if (expression->IsCall() && !expression->AsCall()->is_tagged_template()) {
// If it is a call, make it a runtime error for legacy web compatibility.
// Bug: https://bugs.chromium.org/p/v8/issues/detail?id=4480
// Rewrite `expr' to `expr[throw ReferenceError]'.
@@ -4763,7 +4819,7 @@ ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
if (impl()->IsUseStrictDirective(stat) &&
token_loc.end_pos - token_loc.beg_pos == sizeof("use strict") + 1) {
// Directive "use strict" (ES5 14.1).
- RaiseLanguageMode(STRICT);
+ RaiseLanguageMode(LanguageMode::kStrict);
if (!scope()->HasSimpleParameters()) {
// TC39 deemed "use strict" directives to be an error when occurring
// in the body of a function with non-simple parameter list, on
@@ -4783,14 +4839,14 @@ ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
// Possibly an unknown directive.
// Should not change mode, but will increment usage counters
// as appropriate. Ditto usages below.
- RaiseLanguageMode(SLOPPY);
+ RaiseLanguageMode(LanguageMode::kSloppy);
} else {
// End of the directive prologue.
directive_prologue = false;
- RaiseLanguageMode(SLOPPY);
+ RaiseLanguageMode(LanguageMode::kSloppy);
}
} else {
- RaiseLanguageMode(SLOPPY);
+ RaiseLanguageMode(LanguageMode::kSloppy);
}
// If we're allowed to abort, we will do so when we see a "long and
@@ -5121,7 +5177,7 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
ExpectSemicolon(CHECK_OK);
if (labels != nullptr) {
// TODO(adamk): Also measure in the PreParser by passing something
- // non-NULL as |labels|.
+ // non-null as |labels|.
impl()->CountUsage(v8::Isolate::kLabeledExpressionStatement);
}
return factory()->NewExpressionStatement(expr, pos);
@@ -5758,7 +5814,7 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok) {
// Initializer is reference followed by in/of.
if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
- expression = impl()->CheckAndRewriteReferenceExpression(
+ expression = CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
kSyntaxError, CHECK_OK);
}
@@ -5959,7 +6015,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
ValidateAssignmentPattern(CHECK_OK);
} else {
impl()->RewriteNonPattern(CHECK_OK);
- each_variable = impl()->CheckAndRewriteReferenceExpression(
+ each_variable = CheckAndRewriteReferenceExpression(
lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
kSyntaxError, CHECK_OK);
}
@@ -6081,6 +6137,22 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
}
}
+template <typename Impl>
+void ParserBase<Impl>::ClassLiteralChecker::CheckClassFieldName(bool is_static,
+ bool* ok) {
+ if (is_static && IsPrototype()) {
+ this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
+ *ok = false;
+ return;
+ }
+
+ if (IsConstructor()) {
+ this->parser()->ReportMessage(MessageTemplate::kConstructorClassField);
+ *ok = false;
+ return;
+ }
+}
+
#undef CHECK_OK
#undef CHECK_OK_CUSTOM
#undef CHECK_OK_VOID
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index a554d7d242..4d291a741e 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -8,7 +8,6 @@
#include <memory>
#include "src/api.h"
-#include "src/ast/ast-expression-rewriter.h"
#include "src/ast/ast-function-literal-id-reindexer.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
@@ -16,6 +15,7 @@
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/log.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/parsing/duplicate-finder.h"
@@ -173,11 +173,11 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
FunctionKind kind = call_super ? FunctionKind::kDefaultDerivedConstructor
: FunctionKind::kDefaultBaseConstructor;
DeclarationScope* function_scope = NewFunctionScope(kind);
- SetLanguageMode(function_scope, STRICT);
+ SetLanguageMode(function_scope, LanguageMode::kStrict);
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
- ZoneList<Statement*>* body = NULL;
+ ZoneList<Statement*>* body = nullptr;
{
FunctionState function_state(&function_state_, &scope_, function_scope);
@@ -212,7 +212,6 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
parameter_count, FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
true, GetNextFunctionLiteralId());
-
return function_literal;
}
@@ -245,10 +244,9 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
Expression* y,
Token::Value op, int pos) {
- if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() &&
- y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
- double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
- double y_val = y->AsLiteral()->raw_value()->AsNumber();
+ if ((*x)->IsNumberLiteral() && y->IsNumberLiteral()) {
+ double x_val = (*x)->AsLiteral()->AsNumber();
+ double y_val = y->AsLiteral()->AsNumber();
switch (op) {
case Token::ADD:
*x = factory()->NewNumberLiteral(x_val + y_val, pos);
@@ -308,16 +306,48 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
return false;
}
+bool Parser::CollapseNaryExpression(Expression** x, Expression* y,
+ Token::Value op, int pos,
+ const SourceRange& range) {
+ // Filter out unsupported ops.
+ if (!Token::IsBinaryOp(op) || op == Token::EXP) return false;
+
+ // Convert *x into an nary operation with the given op, returning false if
+ // this is not possible.
+ NaryOperation* nary = nullptr;
+ if ((*x)->IsBinaryOperation()) {
+ BinaryOperation* binop = (*x)->AsBinaryOperation();
+ if (binop->op() != op) return false;
+
+ nary = factory()->NewNaryOperation(op, binop->left(), 2);
+ nary->AddSubsequent(binop->right(), binop->position());
+ ConvertBinaryToNaryOperationSourceRange(binop, nary);
+ *x = nary;
+ } else if ((*x)->IsNaryOperation()) {
+ nary = (*x)->AsNaryOperation();
+ if (nary->op() != op) return false;
+ } else {
+ return false;
+ }
+
+ // Append our current expression to the nary operation.
+ // TODO(leszeks): Do some literal collapsing here if we're appending Smi or
+ // String literals.
+ nary->AddSubsequent(y, pos);
+ AppendNaryOperationSourceRange(nary, range);
+
+ return true;
+}
+
Expression* Parser::BuildUnaryExpression(Expression* expression,
Token::Value op, int pos) {
- DCHECK(expression != NULL);
- if (expression->IsLiteral()) {
- const AstValue* literal = expression->AsLiteral()->raw_value();
+ DCHECK_NOT_NULL(expression);
+ const Literal* literal = expression->AsLiteral();
+ if (literal != nullptr) {
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
- bool condition = literal->BooleanValue();
- return factory()->NewBooleanLiteral(!condition, pos);
- } else if (literal->IsNumber()) {
+ return factory()->NewBooleanLiteral(literal->ToBooleanIsFalse(), pos);
+ } else if (literal->IsNumberLiteral()) {
// Compute some expressions involving only number literals.
double value = literal->AsNumber();
switch (op) {
@@ -383,6 +413,12 @@ Expression* Parser::FunctionSentExpression(int pos) {
args, pos);
}
+Expression* Parser::ImportMetaExpression(int pos) {
+ return factory()->NewCallRuntime(
+ Runtime::kInlineGetImportMetaObject,
+ new (zone()) ZoneList<Expression*>(0, zone()), pos);
+}
+
Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
switch (token) {
case Token::NULL_LITERAL:
@@ -399,10 +435,13 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
double value = scanner()->DoubleValue();
return factory()->NewNumberLiteral(value, pos);
}
+ case Token::BIGINT:
+ return factory()->NewBigIntLiteral(
+ AstBigInt(scanner()->CurrentLiteralAsCString(zone())), pos);
default:
DCHECK(false);
}
- return NULL;
+ return nullptr;
}
Expression* Parser::NewV8Intrinsic(const AstRawString* name,
@@ -463,7 +502,10 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->GetOrCreateAstValueFactory(),
- info->runtime_call_stats(), true),
+ info->pending_error_handler(),
+ info->runtime_call_stats(), info->logger(),
+ info->script().is_null() ? -1 : info->script()->id(),
+ info->is_module(), true),
scanner_(info->unicode_cache(), use_counts_),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
@@ -479,7 +521,7 @@ Parser::Parser(ParseInfo* info)
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
// ParseInfo during background parsing.
- DCHECK(info->character_stream() != nullptr);
+ DCHECK_NOT_NULL(info->character_stream());
// Determine if functions can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
@@ -500,13 +542,11 @@ Parser::Parser(ParseInfo* info)
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
- set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
- set_allow_harmony_class_fields(FLAG_harmony_class_fields);
- set_allow_harmony_object_rest_spread(FLAG_harmony_object_rest_spread);
+ set_allow_harmony_public_fields(FLAG_harmony_public_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_async_iteration(FLAG_harmony_async_iteration);
- set_allow_harmony_template_escapes(FLAG_harmony_template_escapes);
+ set_allow_harmony_bigint(FLAG_harmony_bigint);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -527,7 +567,6 @@ void Parser::DeserializeScopeChain(
scope = Scope::DeserializeScopeChain(
zone(), *outer_scope_info, script_scope, ast_value_factory(),
Scope::DeserializationMode::kScopesOnly);
- DCHECK(!info->is_module() || scope->is_module_scope());
}
original_scope_ = scope;
}
@@ -557,9 +596,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
: &RuntimeCallStats::ParseProgram);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
base::ElapsedTimer timer;
- if (FLAG_trace_parse) {
- timer.Start();
- }
+ if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
@@ -583,23 +620,25 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
HandleSourceURLComments(isolate, info->script());
- if (FLAG_trace_parse && result != nullptr) {
- double ms = timer.Elapsed().InMillisecondsF();
- if (info->is_eval()) {
- PrintF("[parsing eval");
- } else if (info->script()->name()->IsString()) {
- String* name = String::cast(info->script()->name());
- std::unique_ptr<char[]> name_chars = name->ToCString();
- PrintF("[parsing script: %s", name_chars.get());
- } else {
- PrintF("[parsing script");
- }
- PrintF(" - took %0.3f ms]\n", ms);
- }
if (produce_cached_parse_data() && result != nullptr) {
*info->cached_data() = logger.GetScriptData();
}
log_ = nullptr;
+
+ if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ const char* event_name = "parse-eval";
+ Script* script = *info->script();
+ int start = -1;
+ int end = -1;
+ if (!info->is_eval()) {
+ event_name = "parse-script";
+ start = 0;
+ end = String::cast(script->source())->length();
+ }
+ LOG(script->GetIsolate(),
+ FunctionEvent(event_name, script, -1, ms, start, end, "", 0));
+ }
return result;
}
@@ -616,11 +655,10 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
DCHECK(info->function_literal_id() == FunctionLiteral::kIdTypeTopLevel ||
info->function_literal_id() == FunctionLiteral::kIdTypeInvalid);
- FunctionLiteral* result = NULL;
+ FunctionLiteral* result = nullptr;
{
Scope* outer = original_scope_;
DCHECK_NOT_NULL(outer);
- parsing_module_ = info->is_module();
if (info->is_eval()) {
outer = NewEvalScope(outer);
} else if (parsing_module_) {
@@ -638,6 +676,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
if (parsing_module_) {
+ DCHECK(info->is_module());
// Declare the special module parameter.
auto name = ast_value_factory()->empty_string();
bool is_duplicate = false;
@@ -658,9 +697,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
zone());
ParseModuleItemList(body, &ok);
- ok = ok &&
- module()->Validate(this->scope()->AsModuleScope(),
- &pending_error_handler_, zone());
+ ok = ok && module()->Validate(this->scope()->AsModuleScope(),
+ pending_error_handler(), zone());
} else {
// Don't count the mode in the use counters--give the program a chance
// to enable script-wide strict mode below.
@@ -708,7 +746,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
info->set_max_function_literal_id(GetLastFunctionLiteralId());
// Make sure the target stack is empty.
- DCHECK(target_stack_ == NULL);
+ DCHECK_NULL(target_stack_);
return result;
}
@@ -722,9 +760,8 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
&RuntimeCallStats::ParseFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseFunction");
base::ElapsedTimer timer;
- if (FLAG_trace_parse) {
- timer.Start();
- }
+ if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
+
DeserializeScopeChain(info, info->maybe_outer_scope_info());
DCHECK_EQ(factory()->zone(), info->zone());
@@ -740,12 +777,18 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
result->set_inferred_name(inferred_name);
}
- if (FLAG_trace_parse && result != NULL) {
+ if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
// We need to make sure that the debug-name is available.
ast_value_factory()->Internalize(isolate);
- std::unique_ptr<char[]> name_chars = result->debug_name()->ToCString();
- PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
+ DeclarationScope* function_scope = result->scope();
+ Script* script = *info->script();
+ std::unique_ptr<char[]> function_name = result->GetDebugName();
+ LOG(script->GetIsolate(),
+ FunctionEvent("parse-function", script, -1, ms,
+ function_scope->start_position(),
+ function_scope->end_position(), function_name.get(),
+ strlen(function_name.get())));
}
return result;
}
@@ -888,6 +931,11 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
kNoSourcePosition, function_type, info->language_mode(), &ok);
}
+
+ if (ok) {
+ result->set_requires_instance_fields_initializer(
+ info->requires_instance_fields_initializer());
+ }
// Make sure the results agree.
DCHECK(ok == (result != nullptr));
}
@@ -979,11 +1027,12 @@ void Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
// Keep track of the first reserved word encountered in case our
// caller needs to report an error.
if (!reserved_loc->IsValid() &&
- !Token::IsIdentifier(name_tok, STRICT, false, parsing_module_)) {
+ !Token::IsIdentifier(name_tok, LanguageMode::kStrict, false,
+ parsing_module_)) {
*reserved_loc = scanner()->location();
}
const AstRawString* local_name = ParseIdentifierName(CHECK_OK_VOID);
- const AstRawString* export_name = NULL;
+ const AstRawString* export_name = nullptr;
Scanner::Location location = scanner()->location();
if (CheckContextualKeyword(Token::AS)) {
export_name = ParseIdentifierName(CHECK_OK_VOID);
@@ -991,7 +1040,7 @@ void Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
// both for errors due to "a" and for errors due to "b".
location.end_pos = scanner()->location().end_pos;
}
- if (export_name == NULL) {
+ if (export_name == nullptr) {
export_name = local_name;
}
export_names->Add(export_name, zone());
@@ -1033,8 +1082,8 @@ ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
if (CheckContextualKeyword(Token::AS)) {
local_name = ParseIdentifierName(CHECK_OK);
}
- if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false,
- parsing_module_)) {
+ if (!Token::IsIdentifier(scanner()->current_token(), LanguageMode::kStrict,
+ false, parsing_module_)) {
*ok = false;
ReportMessage(MessageTemplate::kUnexpectedReserved);
return nullptr;
@@ -1393,8 +1442,8 @@ Variable* Parser::Declare(Declaration* declaration,
}
bool sloppy_mode_block_scope_function_redefinition = false;
Variable* variable = scope->DeclareVariable(
- declaration, mode, init, allow_harmony_restrictive_generators(),
- &sloppy_mode_block_scope_function_redefinition, ok);
+ declaration, mode, init, &sloppy_mode_block_scope_function_redefinition,
+ ok);
if (!*ok) {
// If we only have the start position of a proxy, we can't highlight the
// whole variable name. Pretend its length is 1 so that we highlight at
@@ -1778,25 +1827,13 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
zone());
}
-void Parser::CreateFunctionNameAssignment(
- const AstRawString* function_name, int pos,
- FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, ZoneList<Statement*>* result, int index) {
- if (function_type == FunctionLiteral::kNamedExpression) {
- StatementT statement = factory()->NewEmptyStatement(kNoSourcePosition);
- if (function_scope->LookupLocal(function_name) == nullptr) {
- // Now that we know the language mode, we can create the const assignment
- // in the previously reserved spot.
- DCHECK_EQ(function_scope, scope());
- Variable* fvar = function_scope->DeclareFunctionVar(function_name);
- VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
- statement = factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::INIT, fproxy,
- factory()->NewThisFunction(pos),
- kNoSourcePosition),
- kNoSourcePosition);
- }
- result->Set(index, statement);
+void Parser::DeclareFunctionNameVar(const AstRawString* function_name,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope) {
+ if (function_type == FunctionLiteral::kNamedExpression &&
+ function_scope->LookupLocal(function_name) == nullptr) {
+ DCHECK_EQ(function_scope, scope());
+ function_scope->DeclareFunctionVar(function_name);
}
}
@@ -1851,7 +1888,7 @@ Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* subject,
Statement* body) {
ForOfStatement* for_of = stmt->AsForOfStatement();
- if (for_of != NULL) {
+ if (for_of != nullptr) {
const bool finalize = true;
return InitializeForOfStatement(for_of, each, subject, body, finalize,
IteratorType::kNormal, each->position());
@@ -2155,7 +2192,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// }
// }
- DCHECK(for_info.bound_names.length() > 0);
+ DCHECK_GT(for_info.bound_names.length(), 0);
ZoneList<Variable*> temps(for_info.bound_names.length(), zone());
Block* outer_block =
@@ -2180,7 +2217,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
temps.Add(temp, zone());
}
- Variable* first = NULL;
+ Variable* first = nullptr;
// Make statement: first = 1.
if (next) {
first = NewTemporary(temp_name);
@@ -2205,7 +2242,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// need to know about it. This should be safe because we don't run any code
// in this function that looks up break targets.
ForStatement* outer_loop =
- factory()->NewForStatement(NULL, kNoSourcePosition);
+ factory()->NewForStatement(nullptr, kNoSourcePosition);
outer_block->statements()->Add(outer_loop, zone());
outer_block->set_scope(scope());
@@ -2229,7 +2266,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, kNoSourcePosition);
int declaration_pos = for_info.parsing_result.descriptor.declaration_pos;
- DCHECK(declaration_pos != kNoSourcePosition);
+ DCHECK_NE(declaration_pos, kNoSourcePosition);
decl->proxy()->var()->set_initializer_position(declaration_pos);
ignore_completion_block->statements()->Add(assignment_statement, zone());
}
@@ -2237,7 +2274,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// Make statement: if (first == 1) { first = 0; } else { next; }
if (next) {
DCHECK(first);
- Expression* compare = NULL;
+ Expression* compare = nullptr;
// Make compare expression: first == 1.
{
Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
@@ -2245,7 +2282,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
kNoSourcePosition);
}
- Statement* clear_first = NULL;
+ Statement* clear_first = nullptr;
// Make statement: first = 0.
{
VariableProxy* first_proxy = factory()->NewVariableProxy(first);
@@ -2284,7 +2321,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
inner_block->statements()->Add(ignore_completion_block, zone());
// Make cond expression for main loop: flag == 1.
- Expression* flag_cond = NULL;
+ Expression* flag_cond = nullptr;
{
Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
@@ -2293,9 +2330,9 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
// Create chain of expressions "flag = 0, temp_x = x, ..."
- Statement* compound_next_statement = NULL;
+ Statement* compound_next_statement = nullptr;
{
- Expression* compound_next = NULL;
+ Expression* compound_next = nullptr;
// Make expression: flag = 0.
{
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
@@ -2324,12 +2361,12 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// Note that we re-use the original loop node, which retains its labels
// and ensures that any break or continue statements in body point to
// the right place.
- loop->Initialize(NULL, flag_cond, compound_next_statement, body);
+ loop->Initialize(nullptr, flag_cond, compound_next_statement, body);
inner_block->statements()->Add(loop, zone());
// Make statement: {{if (flag == 1) break;}}
{
- Expression* compare = NULL;
+ Expression* compare = nullptr;
// Make compare expresion: flag == 1.
{
Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
@@ -2348,7 +2385,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
inner_block->set_scope(inner_scope);
}
- outer_loop->Initialize(NULL, NULL, NULL, inner_block);
+ outer_loop->Initialize(nullptr, nullptr, nullptr, inner_block);
return outer_block;
}
@@ -2357,6 +2394,7 @@ void Parser::AddArrowFunctionFormalParameters(
ParserFormalParameters* parameters, Expression* expr, int end_pos,
bool* ok) {
// ArrowFunctionFormals ::
+ // Nary(Token::COMMA, VariableProxy*, Tail)
// Binary(Token::COMMA, NonTailArrowFunctionFormals, Tail)
// Tail
// NonTailArrowFunctionFormals ::
@@ -2366,9 +2404,30 @@ void Parser::AddArrowFunctionFormalParameters(
// VariableProxy
// Spread(VariableProxy)
//
- // As we need to visit the parameters in left-to-right order, we recurse on
- // the left-hand side of comma expressions.
+ // We need to visit the parameters in left-to-right order
//
+
+ // For the Nary case, we simply visit the parameters in a loop.
+ if (expr->IsNaryOperation()) {
+ NaryOperation* nary = expr->AsNaryOperation();
+ // The classifier has already run, so we know that the expression is a valid
+ // arrow function formals production.
+ DCHECK_EQ(nary->op(), Token::COMMA);
+ // Each op position is the end position of the *previous* expr, with the
+ // second (i.e. first "subsequent") op position being the end position of
+ // the first child expression.
+ Expression* next = nary->first();
+ for (size_t i = 0; i < nary->subsequent_length(); ++i) {
+ AddArrowFunctionFormalParameters(
+ parameters, next, nary->subsequent_op_position(i), CHECK_OK_VOID);
+ next = nary->subsequent(i);
+ }
+ AddArrowFunctionFormalParameters(parameters, next, end_pos, CHECK_OK_VOID);
+ return;
+ }
+
+ // For the binary case, we recurse on the left-hand side of binary comma
+ // expressions.
if (expr->IsBinaryOperation()) {
BinaryOperation* binop = expr->AsBinaryOperation();
// The classifier has already run, so we know that the expression is a valid
@@ -2467,7 +2526,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
// handle to decide whether to invoke function name inference.
- bool should_infer_name = function_name == NULL;
+ bool should_infer_name = function_name == nullptr;
// We want a non-null handle as the function name by default. We will handle
// the "function does not have a shared name" case later.
@@ -2518,8 +2577,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const bool is_lazy =
eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
- const bool is_top_level =
- impl()->AllowsLazyParsingWithoutUnresolvedVariables();
+ const bool is_top_level = AllowsLazyParsingWithoutUnresolvedVariables();
const bool is_lazy_top_level_function = is_lazy && is_top_level;
const bool is_lazy_inner_function = is_lazy && !is_top_level;
const bool is_expression =
@@ -2531,6 +2589,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
parsing_on_main_thread_
? &RuntimeCallStats::ParseFunctionLiteral
: &RuntimeCallStats::ParseBackgroundFunctionLiteral);
+ base::ElapsedTimer timer;
+ if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
// Determine whether we can still lazy parse the inner function.
// The preconditions are:
@@ -2631,13 +2691,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
DCHECK_EQ(should_preparse, temp_zoned_);
- if (V8_UNLIKELY(FLAG_trace_preparse)) {
- PrintF(" [%s]: %i-%i %.*s\n",
- should_preparse ? (is_top_level ? "Preparse no-resolution"
- : "Preparse resolution")
- : "Full parse",
- scope->start_position(), scope->end_position(),
- function_name->byte_length(), function_name->raw_data());
+ if (V8_UNLIKELY(FLAG_log_function_events)) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ const char* event_name = should_preparse
+ ? (is_top_level ? "preparse-no-resolution"
+ : "preparse-resolution")
+ : "full-parse";
+ logger_->FunctionEvent(
+ event_name, nullptr, script_id(), ms, scope->start_position(),
+ scope->end_position(),
+ reinterpret_cast<const char*>(function_name->raw_data()),
+ function_name->byte_length());
}
if (V8_UNLIKELY(FLAG_runtime_stats)) {
if (should_preparse) {
@@ -2764,8 +2828,8 @@ Parser::LazyParsingResult Parser::SkipFunction(
DCHECK(!is_inner_function || !may_abort);
PreParser::PreParseResult result = reusable_preparser()->PreParseFunction(
- function_name, kind, function_type, function_scope, parsing_module_,
- is_inner_function, may_abort, use_counts_, produced_preparsed_scope_data);
+ function_name, kind, function_type, function_scope, is_inner_function,
+ may_abort, use_counts_, produced_preparsed_scope_data, this->script_id());
// Return immediately if pre-parser decided to abort parsing.
if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
@@ -2775,7 +2839,7 @@ Parser::LazyParsingResult Parser::SkipFunction(
*ok = false;
return kLazyParsingComplete;
}
- if (pending_error_handler_.has_pending_error()) {
+ if (pending_error_handler()->has_pending_error()) {
*ok = false;
return kLazyParsingComplete;
}
@@ -3137,6 +3201,20 @@ void Parser::DeclareClassVariable(const AstRawString* name,
}
}
+// TODO(gsathya): Ideally, this should just bypass scope analysis and
+// allocate a slot directly on the context. We should just store this
+// index in the AST, instead of storing the variable.
+Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name,
+ bool* ok) {
+ VariableProxy* proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, kNoSourcePosition);
+ Variable* var = Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
+ Variable::DefaultInitializationFlag(CONST), CHECK_OK);
+ var->ForceContextAllocation();
+ return var;
+}
+
// This method declares a property of the given class. It updates the
// following fields of class_info, as appropriate:
// - constructor
@@ -3145,7 +3223,8 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
- ClassInfo* class_info, bool* ok) {
+ bool is_computed_name, ClassInfo* class_info,
+ bool* ok) {
if (is_constructor) {
DCHECK(!class_info->constructor);
class_info->constructor = property->value()->AsFunctionLiteral();
@@ -3156,11 +3235,44 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
return;
}
- if (property->kind() == ClassLiteralProperty::FIELD) {
- DCHECK(allow_harmony_class_fields());
- // TODO(littledan): Implement class fields
+ if (kind != ClassLiteralProperty::FIELD) {
+ class_info->properties->Add(property, zone());
+ return;
+ }
+
+ DCHECK(allow_harmony_public_fields());
+
+ if (is_static) {
+ class_info->static_fields->Add(property, zone());
+ } else {
+ class_info->instance_fields->Add(property, zone());
+ }
+
+ if (is_computed_name) {
+ // We create a synthetic variable name here so that scope
+ // analysis doesn't dedupe the vars.
+ Variable* computed_name_var = CreateSyntheticContextVariable(
+ ClassFieldVariableName(ast_value_factory(),
+ class_info->computed_field_count),
+ CHECK_OK_VOID);
+ property->set_computed_name_var(computed_name_var);
+ class_info->properties->Add(property, zone());
}
- class_info->properties->Add(property, zone());
+}
+
+FunctionLiteral* Parser::CreateInitializerFunction(
+ DeclarationScope* scope, ZoneList<ClassLiteral::Property*>* fields) {
+ // function() { .. class fields initializer .. }
+ ZoneList<Statement*>* statements = NewStatementList(1);
+ InitializeClassFieldsStatement* static_fields =
+ factory()->NewInitializeClassFieldsStatement(fields, kNoSourcePosition);
+ statements->Add(static_fields, zone());
+ return factory()->NewFunctionLiteral(
+ ast_value_factory()->empty_string(), scope, statements, 0, 0, 0,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldEagerCompile, scope->start_position(), true,
+ GetNextFunctionLiteralId());
}
// This method generates a ClassLiteral AST node.
@@ -3177,7 +3289,7 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
int end_pos, bool* ok) {
DCHECK_NOT_NULL(block_scope);
DCHECK_EQ(block_scope->scope_type(), BLOCK_SCOPE);
- DCHECK_EQ(block_scope->language_mode(), STRICT);
+ DCHECK_EQ(block_scope->language_mode(), LanguageMode::kStrict);
bool has_extends = class_info->extends != nullptr;
bool has_default_constructor = class_info->constructor == nullptr;
@@ -3191,25 +3303,33 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
class_info->variable->set_initializer_position(end_pos);
}
+ FunctionLiteral* static_fields_initializer = nullptr;
+ if (class_info->has_static_class_fields) {
+ static_fields_initializer = CreateInitializerFunction(
+ class_info->static_fields_scope, class_info->static_fields);
+ }
+
+ FunctionLiteral* instance_fields_initializer_function = nullptr;
+ if (class_info->has_instance_class_fields) {
+ instance_fields_initializer_function = CreateInitializerFunction(
+ class_info->instance_fields_scope, class_info->instance_fields);
+ class_info->constructor->set_requires_instance_fields_initializer(true);
+ }
+
ClassLiteral* class_literal = factory()->NewClassLiteral(
block_scope, class_info->variable, class_info->extends,
- class_info->constructor, class_info->properties, pos, end_pos,
- class_info->has_name_static_property,
+ class_info->constructor, class_info->properties,
+ static_fields_initializer, instance_fields_initializer_function, pos,
+ end_pos, class_info->has_name_static_property,
class_info->has_static_computed_names, class_info->is_anonymous);
AddFunctionForNameInference(class_info->constructor);
-
return class_literal;
}
-Literal* Parser::GetLiteralUndefined(int position) {
- return factory()->NewUndefinedLiteral(position);
-}
-
-
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
- if (decl != NULL) {
+ if (decl != nullptr) {
// In ES6, conflicting variable bindings are early errors.
const AstRawString* name = decl->proxy()->raw_name();
int position = decl->proxy()->position();
@@ -3263,7 +3383,7 @@ void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
// Parser support
bool Parser::TargetStackContainsLabel(const AstRawString* label) {
- for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
+ for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
if (ContainsLabel(t->statement()->labels(), label)) return true;
}
return false;
@@ -3272,31 +3392,31 @@ bool Parser::TargetStackContainsLabel(const AstRawString* label) {
BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
bool* ok) {
- bool anonymous = label == NULL;
- for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
+ bool anonymous = label == nullptr;
+ for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
BreakableStatement* stat = t->statement();
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
return stat;
}
}
- return NULL;
+ return nullptr;
}
IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
bool* ok) {
- bool anonymous = label == NULL;
- for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
+ bool anonymous = label == nullptr;
+ for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
IterationStatement* stat = t->statement()->AsIterationStatement();
- if (stat == NULL) continue;
+ if (stat == nullptr) continue;
DCHECK(stat->is_target_for_anonymous());
if (anonymous || ContainsLabel(stat->labels(), label)) {
return stat;
}
}
- return NULL;
+ return nullptr;
}
@@ -3311,17 +3431,6 @@ void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
}
}
-void Parser::ReportErrors(Isolate* isolate, Handle<Script> script) {
- if (stack_overflow()) {
- isolate->StackOverflow();
- } else {
- DCHECK(pending_error_handler_.has_pending_error());
- // Internalize ast values for throwing the pending error.
- ast_value_factory()->Internalize(isolate);
- pending_error_handler_.ThrowPendingError(isolate, script);
- }
-}
-
void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
// Move statistics to Isolate.
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
@@ -3341,10 +3450,15 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
}
void Parser::ParseOnBackground(ParseInfo* info) {
+ RuntimeCallTimerScope runtimeTimer(runtime_call_stats_,
+ &RuntimeCallStats::ParseBackgroundProgram);
parsing_on_main_thread_ = false;
+ if (!info->script().is_null()) {
+ set_script_id(info->script()->id());
+ }
- DCHECK(info->literal() == NULL);
- FunctionLiteral* result = NULL;
+ DCHECK_NULL(info->literal());
+ FunctionLiteral* result = nullptr;
ParserLogger logger;
if (produce_cached_parse_data()) {
@@ -3380,17 +3494,8 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// care of calling AstValueFactory::Internalize just before compilation.
if (produce_cached_parse_data()) {
- if (result != NULL) *info->cached_data() = logger.GetScriptData();
- log_ = NULL;
- }
- if (runtime_call_stats_ &&
- (FLAG_runtime_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- auto value = v8::tracing::TracedValue::Create();
- runtime_call_stats_->Dump(value.get());
- TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
- "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
- "runtime-call-stats", std::move(value));
+ if (result != nullptr) *info->cached_data() = logger.GetScriptData();
+ log_ = nullptr;
}
}
@@ -3400,17 +3505,13 @@ Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
void Parser::AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
bool tail) {
- DCHECK(should_cook || allow_harmony_template_escapes());
- int pos = scanner()->location().beg_pos;
int end = scanner()->location().end_pos - (tail ? 1 : 2);
- const AstRawString* trv = scanner()->CurrentRawSymbol(ast_value_factory());
- Literal* raw = factory()->NewStringLiteral(trv, pos);
+ const AstRawString* raw = scanner()->CurrentRawSymbol(ast_value_factory());
if (should_cook) {
- const AstRawString* tv = scanner()->CurrentSymbol(ast_value_factory());
- Literal* cooked = factory()->NewStringLiteral(tv, pos);
+ const AstRawString* cooked = scanner()->CurrentSymbol(ast_value_factory());
(*state)->AddTemplateSpan(cooked, raw, end, zone());
} else {
- (*state)->AddTemplateSpan(GetLiteralUndefined(pos), raw, end, zone());
+ (*state)->AddTemplateSpan(nullptr, raw, end, zone());
}
}
@@ -3425,46 +3526,54 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag) {
TemplateLiteral* lit = *state;
int pos = lit->position();
- const ZoneList<Literal*>* cooked_strings = lit->cooked();
- const ZoneList<Literal*>* raw_strings = lit->raw();
+ const ZoneList<const AstRawString*>* cooked_strings = lit->cooked();
+ const ZoneList<const AstRawString*>* raw_strings = lit->raw();
const ZoneList<Expression*>* expressions = lit->expressions();
DCHECK_EQ(cooked_strings->length(), raw_strings->length());
DCHECK_EQ(cooked_strings->length(), expressions->length() + 1);
if (!tag) {
- // Build tree of BinaryOps to simplify code-generation
- Expression* expr = cooked_strings->at(0);
+ Expression* first_string =
+ factory()->NewStringLiteral(cooked_strings->at(0), kNoSourcePosition);
+ if (expressions->length() == 0) return first_string;
+
+ // Build N-ary addition op to simplify code-generation.
+ // TODO(leszeks): Could we just store this expression in the
+ // TemplateLiteralState and build it as we go?
+ NaryOperation* expr = factory()->NewNaryOperation(
+ Token::ADD, first_string, 2 * expressions->length());
+
int i = 0;
while (i < expressions->length()) {
Expression* sub = expressions->at(i++);
- Expression* cooked_str = cooked_strings->at(i);
+ const AstRawString* cooked_str = cooked_strings->at(i);
+ DCHECK_NOT_NULL(cooked_str);
// Let middle be ToString(sub).
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(1, zone());
args->Add(sub, zone());
- Expression* middle = factory()->NewCallRuntime(Runtime::kInlineToString,
- args, sub->position());
+ Expression* sub_to_string = factory()->NewCallRuntime(
+ Runtime::kInlineToString, args, sub->position());
- expr = factory()->NewBinaryOperation(
- Token::ADD, factory()->NewBinaryOperation(
- Token::ADD, expr, middle, expr->position()),
- cooked_str, sub->position());
+ expr->AddSubsequent(sub_to_string, sub->position());
+ expr->AddSubsequent(
+ factory()->NewStringLiteral(cooked_str, kNoSourcePosition),
+ sub->position());
}
return expr;
} else {
// GetTemplateObject
const int32_t hash = ComputeTemplateLiteralHash(lit);
- Expression* template_object = factory()->NewGetTemplateObject(
- const_cast<ZoneList<Literal*>*>(cooked_strings),
- const_cast<ZoneList<Literal*>*>(raw_strings), hash, pos);
+ Expression* template_object =
+ factory()->NewGetTemplateObject(cooked_strings, raw_strings, hash, pos);
// Call TagFn
ZoneList<Expression*>* call_args =
new (zone()) ZoneList<Expression*>(expressions->length() + 1, zone());
call_args->Add(template_object, zone());
call_args->AddAll(*expressions, zone());
- return factory()->NewCall(tag, call_args, pos);
+ return factory()->NewTaggedTemplate(tag, call_args, pos);
}
}
@@ -3483,7 +3592,7 @@ uint32_t HalfAvalance(uint32_t a) {
} // namespace
int32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
- const ZoneList<Literal*>* raw_strings = lit->raw();
+ const ZoneList<const AstRawString*>* raw_strings = lit->raw();
int total = raw_strings->length();
DCHECK_GT(total, 0);
@@ -3495,8 +3604,7 @@ int32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
running_hash, "${}", 3);
}
- const AstRawString* raw_string =
- raw_strings->at(index)->AsLiteral()->raw_value()->AsString();
+ const AstRawString* raw_string = raw_strings->at(index);
if (raw_string->is_one_byte()) {
const char* data = reinterpret_cast<const char*>(raw_string->raw_data());
running_hash = StringHasher::ComputeRunningHashOneByte(
@@ -3700,53 +3808,18 @@ void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
body->Add(block, zone());
}
-class NonPatternRewriter : public AstExpressionRewriter {
- public:
- NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
- : AstExpressionRewriter(stack_limit), parser_(parser) {}
- ~NonPatternRewriter() override {}
-
- private:
- bool RewriteExpression(Expression* expr) override {
- if (expr->IsRewritableExpression()) return true;
- // Rewrite only what could have been a pattern but is not.
- if (expr->IsArrayLiteral()) {
- // Spread rewriting in array literals.
- ArrayLiteral* lit = expr->AsArrayLiteral();
- VisitExpressions(lit->values());
- replacement_ = parser_->RewriteSpreads(lit);
- return false;
- }
- if (expr->IsObjectLiteral()) {
- return true;
- }
- if (expr->IsBinaryOperation() &&
- expr->AsBinaryOperation()->op() == Token::COMMA) {
- return true;
- }
- // Everything else does not need rewriting.
- return false;
- }
-
- void VisitLiteralProperty(LiteralProperty* property) override {
- if (property == nullptr) return;
- // Do not rewrite (computed) key expressions
- AST_REWRITE_PROPERTY(Expression, property, value);
- }
-
- Parser* parser_;
-};
-
void Parser::RewriteNonPattern(bool* ok) {
ValidateExpression(CHECK_OK_VOID);
auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
int begin = classifier()->GetNonPatternBegin();
int end = non_patterns_to_rewrite->length();
if (begin < end) {
- NonPatternRewriter rewriter(stack_limit_, this);
for (int i = begin; i < end; i++) {
- DCHECK(non_patterns_to_rewrite->at(i)->IsRewritableExpression());
- rewriter.Rewrite(non_patterns_to_rewrite->at(i));
+ RewritableExpression* expr = non_patterns_to_rewrite->at(i);
+ // TODO(adamk): Make this more typesafe.
+ DCHECK(expr->expression()->IsArrayLiteral());
+ ArrayLiteral* lit = expr->expression()->AsArrayLiteral();
+ expr->Rewrite(RewriteSpreads(lit));
}
non_patterns_to_rewrite->Rewind(begin);
}
@@ -3759,73 +3832,19 @@ void Parser::RewriteDestructuringAssignments() {
for (int i = assignments.length() - 1; i >= 0; --i) {
// Rewrite list in reverse, so that nested assignment patterns are rewritten
// correctly.
- const DestructuringAssignment& pair = assignments.at(i);
- RewritableExpression* to_rewrite =
- pair.assignment->AsRewritableExpression();
+ RewritableExpression* to_rewrite = assignments[i];
DCHECK_NOT_NULL(to_rewrite);
if (!to_rewrite->is_rewritten()) {
// Since this function is called at the end of parsing the program,
// pair.scope may already have been removed by FinalizeBlockScope in the
// meantime.
- Scope* scope = pair.scope->GetUnremovedScope();
+ Scope* scope = to_rewrite->scope()->GetUnremovedScope();
BlockState block_state(&scope_, scope);
RewriteDestructuringAssignment(to_rewrite);
}
}
}
-Expression* Parser::RewriteExponentiation(Expression* left, Expression* right,
- int pos) {
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(left, zone());
- args->Add(right, zone());
- return factory()->NewCallRuntime(Context::MATH_POW_INDEX, args, pos);
-}
-
-Expression* Parser::RewriteAssignExponentiation(Expression* left,
- Expression* right, int pos) {
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- if (left->IsVariableProxy()) {
- VariableProxy* lhs = left->AsVariableProxy();
-
- Expression* result;
- DCHECK_NOT_NULL(lhs->raw_name());
- result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position());
- args->Add(left, zone());
- args->Add(right, zone());
- Expression* call =
- factory()->NewCallRuntime(Context::MATH_POW_INDEX, args, pos);
- return factory()->NewAssignment(Token::ASSIGN, result, call, pos);
- } else if (left->IsProperty()) {
- Property* prop = left->AsProperty();
- auto temp_obj = NewTemporary(ast_value_factory()->empty_string());
- auto temp_key = NewTemporary(ast_value_factory()->empty_string());
- Expression* assign_obj = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(temp_obj), prop->obj(),
- kNoSourcePosition);
- Expression* assign_key = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(temp_key), prop->key(),
- kNoSourcePosition);
- args->Add(factory()->NewProperty(factory()->NewVariableProxy(temp_obj),
- factory()->NewVariableProxy(temp_key),
- left->position()),
- zone());
- args->Add(right, zone());
- Expression* call =
- factory()->NewCallRuntime(Context::MATH_POW_INDEX, args, pos);
- Expression* target = factory()->NewProperty(
- factory()->NewVariableProxy(temp_obj),
- factory()->NewVariableProxy(temp_key), kNoSourcePosition);
- Expression* assign =
- factory()->NewAssignment(Token::ASSIGN, target, call, pos);
- return factory()->NewBinaryOperation(
- Token::COMMA, assign_obj,
- factory()->NewBinaryOperation(Token::COMMA, assign_key, assign, pos),
- pos);
- }
- UNREACHABLE();
-}
-
Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// Array literals containing spreads are rewritten using do expressions, e.g.
// [1, 2, 3, ...x, 4, ...y, 5]
@@ -3860,8 +3879,7 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// %AppendElement($R, value)
// or, in case of a hole,
// ++($R.length)
- if (!value->IsLiteral() ||
- !value->AsLiteral()->raw_value()->IsTheHole()) {
+ if (!value->IsTheHoleLiteral()) {
ZoneList<Expression*>* append_element_args = NewExpressionList(2);
append_element_args->Add(factory()->NewVariableProxy(result), zone());
append_element_args->Add(value, zone());
@@ -3914,14 +3932,12 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
return factory()->NewDoExpression(do_block, result, lit->position());
}
-void Parser::QueueDestructuringAssignmentForRewriting(Expression* expr) {
- DCHECK(expr->IsRewritableExpression());
- function_state_->AddDestructuringAssignment(
- DestructuringAssignment(expr, scope()));
+void Parser::QueueDestructuringAssignmentForRewriting(
+ RewritableExpression* expr) {
+ function_state_->AddDestructuringAssignment(expr);
}
-void Parser::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
- DCHECK(expr->IsRewritableExpression());
+void Parser::QueueNonPatternForRewriting(RewritableExpression* expr, bool* ok) {
function_state_->AddNonPatternForRewriting(expr, ok);
}
@@ -4171,7 +4187,7 @@ void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
Block* block = factory()->NewBlock(2, true);
Expression* proxy = factory()->NewVariableProxy(completion);
BuildIteratorCloseForCompletion(block->statements(), iter, proxy, type);
- DCHECK(block->statements()->length() == 2);
+ DCHECK_EQ(block->statements()->length(), 2);
maybe_close = IgnoreCompletion(factory()->NewIfStatement(
condition, block, factory()->NewEmptyStatement(nopos), nopos));
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 296762c14d..aa800dafc5 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -17,7 +17,6 @@
#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
-#include "src/pending-compilation-error-handler.h"
#include "src/utils.h"
namespace v8 {
@@ -31,6 +30,7 @@ class ParseInfo;
class ScriptData;
class ParserTarget;
class ParserTargetScope;
+class PendingCompilationErrorHandler;
class PreParsedScopeData;
class FunctionEntry BASE_EMBEDDED {
@@ -85,7 +85,7 @@ class ParseData {
if (pd->IsSane()) return pd;
cached_data->Reject();
delete pd;
- return NULL;
+ return nullptr;
}
void Initialize();
@@ -167,6 +167,7 @@ struct ParserTypes<Parser> {
typedef ObjectLiteral::Property* ObjectLiteralProperty;
typedef ClassLiteral::Property* ClassLiteralProperty;
typedef v8::internal::Suspend* Suspend;
+ typedef v8::internal::RewritableExpression* RewritableExpression;
typedef ZoneList<v8::internal::Expression*>* ExpressionList;
typedef ZoneList<ObjectLiteral::Property*>* ObjectPropertyList;
typedef ZoneList<ClassLiteral::Property*>* ClassPropertyList;
@@ -190,9 +191,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
explicit Parser(ParseInfo* info);
~Parser() {
delete reusable_preparser_;
- reusable_preparser_ = NULL;
+ reusable_preparser_ = nullptr;
delete cached_parse_data_;
- cached_parse_data_ = NULL;
+ cached_parse_data_ = nullptr;
}
static bool IsPreParser() { return false; }
@@ -210,8 +211,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void DeserializeScopeChain(ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info);
- // Handle errors detected during parsing
- void ReportErrors(Isolate* isolate, Handle<Script> script);
// Move statistics to Isolate
void UpdateStatistics(Isolate* isolate, Handle<Script> script);
void HandleSourceURLComments(Isolate* isolate, Handle<Script> script);
@@ -257,7 +256,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void PrepareGeneratorVariables();
- // Returns NULL if parsing failed.
+ // Returns nullptr if parsing failed.
FunctionLiteral* ParseProgram(Isolate* isolate, ParseInfo* info);
FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info,
@@ -283,22 +282,20 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
PreParser* reusable_preparser() {
- if (reusable_preparser_ == NULL) {
+ if (reusable_preparser_ == nullptr) {
reusable_preparser_ =
new PreParser(zone(), &scanner_, stack_limit_, ast_value_factory(),
- &pending_error_handler_, runtime_call_stats_,
- parsing_on_main_thread_);
+ pending_error_handler(), runtime_call_stats_, logger_,
+ -1, parsing_module_, parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_sent);
- SET_ALLOW(harmony_class_fields);
- SET_ALLOW(harmony_object_rest_spread);
+ SET_ALLOW(harmony_public_fields);
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
SET_ALLOW(harmony_async_iteration);
- SET_ALLOW(harmony_template_escapes);
- SET_ALLOW(harmony_restrictive_generators);
+ SET_ALLOW(harmony_bigint);
#undef SET_ALLOW
}
return reusable_preparser_;
@@ -348,15 +345,18 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void ParseAndRewriteAsyncGeneratorFunctionBody(int pos, FunctionKind kind,
ZoneList<Statement*>* body,
bool* ok);
- void CreateFunctionNameAssignment(const AstRawString* function_name, int pos,
- FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope,
- ZoneList<Statement*>* result, int index);
+ void DeclareFunctionNameVar(const AstRawString* function_name,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope);
Statement* DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
int pos, bool is_sloppy_block_function,
ZoneList<const AstRawString*>* names, bool* ok);
+ Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name,
+ bool* ok);
+ FunctionLiteral* CreateInitializerFunction(
+ DeclarationScope* scope, ZoneList<ClassLiteral::Property*>* fields);
V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
Expression* value,
ZoneList<const AstRawString*>* names,
@@ -368,6 +368,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ClassLiteralProperty* property,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
+ bool is_computed_name,
ClassInfo* class_info, bool* ok);
V8_INLINE Expression* RewriteClassLiteral(Scope* block_scope,
const AstRawString* name,
@@ -426,9 +427,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode, bool* ok);
- // Get odd-ball literals.
- Literal* GetLiteralUndefined(int position);
-
// Check if the scope has conflicting var/let declarations from different
// scopes. This covers for example
//
@@ -499,13 +497,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
TemplateLiteral(Zone* zone, int pos)
: cooked_(8, zone), raw_(8, zone), expressions_(8, zone), pos_(pos) {}
- const ZoneList<Literal*>* cooked() const { return &cooked_; }
- const ZoneList<Literal*>* raw() const { return &raw_; }
+ const ZoneList<const AstRawString*>* cooked() const { return &cooked_; }
+ const ZoneList<const AstRawString*>* raw() const { return &raw_; }
const ZoneList<Expression*>* expressions() const { return &expressions_; }
int position() const { return pos_; }
- void AddTemplateSpan(Literal* cooked, Literal* raw, int end, Zone* zone) {
- DCHECK_NOT_NULL(cooked);
+ void AddTemplateSpan(const AstRawString* cooked, const AstRawString* raw,
+ int end, Zone* zone) {
DCHECK_NOT_NULL(raw);
cooked_.Add(cooked, zone);
raw_.Add(raw, zone);
@@ -517,8 +515,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
private:
- ZoneList<Literal*> cooked_;
- ZoneList<Literal*> raw_;
+ ZoneList<const AstRawString*> cooked_;
+ ZoneList<const AstRawString*> raw_;
ZoneList<Expression*> expressions_;
int pos_;
};
@@ -529,9 +527,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// "should_cook" means that the span can be "cooked": in tagged template
// literals, both the raw and "cooked" representations are available to user
// code ("cooked" meaning that escape sequences are converted to their
- // interpreted values). With the --harmony-template-escapes flag, invalid
- // escape sequences cause the cooked span to be represented by undefined,
- // instead of being a syntax error.
+ // interpreted values). Invalid escape sequences cause the cooked span
+ // to be represented by undefined, instead of being a syntax error.
// "tail" indicates that this span is the last in the literal.
void AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
bool tail);
@@ -554,20 +551,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Rewrite all DestructuringAssignments in the current FunctionState.
V8_INLINE void RewriteDestructuringAssignments();
- V8_INLINE Expression* RewriteExponentiation(Expression* left,
- Expression* right, int pos);
- V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
- Expression* right, int pos);
-
- friend class NonPatternRewriter;
- V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
+ Expression* RewriteSpreads(ArrayLiteral* lit);
// Rewrite expressions that are not used as patterns
V8_INLINE void RewriteNonPattern(bool* ok);
V8_INLINE void QueueDestructuringAssignmentForRewriting(
- Expression* assignment);
- V8_INLINE void QueueNonPatternForRewriting(Expression* expr, bool* ok);
+ RewritableExpression* assignment);
+ V8_INLINE void QueueNonPatternForRewriting(RewritableExpression* expr,
+ bool* ok);
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr);
@@ -623,9 +615,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Returns true if the expression is of type "this.foo".
V8_INLINE static bool IsThisProperty(Expression* expression) {
- DCHECK(expression != NULL);
+ DCHECK_NOT_NULL(expression);
Property* property = expression->AsProperty();
- return property != NULL && property->obj()->IsVariableProxy() &&
+ return property != nullptr && property->obj()->IsVariableProxy() &&
property->obj()->AsVariableProxy()->is_this();
}
@@ -689,8 +681,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ExpressionStatement* e_stat = statement->AsExpressionStatement();
if (e_stat == nullptr) return false;
Literal* literal = e_stat->expression()->AsLiteral();
- if (literal == nullptr || !literal->raw_value()->IsString()) return false;
- return arg == nullptr || literal->raw_value()->AsString() == arg;
+ if (literal == nullptr || !literal->IsString()) return false;
+ return arg == nullptr || literal->AsRawString() == arg;
}
V8_INLINE void GetDefaultStrings(
@@ -740,7 +732,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// literal so it can be added as a constant function property.
V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
Expression* left, Expression* right) {
- DCHECK(left != NULL);
+ DCHECK_NOT_NULL(left);
if (left->IsProperty() && right->IsFunctionLiteral()) {
right->AsFunctionLiteral()->set_pretenure();
}
@@ -761,6 +753,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool ShortcutNumericLiteralBinaryExpression(Expression** x, Expression* y,
Token::Value op, int pos);
+ // Returns true if we have a binary operation between a binary/n-ary
+ // expression (with the same operation) and a value, which can be collapsed
+ // into a single n-ary expression. In that case, *x will be changed to an
+ // n-ary expression.
+ bool CollapseNaryExpression(Expression** x, Expression* y, Token::Value op,
+ int pos, const SourceRange& range);
+
// Rewrites the following types of unary expressions:
// not <literal> -> true / false
// + <numeric literal> -> <numeric literal>
@@ -798,10 +797,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
// Reporting errors.
- V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError) {
+ void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ const char* arg = nullptr,
+ ParseErrorType error_type = kSyntaxError) {
if (stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at at
@@ -809,15 +808,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// and we want to report the stack overflow later.
return;
}
- pending_error_handler_.ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ pending_error_handler()->ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
}
- V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
+ void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ const AstRawString* arg,
+ ParseErrorType error_type = kSyntaxError) {
if (stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at at
@@ -825,9 +824,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// and we want to report the stack overflow later.
return;
}
- pending_error_handler_.ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ pending_error_handler()->ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
}
// "null" return type creators.
@@ -845,20 +844,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return subject == nullptr;
}
- // Non-NULL empty string.
+ // Non-null empty string.
V8_INLINE const AstRawString* EmptyIdentifierString() const {
return ast_value_factory()->empty_string();
}
- // Odd-ball literal creators.
- V8_INLINE Literal* GetLiteralTheHole(int position) {
- return factory()->NewTheHoleLiteral(kNoSourcePosition);
- }
-
// Producing data during the recursive descent.
V8_INLINE const AstRawString* GetSymbol() const {
const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
- DCHECK(result != NULL);
+ DCHECK_NOT_NULL(result);
return result;
}
@@ -882,6 +876,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* NewSuperCallReference(int pos);
Expression* NewTargetExpression(int pos);
Expression* FunctionSentExpression(int pos);
+ Expression* ImportMetaExpression(int pos);
Literal* ExpressionFromLiteral(Token::Value token, int pos);
@@ -995,7 +990,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return function_state_->GetReportedErrorList();
}
- V8_INLINE ZoneList<Expression*>* GetNonPatternList() const {
+ V8_INLINE ZoneList<RewritableExpression*>* GetNonPatternList() const {
return function_state_->non_patterns_to_rewrite();
}
@@ -1009,6 +1004,32 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return parameters_end_pos_ != kNoSourcePosition;
}
+ V8_INLINE void ConvertBinaryToNaryOperationSourceRange(
+ BinaryOperation* binary_op, NaryOperation* nary_op) {
+ if (source_range_map_ == nullptr) return;
+ DCHECK_NULL(source_range_map_->Find(nary_op));
+
+ BinaryOperationSourceRanges* ranges =
+ static_cast<BinaryOperationSourceRanges*>(
+ source_range_map_->Find(binary_op));
+ if (ranges == nullptr) return;
+
+ SourceRange range = ranges->GetRange(SourceRangeKind::kRight);
+ source_range_map_->Insert(
+ nary_op, new (zone()) NaryOperationSourceRanges(zone(), range));
+ }
+
+ V8_INLINE void AppendNaryOperationSourceRange(NaryOperation* node,
+ const SourceRange& range) {
+ if (source_range_map_ == nullptr) return;
+ NaryOperationSourceRanges* ranges =
+ static_cast<NaryOperationSourceRanges*>(source_range_map_->Find(node));
+ if (ranges == nullptr) return;
+
+ ranges->AddRange(range);
+ DCHECK_EQ(node->subsequent_length(), ranges->RangeCount());
+ }
+
V8_INLINE void RecordBlockSourceRange(Block* node,
int32_t continuation_position) {
if (source_range_map_ == nullptr) return;
@@ -1032,6 +1053,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
new (zone()) ConditionalSourceRanges(then_range, else_range));
}
+ V8_INLINE void RecordBinaryOperationSourceRange(
+ Expression* node, const SourceRange& right_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(node->AsBinaryOperation(),
+ new (zone())
+ BinaryOperationSourceRanges(right_range));
+ }
+
V8_INLINE void RecordJumpStatementSourceRange(Statement* node,
int32_t continuation_position) {
if (source_range_map_ == nullptr) return;
@@ -1113,8 +1142,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ScriptCompiler::CompileOptions compile_options_;
ParseData* cached_parse_data_;
- PendingCompilationErrorHandler pending_error_handler_;
-
// Other information which will be stored in Parser and moved to Isolate after
// parsing.
int use_counts_[v8::Isolate::kUseCounterFeatureCount];
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index 7a280b00b8..bc3c6dec7b 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -40,7 +40,8 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
result = parser.ParseProgram(isolate, info);
info->set_literal(result);
if (result == nullptr) {
- parser.ReportErrors(isolate, info->script());
+ info->pending_error_handler()->ReportErrors(isolate, info->script(),
+ info->ast_value_factory());
} else {
result->scope()->AttachOuterScopeInfo(info, isolate);
info->set_language_mode(info->literal()->language_mode());
@@ -74,7 +75,8 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
result = parser.ParseFunction(isolate, info, shared_info);
info->set_literal(result);
if (result == nullptr) {
- parser.ReportErrors(isolate, info->script());
+ info->pending_error_handler()->ReportErrors(isolate, info->script(),
+ info->ast_value_factory());
} else {
result->scope()->AttachOuterScopeInfo(info, isolate);
}
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 2f5d248aed..faecb5bb0c 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -131,7 +131,7 @@ void Parser::RewriteDestructuringAssignment(RewritableExpression* to_rewrite) {
Expression* Parser::RewriteDestructuringAssignment(Assignment* assignment) {
DCHECK_NOT_NULL(assignment);
DCHECK_EQ(Token::ASSIGN, assignment->op());
- auto to_rewrite = factory()->NewRewritableExpression(assignment);
+ auto to_rewrite = factory()->NewRewritableExpression(assignment, scope());
RewriteDestructuringAssignment(to_rewrite);
return to_rewrite->expression();
}
@@ -220,7 +220,7 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
if (!*ok_) return;
DCHECK_NOT_NULL(var);
DCHECK(proxy->is_resolved());
- DCHECK(initializer_position_ != kNoSourcePosition);
+ DCHECK_NE(initializer_position_, kNoSourcePosition);
var->set_initializer_position(initializer_position_);
Scope* declaration_scope =
@@ -419,7 +419,7 @@ void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
DCHECK(key->IsPropertyName() || key->IsNumberLiteral());
}
- DCHECK(rest_runtime_callargs != nullptr);
+ DCHECK_NOT_NULL(rest_runtime_callargs);
rest_runtime_callargs->Add(excluded_property, zone());
}
@@ -539,7 +539,7 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
}
block_->statements()->Add(if_not_done, zone());
- if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
+ if (!value->IsTheHoleLiteral()) {
{
// completion = kAbruptCompletion;
Expression* proxy = factory()->NewVariableProxy(completion);
@@ -726,6 +726,7 @@ void PatternRewriter::VisitProperty(v8::internal::Property* node) {
void PatternRewriter::Visit##Node(v8::internal::Node*) { UNREACHABLE(); }
NOT_A_PATTERN(BinaryOperation)
+NOT_A_PATTERN(NaryOperation)
NOT_A_PATTERN(Block)
NOT_A_PATTERN(BreakStatement)
NOT_A_PATTERN(Call)
@@ -772,6 +773,7 @@ NOT_A_PATTERN(WithStatement)
NOT_A_PATTERN(Yield)
NOT_A_PATTERN(YieldStar)
NOT_A_PATTERN(Await)
+NOT_A_PATTERN(InitializeClassFieldsStatement)
#undef NOT_A_PATTERN
} // namespace internal
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 8d2ce2d1a5..7191639cf8 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -20,27 +20,27 @@ class ScopeCallsSloppyEvalField : public BitField<bool, 0, 1> {};
class InnerScopeCallsEvalField
: public BitField<bool, ScopeCallsSloppyEvalField::kNext, 1> {};
-class VariableIsUsedField : public BitField16<bool, 0, 1> {};
-class VariableMaybeAssignedField
- : public BitField16<bool, VariableIsUsedField::kNext, 1> {};
+class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
class VariableContextAllocatedField
- : public BitField16<bool, VariableMaybeAssignedField::kNext, 1> {};
+ : public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
const int kMagicValue = 0xc0de0de;
#ifdef DEBUG
const size_t kUint32Size = 5;
const size_t kUint8Size = 2;
+const size_t kQuarterMarker = 0;
#else
const size_t kUint32Size = 4;
const size_t kUint8Size = 1;
#endif
+const int kPlaceholderSize = kUint32Size;
const int kSkippableFunctionDataSize = 4 * kUint32Size + 1 * kUint8Size;
-STATIC_ASSERT(LANGUAGE_END == 2);
-class LanguageField : public BitField<int, 0, 1> {};
-class UsesSuperField : public BitField<bool, LanguageField::kNext, 1> {};
+class LanguageField : public BitField8<LanguageMode, 0, 1> {};
+class UsesSuperField : public BitField8<bool, LanguageField::kNext, 1> {};
+STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
} // namespace
@@ -98,6 +98,7 @@ void ProducedPreParsedScopeData::ByteData::WriteUint32(uint32_t data) {
for (int i = 0; i < 4; ++i) {
backing_store_.push_back(*d++);
}
+ free_quarters_in_last_byte_ = 0;
}
void ProducedPreParsedScopeData::ByteData::OverwriteFirstUint32(uint32_t data) {
@@ -120,6 +121,25 @@ void ProducedPreParsedScopeData::ByteData::WriteUint8(uint8_t data) {
backing_store_.push_back(kUint8Size);
#endif
backing_store_.push_back(data);
+ free_quarters_in_last_byte_ = 0;
+}
+
+void ProducedPreParsedScopeData::ByteData::WriteQuarter(uint8_t data) {
+ DCHECK_LE(data, 3);
+ if (free_quarters_in_last_byte_ == 0) {
+#ifdef DEBUG
+ // Save a marker in debug mode.
+ backing_store_.push_back(kQuarterMarker);
+#endif
+ backing_store_.push_back(0);
+ free_quarters_in_last_byte_ = 3;
+ } else {
+ --free_quarters_in_last_byte_;
+ }
+
+ uint8_t shift_amount = free_quarters_in_last_byte_ * 2;
+ DCHECK_EQ(backing_store_.back() & (3 << shift_amount), 0);
+ backing_store_.back() |= (data << shift_amount);
}
Handle<PodArray<uint8_t>> ProducedPreParsedScopeData::ByteData::Serialize(
@@ -215,7 +235,7 @@ void ProducedPreParsedScopeData::AddSkippableFunction(
uint8_t language_and_super = LanguageField::encode(language_mode) |
UsesSuperField::encode(uses_super_property);
- byte_data_->WriteUint8(language_and_super);
+ byte_data_->WriteQuarter(language_and_super);
}
void ProducedPreParsedScopeData::SaveScopeAllocationData(
@@ -224,9 +244,9 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
DCHECK(previously_produced_preparsed_scope_data_.is_null());
// The data contains a uint32 (reserved space for scope_data_start) and
// function data items, kSkippableFunctionDataSize each.
- DCHECK_GE(byte_data_->size(), kUint32Size);
+ DCHECK_GE(byte_data_->size(), kPlaceholderSize);
DCHECK_LE(byte_data_->size(), std::numeric_limits<uint32_t>::max());
- DCHECK_EQ(byte_data_->size() % kSkippableFunctionDataSize, kUint32Size);
+ DCHECK_EQ(byte_data_->size() % kSkippableFunctionDataSize, kPlaceholderSize);
if (bailed_out_) {
return;
@@ -235,7 +255,7 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
uint32_t scope_data_start = static_cast<uint32_t>(byte_data_->size());
// If there are no skippable inner functions, we don't need to save anything.
- if (scope_data_start == kUint32Size) {
+ if (scope_data_start == kPlaceholderSize) {
return;
}
@@ -250,6 +270,10 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
SaveDataForScope(scope);
}
+bool ProducedPreParsedScopeData::ContainsInnerFunctions() const {
+ return byte_data_->size() > kPlaceholderSize;
+}
+
MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
Isolate* isolate) {
if (!previously_produced_preparsed_scope_data_.is_null()) {
@@ -263,7 +287,7 @@ MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
DCHECK(!ThisOrParentBailedOut());
- if (byte_data_->size() <= kUint32Size) {
+ if (byte_data_->size() <= kPlaceholderSize) {
// The data contains only the placeholder.
return MaybeHandle<PreParsedScopeData>();
}
@@ -377,14 +401,11 @@ void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
byte_data_->WriteUint8(name->raw_data()[i]);
}
#endif
- // FIXME(marja): Only 3 bits needed, not a full byte.
- byte variable_data = VariableIsUsedField::encode(var->is_used()) |
- VariableMaybeAssignedField::encode(
+ byte variable_data = VariableMaybeAssignedField::encode(
var->maybe_assigned() == kMaybeAssigned) |
VariableContextAllocatedField::encode(
var->has_forced_context_allocation());
-
- byte_data_->WriteUint8(variable_data);
+ byte_data_->WriteQuarter(variable_data);
}
void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
@@ -397,8 +418,8 @@ void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
if (ScopeIsSkippableFunctionScope(inner)) {
// Don't save data about function scopes, since they'll have their own
// ProducedPreParsedScopeData where their data is saved.
- DCHECK(inner->AsDeclarationScope()->produced_preparsed_scope_data() !=
- nullptr);
+ DCHECK_NOT_NULL(
+ inner->AsDeclarationScope()->produced_preparsed_scope_data());
continue;
}
scopes.push_back(inner);
@@ -424,6 +445,7 @@ int32_t ConsumedPreParsedScopeData::ByteData::ReadUint32() {
for (int i = 0; i < 4; ++i) {
*p++ = data_->get(index_++);
}
+ stored_quarters_ = 0;
return result;
}
@@ -434,9 +456,29 @@ uint8_t ConsumedPreParsedScopeData::ByteData::ReadUint8() {
// Check that there indeed is a byte following.
DCHECK_EQ(data_->get(index_++), kUint8Size);
#endif
+ stored_quarters_ = 0;
return data_->get(index_++);
}
+uint8_t ConsumedPreParsedScopeData::ByteData::ReadQuarter() {
+ DCHECK_NOT_NULL(data_);
+ if (stored_quarters_ == 0) {
+ DCHECK_GE(RemainingBytes(), kUint8Size);
+#ifdef DEBUG
+ // Check that there indeed are quarters following.
+ DCHECK_EQ(data_->get(index_++), kQuarterMarker);
+#endif
+ stored_byte_ = data_->get(index_++);
+ stored_quarters_ = 4;
+ }
+ // Read the first 2 bits from stored_byte_.
+ uint8_t result = (stored_byte_ >> 6) & 3;
+ DCHECK_LE(result, 3);
+ --stored_quarters_;
+ stored_byte_ <<= 2;
+ return result;
+}
+
ConsumedPreParsedScopeData::ConsumedPreParsedScopeData()
: scope_data_(new ByteData()), child_index_(0) {}
@@ -452,7 +494,7 @@ void ConsumedPreParsedScopeData::SetData(Handle<PreParsedScopeData> data) {
DCHECK_EQ(scope_data_->ReadUint32(), kMagicValue);
#endif
// The first data item is scope_data_start. Skip over it.
- scope_data_->SetPosition(kUint32Size);
+ scope_data_->SetPosition(kPlaceholderSize);
}
ProducedPreParsedScopeData*
@@ -472,7 +514,7 @@ ConsumedPreParsedScopeData::GetDataForSkippableFunction(
*num_parameters = scope_data_->ReadUint32();
*num_inner_functions = scope_data_->ReadUint32();
- uint8_t language_and_super = scope_data_->ReadUint8();
+ uint8_t language_and_super = scope_data_->ReadQuarter();
*language_mode = LanguageMode(LanguageField::decode(language_and_super));
*uses_super_property = UsesSuperField::decode(language_and_super);
@@ -513,13 +555,6 @@ void ConsumedPreParsedScopeData::RestoreScopeAllocationData(
DCHECK_EQ(scope_data_->RemainingBytes(), 0);
}
-void ConsumedPreParsedScopeData::SkipFunctionDataForTesting() {
- ByteData::ReadingScope reading_scope(this);
- scope_data_->SetPosition(0);
- uint32_t scope_data_start = scope_data_->ReadUint32();
- scope_data_->SetPosition(scope_data_start);
-}
-
void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
@@ -576,15 +611,12 @@ void ConsumedPreParsedScopeData::RestoreDataForVariable(Variable* var) {
DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
}
#endif
- CHECK_GE(scope_data_->RemainingBytes(), kUint8Size);
- uint8_t variable_data = scope_data_->ReadUint8();
- if (VariableIsUsedField::decode(variable_data)) {
- var->set_is_used();
- }
+ uint8_t variable_data = scope_data_->ReadQuarter();
if (VariableMaybeAssignedField::decode(variable_data)) {
var->set_maybe_assigned();
}
if (VariableContextAllocatedField::decode(variable_data)) {
+ var->set_is_used();
var->ForceContextAllocation();
}
}
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index 290bfba2fd..b621f069d2 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -69,10 +69,12 @@ class ProducedPreParsedScopeData : public ZoneObject {
public:
class ByteData : public ZoneObject {
public:
- explicit ByteData(Zone* zone) : backing_store_(zone) {}
+ explicit ByteData(Zone* zone)
+ : backing_store_(zone), free_quarters_in_last_byte_(0) {}
void WriteUint32(uint32_t data);
void WriteUint8(uint8_t data);
+ void WriteQuarter(uint8_t data);
// For overwriting previously written data at position 0.
void OverwriteFirstUint32(uint32_t data);
@@ -83,6 +85,7 @@ class ProducedPreParsedScopeData : public ZoneObject {
private:
ZoneChunkList<uint8_t> backing_store_;
+ uint8_t free_quarters_in_last_byte_;
};
// Create a ProducedPreParsedScopeData object which will collect data as we
@@ -142,6 +145,8 @@ class ProducedPreParsedScopeData : public ZoneObject {
}
#endif // DEBUG
+ bool ContainsInnerFunctions() const;
+
// If there is data (if the Scope contains skippable inner functions), move
// the data into the heap and return a Handle to it; otherwise return a null
// MaybeHandle.
@@ -179,7 +184,8 @@ class ConsumedPreParsedScopeData {
public:
class ByteData {
public:
- ByteData() : data_(nullptr), index_(0) {}
+ ByteData()
+ : data_(nullptr), index_(0), stored_quarters_(0), stored_byte_(0) {}
// Reading from the ByteData is only allowed when a ReadingScope is on the
// stack. This ensures that we have a DisallowHeapAllocation in place
@@ -202,6 +208,7 @@ class ConsumedPreParsedScopeData {
int32_t ReadUint32();
uint8_t ReadUint8();
+ uint8_t ReadQuarter();
size_t RemainingBytes() const {
DCHECK_NOT_NULL(data_);
@@ -211,6 +218,8 @@ class ConsumedPreParsedScopeData {
// private:
PodArray<uint8_t>* data_;
int index_;
+ uint8_t stored_quarters_;
+ uint8_t stored_byte_;
};
ConsumedPreParsedScopeData();
@@ -229,11 +238,6 @@ class ConsumedPreParsedScopeData {
// subscopes') variables.
void RestoreScopeAllocationData(DeclarationScope* scope);
- // Skips the data about skippable functions, moves straight to the scope
- // allocation data. Useful for tests which don't want to verify only the scope
- // allocation data.
- void SkipFunctionDataForTesting();
-
private:
void RestoreData(Scope* scope);
void RestoreDataForVariable(Variable* var);
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index c31fd4af8e..16879e518c 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -87,7 +87,7 @@ PreParserIdentifier PreParser::GetSymbol() const {
return symbol;
}
-PreParser::PreParseResult PreParser::PreParseProgram(bool is_module) {
+PreParser::PreParseResult PreParser::PreParseProgram() {
DCHECK_NULL(scope_);
DeclarationScope* scope = NewScriptScope();
#ifdef DEBUG
@@ -97,13 +97,12 @@ PreParser::PreParseResult PreParser::PreParseProgram(bool is_module) {
// ModuleDeclarationInstantiation for Source Text Module Records creates a
// new Module Environment Record whose outer lexical environment record is
// the global scope.
- if (is_module) scope = NewModuleScope(scope);
+ if (parsing_module_) scope = NewModuleScope(scope);
FunctionState top_scope(&function_state_, &scope_, scope);
original_scope_ = scope_;
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
- parsing_module_ = is_module;
PreParserStatementList body;
ParseStatementList(body, Token::EOS, &ok);
original_scope_ = nullptr;
@@ -119,14 +118,14 @@ PreParser::PreParseResult PreParser::PreParseProgram(bool is_module) {
PreParser::PreParseResult PreParser::PreParseFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, bool parsing_module,
- bool is_inner_function, bool may_abort, int* use_counts,
- ProducedPreParsedScopeData** produced_preparsed_scope_data) {
+ DeclarationScope* function_scope, bool is_inner_function, bool may_abort,
+ int* use_counts, ProducedPreParsedScopeData** produced_preparsed_scope_data,
+ int script_id) {
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
- parsing_module_ = parsing_module;
use_counts_ = use_counts;
DCHECK(!track_unresolved_variables_);
track_unresolved_variables_ = is_inner_function;
+ set_script_id(script_id);
#ifdef DEBUG
function_scope->set_is_being_lazily_parsed(true);
#endif
@@ -208,7 +207,7 @@ PreParser::PreParseResult PreParser::PreParseFunction(
if (!IsArrowFunction(kind) && track_unresolved_variables_ &&
result == kLazyParsingComplete) {
- CreateFunctionNameAssignment(function_name, function_type, function_scope);
+ DeclareFunctionNameVar(function_name, function_type, function_scope);
// Declare arguments after parsing the function since lexical 'arguments'
// masks the arguments object. Declare arguments before declaring the
@@ -224,7 +223,7 @@ PreParser::PreParseResult PreParser::PreParseFunction(
} else if (stack_overflow()) {
return kPreParseStackOverflow;
} else if (!*ok) {
- DCHECK(pending_error_handler_->has_pending_error());
+ DCHECK(pending_error_handler()->has_pending_error());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
@@ -279,6 +278,9 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
runtime_call_stats_,
counters[track_unresolved_variables_][parsing_on_main_thread_]);
+ base::ElapsedTimer timer;
+ if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
+
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
@@ -344,11 +346,23 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
produced_preparsed_scope_data_scope->MarkFunctionAsSkippable(
end_position, GetLastFunctionLiteralId() - func_id);
}
- if (FLAG_trace_preparse) {
- PrintF(" [%s]: %i-%i\n",
- track_unresolved_variables_ ? "Preparse resolution"
- : "Preparse no-resolution",
- function_scope->start_position(), function_scope->end_position());
+ if (V8_UNLIKELY(FLAG_log_function_events)) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ const char* event_name = track_unresolved_variables_
+ ? "preparse-resolution"
+ : "preparse-no-resolution";
+ // We might not always get a function name here. However, it can be easily
+ // reconstructed from the script id and the byte range in the log processor.
+ const char* name = "";
+ size_t name_byte_length = 0;
+ const AstRawString* string = function_name.string_;
+ if (string != nullptr) {
+ name = reinterpret_cast<const char*>(string->raw_data());
+ name_byte_length = string->byte_length();
+ }
+ logger_->FunctionEvent(
+ event_name, nullptr, script_id(), ms, function_scope->start_position(),
+ function_scope->end_position(), name, name_byte_length);
}
return Expression::Default();
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 275c5e9e0b..8c1d183fd6 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -192,6 +192,17 @@ class PreParserExpression {
ExpressionTypeField::encode(kCallEvalExpression));
}
+ static PreParserExpression CallTaggedTemplate() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kCallTaggedTemplateExpression));
+ }
+
+ bool is_tagged_template() const {
+ DCHECK(IsCall());
+ return ExpressionTypeField::decode(code_) == kCallTaggedTemplateExpression;
+ }
+
static PreParserExpression SuperCallReference() {
return PreParserExpression(
TypeField::encode(kExpression) |
@@ -255,7 +266,13 @@ class PreParserExpression {
bool IsCall() const {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kCallExpression ||
- ExpressionTypeField::decode(code_) == kCallEvalExpression);
+ ExpressionTypeField::decode(code_) == kCallEvalExpression ||
+ ExpressionTypeField::decode(code_) ==
+ kCallTaggedTemplateExpression);
+ }
+ PreParserExpression* AsCall() {
+ if (IsCall()) return this;
+ return nullptr;
}
bool IsSuperCallReference() const {
@@ -286,6 +303,7 @@ class PreParserExpression {
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
+ void set_scope(Scope* scope) {}
private:
enum Type {
@@ -304,6 +322,7 @@ class PreParserExpression {
kPropertyExpression,
kCallExpression,
kCallEvalExpression,
+ kCallTaggedTemplateExpression,
kSuperCallReference,
kAssignment
};
@@ -382,7 +401,7 @@ inline void PreParserList<PreParserExpression>::Add(
const PreParserExpression& expression, Zone* zone) {
if (expression.variables_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
- DCHECK(zone != nullptr);
+ DCHECK_NOT_NULL(zone);
if (variables_ == nullptr) {
variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
}
@@ -524,6 +543,9 @@ class PreParserFactory {
PreParserExpression NewUndefinedLiteral(int pos) {
return PreParserExpression::Default();
}
+ PreParserExpression NewTheHoleLiteral() {
+ return PreParserExpression::Default();
+ }
PreParserExpression NewRegExpLiteral(const PreParserIdentifier& js_pattern,
int js_flags, int pos) {
return PreParserExpression::Default();
@@ -583,7 +605,7 @@ class PreParserFactory {
return PreParserExpression::Default();
}
PreParserExpression NewRewritableExpression(
- const PreParserExpression& expression) {
+ const PreParserExpression& expression, Scope* scope) {
return expression;
}
PreParserExpression NewAssignment(Token::Value op,
@@ -624,6 +646,11 @@ class PreParserFactory {
}
return PreParserExpression::Call();
}
+ PreParserExpression NewTaggedTemplate(
+ PreParserExpression expression, const PreParserExpressionList& arguments,
+ int pos) {
+ return PreParserExpression::CallTaggedTemplate();
+ }
PreParserExpression NewCallNew(const PreParserExpression& expression,
const PreParserExpressionList& arguments,
int pos) {
@@ -805,6 +832,7 @@ struct ParserTypes<PreParser> {
typedef PreParserExpression ObjectLiteralProperty;
typedef PreParserExpression ClassLiteralProperty;
typedef PreParserExpression Suspend;
+ typedef PreParserExpression RewritableExpression;
typedef PreParserExpressionList ExpressionList;
typedef PreParserExpressionList ObjectPropertyList;
typedef PreParserExpressionList ClassPropertyList;
@@ -854,14 +882,15 @@ class PreParser : public ParserBase<PreParser> {
PreParser(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
AstValueFactory* ast_value_factory,
PendingCompilationErrorHandler* pending_error_handler,
- RuntimeCallStats* runtime_call_stats,
+ RuntimeCallStats* runtime_call_stats, Logger* logger,
+ int script_id = -1, bool parsing_module = false,
bool parsing_on_main_thread = true)
: ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
- ast_value_factory, runtime_call_stats,
- parsing_on_main_thread),
+ ast_value_factory, pending_error_handler,
+ runtime_call_stats, logger, script_id,
+ parsing_module, parsing_on_main_thread),
use_counts_(nullptr),
track_unresolved_variables_(false),
- pending_error_handler_(pending_error_handler),
produced_preparsed_scope_data_(nullptr) {}
static bool IsPreParser() { return true; }
@@ -872,7 +901,7 @@ class PreParser : public ParserBase<PreParser> {
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- PreParseResult PreParseProgram(bool is_module = false);
+ PreParseResult PreParseProgram();
// Parses a single function literal, from the opening parentheses before
// parameters to the closing brace after the body.
@@ -885,9 +914,10 @@ class PreParser : public ParserBase<PreParser> {
PreParseResult PreParseFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, bool parsing_module,
- bool track_unresolved_variables, bool may_abort, int* use_counts,
- ProducedPreParsedScopeData** produced_preparser_scope_data);
+ DeclarationScope* function_scope, bool track_unresolved_variables,
+ bool may_abort, int* use_counts,
+ ProducedPreParsedScopeData** produced_preparser_scope_data,
+ int script_id);
ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
return produced_preparsed_scope_data_;
@@ -914,6 +944,10 @@ class PreParser : public ParserBase<PreParser> {
bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
bool parse_lazily() const { return false; }
+ PendingCompilationErrorHandler* pending_error_handler() {
+ return pending_error_handler_;
+ }
+
V8_INLINE LazyParsingResult
SkipFunction(const AstRawString* name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
@@ -922,11 +956,12 @@ class PreParser : public ParserBase<PreParser> {
bool is_inner_function, bool may_abort, bool* ok) {
UNREACHABLE();
}
- Expression ParseFunctionLiteral(
- Identifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_pos, FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok);
+ Expression ParseFunctionLiteral(Identifier name,
+ Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity,
+ FunctionKind kind, int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
+ LanguageMode language_mode, bool* ok);
LazyParsingResult ParseStatementListAndLogFunction(
PreParserFormalParameters* formals, bool maybe_abort, bool* ok);
@@ -960,17 +995,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void RewriteDestructuringAssignments() {}
- V8_INLINE PreParserExpression
- RewriteExponentiation(const PreParserExpression& left,
- const PreParserExpression& right, int pos) {
- return left;
- }
- V8_INLINE PreParserExpression
- RewriteAssignExponentiation(const PreParserExpression& left,
- const PreParserExpression& right, int pos) {
- return left;
- }
-
V8_INLINE void PrepareGeneratorVariables() {}
V8_INLINE void RewriteAsyncFunctionBody(
PreParserStatementList body, PreParserStatement block,
@@ -1038,27 +1062,24 @@ class PreParser : public ParserBase<PreParser> {
int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
ParseStatementList(body, Token::RBRACE, ok);
}
- V8_INLINE void CreateFunctionNameAssignment(
+ V8_INLINE void DeclareFunctionNameVar(
const AstRawString* function_name,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope) {
if (track_unresolved_variables_ &&
- function_type == FunctionLiteral::kNamedExpression) {
- if (function_scope->LookupLocal(function_name) == nullptr) {
- DCHECK_EQ(function_scope, scope());
- Variable* fvar = function_scope->DeclareFunctionVar(function_name);
- fvar->set_is_used();
- }
+ function_type == FunctionLiteral::kNamedExpression &&
+ function_scope->LookupLocal(function_name) == nullptr) {
+ DCHECK_EQ(function_scope, scope());
+ function_scope->DeclareFunctionVar(function_name);
}
}
- V8_INLINE void CreateFunctionNameAssignment(
- const PreParserIdentifier& function_name, int pos,
+ V8_INLINE void DeclareFunctionNameVar(
+ const PreParserIdentifier& function_name,
FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, PreParserStatementList result,
- int index) {
- CreateFunctionNameAssignment(function_name.string_, function_type,
- function_scope);
+ DeclarationScope* function_scope) {
+ DeclareFunctionNameVar(function_name.string_, function_type,
+ function_scope);
}
V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
@@ -1118,7 +1139,16 @@ class PreParser : public ParserBase<PreParser> {
const PreParserExpression& property,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
- ClassInfo* class_info, bool* ok) {}
+ bool is_computed_name,
+ ClassInfo* class_info, bool* ok) {
+ if (kind == ClassLiteralProperty::FIELD && is_computed_name) {
+ scope()->DeclareVariableName(
+ ClassFieldVariableName(ast_value_factory(),
+ class_info->computed_field_count),
+ CONST);
+ }
+ }
+
V8_INLINE PreParserExpression
RewriteClassLiteral(Scope* scope, const PreParserIdentifier& name,
ClassInfo* class_info, int pos, int end_pos, bool* ok) {
@@ -1134,12 +1164,18 @@ class PreParser : public ParserBase<PreParser> {
FunctionKind kind = has_extends ? FunctionKind::kDefaultDerivedConstructor
: FunctionKind::kDefaultBaseConstructor;
DeclarationScope* function_scope = NewFunctionScope(kind);
- SetLanguageMode(function_scope, STRICT);
+ SetLanguageMode(function_scope, LanguageMode::kStrict);
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
FunctionState function_state(&function_state_, &scope_, function_scope);
GetNextFunctionLiteralId();
}
+ if (class_info->has_static_class_fields) {
+ GetNextFunctionLiteralId();
+ }
+ if (class_info->has_instance_class_fields) {
+ GetNextFunctionLiteralId();
+ }
return PreParserExpression::Default();
}
@@ -1265,6 +1301,13 @@ class PreParser : public ParserBase<PreParser> {
return false;
}
+ V8_INLINE NaryOperation* CollapseNaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op, int pos,
+ const SourceRange& range) {
+ return nullptr;
+ }
+
V8_INLINE PreParserExpression BuildUnaryExpression(
const PreParserExpression& expression, Token::Value op, int pos) {
return PreParserExpression::Default();
@@ -1381,13 +1424,13 @@ class PreParser : public ParserBase<PreParser> {
}
// Reporting errors.
- V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler_->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ const char* arg = nullptr,
+ ParseErrorType error_type = kSyntaxError) {
+ pending_error_handler()->ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
}
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
@@ -1427,15 +1470,6 @@ class PreParser : public ParserBase<PreParser> {
return PreParserIdentifier::Default();
}
- // Odd-ball literal creators.
- V8_INLINE PreParserExpression GetLiteralTheHole(int position) {
- return PreParserExpression::Default();
- }
-
- V8_INLINE PreParserExpression GetLiteralUndefined(int position) {
- return PreParserExpression::Default();
- }
-
// Producing data during the recursive descent.
PreParserIdentifier GetSymbol() const;
@@ -1495,6 +1529,10 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::Default();
}
+ V8_INLINE PreParserExpression ImportMetaExpression(int pos) {
+ return PreParserExpression::Default();
+ }
+
V8_INLINE PreParserExpression ExpressionFromLiteral(Token::Value token,
int pos) {
return PreParserExpression::Default();
@@ -1650,7 +1688,6 @@ class PreParser : public ParserBase<PreParser> {
int* use_counts_;
bool track_unresolved_variables_;
PreParserLogger log_;
- PendingCompilationErrorHandler* pending_error_handler_;
ProducedPreParsedScopeData* produced_preparsed_scope_data_;
};
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index ddf60a5004..c31d0ea21d 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -263,6 +263,9 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
node->finally_block()->statements()->Add(
factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+ // We can't tell whether the finally-block is guaranteed to set .result, so
+ // reset is_set_ before visiting the try-block.
+ is_set_ = false;
}
Visit(node->try_block());
node->set_try_block(replacement_->AsBlock());
@@ -335,6 +338,10 @@ void Processor::VisitDebuggerStatement(DebuggerStatement* node) {
replacement_ = node;
}
+void Processor::VisitInitializeClassFieldsStatement(
+ InitializeClassFieldsStatement* node) {
+ replacement_ = node;
+}
// Expressions are never visited.
#define DEF_VISIT(type) \
@@ -359,7 +366,9 @@ bool Rewriter::Rewrite(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
- &RuntimeCallStats::CompileRewriteReturnResult);
+ info->on_background_thread()
+ ? &RuntimeCallStats::CompileBackgroundRewriteReturnResult
+ : &RuntimeCallStats::CompileRewriteReturnResult);
FunctionLiteral* function = info->literal();
DCHECK_NOT_NULL(function);
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 5229aed780..c5175c4de7 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -310,7 +310,7 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
unibrow::uchar t =
unibrow::Utf8::ValueOfIncrementalFinish(&current_.pos.incomplete_char);
if (t != unibrow::Utf8::kBufferEmpty) {
- DCHECK(t < unibrow::Utf16::kMaxNonSurrogateCharCode);
+ DCHECK_LT(t, unibrow::Utf16::kMaxNonSurrogateCharCode);
*cursor = static_cast<uc16>(t);
buffer_end_++;
current_.pos.chars++;
@@ -835,9 +835,9 @@ Utf16CharacterStream* ScannerStream::For(Handle<String> data) {
Utf16CharacterStream* ScannerStream::For(Handle<String> data, int start_pos,
int end_pos) {
- DCHECK(start_pos >= 0);
- DCHECK(start_pos <= end_pos);
- DCHECK(end_pos <= data->length());
+ DCHECK_GE(start_pos, 0);
+ DCHECK_LE(start_pos, end_pos);
+ DCHECK_LE(end_pos, data->length());
if (data->IsExternalOneByteString()) {
return new ExternalOneByteStringUtf16CharacterStream(
Handle<ExternalOneByteString>::cast(data),
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index f0ce0012ae..8030b93889 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -13,6 +13,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
+#include "src/objects/bigint.h"
#include "src/parsing/duplicate-finder.h" // For Scanner::FindSymbol
#include "src/unicode-cache-inl.h"
@@ -196,6 +197,7 @@ Scanner::Scanner(UnicodeCache* unicode_cache, int* use_counts)
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone),
found_html_comment_(false),
+ allow_harmony_bigint_(false),
use_counts_(use_counts) {}
void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
@@ -211,7 +213,7 @@ void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
template <bool capture_raw, bool unicode>
uc32 Scanner::ScanHexNumber(int expected_length) {
- DCHECK(expected_length <= 4); // prevent overflow
+ DCHECK_LE(expected_length, 4); // prevent overflow
int begin = source_pos() - 2;
uc32 x = 0;
@@ -582,7 +584,7 @@ void Scanner::TryToParseSourceURLComment() {
Token::Value Scanner::SkipMultiLineComment() {
- DCHECK(c0_ == '*');
+ DCHECK_EQ(c0_, '*');
Advance();
while (c0_ != kEndOfInput) {
@@ -608,7 +610,7 @@ Token::Value Scanner::SkipMultiLineComment() {
Token::Value Scanner::ScanHtmlComment() {
// Check for <!-- comments.
- DCHECK(c0_ == '!');
+ DCHECK_EQ(c0_, '!');
Advance();
if (c0_ != '-') {
PushBack('!'); // undo Advance()
@@ -626,8 +628,8 @@ Token::Value Scanner::ScanHtmlComment() {
}
void Scanner::Scan() {
- next_.literal_chars = NULL;
- next_.raw_literal_chars = NULL;
+ next_.literal_chars = nullptr;
+ next_.raw_literal_chars = nullptr;
next_.invalid_template_escape_message = MessageTemplate::kNone;
Token::Value token;
do {
@@ -934,6 +936,7 @@ void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
case Token::FUTURE_STRICT_RESERVED_WORD:
case Token::IDENTIFIER:
case Token::NUMBER:
+ case Token::BIGINT:
case Token::REGEXP_LITERAL:
case Token::SMI:
case Token::STRING:
@@ -1185,8 +1188,8 @@ Token::Value Scanner::ScanTemplateSpan() {
Token::Value Scanner::ScanTemplateStart() {
- DCHECK(next_next_.token == Token::UNINITIALIZED);
- DCHECK(c0_ == '`');
+ DCHECK_EQ(next_next_.token, Token::UNINITIALIZED);
+ DCHECK_EQ(c0_, '`');
next_.location.beg_pos = source_pos();
Advance(); // Consume `
return ScanTemplateSpan();
@@ -1321,14 +1324,31 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
ScanDecimalDigits(); // optional
if (c0_ == '.') {
+ seen_period = true;
AddLiteralCharAdvance();
ScanDecimalDigits(); // optional
}
}
}
- // scan exponent, if any
- if (c0_ == 'e' || c0_ == 'E') {
+ bool is_bigint = false;
+ if (allow_harmony_bigint() && c0_ == 'n' && !seen_period &&
+ (kind == DECIMAL || kind == HEX || kind == OCTAL || kind == BINARY)) {
+ // Check that the literal is within our limits for BigInt length.
+ // For simplicity, use 4 bits per character to calculate the maximum
+ // allowed literal length.
+ static const int kMaxBigIntCharacters = BigInt::kMaxLengthBits / 4;
+ int length = source_pos() - start_pos - (kind != DECIMAL ? 2 : 0);
+ if (length > kMaxBigIntCharacters) {
+ ReportScannerError(Location(start_pos, source_pos()),
+ MessageTemplate::kBigIntTooBig);
+ return Token::ILLEGAL;
+ }
+
+ is_bigint = true;
+ Advance();
+ } else if (c0_ == 'e' || c0_ == 'E') {
+ // scan exponent, if any
DCHECK(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
if (!(kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO))
return Token::ILLEGAL;
@@ -1357,7 +1377,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
octal_pos_ = Location(start_pos, source_pos());
octal_message_ = MessageTemplate::kStrictDecimalWithLeadingZero;
}
- return Token::NUMBER;
+
+ return is_bigint ? Token::BIGINT : Token::NUMBER;
}
@@ -1481,7 +1502,7 @@ uc32 Scanner::ScanUnicodeEscape() {
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length) {
- DCHECK(input_length >= 1);
+ DCHECK_GE(input_length, 1);
const int kMinLength = 2;
const int kMaxLength = 11;
if (input_length < kMinLength || input_length > kMaxLength) {
@@ -1728,11 +1749,7 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
flag = RegExp::kMultiline;
break;
case 's':
- if (FLAG_harmony_regexp_dotall) {
- flag = RegExp::kDotAll;
- } else {
- return Nothing<RegExp::Flags>();
- }
+ flag = RegExp::kDotAll;
break;
case 'u':
flag = RegExp::kUnicode;
@@ -1787,6 +1804,16 @@ double Scanner::DoubleValue() {
ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
}
+const char* Scanner::CurrentLiteralAsCString(Zone* zone) const {
+ DCHECK(is_literal_one_byte());
+ Vector<const uint8_t> vector = literal_one_byte_string();
+ int length = vector.length();
+ char* buffer = zone->NewArray<char>(length + 1);
+ memcpy(buffer, vector.start(), length);
+ buffer[length] = '\0';
+ return buffer;
+}
+
bool Scanner::IsDuplicateSymbol(DuplicateFinder* duplicate_finder,
AstValueFactory* ast_value_factory) const {
DCHECK_NOT_NULL(duplicate_finder);
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 200054d893..08d77c686b 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -260,6 +260,8 @@ class Scanner {
double DoubleValue();
+ const char* CurrentLiteralAsCString(Zone* zone) const;
+
inline bool CurrentMatches(Token::Value token) const {
DCHECK(Token::IsKeyword(token));
return current_.token == token;
@@ -356,6 +358,9 @@ class Scanner {
bool FoundHtmlComment() const { return found_html_comment_; }
+ bool allow_harmony_bigint() const { return allow_harmony_bigint_; }
+ void set_allow_harmony_bigint(bool allow) { allow_harmony_bigint_ = allow; }
+
private:
// Scoped helper for saving & restoring scanner error state.
// This is used for tagged template literals, in which normally forbidden
@@ -409,7 +414,7 @@ class Scanner {
Vector<const uint16_t> two_byte_literal() const {
DCHECK(!is_one_byte_);
- DCHECK((position_ & 0x1) == 0);
+ DCHECK_EQ(position_ & 0x1, 0);
return Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(backing_store_.start()),
position_ >> 1);
@@ -494,18 +499,18 @@ class Scanner {
// Initialize current_ to not refer to a literal.
current_.token = Token::UNINITIALIZED;
current_.contextual_token = Token::UNINITIALIZED;
- current_.literal_chars = NULL;
- current_.raw_literal_chars = NULL;
+ current_.literal_chars = nullptr;
+ current_.raw_literal_chars = nullptr;
current_.invalid_template_escape_message = MessageTemplate::kNone;
next_.token = Token::UNINITIALIZED;
next_.contextual_token = Token::UNINITIALIZED;
- next_.literal_chars = NULL;
- next_.raw_literal_chars = NULL;
+ next_.literal_chars = nullptr;
+ next_.raw_literal_chars = nullptr;
next_.invalid_template_escape_message = MessageTemplate::kNone;
next_next_.token = Token::UNINITIALIZED;
next_next_.contextual_token = Token::UNINITIALIZED;
- next_next_.literal_chars = NULL;
- next_next_.raw_literal_chars = NULL;
+ next_next_.literal_chars = nullptr;
+ next_next_.raw_literal_chars = nullptr;
next_next_.invalid_template_escape_message = MessageTemplate::kNone;
found_html_comment_ = false;
scanner_error_ = MessageTemplate::kNone;
@@ -572,8 +577,8 @@ class Scanner {
// Stops scanning of a literal and drop the collected characters,
// e.g., due to an encountered error.
inline void DropLiteral() {
- next_.literal_chars = NULL;
- next_.raw_literal_chars = NULL;
+ next_.literal_chars = nullptr;
+ next_.raw_literal_chars = nullptr;
}
inline void AddLiteralCharAdvance() {
@@ -662,10 +667,6 @@ class Scanner {
bool is_literal_one_byte() const {
return !current_.literal_chars || current_.literal_chars->is_one_byte();
}
- int literal_length() const {
- if (current_.literal_chars) return current_.literal_chars->length();
- return Token::StringLength(current_.token);
- }
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
Vector<const uint8_t> next_literal_one_byte_string() const {
@@ -801,6 +802,9 @@ class Scanner {
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
+ // Whether to recognize BIGINT tokens.
+ bool allow_harmony_bigint_;
+
int* use_counts_;
MessageTemplate::Template scanner_error_;
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 4cc4db288a..e4a4a5e587 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -14,7 +14,7 @@ namespace internal {
// TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
// same signature M(name, string, precedence), where name is the
// symbolic token name, string is the corresponding syntactic symbol
-// (or NULL, for literals), and precedence is the precedence (or 0).
+// (or nullptr, for literals), and precedence is the precedence (or 0).
// The parameters are invoked for token categories as follows:
//
// T: Non-keyword tokens
@@ -32,171 +32,172 @@ namespace internal {
#define IGNORE_TOKEN(name, string, precedence)
-#define TOKEN_LIST(T, K, C) \
- /* End of source indicator. */ \
- T(EOS, "EOS", 0) \
- \
- /* Punctuators (ECMA-262, section 7.7, page 15). */ \
- T(LPAREN, "(", 0) \
- T(RPAREN, ")", 0) \
- T(LBRACK, "[", 0) \
- T(RBRACK, "]", 0) \
- T(LBRACE, "{", 0) \
- T(RBRACE, "}", 0) \
- T(COLON, ":", 0) \
- T(SEMICOLON, ";", 0) \
- T(PERIOD, ".", 0) \
- T(ELLIPSIS, "...", 0) \
- T(CONDITIONAL, "?", 3) \
- T(INC, "++", 0) \
- T(DEC, "--", 0) \
- T(ARROW, "=>", 0) \
- \
- /* Assignment operators. */ \
- /* IsAssignmentOp() relies on this block of enum values being */ \
- /* contiguous and sorted in the same order! */ \
- T(INIT, "=init", 2) /* AST-use only. */ \
- T(ASSIGN, "=", 2) \
- T(ASSIGN_BIT_OR, "|=", 2) \
- T(ASSIGN_BIT_XOR, "^=", 2) \
- T(ASSIGN_BIT_AND, "&=", 2) \
- T(ASSIGN_SHL, "<<=", 2) \
- T(ASSIGN_SAR, ">>=", 2) \
- T(ASSIGN_SHR, ">>>=", 2) \
- T(ASSIGN_ADD, "+=", 2) \
- T(ASSIGN_SUB, "-=", 2) \
- T(ASSIGN_MUL, "*=", 2) \
- T(ASSIGN_DIV, "/=", 2) \
- T(ASSIGN_MOD, "%=", 2) \
- T(ASSIGN_EXP, "**=", 2) \
- \
- /* Binary operators sorted by precedence. */ \
- /* IsBinaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(COMMA, ",", 1) \
- T(OR, "||", 4) \
- T(AND, "&&", 5) \
- T(BIT_OR, "|", 6) \
- T(BIT_XOR, "^", 7) \
- T(BIT_AND, "&", 8) \
- T(SHL, "<<", 11) \
- T(SAR, ">>", 11) \
- T(SHR, ">>>", 11) \
- T(ADD, "+", 12) \
- T(SUB, "-", 12) \
- T(MUL, "*", 13) \
- T(DIV, "/", 13) \
- T(MOD, "%", 13) \
- T(EXP, "**", 14) \
- \
- /* Compare operators sorted by precedence. */ \
- /* IsCompareOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(EQ, "==", 9) \
- T(NE, "!=", 9) \
- T(EQ_STRICT, "===", 9) \
- T(NE_STRICT, "!==", 9) \
- T(LT, "<", 10) \
- T(GT, ">", 10) \
- T(LTE, "<=", 10) \
- T(GTE, ">=", 10) \
- K(INSTANCEOF, "instanceof", 10) \
- K(IN, "in", 10) \
- \
- /* Unary operators. */ \
- /* IsUnaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(NOT, "!", 0) \
- T(BIT_NOT, "~", 0) \
- K(DELETE, "delete", 0) \
- K(TYPEOF, "typeof", 0) \
- K(VOID, "void", 0) \
- \
- /* Keywords (ECMA-262, section 7.5.2, page 13). */ \
- K(BREAK, "break", 0) \
- K(CASE, "case", 0) \
- K(CATCH, "catch", 0) \
- K(CONTINUE, "continue", 0) \
- K(DEBUGGER, "debugger", 0) \
- K(DEFAULT, "default", 0) \
- /* DELETE */ \
- K(DO, "do", 0) \
- K(ELSE, "else", 0) \
- K(FINALLY, "finally", 0) \
- K(FOR, "for", 0) \
- K(FUNCTION, "function", 0) \
- K(IF, "if", 0) \
- /* IN */ \
- /* INSTANCEOF */ \
- K(NEW, "new", 0) \
- K(RETURN, "return", 0) \
- K(SWITCH, "switch", 0) \
- K(THIS, "this", 0) \
- K(THROW, "throw", 0) \
- K(TRY, "try", 0) \
- /* TYPEOF */ \
- K(VAR, "var", 0) \
- /* VOID */ \
- K(WHILE, "while", 0) \
- K(WITH, "with", 0) \
- \
- /* Literals (ECMA-262, section 7.8, page 16). */ \
- K(NULL_LITERAL, "null", 0) \
- K(TRUE_LITERAL, "true", 0) \
- K(FALSE_LITERAL, "false", 0) \
- T(NUMBER, NULL, 0) \
- T(SMI, NULL, 0) \
- T(STRING, NULL, 0) \
- \
- /* Identifiers (not keywords or future reserved words). */ \
- T(IDENTIFIER, NULL, 0) \
- \
- /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
- T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \
- K(ASYNC, "async", 0) \
- /* `await` is a reserved word in module code only */ \
- K(AWAIT, "await", 0) \
- K(CLASS, "class", 0) \
- K(CONST, "const", 0) \
- K(ENUM, "enum", 0) \
- K(EXPORT, "export", 0) \
- K(EXTENDS, "extends", 0) \
- K(IMPORT, "import", 0) \
- K(LET, "let", 0) \
- K(STATIC, "static", 0) \
- K(YIELD, "yield", 0) \
- K(SUPER, "super", 0) \
- \
- /* Illegal token - not able to scan. */ \
- T(ILLEGAL, "ILLEGAL", 0) \
- T(ESCAPED_KEYWORD, NULL, 0) \
- T(ESCAPED_STRICT_RESERVED_WORD, NULL, 0) \
- \
- /* Scanner-internal use only. */ \
- T(WHITESPACE, NULL, 0) \
- T(UNINITIALIZED, NULL, 0) \
- T(REGEXP_LITERAL, NULL, 0) \
- \
- /* ES6 Template Literals */ \
- T(TEMPLATE_SPAN, NULL, 0) \
- T(TEMPLATE_TAIL, NULL, 0) \
- \
- /* Contextual keyword tokens */ \
- C(GET, "get", 0) \
- C(SET, "set", 0) \
- C(OF, "of", 0) \
- C(TARGET, "target", 0) \
- C(SENT, "sent", 0) \
- C(META, "meta", 0) \
- C(AS, "as", 0) \
- C(FROM, "from", 0) \
- C(NAME, "name", 0) \
- C(PROTO_UNDERSCORED, "__proto__", 0) \
- C(CONSTRUCTOR, "constructor", 0) \
- C(PROTOTYPE, "prototype", 0) \
- C(EVAL, "eval", 0) \
- C(ARGUMENTS, "arguments", 0) \
- C(UNDEFINED, "undefined", 0) \
+#define TOKEN_LIST(T, K, C) \
+ /* End of source indicator. */ \
+ T(EOS, "EOS", 0) \
+ \
+ /* Punctuators (ECMA-262, section 7.7, page 15). */ \
+ T(LPAREN, "(", 0) \
+ T(RPAREN, ")", 0) \
+ T(LBRACK, "[", 0) \
+ T(RBRACK, "]", 0) \
+ T(LBRACE, "{", 0) \
+ T(RBRACE, "}", 0) \
+ T(COLON, ":", 0) \
+ T(SEMICOLON, ";", 0) \
+ T(PERIOD, ".", 0) \
+ T(ELLIPSIS, "...", 0) \
+ T(CONDITIONAL, "?", 3) \
+ T(INC, "++", 0) \
+ T(DEC, "--", 0) \
+ T(ARROW, "=>", 0) \
+ \
+ /* Assignment operators. */ \
+ /* IsAssignmentOp() relies on this block of enum values being */ \
+ /* contiguous and sorted in the same order! */ \
+ T(INIT, "=init", 2) /* AST-use only. */ \
+ T(ASSIGN, "=", 2) \
+ T(ASSIGN_BIT_OR, "|=", 2) \
+ T(ASSIGN_BIT_XOR, "^=", 2) \
+ T(ASSIGN_BIT_AND, "&=", 2) \
+ T(ASSIGN_SHL, "<<=", 2) \
+ T(ASSIGN_SAR, ">>=", 2) \
+ T(ASSIGN_SHR, ">>>=", 2) \
+ T(ASSIGN_ADD, "+=", 2) \
+ T(ASSIGN_SUB, "-=", 2) \
+ T(ASSIGN_MUL, "*=", 2) \
+ T(ASSIGN_DIV, "/=", 2) \
+ T(ASSIGN_MOD, "%=", 2) \
+ T(ASSIGN_EXP, "**=", 2) \
+ \
+ /* Binary operators sorted by precedence. */ \
+ /* IsBinaryOp() relies on this block of enum values */ \
+ /* being contiguous and sorted in the same order! */ \
+ T(COMMA, ",", 1) \
+ T(OR, "||", 4) \
+ T(AND, "&&", 5) \
+ T(BIT_OR, "|", 6) \
+ T(BIT_XOR, "^", 7) \
+ T(BIT_AND, "&", 8) \
+ T(SHL, "<<", 11) \
+ T(SAR, ">>", 11) \
+ T(SHR, ">>>", 11) \
+ T(ADD, "+", 12) \
+ T(SUB, "-", 12) \
+ T(MUL, "*", 13) \
+ T(DIV, "/", 13) \
+ T(MOD, "%", 13) \
+ T(EXP, "**", 14) \
+ \
+ /* Compare operators sorted by precedence. */ \
+ /* IsCompareOp() relies on this block of enum values */ \
+ /* being contiguous and sorted in the same order! */ \
+ T(EQ, "==", 9) \
+ T(NE, "!=", 9) \
+ T(EQ_STRICT, "===", 9) \
+ T(NE_STRICT, "!==", 9) \
+ T(LT, "<", 10) \
+ T(GT, ">", 10) \
+ T(LTE, "<=", 10) \
+ T(GTE, ">=", 10) \
+ K(INSTANCEOF, "instanceof", 10) \
+ K(IN, "in", 10) \
+ \
+ /* Unary operators. */ \
+ /* IsUnaryOp() relies on this block of enum values */ \
+ /* being contiguous and sorted in the same order! */ \
+ T(NOT, "!", 0) \
+ T(BIT_NOT, "~", 0) \
+ K(DELETE, "delete", 0) \
+ K(TYPEOF, "typeof", 0) \
+ K(VOID, "void", 0) \
+ \
+ /* Keywords (ECMA-262, section 7.5.2, page 13). */ \
+ K(BREAK, "break", 0) \
+ K(CASE, "case", 0) \
+ K(CATCH, "catch", 0) \
+ K(CONTINUE, "continue", 0) \
+ K(DEBUGGER, "debugger", 0) \
+ K(DEFAULT, "default", 0) \
+ /* DELETE */ \
+ K(DO, "do", 0) \
+ K(ELSE, "else", 0) \
+ K(FINALLY, "finally", 0) \
+ K(FOR, "for", 0) \
+ K(FUNCTION, "function", 0) \
+ K(IF, "if", 0) \
+ /* IN */ \
+ /* INSTANCEOF */ \
+ K(NEW, "new", 0) \
+ K(RETURN, "return", 0) \
+ K(SWITCH, "switch", 0) \
+ K(THIS, "this", 0) \
+ K(THROW, "throw", 0) \
+ K(TRY, "try", 0) \
+ /* TYPEOF */ \
+ K(VAR, "var", 0) \
+ /* VOID */ \
+ K(WHILE, "while", 0) \
+ K(WITH, "with", 0) \
+ \
+ /* Literals (ECMA-262, section 7.8, page 16). */ \
+ K(NULL_LITERAL, "null", 0) \
+ K(TRUE_LITERAL, "true", 0) \
+ K(FALSE_LITERAL, "false", 0) \
+ T(NUMBER, nullptr, 0) \
+ T(SMI, nullptr, 0) \
+ T(STRING, nullptr, 0) \
+ T(BIGINT, nullptr, 0) \
+ \
+ /* Identifiers (not keywords or future reserved words). */ \
+ T(IDENTIFIER, nullptr, 0) \
+ \
+ /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
+ T(FUTURE_STRICT_RESERVED_WORD, nullptr, 0) \
+ K(ASYNC, "async", 0) \
+ /* `await` is a reserved word in module code only */ \
+ K(AWAIT, "await", 0) \
+ K(CLASS, "class", 0) \
+ K(CONST, "const", 0) \
+ K(ENUM, "enum", 0) \
+ K(EXPORT, "export", 0) \
+ K(EXTENDS, "extends", 0) \
+ K(IMPORT, "import", 0) \
+ K(LET, "let", 0) \
+ K(STATIC, "static", 0) \
+ K(YIELD, "yield", 0) \
+ K(SUPER, "super", 0) \
+ \
+ /* Illegal token - not able to scan. */ \
+ T(ILLEGAL, "ILLEGAL", 0) \
+ T(ESCAPED_KEYWORD, nullptr, 0) \
+ T(ESCAPED_STRICT_RESERVED_WORD, nullptr, 0) \
+ \
+ /* Scanner-internal use only. */ \
+ T(WHITESPACE, nullptr, 0) \
+ T(UNINITIALIZED, nullptr, 0) \
+ T(REGEXP_LITERAL, nullptr, 0) \
+ \
+ /* ES6 Template Literals */ \
+ T(TEMPLATE_SPAN, nullptr, 0) \
+ T(TEMPLATE_TAIL, nullptr, 0) \
+ \
+ /* Contextual keyword tokens */ \
+ C(GET, "get", 0) \
+ C(SET, "set", 0) \
+ C(OF, "of", 0) \
+ C(TARGET, "target", 0) \
+ C(SENT, "sent", 0) \
+ C(META, "meta", 0) \
+ C(AS, "as", 0) \
+ C(FROM, "from", 0) \
+ C(NAME, "name", 0) \
+ C(PROTO_UNDERSCORED, "__proto__", 0) \
+ C(CONSTRUCTOR, "constructor", 0) \
+ C(PROTOTYPE, "prototype", 0) \
+ C(EVAL, "eval", 0) \
+ C(ARGUMENTS, "arguments", 0) \
+ C(UNDEFINED, "undefined", 0) \
C(ANONYMOUS, "anonymous", 0)
class Token {
@@ -246,10 +247,6 @@ class Token {
static bool IsBinaryOp(Value op) { return COMMA <= op && op <= EXP; }
- static bool IsTruncatingBinaryOp(Value op) {
- return BIT_OR <= op && op <= SHR;
- }
-
static bool IsCompareOp(Value op) {
return EQ <= op && op <= IN;
}
@@ -262,62 +259,6 @@ class Token {
return op == EQ || op == EQ_STRICT;
}
- static bool IsInequalityOp(Value op) {
- return op == NE || op == NE_STRICT;
- }
-
- static bool IsArithmeticCompareOp(Value op) {
- return IsOrderedRelationalCompareOp(op) ||
- IsEqualityOp(op) || IsInequalityOp(op);
- }
-
- static Value NegateCompareOp(Value op) {
- DCHECK(IsArithmeticCompareOp(op));
- switch (op) {
- case EQ: return NE;
- case NE: return EQ;
- case EQ_STRICT: return NE_STRICT;
- case NE_STRICT: return EQ_STRICT;
- case LT: return GTE;
- case GT: return LTE;
- case LTE: return GT;
- case GTE: return LT;
- default:
- UNREACHABLE();
- }
- }
-
- static Value ReverseCompareOp(Value op) {
- DCHECK(IsArithmeticCompareOp(op));
- switch (op) {
- case EQ: return EQ;
- case NE: return NE;
- case EQ_STRICT: return EQ_STRICT;
- case NE_STRICT: return NE_STRICT;
- case LT: return GT;
- case GT: return LT;
- case LTE: return GTE;
- case GTE: return LTE;
- default:
- UNREACHABLE();
- }
- }
-
- static bool EvalComparison(Value op, double op1, double op2) {
- DCHECK(IsArithmeticCompareOp(op));
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: return (op1 == op2);
- case Token::NE: return (op1 != op2);
- case Token::LT: return (op1 < op2);
- case Token::GT: return (op1 > op2);
- case Token::LTE: return (op1 <= op2);
- case Token::GTE: return (op1 >= op2);
- default:
- UNREACHABLE();
- }
- }
-
static Value BinaryOpForAssignment(Value op) {
DCHECK(IsAssignmentOp(op));
switch (op) {
@@ -343,6 +284,8 @@ class Token {
return Token::DIV;
case Token::ASSIGN_MOD:
return Token::MOD;
+ case Token::ASSIGN_EXP:
+ return Token::EXP;
default:
UNREACHABLE();
}
@@ -365,7 +308,7 @@ class Token {
}
// Returns a string corresponding to the JS token string
- // (.e., "<" for the token LT) or NULL if the token doesn't
+ // (.e., "<" for the token LT) or nullptr if the token doesn't
// have a (unique) string (e.g. an IDENTIFIER).
static const char* String(Value tok) {
DCHECK(tok < NUM_TOKENS); // tok is unsigned.
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index e2db6db81c..26da880b12 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -14,10 +14,10 @@
namespace v8 {
namespace internal {
-Handle<String> PendingCompilationErrorHandler::ArgumentString(
- Isolate* isolate) {
- if (arg_ != NULL) return arg_->string();
- if (char_arg_ != NULL) {
+Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgumentString(
+ Isolate* isolate) const {
+ if (arg_ != nullptr) return arg_->string();
+ if (char_arg_ != nullptr) {
return isolate->factory()
->NewStringFromUtf8(CStrVector(char_arg_))
.ToHandleChecked();
@@ -25,26 +25,84 @@ Handle<String> PendingCompilationErrorHandler::ArgumentString(
return isolate->factory()->undefined_string();
}
-Handle<String> PendingCompilationErrorHandler::FormatMessage(Isolate* isolate) {
- return MessageTemplate::FormatMessage(isolate, message_,
- ArgumentString(isolate));
+MessageLocation PendingCompilationErrorHandler::MessageDetails::GetLocation(
+ Handle<Script> script) const {
+ return MessageLocation(script, start_position_, end_position_);
+}
+
+void PendingCompilationErrorHandler::ReportMessageAt(
+ int start_position, int end_position, MessageTemplate::Template message,
+ const char* arg, ParseErrorType error_type) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+
+ error_details_ =
+ MessageDetails(start_position, end_position, message, nullptr, arg);
+ error_type_ = error_type;
+}
+
+void PendingCompilationErrorHandler::ReportMessageAt(
+ int start_position, int end_position, MessageTemplate::Template message,
+ const AstRawString* arg, ParseErrorType error_type) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+
+ error_details_ =
+ MessageDetails(start_position, end_position, message, arg, nullptr);
+ error_type_ = error_type;
+}
+
+void PendingCompilationErrorHandler::ReportWarningAt(
+ int start_position, int end_position, MessageTemplate::Template message,
+ const char* arg) {
+ warning_messages_.emplace_front(
+ MessageDetails(start_position, end_position, message, nullptr, arg));
+}
+
+void PendingCompilationErrorHandler::ReportWarnings(Isolate* isolate,
+ Handle<Script> script) {
+ DCHECK(!has_pending_error());
+
+ for (const MessageDetails& warning : warning_messages_) {
+ MessageLocation location = warning.GetLocation(script);
+ Handle<String> argument = warning.ArgumentString(isolate);
+ Handle<JSMessageObject> message =
+ MessageHandler::MakeMessageObject(isolate, warning.message(), &location,
+ argument, Handle<FixedArray>::null());
+ message->set_error_level(v8::Isolate::kMessageWarning);
+ MessageHandler::ReportMessage(isolate, &location, message);
+ }
+}
+
+void PendingCompilationErrorHandler::ReportErrors(
+ Isolate* isolate, Handle<Script> script,
+ AstValueFactory* ast_value_factory) {
+ if (stack_overflow()) {
+ isolate->StackOverflow();
+ } else {
+ DCHECK(has_pending_error());
+ // Internalize ast values for throwing the pending error.
+ ast_value_factory->Internalize(isolate);
+ ThrowPendingError(isolate, script);
+ }
}
void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<Script> script) {
if (!has_pending_error_) return;
- MessageLocation location(script, start_position_, end_position_);
- Factory* factory = isolate->factory();
- Handle<String> argument = ArgumentString(isolate);
+
+ MessageLocation location = error_details_.GetLocation(script);
+ Handle<String> argument = error_details_.ArgumentString(isolate);
isolate->debug()->OnCompileError(script);
+ Factory* factory = isolate->factory();
Handle<Object> error;
switch (error_type_) {
case kReferenceError:
- error = factory->NewReferenceError(message_, argument);
+ error = factory->NewReferenceError(error_details_.message(), argument);
break;
case kSyntaxError:
- error = factory->NewSyntaxError(message_, argument);
+ error = factory->NewSyntaxError(error_details_.message(), argument);
break;
default:
UNREACHABLE();
@@ -61,17 +119,27 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<Name> key_start_pos = factory->error_start_pos_symbol();
JSObject::SetProperty(jserror, key_start_pos,
handle(Smi::FromInt(location.start_pos()), isolate),
- SLOPPY).Check();
+ LanguageMode::kSloppy)
+ .Check();
Handle<Name> key_end_pos = factory->error_end_pos_symbol();
JSObject::SetProperty(jserror, key_end_pos,
handle(Smi::FromInt(location.end_pos()), isolate),
- SLOPPY).Check();
+ LanguageMode::kSloppy)
+ .Check();
Handle<Name> key_script = factory->error_script_symbol();
- JSObject::SetProperty(jserror, key_script, script, SLOPPY).Check();
+ JSObject::SetProperty(jserror, key_script, script, LanguageMode::kSloppy)
+ .Check();
isolate->Throw(*error, &location);
}
+
+Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
+ Isolate* isolate) const {
+ return MessageTemplate::FormatMessage(isolate, error_details_.message(),
+ error_details_.ArgumentString(isolate));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
index 42c679e75c..b828fff17c 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -5,6 +5,8 @@
#ifndef V8_PENDING_COMPILATION_ERROR_HANDLER_H_
#define V8_PENDING_COMPILATION_ERROR_HANDLER_H_
+#include <forward_list>
+
#include "src/base/macros.h"
#include "src/globals.h"
#include "src/handles.h"
@@ -14,6 +16,7 @@ namespace v8 {
namespace internal {
class AstRawString;
+class AstValueFactory;
class Isolate;
class Script;
@@ -23,57 +26,83 @@ class PendingCompilationErrorHandler {
public:
PendingCompilationErrorHandler()
: has_pending_error_(false),
- start_position_(-1),
- end_position_(-1),
- message_(MessageTemplate::kNone),
- arg_(nullptr),
- char_arg_(nullptr),
+ stack_overflow_(false),
error_type_(kSyntaxError) {}
void ReportMessageAt(int start_position, int end_position,
MessageTemplate::Template message,
const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError) {
- if (has_pending_error_) return;
- has_pending_error_ = true;
- start_position_ = start_position;
- end_position_ = end_position;
- message_ = message;
- char_arg_ = arg;
- arg_ = nullptr;
- error_type_ = error_type;
- }
+ ParseErrorType error_type = kSyntaxError);
void ReportMessageAt(int start_position, int end_position,
MessageTemplate::Template message,
const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
- if (has_pending_error_) return;
+ ParseErrorType error_type = kSyntaxError);
+
+ void ReportWarningAt(int start_position, int end_position,
+ MessageTemplate::Template message,
+ const char* arg = nullptr);
+
+ bool stack_overflow() const { return stack_overflow_; }
+
+ void set_stack_overflow() {
has_pending_error_ = true;
- start_position_ = start_position;
- end_position_ = end_position;
- message_ = message;
- char_arg_ = nullptr;
- arg_ = arg;
- error_type_ = error_type;
+ stack_overflow_ = true;
}
bool has_pending_error() const { return has_pending_error_; }
+ bool has_pending_warnings() const { return !warning_messages_.empty(); }
- void ThrowPendingError(Isolate* isolate, Handle<Script> script);
- Handle<String> FormatMessage(Isolate* isolate);
+ // Handle errors detected during parsing.
+ void ReportErrors(Isolate* isolate, Handle<Script> script,
+ AstValueFactory* ast_value_factory);
+
+ // Handle warnings detected during compilation.
+ void ReportWarnings(Isolate* isolate, Handle<Script> script);
+
+ Handle<String> FormatErrorMessageForTest(Isolate* isolate) const;
private:
- Handle<String> ArgumentString(Isolate* isolate);
+ class MessageDetails {
+ public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MessageDetails);
+ MessageDetails()
+ : start_position_(-1),
+ end_position_(-1),
+ message_(MessageTemplate::kNone),
+ arg_(nullptr),
+ char_arg_(nullptr) {}
+ MessageDetails(int start_position, int end_position,
+ MessageTemplate::Template message, const AstRawString* arg,
+ const char* char_arg)
+ : start_position_(start_position),
+ end_position_(end_position),
+ message_(message),
+ arg_(arg),
+ char_arg_(char_arg) {}
+
+ Handle<String> ArgumentString(Isolate* isolate) const;
+ MessageLocation GetLocation(Handle<Script> script) const;
+ MessageTemplate::Template message() const { return message_; }
+
+ private:
+ int start_position_;
+ int end_position_;
+ MessageTemplate::Template message_;
+ const AstRawString* arg_;
+ const char* char_arg_;
+ };
+
+ void ThrowPendingError(Isolate* isolate, Handle<Script> script);
bool has_pending_error_;
- int start_position_;
- int end_position_;
- MessageTemplate::Template message_;
- const AstRawString* arg_;
- const char* char_arg_;
+ bool stack_overflow_;
+
+ MessageDetails error_details_;
ParseErrorType error_type_;
+ std::forward_list<MessageDetails> warning_messages_;
+
DISALLOW_COPY_AND_ASSIGN(PendingCompilationErrorHandler);
};
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 1699064bda..c52bb5222a 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -134,7 +134,7 @@ void PerfJitLogger::OpenJitDumpFile() {
perf_output_handle_ = fdopen(fd, "w+");
if (perf_output_handle_ == nullptr) return;
- setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+ setvbuf(perf_output_handle_, nullptr, _IOFBF, kLogBufferSize);
}
void PerfJitLogger::CloseJitDumpFile() {
@@ -377,8 +377,9 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
}
void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
- // Code relocation not supported.
- UNREACHABLE();
+ // We may receive a CodeMove event if a BytecodeArray object moves. Otherwise
+ // code relocation is not supported.
+ CHECK(from->IsBytecodeArray());
}
void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
@@ -388,7 +389,7 @@ void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
}
void PerfJitLogger::LogWriteHeader() {
- DCHECK(perf_output_handle_ != NULL);
+ DCHECK_NOT_NULL(perf_output_handle_);
PerfJitHeader header;
header.magic_ = PerfJitHeader::kMagic;
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/ppc/OWNERS
+++ b/deps/v8/src/ppc/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index e458364027..d9b12ac8db 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -86,12 +86,12 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_embedded_constant_pool &&
@@ -131,14 +131,14 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -193,7 +193,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -226,14 +226,14 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, NULL,
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@@ -258,7 +258,7 @@ Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
- DCHECK(tracked_branch_count_ > 0);
+ DCHECK_GT(tracked_branch_count_, 0);
int count = --tracked_branch_count_;
if (count == 0) {
// Reset
@@ -435,7 +435,7 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = NULL;
+ Code* code = nullptr;
set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 32758092c4..0c4a518772 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -111,7 +111,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
void CpuFeatures::PrintTarget() {
- const char* ppc_arch = NULL;
+ const char* ppc_arch = nullptr;
#if V8_TARGET_ARCH_PPC64
ppc_arch = "ppc64";
@@ -155,7 +155,7 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() {
- if (FLAG_enable_embedded_constant_pool && host_ != NULL) {
+ if (FLAG_enable_embedded_constant_pool && host_ != nullptr) {
Address constant_pool = host_->constant_pool();
return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
}
@@ -182,6 +182,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors
@@ -228,7 +239,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = buffer_ + request.offset();
- Address constant_pool = NULL;
+ Address constant_pool = nullptr;
set_target_address_at(nullptr, pc, constant_pool,
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
@@ -277,7 +288,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
- DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
+ DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -569,7 +580,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (maxReach && is_intn(offset, maxReach) == false) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry();
- CHECK(trampoline_pos != kInvalidSlotPos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
target_at_put(trampoline_pos, pos);
}
target_at_put(fixup_pos, trampoline_pos);
@@ -601,7 +612,7 @@ void Assembler::next(Label* L) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK(link >= 0);
+ DCHECK_GE(link, 0);
L->link_to(link);
}
}
@@ -1228,7 +1239,7 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
void Assembler::function_descriptor() {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
Label instructions;
- DCHECK(pc_offset() == 0);
+ DCHECK_EQ(pc_offset(), 0);
emit_label_addr(&instructions);
dp(0);
dp(0);
@@ -1288,7 +1299,7 @@ void Assembler::EnsureSpaceFor(int space_needed) {
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
- if (assembler != NULL && assembler->predictable_code_size()) return true;
+ if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled();
} else if (RelocInfo::IsNone(rmode_)) {
return false;
@@ -1507,7 +1518,7 @@ void Assembler::mov_label_addr(Register dst, Label* label) {
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
emit(dst.code());
- DCHECK(kMovInstructionsNoConstantPool >= 2);
+ DCHECK_GE(kMovInstructionsNoConstantPool, 2);
for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
}
}
@@ -1573,7 +1584,7 @@ void Assembler::mtxer(Register src) {
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
- DCHECK(static_cast<int>(bit) < 32);
+ DCHECK_LT(static_cast<int>(bit), 32);
int bf = cr.code();
int bfa = bit / CRWIDTH;
emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
@@ -1879,14 +1890,14 @@ void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
- DCHECK(static_cast<int>(bit) < 32);
+ DCHECK_LT(static_cast<int>(bit), 32);
int bt = bit;
emit(EXT4 | MTFSB0 | bt * B21 | rc);
}
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
- DCHECK(static_cast<int>(bit) < 32);
+ DCHECK_LT(static_cast<int>(bit), 32);
int bt = bit;
emit(EXT4 | MTFSB1 | bt * B21 | rc);
}
@@ -2074,7 +2085,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = NULL;
+ Code* code = nullptr;
RelocInfo rinfo(pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index d1411c142d..77c1422424 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -341,7 +341,7 @@ typedef DoubleRegister Simd128Register;
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_dreg = Register::no_reg();
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
@@ -461,12 +461,10 @@ class MemOperand BASE_EMBEDDED {
// PowerPC - base register
Register ra() const {
- DCHECK(ra_ != no_reg);
return ra_;
}
Register rb() const {
- DCHECK(offset_ == 0 && rb_ != no_reg);
return rb_;
}
@@ -503,14 +501,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -1641,7 +1640,6 @@ class Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
- friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 7dcc543b87..d5af6bfec0 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -9,11 +9,9 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/double.h"
#include "src/frame-constants.h"
#include "src/frames.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -40,52 +38,45 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
- Register input_reg = source();
Register result_reg = destination();
- DCHECK(is_truncating());
-
- int double_offset = offset();
// Immediate values for this stub fit in instructions, so it's safe to use ip.
- Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
- Register scratch_low =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
Register scratch_high =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
DoubleRegister double_scratch = kScratchDoubleReg;
__ push(scratch);
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += kPointerSize;
+ // Account for saved regs.
+ int argument_offset = 1 * kPointerSize;
- if (!skip_fastpath()) {
- // Load double input.
- __ lfd(double_scratch, MemOperand(input_reg, double_offset));
+ // Load double input.
+ __ lfd(double_scratch, MemOperand(sp, argument_offset));
- // Do fast-path convert from double to int.
- __ ConvertDoubleToInt64(double_scratch,
+ // Do fast-path convert from double to int.
+ __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_PPC64
- scratch,
+ scratch,
#endif
- result_reg, d0);
+ result_reg, d0);
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- __ TestIfInt32(result_reg, r0);
+ __ TestIfInt32(result_reg, r0);
#else
- __ TestIfInt32(scratch, result_reg, r0);
+ __ TestIfInt32(scratch, result_reg, r0);
#endif
- __ beq(&fastpath_done);
- }
+ __ beq(&fastpath_done);
__ Push(scratch_high, scratch_low);
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += 2 * kPointerSize;
+ // Account for saved regs.
+ argument_offset += 2 * kPointerSize;
__ lwz(scratch_high,
- MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ MemOperand(sp, argument_offset + Register::kExponentOffset));
__ lwz(scratch_low,
- MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ MemOperand(sp, argument_offset + Register::kMantissaOffset));
__ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
// Load scratch with exponent - 1. This is faster than loading
@@ -157,46 +148,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ mflr(r0);
- __ MultiPush(kJSCallerSaved | r0.bit());
- if (save_doubles()) {
- __ MultiPushDoubles(kCallerSavedDoubles);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = r4;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- __ MultiPopDoubles(kCallerSavedDoubles);
- }
- __ MultiPop(kJSCallerSaved | r0.bit());
- __ mtlr(r0);
- __ Ret();
-}
-
-
-void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ PushSafepointRegisters();
- __ blr();
-}
-
-
-void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ PopSafepointRegisters();
- __ blr();
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == r5);
@@ -307,37 +258,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-bool CEntryStub::NeedsImmovableCode() { return true; }
-
+Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreRegistersStateStub::GenerateAheadOfTime(isolate);
- RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
-void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
-void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
- StoreBufferOverflowStub(isolate, mode).GetCode();
}
@@ -470,7 +403,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// r14: still holds argc (callee-saved).
: r14;
- __ LeaveExitFrame(save_doubles(), argc, true);
+ __ LeaveExitFrame(save_doubles(), argc);
__ blr();
// Handling of exception.
@@ -478,10 +411,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
+ ExternalReference pending_handler_constant_pool_address(
+ IsolateAddressId::kPendingHandlerConstantPoolAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -518,15 +451,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ mov(r4, Operand(pending_handler_code_address));
- __ LoadP(r4, MemOperand(r4));
- __ mov(r5, Operand(pending_handler_offset_address));
- __ LoadP(r5, MemOperand(r5));
- __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ __ mov(ip, Operand(pending_handler_entrypoint_address));
+ __ LoadP(ip, MemOperand(ip));
if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4);
+ __ mov(kConstantPoolRegister,
+ Operand(pending_handler_constant_pool_address));
+ __ LoadP(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
}
- __ add(ip, r4, r5);
__ Jump(ip);
}
@@ -677,125 +608,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
-void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
- __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ cmp(length, scratch2);
- __ beq(&check_zero_length);
- __ bind(&strings_not_equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmpi(length, Operand::Zero());
- __ bne(&compare_chars);
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal);
-
- // Characters are equal.
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ Ret();
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
- Register length_delta = scratch3;
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(gt, scratch1, scratch2, scratch1, cr0);
- } else {
- Label skip;
- __ ble(&skip, cr0);
- __ mr(scratch1, scratch2);
- __ bind(&skip);
- }
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ cmpi(min_length, Operand::Zero());
- __ beq(&compare_lengths);
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mr(r3, length_delta);
- __ cmpi(r3, Operand::Zero());
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
- __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
- __ isel(eq, r3, r0, r4);
- __ isel(lt, r3, r5, r3);
- __ Ret();
- } else {
- Label less_equal, equal;
- __ ble(&less_equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
- __ Ret();
- __ bind(&less_equal);
- __ beq(&equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
- __ bind(&equal);
- __ Ret();
- }
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ addi(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, scratch1);
- __ add(right, right, scratch1);
- __ subfic(length, length, Operand::Zero());
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ lbzx(scratch1, MemOperand(left, index));
- __ lbzx(r0, MemOperand(right, index));
- __ cmp(scratch1, r0);
- __ bne(chars_not_equal);
- __ addi(index, index, Operand(1));
- __ cmpi(index, Operand::Zero());
- __ bne(&loop);
-}
-
-
// This stub is paired with DirectCEntryStub::GenerateCall
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Place the return address on the stack, making the call
@@ -826,390 +638,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm, Label* miss, Label* done, Register receiver,
- Register properties, Handle<Name> name, Register scratch0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
- __ subi(index, index, Operand(1));
- __ LoadSmiLiteral(
- ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
- __ and_(index, index, ip);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ ShiftLeftImm(ip, index, Operand(1));
- __ add(index, index, ip); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- Register tmp = properties;
- __ SmiToPtrArrayOffset(ip, index);
- __ add(tmp, properties, ip);
- __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- DCHECK(tmp != entity_name);
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- __ beq(done);
-
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
- __ beq(miss);
-
- Label good;
- __ cmp(entity_name, tmp);
- __ beq(&good);
-
- // Check if the entry name is not a unique name.
- __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ bind(&good);
-
- // Restore the properties.
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- }
-
- const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
- r5.bit() | r4.bit() | r3.bit());
-
- __ mflr(r0);
- __ MultiPush(spill_mask);
-
- __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ mov(r4, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmpi(r3, Operand::Zero());
-
- __ MultiPop(spill_mask); // MultiPop does not touch condition flags
- __ mtlr(r0);
-
- __ beq(done);
- __ bne(miss);
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: NameDictionary to probe
- // r4: key
- // dictionary: NameDictionary to probe.
- // index: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = r3;
- Register dictionary = r3;
- Register key = r4;
- Register index = r5;
- Register mask = r6;
- Register hash = r7;
- Register undefined = r8;
- Register entry_key = r9;
- Register scratch = r9;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ SmiUntag(mask);
- __ subi(mask, mask, Operand(1));
-
- __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ addi(index, hash,
- Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- } else {
- __ mr(index, hash);
- }
- __ srwi(r0, index, Operand(Name::kHashShift));
- __ and_(index, mask, r0);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ ShiftLeftImm(scratch, index, Operand(1));
- __ add(index, index, scratch); // index *= 3.
-
- __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
- __ add(index, dictionary, scratch);
- __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ cmp(entry_key, undefined);
- __ beq(&not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(entry_key, key);
- __ beq(&in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ li(result, Operand::Zero());
- __ Ret();
- }
-
- __ bind(&in_dictionary);
- __ li(result, Operand(1));
- __ Ret();
-
- __ bind(&not_in_dictionary);
- __ li(result, Operand::Zero());
- __ Ret();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- // Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- Instr first_instruction =
- Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- (Assembler::kInstrSize * 2));
-
- // Consider adding DCHECK here to catch unexpected instruction sequence
- if (BF == (first_instruction & kBOfieldMask)) {
- return INCREMENTAL;
- }
-
- if (BF == (second_instruction & kBOfieldMask)) {
- return INCREMENTAL_COMPACTION;
- }
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
-
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(),
- stub->instruction_start() + Assembler::kInstrSize,
- 2 * Assembler::kInstrSize);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch
- // it back and forth between branch condition True and False
- // when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
-
- // Clear the bit, branch on True for NOP action initially
- __ crclr(Assembler::encode_crbit(cr2, CR_LT));
- __ blt(&skip_to_incremental_noncompacting, cr2);
- __ blt(&skip_to_incremental_compacting, cr2);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- // patching not required on PPC as the initial path is effectively NOP
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(), &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address = r3 == regs_.address() ? regs_.scratch0() : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != r3);
- __ mr(address, regs_.address());
- __ mr(r3, regs_.object());
- __ mr(r4, address);
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask, eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
@@ -1225,7 +656,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
@@ -1473,7 +904,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ TestIfSmi(r7, r0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r7, r7, r8, MAP_TYPE);
@@ -1554,7 +985,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6, r0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
@@ -1600,8 +1031,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
int stack_space,
MemOperand* stack_space_operand,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
+ MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@@ -1695,17 +1125,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ LoadP(cp, *context_restore_operand);
- }
// LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != NULL) {
+ if (stack_space_operand != nullptr) {
__ lwz(r14, *stack_space_operand);
} else {
__ mov(r14, Operand(stack_space));
}
- __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
+ __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
@@ -1734,7 +1160,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : callee
// -- r7 : call_data
// -- r5 : holder
// -- r4 : api_function_address
@@ -1744,21 +1169,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1)* 4] : first argument
// -- sp[argc * 4] : receiver
- // -- sp[(argc + 1)* 4] : accessor_holder
// -----------------------------------
- Register callee = r3;
Register call_data = r7;
Register holder = r5;
Register api_function_address = r4;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1768,12 +1188,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // context save
- __ push(context);
-
- // callee
- __ push(callee);
-
// call data
__ push(call_data);
@@ -1789,38 +1203,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ push(holder);
- // Enter a new context
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 4] : new_target
- // -- sp[FCA::kArgsLength * 4] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 4] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ LoadP(accessor_holder,
- MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ LoadP(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ lbz(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ andi(r0, scratch2, Operand(1 << Map::kIsConstructor));
- __ bne(&skip_looking_for_constructor, cr0);
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ LoadP(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee
- __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
__ mr(scratch, sp);
@@ -1855,21 +1237,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
MemOperand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand,
- &context_restore_operand);
+ stack_space_operand, return_value_operand);
}
@@ -1962,7 +1336,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL, return_value_operand, NULL);
+ kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index 70da70831c..80284587db 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -8,210 +8,6 @@
namespace v8 {
namespace internal {
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in r0.
- static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat one-byte strings for equality and returns result in r0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
- Register left, Register right,
- Register length,
- Register scratch1,
- Label* chars_not_equal);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class StoreRegistersStateStub : public PlatformCodeStub {
- public:
- explicit StoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
-};
-
-
-class RestoreRegistersStateStub : public PlatformCodeStub {
- public:
- explicit RestoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
-};
-
-
-class RecordWriteStub : public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate, Register object, Register value,
- Register address, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- // Consider adding DCHECK here to catch bad patching
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BT);
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- // Consider adding DCHECK here to catch bad patching
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF);
- }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->mflr(r0);
- masm->push(r0);
- masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- // Save all volatile FP registers except d0.
- masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Restore all volatile FP registers except d0.
- masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
- }
- masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
- masm->pop(r0);
- masm->mtlr(r0);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits : public BitField<int, 0, 5> {};
- class ValueBits : public BitField<int, 5, 5> {};
- class AddressBits : public BitField<int, 10, 5> {};
- class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
- };
- class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
@@ -223,48 +19,12 @@ class DirectCEntryStub : public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-
-class NameDictionaryLookupStub : public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
- Label* done, Register receiver,
- Register properties, Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits : public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 695ae6beb6..13c9af7e22 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/codegen-ppc.h"
-
#if V8_TARGET_ARCH_PPC
#include <memory>
@@ -15,22 +13,21 @@
namespace v8 {
namespace internal {
-
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
-// Called from C
+ // Called from C
__ function_descriptor();
__ MovFromFloatParameter(d1);
@@ -43,115 +40,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-// assume ip can be used as a scratch register below
-void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
- Register index, Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ andi(r0, result, Operand(kIsIndirectStringMask));
- __ beq(&check_sequential, cr0);
-
- // Dispatch on the indirect string shape: slice or cons or thin.
- Label cons_string, thin_string;
- __ andi(ip, result, Operand(kStringRepresentationMask));
- __ cmpi(ip, Operand(kConsStringTag));
- __ beq(&cons_string);
- __ cmpi(ip, Operand(kThinStringTag));
- __ beq(&thin_string);
-
- // Handle slices.
- __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ SmiUntag(ip, result);
- __ add(index, index, ip);
- __ b(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ b(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kempty_stringRootIndex);
- __ bne(call_runtime);
- // Get the first of the two strings and load its instance type.
- __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ b(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ andi(r0, result, Operand(kStringRepresentationMask));
- __ bne(&external_string, cr0);
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ addi(string, string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ b(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ andi(r0, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ andi(r0, result, Operand(kShortExternalStringMask));
- __ bne(call_runtime, cr0);
- __ LoadP(string,
- FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ andi(r0, result, Operand(kStringEncodingMask));
- __ bne(&one_byte, cr0);
- // Two-byte string.
- __ ShiftLeftImm(result, index, Operand(1));
- __ lhzx(result, MemOperand(string, result));
- __ b(&done);
- __ bind(&one_byte);
- // One-byte string.
- __ lbzx(result, MemOperand(string, index));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
deleted file mode 100644
index b0d344a013..0000000000
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PPC_CODEGEN_PPC_H_
-#define V8_PPC_CODEGEN_PPC_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm, Register string, Register index,
- Register result, Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PPC_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 7bc47d4644..caa1a24354 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -100,7 +99,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ LoadP(r5, MemOperand(sp, i * kPointerSize));
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 2a1044f7ad..7e962e7849 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -149,7 +149,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'r');
+ DCHECK_EQ(format[0], 'r');
if ((format[1] == 't') || (format[1] == 's')) { // 'rt & 'rs register
int reg = instr->RTValue();
@@ -172,7 +172,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all FP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'D');
+ DCHECK_EQ(format[0], 'D');
int retval = 2;
int reg = -1;
@@ -270,7 +270,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 8;
}
case 's': {
- DCHECK(format[1] == 'h');
+ DCHECK_EQ(format[1], 'h');
int32_t value = 0;
int32_t opcode = instr->OpcodeValue() << 26;
int32_t sh = instr->Bits(15, 11);
@@ -601,19 +601,19 @@ void Decoder::DecodeExt2(Instruction* instr) {
return;
}
case LFSX: {
- Format(instr, "lfsx 'rt, 'ra, 'rb");
+ Format(instr, "lfsx 'Dt, 'ra, 'rb");
return;
}
case LFSUX: {
- Format(instr, "lfsux 'rt, 'ra, 'rb");
+ Format(instr, "lfsux 'Dt, 'ra, 'rb");
return;
}
case LFDX: {
- Format(instr, "lfdx 'rt, 'ra, 'rb");
+ Format(instr, "lfdx 'Dt, 'ra, 'rb");
return;
}
case LFDUX: {
- Format(instr, "lfdux 'rt, 'ra, 'rb");
+ Format(instr, "lfdux 'Dt, 'ra, 'rb");
return;
}
case STFSX: {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.cc b/deps/v8/src/ppc/frame-constants-ppc.cc
index 6497ad440d..f49296292a 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/ppc/frame-constants-ppc.cc
@@ -27,6 +27,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 7f0b8a5961..9c4fe5fd6a 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -56,9 +56,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
const Register StoreTransitionDescriptor::MapRegister() { return r8; }
-const Register StringCompareDescriptor::LeftRegister() { return r4; }
-const Register StringCompareDescriptor::RightRegister() { return r3; }
-
const Register ApiGetterDescriptor::HolderRegister() { return r3; }
const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
@@ -215,7 +212,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -235,7 +232,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// r4 -- function
// r5 -- allocation site with elements kind
Register registers[] = {r4, r5, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
@@ -279,10 +276,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r3, // callee
- r7, // call_data
- r5, // holder
- r4, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ r7, // call_data
+ r5, // holder
+ r4, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -331,8 +328,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // the value to pass to the generator
- r4, // the JSGeneratorObject to resume
- r5 // the resume mode (tagged)
+ r4 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index efb6c2bab9..75e176c09c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -11,7 +11,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
@@ -329,12 +329,6 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
-void MacroAssembler::InNewSpace(Register object, Register scratch,
- Condition cond, Label* branch) {
- DCHECK(cond == eq || cond == ne);
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@@ -377,7 +371,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -389,7 +383,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -479,13 +473,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
mflr(r0);
push(r0);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(r0);
mtlr(r0);
@@ -506,41 +494,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch,
- SaveFPRegsMode fp_mode) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- LoadP(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- StoreP(address, MemOperand(scratch));
- addi(scratch, scratch, Operand(kPointerSize));
- // Write back new top of buffer.
- StoreP(scratch, MemOperand(ip));
- // Call stub on end of buffer.
- // Check for end of buffer.
- TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);
-
- Ret(ne, cr0);
- mflr(r0);
- push(r0);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
- CallStub(&store_buffer_overflow);
- pop(r0);
- mtlr(r0);
- bind(&done);
- Ret();
-}
-
void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
mflr(r0);
@@ -603,7 +556,7 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
if (num_unsaved > 0) {
subi(sp, sp, Operand(num_unsaved * kPointerSize));
}
@@ -958,50 +911,6 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
return frame_ends;
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- int fp_delta = 0;
- mflr(r0);
- if (FLAG_enable_embedded_constant_pool) {
- if (target.is_valid()) {
- Push(r0, fp, kConstantPoolRegister, context, target);
- fp_delta = 3;
- } else {
- Push(r0, fp, kConstantPoolRegister, context);
- fp_delta = 2;
- }
- } else {
- if (target.is_valid()) {
- Push(r0, fp, context, target);
- fp_delta = 2;
- } else {
- Push(r0, fp, context);
- fp_delta = 1;
- }
- }
- addi(fp, sp, Operand(fp_delta * kPointerSize));
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- if (FLAG_enable_embedded_constant_pool) {
- if (target.is_valid()) {
- Pop(r0, fp, kConstantPoolRegister, context, target);
- } else {
- Pop(r0, fp, kConstantPoolRegister, context);
- }
- } else {
- if (target.is_valid()) {
- Pop(r0, fp, context, target);
- } else {
- Pop(r0, fp, context);
- }
- }
- mtlr(r0);
-}
-
// ExitFrame layout (probably wrongish.. needs updating)
//
// SP -> previousSP
@@ -1026,7 +935,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- DCHECK(stack_space > 0);
+ DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
// all of the pushes that have happened inside of V8
@@ -1101,7 +1010,6 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// Optionally restore all double registers.
@@ -1121,11 +1029,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
StoreP(r6, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- if (restore_context) {
- mov(ip, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- LoadP(cp, MemOperand(ip));
- }
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ LoadP(cp, MemOperand(ip));
+
#ifdef DEBUG
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@@ -1458,8 +1365,8 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
- lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmpi(type_reg, Operand(type));
}
@@ -1559,31 +1466,6 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- mov(value, Operand(cell));
- LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done);
- CompareObjectType(result, temp, temp2, MAP_TYPE);
- bne(&done);
- LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- b(&loop);
- bind(&done);
-}
-
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@@ -1608,11 +1490,6 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
- SmiUntag(ip, smi);
- ConvertIntToDouble(ip, value);
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
Register scratch,
@@ -1651,7 +1528,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// Put input on stack.
stfdu(double_input, MemOperand(sp, -kDoubleSize));
- CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
addi(sp, sp, Operand(kDoubleSize));
pop(r0);
@@ -1747,7 +1624,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
lwz(scratch1, MemOperand(scratch2));
@@ -1759,7 +1636,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
lwz(scratch1, MemOperand(scratch2));
@@ -1786,7 +1663,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -1926,18 +1803,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- beq(&succeed, cr0);
- cmpi(reg, Operand(SYMBOL_TYPE));
- bne(not_unique_name);
-
- bind(&succeed);
-}
-
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2076,78 +1941,6 @@ void TurboAssembler::CheckPageFlag(
}
}
-
-void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
- Register scratch1, Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
- Register mask_scratch, Label* has_color,
- int first_bit, int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- // Test the first bit
- and_(r0, ip, mask_scratch, SetRC);
- b(first_bit == 1 ? eq : ne, &other_color, cr0);
- // Shift left 1
- // May need to load the next cell
- slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
- beq(&word_boundary, cr0);
- // Test the second bit
- and_(r0, ip, mask_scratch, SetRC);
- b(second_bit == 1 ? ne : eq, has_color, cr0);
- b(&other_color);
-
- bind(&word_boundary);
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
- andi(r0, ip, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color, cr0);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
- and_(bitmap_reg, addr_reg, r0);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
- ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
- ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
- add(bitmap_reg, bitmap_reg, ip);
- li(ip, Operand(1));
- slw(mask_reg, ip, mask_reg);
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Register load_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- and_(r0, mask_scratch, load_scratch, SetRC);
- beq(value_is_white, cr0);
-}
-
void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
void TurboAssembler::ResetRoundingMode() {
@@ -2155,24 +1948,6 @@ void TurboAssembler::ResetRoundingMode() {
}
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- LoadP(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- const int getterOffset = AccessorPair::kGetterOffset;
- const int setterOffset = AccessorPair::kSetterOffset;
- int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
- LoadP(dst, FieldMemOperand(dst, offset));
-}
-
////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
@@ -3007,6 +2782,90 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
+void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ mr(scratch, src);
+ mr(src, dst);
+ mr(dst, scratch);
+}
+
+void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
+ if (dst.ra() != r0) DCHECK(!AreAliased(src, dst.ra(), scratch));
+ if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
+ DCHECK(!AreAliased(src, scratch));
+ mr(scratch, src);
+ LoadP(src, dst);
+ StoreP(scratch, dst);
+}
+
+void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+ Register scratch_1) {
+ if (src.ra() != r0) DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
+ if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
+ if (dst.ra() != r0) DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
+ if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadP(scratch_0, src);
+ LoadP(scratch_1, dst);
+ StoreP(scratch_0, dst);
+ StoreP(scratch_1, src);
+}
+
+void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ fmr(scratch, src);
+ fmr(src, dst);
+ fmr(dst, scratch);
+}
+
+void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
+ DoubleRegister scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ fmr(scratch, src);
+ LoadSingle(src, dst);
+ StoreSingle(scratch, dst);
+}
+
+void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
+ DoubleRegister scratch_0,
+ DoubleRegister scratch_1) {
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadSingle(scratch_0, src);
+ LoadSingle(scratch_1, dst);
+ StoreSingle(scratch_0, dst);
+ StoreSingle(scratch_1, src);
+}
+
+void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ fmr(scratch, src);
+ fmr(src, dst);
+ fmr(dst, scratch);
+}
+
+void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
+ DoubleRegister scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ fmr(scratch, src);
+ LoadDouble(src, dst);
+ StoreDouble(scratch, dst);
+}
+
+void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
+ DoubleRegister scratch_0,
+ DoubleRegister scratch_1) {
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadDouble(scratch_0, src);
+ LoadDouble(scratch_1, dst);
+ StoreDouble(scratch_0, dst);
+ StoreDouble(scratch_1, src);
+}
+
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
@@ -3031,52 +2890,34 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-#endif
-
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
- flush_cache_(flush_cache) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
+bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
+ DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
+ DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
+ DoubleRegister reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- if (flush_cache_ == FLUSH) {
- Assembler::FlushICache(masm_.isolate(), address_, size_);
- }
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
-void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
-
-
-void CodePatcher::EmitCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- switch (cond) {
- case eq:
- instr = (instr & ~kCondMask) | BT;
- break;
- case ne:
- instr = (instr & ~kCondMask) | BF;
- break;
- default:
- UNIMPLEMENTED();
- }
- masm_.emit(instr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index cc1d7a151e..c508ae128a 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -66,6 +66,11 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg, Register reg9 = no_reg,
Register reg10 = no_reg);
+bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
+ DoubleRegister reg3 = no_dreg, DoubleRegister reg4 = no_dreg,
+ DoubleRegister reg5 = no_dreg, DoubleRegister reg6 = no_dreg,
+ DoubleRegister reg7 = no_dreg, DoubleRegister reg8 = no_dreg,
+ DoubleRegister reg9 = no_dreg, DoubleRegister reg10 = no_dreg);
#endif
// These exist to provide portability between 32 and 64bit
@@ -339,6 +344,21 @@ class TurboAssembler : public Assembler {
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
+ void SwapP(Register src, Register dst, Register scratch);
+ void SwapP(Register src, MemOperand dst, Register scratch);
+ void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+ Register scratch_1);
+ void SwapFloat32(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch);
+ void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
+ void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
+ DoubleRegister scratch_1);
+ void SwapDouble(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch);
+ void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
+ void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
+ DoubleRegister scratch_1);
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
@@ -649,18 +669,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -743,7 +751,6 @@ class MacroAssembler : public TurboAssembler {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length = false);
// Load the global proxy from the current context.
@@ -842,11 +849,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -863,12 +865,6 @@ class MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
@@ -890,9 +886,6 @@ class MacroAssembler : public TurboAssembler {
bne(if_not_equal);
}
- // Load the value of a smi object into a double register.
- void SmiToDouble(DoubleRegister value, Register smi);
-
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1015,17 +1008,8 @@ class MacroAssembler : public TurboAssembler {
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// ---------------------------------------------------------------------------
- // String utilities
-
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
-
- // ---------------------------------------------------------------------------
// Patching helpers.
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template <typename Field>
void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
@@ -1037,9 +1021,6 @@ class MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg, rc);
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1053,12 +1034,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond, // eq for new space, ne otherwise.
Label* branch);
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -1067,37 +1042,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- enum FlushICache { FLUSH, DONT_FLUSH };
-
- CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache = FLUSH);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void EmitCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
- FlushICache flush_cache_; // Whether to flush the I cache after patching.
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0f90700c81..ff62c4a56e 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -12,6 +12,8 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/ostreams.h"
#include "src/ppc/constants-ppc.h"
#include "src/ppc/frame-constants-ppc.h"
#include "src/ppc/simulator-ppc.h"
@@ -132,7 +134,7 @@ bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
// Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
return false;
}
@@ -146,25 +148,25 @@ bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
- sim_->break_pc_ = NULL;
+ sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void PPCDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void PPCDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@@ -208,11 +210,11 @@ void PPCDebugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@@ -371,8 +373,8 @@ void PPCDebugger::Debug() {
}
sim_->set_pc(value);
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- intptr_t* cur = NULL;
- intptr_t* end = NULL;
+ intptr_t* cur = nullptr;
+ intptr_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -422,9 +424,9 @@ void PPCDebugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* prev = NULL;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* prev = nullptr;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -481,7 +483,7 @@ void PPCDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
+ if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "cr") == 0) {
@@ -639,8 +641,8 @@ void PPCDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
- DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@@ -686,7 +688,7 @@ void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@@ -697,10 +699,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
- DCHECK(size <= CachePage::kPageSize);
+ DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
- DCHECK((start & CachePage::kLineMask) == 0);
- DCHECK((size & CachePage::kLineMask) == 0);
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -741,7 +743,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
+ if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@@ -757,7 +759,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
- break_pc_ = NULL;
+ break_pc_ = nullptr;
break_instr_ = 0;
// Set up architecture state.
@@ -782,7 +784,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[sp] =
reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
- last_debugger_input_ = NULL;
+ last_debugger_input_ = nullptr;
}
Simulator::~Simulator() {
@@ -804,7 +806,7 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
type_(type),
- next_(NULL) {
+ next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->FlushICache(
isolate->simulator_i_cache(),
@@ -831,9 +833,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) {
- DCHECK_EQ(current->type(), type);
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
return current;
}
}
@@ -905,10 +907,10 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@@ -1607,13 +1609,13 @@ bool Simulator::isStopInstruction(Instruction* instr) {
bool Simulator::isWatchedStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@@ -1637,7 +1639,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF(
@@ -1654,7 +1656,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@@ -2240,9 +2242,21 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
int32_t val = ReadW(ra_val + rb_val, instr);
float* fptr = reinterpret_cast<float*>(&val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ // Conversion using double changes sNan to qNan on ia32/x64
+ if ((val & 0x7f800000) == 0x7f800000) {
+ int64_t dval = static_cast<int64_t>(val);
+ dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ set_d_register(frt, dval);
+ } else {
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+ }
+#else
set_d_register_from_double(frt, static_cast<double>(*fptr));
+#endif
if (opcode == LFSUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2257,7 +2271,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + rb_val));
set_d_register(frt, *dptr);
if (opcode == LFDUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2271,9 +2285,23 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
float frs_val = static_cast<float>(get_double_from_d_register(frs));
int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
+ sval = ((dval & 0xc000000000000000) >> 32) |
+ ((dval & 0x07ffffffe0000000) >> 29);
+ p = &sval;
+ } else {
+ p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
+#endif
WriteW(ra_val + rb_val, *p, instr);
if (opcode == STFSUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2288,7 +2316,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t frs_val = get_d_register(frs);
WriteDW(ra_val + rb_val, frs_val);
if (opcode == STFDUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2340,7 +2368,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
set_register(rt, ReadWU(ra_val + offset, instr));
if (opcode == LWZU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -2354,7 +2382,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
set_register(rt, ReadB(ra_val + offset) & 0xFF);
if (opcode == LBZU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -2369,7 +2397,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
WriteW(ra_val + offset, rs_val, instr);
if (opcode == STWU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3029,7 +3057,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteW(ra_val + rb_val, rs_val, instr);
if (opcode == STWUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3044,7 +3072,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteB(ra_val + rb_val, rs_val);
if (opcode == STBUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3059,7 +3087,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteH(ra_val + rb_val, rs_val, instr);
if (opcode == STHUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3113,7 +3141,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteDW(ra_val + rb_val, rs_val);
if (opcode == STDUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3209,7 +3237,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
WriteB(ra_val + offset, rs_val);
if (opcode == STBU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3252,7 +3280,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
WriteH(ra_val + offset, rs_val, instr);
if (opcode == STHU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3286,7 +3314,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
set_d_register_from_double(frt, static_cast<double>(*fptr));
#endif
if (opcode == LFSU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3301,7 +3329,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
set_d_register(frt, *dptr);
if (opcode == LFDU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3331,7 +3359,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#endif
WriteW(ra_val + offset, *p, instr);
if (opcode == STFSU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3346,7 +3374,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t frs_val = get_d_register(frs);
WriteDW(ra_val + offset, frs_val);
if (opcode == STFDU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3911,7 +3939,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case 1: { // ldu
intptr_t* result = ReadDW(ra_val + offset);
set_register(rt, *result);
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
break;
}
@@ -3933,7 +3961,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
WriteDW(ra_val + offset, rs_val);
if (opcode == STDU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 3a7a88a0c5..8ee4527234 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -31,14 +31,14 @@ AllocationTraceNode* AllocationTraceNode::FindChild(
for (AllocationTraceNode* node : children_) {
if (node->function_info_index() == function_info_index) return node;
}
- return NULL;
+ return nullptr;
}
AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
unsigned function_info_index) {
AllocationTraceNode* child = FindChild(function_info_index);
- if (child == NULL) {
+ if (child == nullptr) {
child = new AllocationTraceNode(tree_, function_info_index);
children_.push_back(child);
}
@@ -54,7 +54,7 @@ void AllocationTraceNode::AddAllocation(unsigned size) {
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
base::OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
- if (tracker != NULL) {
+ if (tracker != nullptr) {
AllocationTracker::FunctionInfo* info =
tracker->function_info_list()[function_info_index_];
base::OS::Print("%s #%u", info->name, id_);
@@ -246,7 +246,7 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
base::HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());
info->function_id = id;
diff --git a/deps/v8/src/profiler/circular-queue-inl.h b/deps/v8/src/profiler/circular-queue-inl.h
index 9f9df656a6..413b236d37 100644
--- a/deps/v8/src/profiler/circular-queue-inl.h
+++ b/deps/v8/src/profiler/circular-queue-inl.h
@@ -28,7 +28,7 @@ T* SamplingCircularQueue<T, L>::Peek() {
if (base::Acquire_Load(&dequeue_pos_->marker) == kFull) {
return &dequeue_pos_->record;
}
- return NULL;
+ return nullptr;
}
@@ -45,7 +45,7 @@ T* SamplingCircularQueue<T, L>::StartEnqueue() {
if (base::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
return &enqueue_pos_->record;
}
- return NULL;
+ return nullptr;
}
diff --git a/deps/v8/src/profiler/circular-queue.h b/deps/v8/src/profiler/circular-queue.h
index 272843bb2d..d3df1d9f38 100644
--- a/deps/v8/src/profiler/circular-queue.h
+++ b/deps/v8/src/profiler/circular-queue.h
@@ -11,11 +11,10 @@
namespace v8 {
namespace internal {
-
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
-// StartEnqueue will return NULL. The queue is designed with
+// StartEnqueue will return nullptr. The queue is designed with
// a goal in mind to evade cache lines thrashing by preventing
// simultaneous reads and writes to adjanced memory locations.
template<typename T, unsigned Length>
@@ -26,14 +25,14 @@ class SamplingCircularQueue {
~SamplingCircularQueue();
// StartEnqueue returns a pointer to a memory location for storing the next
- // record or NULL if all entries are full at the moment.
+ // record or nullptr if all entries are full at the moment.
T* StartEnqueue();
// Notifies the queue that the producer has complete writing data into the
// memory returned by StartEnqueue and it can be passed to the consumer.
void FinishEnqueue();
// Executed on the consumer (analyzer) thread.
- // Retrieves, but does not remove, the head of this queue, returning NULL
+ // Retrieves, but does not remove, the head of this queue, returning nullptr
// if this queue is empty. After the record had been read by a consumer,
// Remove must be called.
T* Peek();
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 440c6a1cce..3bc6541048 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -27,7 +27,7 @@ void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) {
+ if (entry != nullptr) {
entry->set_bailout_reason(bailout_reason);
}
}
@@ -35,7 +35,7 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) entry->set_deopt_info(deopt_reason, deopt_id);
+ if (entry != nullptr) entry->set_deopt_info(deopt_reason, deopt_id);
}
@@ -52,7 +52,7 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
- if (address == NULL) return NULL;
+ if (address == nullptr) return nullptr;
TickSampleEventRecord* evt =
new (address) TickSampleEventRecord(last_code_event_id_.Value());
return &evt->sample;
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index bae592b36d..91617d7231 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -4,6 +4,9 @@
#include "src/profiler/cpu-profiler.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/template-utils.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
@@ -129,7 +132,7 @@ ProfilerEventsProcessor::SampleProcessingResult
}
const TickSampleEventRecord* record = ticks_buffer_.Peek();
- if (record == NULL) {
+ if (record == nullptr) {
if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
return FoundSampleForNextCodeEvent;
}
@@ -171,7 +174,7 @@ void ProfilerEventsProcessor::Run() {
#endif
}
- // Schedule next sample. sampler_ is NULL in tests.
+ // Schedule next sample. sampler_ is nullptr in tests.
if (sampler_) sampler_->DoSample();
}
@@ -241,14 +244,50 @@ void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
}
}
+namespace {
+
+class CpuProfilersManager {
+ public:
+ void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto result = profilers_.insert(
+ std::pair<Isolate*, std::unique_ptr<std::set<CpuProfiler*>>>(
+ isolate, base::make_unique<std::set<CpuProfiler*>>()));
+ result.first->second->insert(profiler);
+ }
+
+ void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto it = profilers_.find(isolate);
+ DCHECK(it != profilers_.end());
+ it->second->erase(profiler);
+ if (it->second->empty()) {
+ profilers_.erase(it);
+ }
+ }
+
+ void CallCollectSample(Isolate* isolate) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto profilers = profilers_.find(isolate);
+ if (profilers == profilers_.end()) return;
+ for (auto it : *profilers->second) {
+ it->CollectSample();
+ }
+ }
+
+ private:
+ std::map<Isolate*, std::unique_ptr<std::set<CpuProfiler*>>> profilers_;
+ base::Mutex mutex_;
+};
+
+base::LazyInstance<CpuProfilersManager>::type g_profilers_manager =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
CpuProfiler::CpuProfiler(Isolate* isolate)
- : isolate_(isolate),
- sampling_interval_(base::TimeDelta::FromMicroseconds(
- FLAG_cpu_profiler_sampling_interval)),
- profiles_(new CpuProfilesCollection(isolate)),
- is_profiling_(false) {
- profiles_->set_cpu_profiler(this);
-}
+ : CpuProfiler(isolate, new CpuProfilesCollection(isolate), nullptr,
+ nullptr) {}
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
ProfileGenerator* test_generator,
@@ -261,10 +300,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
processor_(test_processor),
is_profiling_(false) {
profiles_->set_cpu_profiler(this);
+ g_profilers_manager.Pointer()->AddProfiler(isolate, this);
}
CpuProfiler::~CpuProfiler() {
DCHECK(!is_profiling_);
+ g_profilers_manager.Pointer()->RemoveProfiler(isolate_, this);
}
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
@@ -292,6 +333,11 @@ void CpuProfiler::CreateEntriesForRuntimeCallStats() {
}
}
+// static
+void CpuProfiler::CollectSample(Isolate* isolate) {
+ g_profilers_manager.Pointer()->CallCollectSample(isolate);
+}
+
void CpuProfiler::CollectSample() {
if (processor_) {
processor_->AddCurrentStack(isolate_);
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 5fd7fa14da..e6e3fea333 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -195,6 +195,8 @@ class CpuProfiler : public CodeEventObserver {
~CpuProfiler() override;
+ static void CollectSample(Isolate* isolate);
+
void set_sampling_interval(base::TimeDelta value);
void CollectSample();
void StartProfiling(const char* title, bool record_samples = false);
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index e28f267176..8f0afdc771 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -44,10 +44,11 @@ void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
void HeapProfiler::DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
- DCHECK(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
+ DCHECK_NE(class_id, v8::HeapProfiler::kPersistentHandleNoClassId);
if (wrapper_callbacks_.size() <= class_id) {
wrapper_callbacks_.insert(wrapper_callbacks_.end(),
- class_id - wrapper_callbacks_.size() + 1, NULL);
+ class_id - wrapper_callbacks_.size() + 1,
+ nullptr);
}
wrapper_callbacks_[class_id] = callback;
}
@@ -55,7 +56,7 @@ void HeapProfiler::DefineWrapperClass(
v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
uint16_t class_id, Object** wrapper) {
- if (wrapper_callbacks_.size() <= class_id) return NULL;
+ if (wrapper_callbacks_.size() <= class_id) return nullptr;
return wrapper_callbacks_[class_id](
class_id, Utils::ToLocal(Handle<Object>(wrapper)));
}
@@ -82,7 +83,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
HeapSnapshotGenerator generator(result, control, resolver, heap());
if (!generator.GenerateSnapshot()) {
delete result;
- result = NULL;
+ result = nullptr;
} else {
snapshots_.push_back(result);
}
@@ -182,19 +183,18 @@ void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
}
Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
- HeapObject* object = NULL;
+ HeapObject* object = nullptr;
HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
if (ids_->FindEntry(obj->address()) == id) {
- DCHECK(object == NULL);
+ DCHECK_NULL(object);
object = obj;
// Can't break -- kFilterUnreachable requires full heap traversal.
}
}
- return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+ return object != nullptr ? Handle<HeapObject>(object) : Handle<HeapObject>();
}
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index 599b5d9d92..83f210e86a 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -39,7 +39,7 @@ int HeapEntry::set_children_index(int index) {
}
std::deque<HeapGraphEdge*>::iterator HeapEntry::children_begin() {
- DCHECK(children_index_ >= 0);
+ DCHECK_GE(children_index_, 0);
SLOW_DCHECK(
children_index_ < static_cast<int>(snapshot_->children().size()) ||
(children_index_ == static_cast<int>(snapshot_->children().size()) &&
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 5c80706a3c..1f6459c904 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -201,12 +201,12 @@ void HeapSnapshot::AddSyntheticRootEntries() {
AddGcSubrootEntry(tag, id);
id += HeapObjectsMap::kObjectIdStep;
}
- DCHECK(HeapObjectsMap::kFirstAvailableObjectId == id);
+ DCHECK_EQ(HeapObjectsMap::kFirstAvailableObjectId, id);
}
HeapEntry* HeapSnapshot::AddRootEntry() {
- DCHECK(root_index_ == HeapEntry::kNoEntry);
+ DCHECK_EQ(root_index_, HeapEntry::kNoEntry);
DCHECK(entries_.empty()); // Root entry must be the first one.
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"",
@@ -214,13 +214,13 @@ HeapEntry* HeapSnapshot::AddRootEntry() {
0,
0);
root_index_ = entry->index();
- DCHECK(root_index_ == 0);
+ DCHECK_EQ(root_index_, 0);
return entry;
}
HeapEntry* HeapSnapshot::AddGcRootsEntry() {
- DCHECK(gc_roots_index_ == HeapEntry::kNoEntry);
+ DCHECK_EQ(gc_roots_index_, HeapEntry::kNoEntry);
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"(GC roots)",
HeapObjectsMap::kGcRootsObjectId,
@@ -232,7 +232,7 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) {
- DCHECK(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
+ DCHECK_EQ(gc_subroot_indexes_[tag], HeapEntry::kNoEntry);
DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
VisitorSynchronization::kTagNames[tag], id, 0, 0);
@@ -273,7 +273,7 @@ HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
entries_by_id->begin(), entries_by_id->end(), id,
[](HeapEntry* first, SnapshotObjectId val) { return first->id() < val; });
- if (it == entries_by_id->end() || (*it)->id() != id) return NULL;
+ if (it == entries_by_id->end() || (*it)->id() != id) return nullptr;
return *it;
}
@@ -319,24 +319,24 @@ HeapObjectsMap::HeapObjectsMap(Heap* heap)
}
bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
- DCHECK(to != NULL);
- DCHECK(from != NULL);
+ DCHECK_NOT_NULL(to);
+ DCHECK_NOT_NULL(from);
if (from == to) return false;
void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
- if (from_value == NULL) {
+ if (from_value == nullptr) {
// It may occur that some untracked object moves to an address X and there
// is a tracked object at that address. In this case we should remove the
// entry as we know that the object has died.
void* to_value = entries_map_.Remove(to, ComputePointerHash(to));
- if (to_value != NULL) {
+ if (to_value != nullptr) {
int to_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(to_value));
- entries_.at(to_entry_info_index).addr = NULL;
+ entries_.at(to_entry_info_index).addr = nullptr;
}
} else {
base::HashMap::Entry* to_entry =
entries_map_.LookupOrInsert(to, ComputePointerHash(to));
- if (to_entry->value != NULL) {
+ if (to_entry->value != nullptr) {
// We found the existing entry with to address for an old object.
// Without this operation we will have two EntryInfo's with the same
// value in addr field. It is bad because later at RemoveDeadEntries
@@ -344,7 +344,7 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
// entry.
int to_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
- entries_.at(to_entry_info_index).addr = NULL;
+ entries_.at(to_entry_info_index).addr = nullptr;
}
int from_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(from_value));
@@ -360,7 +360,7 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
- return from_value != NULL;
+ return from_value != nullptr;
}
@@ -372,7 +372,7 @@ void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
base::HashMap::Entry* entry =
entries_map_.Lookup(addr, ComputePointerHash(addr));
- if (entry == NULL) return 0;
+ if (entry == nullptr) return 0;
int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
DCHECK(static_cast<uint32_t>(entries_.size()) > entries_map_.occupancy());
@@ -386,7 +386,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
DCHECK(static_cast<uint32_t>(entries_.size()) > entries_map_.occupancy());
base::HashMap::Entry* entry =
entries_map_.LookupOrInsert(addr, ComputePointerHash(addr));
- if (entry->value != NULL) {
+ if (entry->value != nullptr) {
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
@@ -416,8 +416,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
GarbageCollectionReason::kHeapProfiler);
HeapIterator iterator(heap_);
- for (HeapObject* obj = iterator.next();
- obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
FindOrAddEntry(obj->address(), obj->Size());
if (FLAG_heap_profiler_trace_objects) {
@@ -485,7 +484,7 @@ SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream,
void HeapObjectsMap::RemoveDeadEntries() {
DCHECK(entries_.size() > 0 && entries_.at(0).id == 0 &&
- entries_.at(0).addr == NULL);
+ entries_.at(0).addr == nullptr);
size_t first_free_entry = 1;
for (size_t i = 1; i < entries_.size(); ++i) {
EntryInfo& entry_info = entries_.at(i);
@@ -530,7 +529,7 @@ HeapEntriesMap::HeapEntriesMap() : entries_() {}
int HeapEntriesMap::Map(HeapThing thing) {
base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
- if (cache_entry == NULL) return HeapEntry::kNoEntry;
+ if (cache_entry == nullptr) return HeapEntry::kNoEntry;
return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
}
@@ -538,7 +537,7 @@ int HeapEntriesMap::Map(HeapThing thing) {
void HeapEntriesMap::Pair(HeapThing thing, int entry) {
base::HashMap::Entry* cache_entry =
entries_.LookupOrInsert(thing, Hash(thing));
- DCHECK(cache_entry->value == NULL);
+ DCHECK_NULL(cache_entry->value);
cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
}
@@ -552,7 +551,7 @@ void HeapObjectsSet::Clear() {
bool HeapObjectsSet::Contains(Object* obj) {
if (!obj->IsHeapObject()) return false;
HeapObject* object = HeapObject::cast(obj);
- return entries_.Lookup(object, HeapEntriesMap::Hash(object)) != NULL;
+ return entries_.Lookup(object, HeapEntriesMap::Hash(object)) != nullptr;
}
@@ -567,9 +566,9 @@ const char* HeapObjectsSet::GetTag(Object* obj) {
HeapObject* object = HeapObject::cast(obj);
base::HashMap::Entry* cache_entry =
entries_.Lookup(object, HeapEntriesMap::Hash(object));
- return cache_entry != NULL
- ? reinterpret_cast<const char*>(cache_entry->value)
- : NULL;
+ return cache_entry != nullptr
+ ? reinterpret_cast<const char*>(cache_entry->value)
+ : nullptr;
}
@@ -581,20 +580,16 @@ V8_NOINLINE void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
cache_entry->value = const_cast<char*>(tag);
}
-
-V8HeapExplorer::V8HeapExplorer(
- HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress,
- v8::HeapProfiler::ObjectNameResolver* resolver)
+V8HeapExplorer::V8HeapExplorer(HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver)
: heap_(snapshot->profiler()->heap_object_map()->heap()),
snapshot_(snapshot),
names_(snapshot_->profiler()->names()),
heap_object_map_(snapshot_->profiler()->heap_object_map()),
progress_(progress),
- filler_(NULL),
- global_object_name_resolver_(resolver) {
-}
-
+ filler_(nullptr),
+ global_object_name_resolver_(resolver) {}
V8HeapExplorer::~V8HeapExplorer() {
}
@@ -623,7 +618,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
GetConstructorName(JSObject::cast(object)));
if (object->IsJSGlobalObject()) {
const char* tag = objects_tags_.GetTag(object);
- if (tag != NULL) {
+ if (tag != nullptr) {
name = names_->GetFormatted("%s / %s", name, tag);
}
}
@@ -710,11 +705,12 @@ class SnapshotFiller {
}
HeapEntry* FindEntry(HeapThing ptr) {
int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index]
+ : nullptr;
}
HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
+ return entry != nullptr ? entry : AddEntry(ptr, allocator);
}
void SetIndexedReference(HeapGraphEdge::Type type,
int parent,
@@ -780,8 +776,7 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
int objects_count = 0;
- for (HeapObject* obj = iterator->next();
- obj != NULL;
+ for (HeapObject* obj = iterator->next(); obj != nullptr;
obj = iterator->next()) {
objects_count++;
}
@@ -918,21 +913,19 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
} else if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
- Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole(heap_->isolate())) {
- if (!proto_or_map->IsMap()) {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_string(), proto_or_map,
- NULL,
- JSFunction::kPrototypeOrInitialMapOffset);
- } else {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_string(), js_fun->prototype());
- SetInternalReference(
- obj, entry, "initial_map", proto_or_map,
- JSFunction::kPrototypeOrInitialMapOffset);
+ if (js_fun->has_prototype_slot()) {
+ Object* proto_or_map = js_fun->prototype_or_initial_map();
+ if (!proto_or_map->IsTheHole(heap_->isolate())) {
+ if (!proto_or_map->IsMap()) {
+ SetPropertyReference(obj, entry, heap_->prototype_string(),
+ proto_or_map, nullptr,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ } else {
+ SetPropertyReference(obj, entry, heap_->prototype_string(),
+ js_fun->prototype());
+ SetInternalReference(obj, entry, "initial_map", proto_or_map,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ }
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
@@ -1104,13 +1097,11 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
Map::kDescriptorsOffset);
SetInternalReference(map, entry, "prototype", map->prototype(),
Map::kPrototypeOffset);
-#if V8_DOUBLE_FIELDS_UNBOXING
if (FLAG_unbox_double_fields) {
SetInternalReference(map, entry, "layout_descriptor",
map->layout_descriptor(),
Map::kLayoutDescriptorOffset);
}
-#endif
Object* constructor_or_backpointer = map->constructor_or_backpointer();
if (constructor_or_backpointer->IsMap()) {
TagObject(constructor_or_backpointer, "(back pointer)");
@@ -1140,7 +1131,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
int entry, SharedFunctionInfo* shared) {
HeapObject* obj = shared;
String* shared_name = shared->DebugName();
- const char* name = NULL;
+ const char* name = nullptr;
if (shared_name != heap_->empty_string()) {
name = names_->GetName(shared_name);
TagObject(shared->code(), names_->GetFormatted("(code for %s)", name));
@@ -1211,18 +1202,12 @@ void V8HeapExplorer::ExtractAccessorInfoReferences(
SetInternalReference(accessor_info, entry, "expected_receiver_type",
accessor_info->expected_receiver_type(),
AccessorInfo::kExpectedReceiverTypeOffset);
- if (accessor_info->IsAccessorInfo()) {
- AccessorInfo* executable_accessor_info = AccessorInfo::cast(accessor_info);
- SetInternalReference(executable_accessor_info, entry, "getter",
- executable_accessor_info->getter(),
- AccessorInfo::kGetterOffset);
- SetInternalReference(executable_accessor_info, entry, "setter",
- executable_accessor_info->setter(),
- AccessorInfo::kSetterOffset);
- SetInternalReference(executable_accessor_info, entry, "data",
- executable_accessor_info->data(),
- AccessorInfo::kDataOffset);
- }
+ SetInternalReference(accessor_info, entry, "getter", accessor_info->getter(),
+ AccessorInfo::kGetterOffset);
+ SetInternalReference(accessor_info, entry, "setter", accessor_info->setter(),
+ AccessorInfo::kSetterOffset);
+ SetInternalReference(accessor_info, entry, "data", accessor_info->data(),
+ AccessorInfo::kDataOffset);
}
void V8HeapExplorer::ExtractAccessorPairReferences(
@@ -1358,12 +1343,21 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
return;
}
switch (it->second) {
- case JS_WEAK_COLLECTION_SUB_TYPE:
- for (int i = 0, l = array->length(); i < l; ++i) {
- SetWeakReference(array, entry, i, array->get(i),
- array->OffsetOfElementAt(i));
+ case JS_WEAK_COLLECTION_SUB_TYPE: {
+ ObjectHashTable* table = ObjectHashTable::cast(array);
+ for (int i = 0, capacity = table->Capacity(); i < capacity; ++i) {
+ int key_index =
+ ObjectHashTable::EntryToIndex(i) + ObjectHashTable::kEntryKeyIndex;
+ int value_index = ObjectHashTable::EntryToValueIndex(i);
+ SetWeakReference(table, entry, key_index, table->get(key_index),
+ table->OffsetOfElementAt(key_index));
+ SetInternalReference(table, entry, value_index, table->get(value_index),
+ table->OffsetOfElementAt(value_index));
+ // TODO(alph): Add a strong link (shortcut?) from key to value per
+ // WeakMap the key was added to. See crbug.com/778739
}
break;
+ }
// TODO(alph): Add special processing for other types of FixedArrays.
@@ -1395,7 +1389,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
field_index.is_inobject() ? field_index.offset() : -1;
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
- value, NULL, field_offset);
+ value, nullptr, field_offset);
break;
}
case kDescriptor:
@@ -1442,7 +1436,7 @@ void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
int field_offset) {
if (!callback_obj->IsAccessorPair()) return;
AccessorPair* accessors = AccessorPair::cast(callback_obj);
- SetPropertyReference(js_obj, entry, key, accessors, NULL, field_offset);
+ SetPropertyReference(js_obj, entry, key, accessors, nullptr, field_offset);
Object* getter = accessors->getter();
if (!getter->IsOddball()) {
SetPropertyReference(js_obj, entry, key, getter, "get %s");
@@ -1467,7 +1461,7 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
}
}
} else if (js_obj->HasDictionaryElements()) {
- SeededNumberDictionary* dictionary = js_obj->element_dictionary();
+ NumberDictionary* dictionary = js_obj->element_dictionary();
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
@@ -1501,7 +1495,7 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return NULL;
+ if (!obj->IsHeapObject()) return nullptr;
return filler_->FindOrAddEntry(obj, this);
}
@@ -1603,11 +1597,11 @@ bool V8HeapExplorer::IterateAndExtractReferences(
IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass2>();
if (interrupted) {
- filler_ = NULL;
+ filler_ = nullptr;
return false;
}
- filler_ = NULL;
+ filler_ = nullptr;
return progress_->ProgressReport(true);
}
@@ -1618,8 +1612,7 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
bool interrupted = false;
HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
+ for (HeapObject *obj = iterator.next(); obj != nullptr;
obj = iterator.next(), progress_->ProgressStep()) {
if (interrupted) continue;
@@ -1666,7 +1659,8 @@ bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
if (parent->IsAllocationSite() &&
field_offset == AllocationSite::kWeakNextOffset)
return false;
- if (parent->IsCode() && field_offset == Code::kNextCodeLinkOffset)
+ if (parent->IsCodeDataContainer() &&
+ field_offset == CodeDataContainer::kNextCodeLinkOffset)
return false;
if (parent->IsContext() &&
field_offset == Context::OffsetOfElementAt(Context::NEXT_CONTEXT_LINK))
@@ -1853,7 +1847,7 @@ void V8HeapExplorer::SetRootGcRootsReference() {
void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
- DCHECK(child_entry != nullptr);
+ DCHECK_NOT_NULL(child_entry);
filler_->SetNamedAutoIndexReference(
HeapGraphEdge::kShortcut,
snapshot_->root()->index(),
@@ -1923,6 +1917,10 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
+#define ACCESSOR_NAME(accessor_name, AccessorName) \
+ NAME_ENTRY(accessor_name##_accessor)
+ ACCESSOR_INFO_LIST(ACCESSOR_NAME)
+#undef ACCESSOR_NAME
#undef NAME_ENTRY
CHECK(!strong_gc_subroot_names_.is_empty());
}
@@ -2035,17 +2033,15 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
0);
}
-
NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress)
+ HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
: isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
snapshot_(snapshot),
names_(snapshot_->profiler()->names()),
embedder_queried_(false),
objects_by_info_(RetainedInfosMatch),
native_groups_(StringsMatch),
- filler_(NULL) {
+ filler_(nullptr) {
synthetic_entries_allocator_ =
new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
native_entries_allocator_ =
@@ -2054,7 +2050,7 @@ NativeObjectsExplorer::NativeObjectsExplorer(
NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
p = objects_by_info_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2063,7 +2059,7 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
reinterpret_cast<std::vector<HeapObject*>*>(p->value);
delete objects;
}
- for (base::HashMap::Entry* p = native_groups_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = native_groups_.Start(); p != nullptr;
p = native_groups_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
@@ -2118,7 +2114,7 @@ void NativeObjectsExplorer::FillEdges() {
HeapObject* parent = HeapObject::cast(*parent_object);
int parent_entry =
filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
- DCHECK(parent_entry != HeapEntry::kNoEntry);
+ DCHECK_NE(parent_entry, HeapEntry::kNoEntry);
Handle<Object> child_object = v8::Utils::OpenHandle(
*pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* child = HeapObject::cast(*child_object);
@@ -2134,7 +2130,7 @@ std::vector<HeapObject*>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
v8::RetainedObjectInfo* info) {
base::HashMap::Entry* entry =
objects_by_info_.LookupOrInsert(info, InfoHash(info));
- if (entry->value != NULL) {
+ if (entry->value != nullptr) {
info->Dispose();
} else {
entry->value = new std::vector<HeapObject*>();
@@ -2149,7 +2145,7 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
FillRetainedObjects();
FillEdges();
if (EstimateObjectsCount() > 0) {
- for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
p = objects_by_info_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2162,7 +2158,7 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
}
SetRootNativeRootsReference();
}
- filler_ = NULL;
+ filler_ = nullptr;
return true;
}
@@ -2203,7 +2199,7 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
isolate_->heap()->HashSeed());
base::HashMap::Entry* entry =
native_groups_.LookupOrInsert(const_cast<char*>(label_copy), hash);
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
entry->value = new NativeGroupRetainedObjectInfo(label);
}
return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
@@ -2214,7 +2210,7 @@ void NativeObjectsExplorer::SetNativeRootReference(
v8::RetainedObjectInfo* info) {
HeapEntry* child_entry =
filler_->FindOrAddEntry(info, native_entries_allocator_);
- DCHECK(child_entry != NULL);
+ DCHECK_NOT_NULL(child_entry);
NativeGroupRetainedObjectInfo* group_info =
FindOrAddGroupInfo(info->GetGroupLabel());
HeapEntry* group_entry =
@@ -2232,10 +2228,10 @@ void NativeObjectsExplorer::SetNativeRootReference(
void NativeObjectsExplorer::SetWrapperNativeReferences(
HeapObject* wrapper, v8::RetainedObjectInfo* info) {
HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
- DCHECK(wrapper_entry != NULL);
+ DCHECK_NOT_NULL(wrapper_entry);
HeapEntry* info_entry =
filler_->FindOrAddEntry(info, native_entries_allocator_);
- DCHECK(info_entry != NULL);
+ DCHECK_NOT_NULL(info_entry);
filler_->SetNamedReference(HeapGraphEdge::kInternal,
wrapper_entry->index(),
"native",
@@ -2253,7 +2249,7 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
HeapEntry* group_entry =
filler_->FindOrAddEntry(group_info, native_entries_allocator_);
- DCHECK(group_entry != NULL);
+ DCHECK_NOT_NULL(group_entry);
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
snapshot_->root()->index(),
@@ -2267,7 +2263,7 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
Isolate* isolate = isolate_;
v8::RetainedObjectInfo* info =
isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
- if (info == NULL) return;
+ if (info == nullptr) return;
GetVectorMaybeDisposeInfo(info)->push_back(HeapObject::cast(*p));
}
@@ -2348,18 +2344,17 @@ void HeapSnapshotGenerator::ProgressStep() {
bool HeapSnapshotGenerator::ProgressReport(bool force) {
const int kProgressReportGranularity = 10000;
- if (control_ != NULL
- && (force || progress_counter_ % kProgressReportGranularity == 0)) {
- return
- control_->ReportProgressValue(progress_counter_, progress_total_) ==
- v8::ActivityControl::kContinue;
+ if (control_ != nullptr &&
+ (force || progress_counter_ % kProgressReportGranularity == 0)) {
+ return control_->ReportProgressValue(progress_counter_, progress_total_) ==
+ v8::ActivityControl::kContinue;
}
return true;
}
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
- if (control_ == NULL) return;
+ if (control_ == nullptr) return;
HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
progress_total_ = iterations_count * (
v8_heap_explorer_.EstimateObjectsCount(&iterator) +
@@ -2394,11 +2389,11 @@ class OutputStreamWriter {
chunk_(chunk_size_),
chunk_pos_(0),
aborted_(false) {
- DCHECK(chunk_size_ > 0);
+ DCHECK_GT(chunk_size_, 0);
}
bool aborted() { return aborted_; }
void AddCharacter(char c) {
- DCHECK(c != '\0');
+ DCHECK_NE(c, '\0');
DCHECK(chunk_pos_ < chunk_size_);
chunk_[chunk_pos_++] = c;
MaybeWriteChunk();
@@ -2413,7 +2408,7 @@ class OutputStreamWriter {
while (s < s_end) {
int s_chunk_size =
Min(chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
- DCHECK(s_chunk_size > 0);
+ DCHECK_GT(s_chunk_size, 0);
MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
s += s_chunk_size;
chunk_pos_ += s_chunk_size;
@@ -2439,14 +2434,14 @@ class OutputStreamWriter {
if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
int result = SNPrintF(
chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
- DCHECK(result != -1);
+ DCHECK_NE(result, -1);
chunk_pos_ += result;
MaybeWriteChunk();
} else {
EmbeddedVector<char, kMaxNumberSize> buffer;
int result = SNPrintF(buffer, format, n);
USE(result);
- DCHECK(result != -1);
+ DCHECK_NE(result, -1);
AddString(buffer.start());
}
}
@@ -2481,16 +2476,16 @@ void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
snapshot_->profiler()->allocation_tracker()) {
allocation_tracker->PrepareForSerialization();
}
- DCHECK(writer_ == NULL);
+ DCHECK_NULL(writer_);
writer_ = new OutputStreamWriter(stream);
SerializeImpl();
delete writer_;
- writer_ = NULL;
+ writer_ = nullptr;
}
void HeapSnapshotJSONSerializer::SerializeImpl() {
- DCHECK(0 == snapshot_->root()->index());
+ DCHECK_EQ(0, snapshot_->root()->index());
writer_->AddCharacter('{');
writer_->AddString("\"snapshot\":{");
SerializeSnapshot();
@@ -2531,7 +2526,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
base::HashMap::Entry* cache_entry =
strings_.LookupOrInsert(const_cast<char*>(s), StringHash(s));
- if (cache_entry->value == NULL) {
+ if (cache_entry->value == nullptr) {
cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
}
return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
@@ -2680,7 +2675,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("native") ","
JSON_S("synthetic") ","
JSON_S("concatenated string") ","
- JSON_S("sliced string")) ","
+ JSON_S("sliced string") ","
+ JSON_S("symbol")) ","
JSON_S("string") ","
JSON_S("number") ","
JSON_S("number") ","
@@ -2789,7 +2785,7 @@ static int SerializePosition(int position, const Vector<char>& buffer,
if (position == -1) {
buffer[buffer_pos++] = '0';
} else {
- DCHECK(position >= 0);
+ DCHECK_GE(position, 0);
buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
}
return buffer_pos;
@@ -2896,7 +2892,7 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
if (c != unibrow::Utf8::kBadChar) {
WriteUChar(writer_, c);
- DCHECK(cursor != 0);
+ DCHECK_NE(cursor, 0);
s += cursor - 1;
} else {
writer_->AddCharacter('?');
@@ -2911,7 +2907,7 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
void HeapSnapshotJSONSerializer::SerializeStrings() {
ScopedVector<const unsigned char*> sorted_strings(
strings_.occupancy() + 1);
- for (base::HashMap::Entry* entry = strings_.Start(); entry != NULL;
+ for (base::HashMap::Entry* entry = strings_.Start(); entry != nullptr;
entry = strings_.Next(entry)) {
int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index ec2460b922..070432225a 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -25,6 +25,7 @@ class HeapEntry;
class HeapIterator;
class HeapProfiler;
class HeapSnapshot;
+class JSArrayBuffer;
class SnapshotFiller;
class HeapGraphEdge BASE_EMBEDDED {
@@ -430,17 +431,14 @@ class V8HeapExplorer : public HeapEntriesAllocator {
int index,
Object* child_obj,
int field_offset);
- void SetPropertyReference(HeapObject* parent_obj,
- int parent,
- Name* reference_name,
- Object* child,
- const char* name_format_string = NULL,
+ void SetPropertyReference(HeapObject* parent_obj, int parent,
+ Name* reference_name, Object* child,
+ const char* name_format_string = nullptr,
int field_offset = -1);
- void SetDataOrAccessorPropertyReference(PropertyKind kind,
- JSObject* parent_obj, int parent,
- Name* reference_name, Object* child,
- const char* name_format_string = NULL,
- int field_offset = -1);
+ void SetDataOrAccessorPropertyReference(
+ PropertyKind kind, JSObject* parent_obj, int parent, Name* reference_name,
+ Object* child, const char* name_format_string = nullptr,
+ int field_offset = -1);
void SetUserGlobalReference(Object* user_global);
void SetRootGcRootsReference();
@@ -572,8 +570,7 @@ class HeapSnapshotJSONSerializer {
strings_(StringsMatch),
next_node_id_(1),
next_string_id_(1),
- writer_(NULL) {
- }
+ writer_(nullptr) {}
void Serialize(v8::OutputStream* stream);
private:
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index c84f3662c8..9570c77dd2 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -26,8 +26,8 @@ JITLineInfoTable::~JITLineInfoTable() {}
void JITLineInfoTable::SetPosition(int pc_offset, int line) {
- DCHECK(pc_offset >= 0);
- DCHECK(line > 0); // The 1-based number of the source line.
+ DCHECK_GE(pc_offset, 0);
+ DCHECK_GT(line, 0); // The 1-based number of the source line.
if (GetSourceLineNumber(pc_offset) != line) {
pc_offset_map_.insert(std::make_pair(pc_offset, line));
}
@@ -144,7 +144,7 @@ void CodeEntry::AddInlineStack(int pc_offset,
const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
auto it = inline_locations_.find(pc_offset);
- return it != inline_locations_.end() ? &it->second : NULL;
+ return it != inline_locations_.end() ? &it->second : nullptr;
}
void CodeEntry::AddDeoptInlinedFrames(
@@ -190,8 +190,8 @@ void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
base::HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry));
- return map_entry != NULL ?
- reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
+ return map_entry != nullptr ? reinterpret_cast<ProfileNode*>(map_entry->value)
+ : nullptr;
}
@@ -221,7 +221,7 @@ void ProfileNode::IncrementLineTicks(int src_line) {
bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
unsigned int length) const {
- if (entries == NULL || length == 0) return false;
+ if (entries == nullptr || length == 0) return false;
unsigned line_count = line_ticks_.occupancy();
@@ -230,7 +230,7 @@ bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
v8::CpuProfileNode::LineTick* entry = entries;
- for (base::HashMap::Entry *p = line_ticks_.Start(); p != NULL;
+ for (base::HashMap::Entry *p = line_ticks_.Start(); p != nullptr;
p = line_ticks_.Next(p), entry++) {
entry->line =
static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
@@ -268,7 +268,7 @@ void ProfileNode::Print(int indent) {
base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
bailout_reason);
}
- for (base::HashMap::Entry* p = children_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = children_.Start(); p != nullptr;
p = children_.Next(p)) {
reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
}
@@ -313,9 +313,9 @@ unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
int src_line, bool update_stats) {
ProfileNode* node = root_;
- CodeEntry* last_entry = NULL;
+ CodeEntry* last_entry = nullptr;
for (auto it = path.rbegin(); it != path.rend(); ++it) {
- if (*it == NULL) continue;
+ if (*it == nullptr) continue;
last_entry = *it;
node = node->FindOrAddChild(*it);
}
@@ -715,7 +715,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (FLAG_prof_browser_mode) {
bool no_symbolized_entries = true;
for (auto e : entries) {
- if (e != NULL) {
+ if (e != nullptr) {
no_symbolized_entries = false;
break;
}
@@ -739,7 +739,9 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
case GC:
return CodeEntry::gc_entry();
case JS:
+ case PARSER:
case COMPILER:
+ case BYTECODE_COMPILER:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
@@ -748,8 +750,8 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
return CodeEntry::program_entry();
case IDLE:
return CodeEntry::idle_entry();
- default: return NULL;
}
+ UNREACHABLE();
}
} // namespace internal
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 69a85a1422..819800ae6b 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -47,8 +47,8 @@ class CodeEntry {
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = NULL,
- Address instruction_start = NULL);
+ JITLineInfoTable* line_info = nullptr,
+ Address instruction_start = nullptr);
~CodeEntry();
const char* name_prefix() const { return name_prefix_; }
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 07a10afb5d..fecfdb66b0 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -16,11 +16,7 @@ namespace internal {
ProfilerListener::ProfilerListener(Isolate* isolate)
: function_and_resource_names_(isolate->heap()) {}
-ProfilerListener::~ProfilerListener() {
- for (auto code_entry : code_entries_) {
- delete code_entry;
- }
-}
+ProfilerListener::~ProfilerListener() = default;
void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
@@ -39,7 +35,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
rec->entry = NewCodeEntry(
tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, code->instruction_start());
RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
@@ -53,7 +49,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
rec->entry = NewCodeEntry(
tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, code->instruction_start());
RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
@@ -70,7 +66,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
GetName(InferScriptName(script_name, shared)),
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- NULL, code->instruction_start());
+ nullptr, code->instruction_start());
RecordInliningInfo(rec->entry, code);
rec->entry->FillFunctionInfo(shared);
rec->size = code->ExecutableSize();
@@ -85,7 +81,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = abstract_code->address();
- JITLineInfoTable* line_table = NULL;
+ JITLineInfoTable* line_table = nullptr;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
line_table = new JITLineInfoTable();
@@ -114,20 +110,6 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, int args_count) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = NewCodeEntry(
- tag, GetName(args_count), "args_count: ", CodeEntry::kEmptyResourceName,
- CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- DispatchCodeEvent(evt_rec);
-}
-
void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
@@ -173,10 +155,11 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
- rec->entry = NewCodeEntry(
- CodeEventListener::REG_EXP_TAG, GetName(source), "RegExp: ",
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ rec->entry = NewCodeEntry(CodeEventListener::REG_EXP_TAG, GetName(source),
+ "RegExp: ", CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr,
+ code->instruction_start());
rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
@@ -203,8 +186,8 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
if (!abstract_code->IsCode()) return;
Code* code = abstract_code->GetCode();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
- DeoptimizationInputData* deopt_input_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
+ DeoptimizationData* deopt_input_data =
+ DeoptimizationData::cast(code->deoptimization_data());
int deopt_count = deopt_input_data->DeoptCount();
for (int i = 0; i < deopt_count; i++) {
int pc_offset = deopt_input_data->Pc(i)->value();
@@ -230,6 +213,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
deopt_input_data->LiteralArray()->get(shared_info_id));
if (!depth++) continue; // Skip the current function itself.
+
const char* resource_name =
(shared_info->script()->IsScript() &&
Script::cast(shared_info->script())->name()->IsName())
@@ -275,7 +259,7 @@ void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
DCHECK(last_position.IsKnown());
std::vector<CpuProfileDeoptFrame> inlined_frames;
for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
- DCHECK(pos_info.position.ScriptOffset() != kNoSourcePosition);
+ DCHECK_NE(pos_info.position.ScriptOffset(), kNoSourcePosition);
if (!pos_info.function->script()->IsScript()) continue;
int script_id = Script::cast(pos_info.function->script())->id();
size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
@@ -293,19 +277,23 @@ CodeEntry* ProfilerListener::NewCodeEntry(
CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name, int line_number,
int column_number, JITLineInfoTable* line_info, Address instruction_start) {
- CodeEntry* code_entry =
- new CodeEntry(tag, name, name_prefix, resource_name, line_number,
- column_number, line_info, instruction_start);
- code_entries_.push_back(code_entry);
- return code_entry;
+ std::unique_ptr<CodeEntry> code_entry = base::make_unique<CodeEntry>(
+ tag, name, name_prefix, resource_name, line_number, column_number,
+ line_info, instruction_start);
+ CodeEntry* raw_code_entry = code_entry.get();
+ code_entries_.push_back(std::move(code_entry));
+ return raw_code_entry;
}
void ProfilerListener::AddObserver(CodeEventObserver* observer) {
base::LockGuard<base::Mutex> guard(&mutex_);
- if (std::find(observers_.begin(), observers_.end(), observer) !=
- observers_.end())
- return;
- observers_.push_back(observer);
+ if (observers_.empty()) {
+ code_entries_.clear();
+ }
+ if (std::find(observers_.begin(), observers_.end(), observer) ==
+ observers_.end()) {
+ observers_.push_back(observer);
+ }
}
void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index f4a9e24c7d..c111bf81c4 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -37,8 +37,6 @@ class ProfilerListener : public CodeEventListener {
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
Name* script_name, int line, int column) override;
- void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, int args_count) override;
void CodeMovingGCEvent() override {}
void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
@@ -56,7 +54,8 @@ class ProfilerListener : public CodeEventListener {
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
+ JITLineInfoTable* line_info = nullptr,
+ Address instruction_start = nullptr);
void AddObserver(CodeEventObserver* observer);
void RemoveObserver(CodeEventObserver* observer);
@@ -74,6 +73,7 @@ class ProfilerListener : public CodeEventListener {
const char* GetFunctionName(const char* name) {
return function_and_resource_names_.GetFunctionName(name);
}
+ size_t entries_count_for_test() const { return code_entries_.size(); }
private:
void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
@@ -87,7 +87,7 @@ class ProfilerListener : public CodeEventListener {
}
StringsStorage function_and_resource_names_;
- std::vector<CodeEntry*> code_entries_;
+ std::vector<std::unique_ptr<CodeEntry>> code_entries_;
std::vector<CodeEventObserver*> observers_;
base::Mutex mutex_;
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 8e2452b2e5..0aa0525633 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -112,14 +112,13 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
Sample* sample = new Sample(size, node, loc, this);
samples_.insert(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
- sample->global.MarkIndependent();
}
void SamplingHeapProfiler::OnWeakCallback(
const WeakCallbackInfo<Sample>& data) {
Sample* sample = data.GetParameter();
AllocationNode* node = sample->owner;
- DCHECK(node->allocations_[sample->size] > 0);
+ DCHECK_GT(node->allocations_[sample->size], 0);
node->allocations_[sample->size]--;
if (node->allocations_[sample->size] == 0) {
node->allocations_.erase(sample->size);
@@ -144,7 +143,7 @@ SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
FunctionId id = function_id(script_id, start_position, name);
auto it = children_.find(id);
if (it != children_.end()) {
- DCHECK(strcmp(it->second->name_, name) == 0);
+ DCHECK_EQ(strcmp(it->second->name_, name), 0);
return it->second;
}
auto child = new AllocationNode(this, name, script_id, start_position);
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 05f4778830..2e8ad779fd 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -23,7 +23,7 @@ StringsStorage::StringsStorage(Heap* heap)
StringsStorage::~StringsStorage() {
- for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = names_.Start(); p != nullptr;
p = names_.Next(p)) {
DeleteArray(reinterpret_cast<const char*>(p->value));
}
@@ -33,7 +33,7 @@ StringsStorage::~StringsStorage() {
const char* StringsStorage::GetCopy(const char* src) {
int len = static_cast<int>(strlen(src));
base::HashMap::Entry* entry = GetEntry(src, len);
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
Vector<char> dst = Vector<char>::New(len + 1);
StrNCpy(dst, src, len);
dst[len] = '\0';
@@ -55,7 +55,7 @@ const char* StringsStorage::GetFormatted(const char* format, ...) {
const char* StringsStorage::AddOrDisposeString(char* str, int len) {
base::HashMap::Entry* entry = GetEntry(str, len);
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
// New entry added.
entry->key = str;
entry->value = str;
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index c00d80d6c5..f4ca28c19f 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -92,7 +92,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
v8::RegisterState* state) {
Simulator* simulator = isolate->thread_local_top()->simulator_;
// Check if there is active simulator.
- if (simulator == NULL) return false;
+ if (simulator == nullptr) return false;
#if V8_TARGET_ARCH_ARM
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<Address>(simulator->get_pc());
diff --git a/deps/v8/src/profiler/unbound-queue-inl.h b/deps/v8/src/profiler/unbound-queue-inl.h
index 9d45903e04..4b262b8111 100644
--- a/deps/v8/src/profiler/unbound-queue-inl.h
+++ b/deps/v8/src/profiler/unbound-queue-inl.h
@@ -12,9 +12,7 @@ namespace internal {
template<typename Record>
struct UnboundQueue<Record>::Node: public Malloced {
- explicit Node(const Record& value)
- : value(value), next(NULL) {
- }
+ explicit Node(const Record& value) : value(value), next(nullptr) {}
Record value;
Node* next;
@@ -30,7 +28,7 @@ UnboundQueue<Record>::UnboundQueue() {
template<typename Record>
UnboundQueue<Record>::~UnboundQueue() {
- while (first_ != NULL) DeleteFirst();
+ while (first_ != nullptr) DeleteFirst();
}
@@ -72,7 +70,7 @@ bool UnboundQueue<Record>::IsEmpty() const {
template<typename Record>
Record* UnboundQueue<Record>::Peek() const {
- if (divider_ == base::Acquire_Load(&last_)) return NULL;
+ if (divider_ == base::Acquire_Load(&last_)) return nullptr;
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index d007a0414c..34c43047f8 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -133,8 +133,8 @@ class Representation {
if (kind_ == kExternal && other.kind_ == kExternal) return false;
if (kind_ == kNone && other.kind_ == kExternal) return false;
- DCHECK(kind_ != kExternal);
- DCHECK(other.kind_ != kExternal);
+ DCHECK_NE(kind_, kExternal);
+ DCHECK_NE(other.kind_, kExternal);
if (IsHeapObject()) return other.IsNone();
if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
@@ -197,10 +197,10 @@ class Representation {
static const int kDescriptorIndexBitCount = 10;
-// The maximum number of descriptors we want in a descriptor array (should
-// fit in a page).
-static const int kMaxNumberOfDescriptors =
- (1 << kDescriptorIndexBitCount) - 2;
+// The maximum number of descriptors we want in a descriptor array. It should
+// fit in a page and also the following should hold:
+// kMaxNumberOfDescriptors + kFieldsAdded <= PropertyArray::kMaxLength.
+static const int kMaxNumberOfDescriptors = (1 << kDescriptorIndexBitCount) - 4;
static const int kInvalidEnumCacheSentinel =
(1 << kDescriptorIndexBitCount) - 1;
@@ -342,6 +342,8 @@ class PropertyDetails BASE_EMBEDDED {
(READ_ONLY << AttributesField::kShift);
static const int kAttributesDontDeleteMask =
(DONT_DELETE << AttributesField::kShift);
+ static const int kAttributesDontEnumMask =
+ (DONT_ENUM << AttributesField::kShift);
// Bit fields for normalized objects.
class PropertyCellTypeField
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index e98cd977f0..ac3597321a 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -31,7 +31,7 @@ class PrototypeIterator {
WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL)
: isolate_(isolate),
- object_(NULL),
+ object_(nullptr),
handle_(receiver),
where_to_end_(where_to_end),
is_at_end_(false),
@@ -68,7 +68,7 @@ class PrototypeIterator {
explicit PrototypeIterator(Handle<Map> receiver_map,
WhereToEnd where_to_end = END_AT_NULL)
: isolate_(receiver_map->GetIsolate()),
- object_(NULL),
+ object_(nullptr),
handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
isolate_),
where_to_end_(where_to_end),
@@ -103,7 +103,7 @@ class PrototypeIterator {
template <typename T = Object>
static Handle<T> GetCurrent(const PrototypeIterator& iterator) {
DCHECK(!iterator.handle_.is_null());
- DCHECK(iterator.object_ == NULL);
+ DCHECK_NULL(iterator.object_);
return Handle<T>::cast(iterator.handle_);
}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 146312905c..2e6425568b 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -96,7 +96,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -867,7 +867,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
@@ -1131,14 +1131,14 @@ void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
Label* to) {
if (condition == al) { // Unconditional.
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ jmp(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ b(condition, &backtrack_label_);
return;
}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 8b067e998b..7c988e962f 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -170,7 +170,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Call and return internally in the generated code in a way that
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 5e3f37588f..558ee673f1 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -106,7 +106,7 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -788,9 +788,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Find negative length (offset of start relative to end).
__ Sub(x10, input_start(), input_end());
if (masm_->emit_debug_code()) {
- // Check that the input string length is < 2^30.
+ // Check that the size of the input string chars is in range.
__ Neg(x11, x10);
- __ Cmp(x11, (1<<30) - 1);
+ __ Cmp(x11, SeqTwoByteString::kMaxCharsSize);
__ Check(ls, kInputStringTooLong);
}
__ Mov(current_input_offset(), w10);
@@ -853,8 +853,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Get string length.
__ Sub(x10, input_end(), input_start());
if (masm_->emit_debug_code()) {
- // Check that the input string length is < 2^30.
- __ Cmp(x10, (1<<30) - 1);
+ // Check that the size of the input string chars is in range.
+ __ Cmp(x10, SeqTwoByteString::kMaxCharsSize);
__ Check(ls, kInputStringTooLong);
}
// input_start has a start_offset offset on entry. We need to include
@@ -1059,7 +1059,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, 3);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
// Returning from the regexp code restores the stack (csp <- fp)
// so we don't need to drop the link register from it before exiting.
@@ -1408,14 +1408,14 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
Label* to) {
if (condition == al) { // Unconditional.
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ B(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
to = &backtrack_label_;
}
__ B(condition, to);
@@ -1426,7 +1426,7 @@ void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
Condition condition,
Label* to) {
if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
- if (to == NULL) {
+ if (to == nullptr) {
to = &backtrack_label_;
}
if (condition == eq) {
@@ -1655,4 +1655,6 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
} // namespace internal
} // namespace v8
+#undef __
+
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 87ccf2aa8b..42a41bab5d 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -199,7 +199,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Compares reg against immmediate before calling BranchOrBacktrack.
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 28dab0b357..99d1466f54 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -83,7 +83,7 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -913,7 +913,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, eax);
__ j(equal, &exit_with_exception);
@@ -1154,14 +1154,14 @@ void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
Label* to) {
if (condition < 0) { // No condition
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ jmp(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ j(condition, &backtrack_label_);
return;
}
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index fa174137a4..02afc999d1 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -152,7 +152,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Call and return internally in the generated code in a way that
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 0b73c2fbc3..1266da3209 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -30,7 +30,7 @@ int32_t* RegExpImpl::GlobalCache::FetchNext() {
// Fail if last batch was not even fully filled.
if (num_matches_ < max_matches_) {
num_matches_ = 0; // Signal failed match.
- return NULL;
+ return nullptr;
}
int32_t* last_match =
@@ -51,7 +51,7 @@ int32_t* RegExpImpl::GlobalCache::FetchNext() {
}
if (last_end_index > subject_->length()) {
num_matches_ = 0; // Signal failed match.
- return NULL;
+ return nullptr;
}
num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
subject_,
@@ -60,7 +60,7 @@ int32_t* RegExpImpl::GlobalCache::FetchNext() {
register_array_size_);
}
- if (num_matches_ <= 0) return NULL;
+ if (num_matches_ <= 0) return nullptr;
current_match_index_ = 0;
return register_array_;
} else {
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 52ed47cf53..9d56e4cfa3 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -132,17 +132,17 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
bool has_been_compiled = false;
- if (parse_result.simple && !(flags & JSRegExp::kIgnoreCase) &&
- !(flags & JSRegExp::kSticky) &&
+ if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
pattern->length() <= kPatternTooShortForBoyerMoore) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
has_been_compiled = true;
- } else if (parse_result.tree->IsAtom() && !(flags & JSRegExp::kIgnoreCase) &&
- !(flags & JSRegExp::kSticky) && parse_result.capture_count == 0) {
+ } else if (parse_result.tree->IsAtom() && !IsSticky(flags) &&
+ parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
- if (atom_pattern.length() <= kPatternTooShortForBoyerMoore) {
+ if (!IgnoreCase(atom->flags()) &&
+ atom_pattern.length() <= kPatternTooShortForBoyerMoore) {
Handle<String> atom_string;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, atom_string,
@@ -328,8 +328,12 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
RegExpEngine::CompilationResult result =
RegExpEngine::Compile(isolate, &zone, &compile_data, flags, pattern,
sample_subject, is_one_byte);
- if (result.error_message != NULL) {
+ if (result.error_message != nullptr) {
// Unable to compile regexp.
+ if (FLAG_abort_on_stack_or_string_length_overflow &&
+ strncmp(result.error_message, "Stack overflow", 15) == 0) {
+ FATAL("Aborting on stack overflow");
+ }
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
CStrVector(result.error_message)).ToHandleChecked();
ThrowRegExpException(re, error_message);
@@ -532,12 +536,12 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
return MaybeHandle<Object>();
}
- int32_t* output_registers = NULL;
+ int32_t* output_registers = nullptr;
if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
output_registers = NewArray<int32_t>(required_registers);
}
std::unique_ptr<int32_t[]> auto_release(output_registers);
- if (output_registers == NULL) {
+ if (output_registers == nullptr) {
output_registers = isolate->jsregexp_static_offsets_vector();
}
@@ -582,7 +586,7 @@ Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
}
DisallowHeapAllocation no_allocation;
- if (match != NULL) {
+ if (match != nullptr) {
for (int i = 0; i < capture_register_count; i += 2) {
result->SetCapture(i, match[i]);
result->SetCapture(i + 1, match[i + 1]);
@@ -593,14 +597,12 @@ Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
return result;
}
-
RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Isolate* isolate)
- : register_array_(NULL),
- register_array_size_(0),
- regexp_(regexp),
- subject_(subject) {
+ Handle<String> subject, Isolate* isolate)
+ : register_array_(nullptr),
+ register_array_size_(0),
+ regexp_(regexp),
+ subject_(subject) {
#ifdef V8_INTERPRETED_REGEXP
bool interpreted = true;
#else
@@ -620,7 +622,7 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
}
}
- DCHECK_NE(0, regexp->GetFlags() & JSRegExp::kGlobal);
+ DCHECK(IsGlobal(regexp->GetFlags()));
if (!interpreted) {
register_array_size_ =
Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
@@ -651,8 +653,7 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
}
int RegExpImpl::GlobalCache::AdvanceZeroLength(int last_index) {
- if ((regexp_->GetFlags() & JSRegExp::kUnicode) != 0 &&
- last_index + 1 < subject_->length() &&
+ if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
// Advance over the surrogate pair.
@@ -855,7 +856,7 @@ int TextElement::length() const {
DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
- if (table_ == NULL) {
+ if (table_ == nullptr) {
table_ = new(zone()) DispatchTable(zone());
DispatchTableConstructor cons(table_, ignore_case, zone());
cons.BuildTable(this);
@@ -914,7 +915,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool is_one_byte);
+ bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -966,13 +967,6 @@ class RegExpCompiler {
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
- inline bool ignore_case() { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
- inline bool unicode() { return (flags_ & JSRegExp::kUnicode) != 0; }
- // Both unicode and ignore_case flags are set. We need to use ICU to find
- // the closure over case equivalents.
- inline bool needs_unicode_case_equivalents() {
- return unicode() && ignore_case();
- }
inline bool one_byte() { return one_byte_; }
inline bool optimize() { return optimize_; }
inline void set_optimize(bool value) { optimize_ = value; }
@@ -1002,7 +996,6 @@ class RegExpCompiler {
std::vector<RegExpNode*>* work_list_;
int recursion_depth_;
RegExpMacroAssembler* macro_assembler_;
- JSRegExp::Flags flags_;
bool one_byte_;
bool reg_exp_too_big_;
bool limiting_recursion_;
@@ -1034,13 +1027,12 @@ static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool one_byte)
+ bool one_byte)
: next_register_(2 * (capture_count + 1)),
unicode_lookaround_stack_register_(kNoRegister),
unicode_lookaround_position_register_(kNoRegister),
- work_list_(NULL),
+ work_list_(nullptr),
recursion_depth_(0),
- flags_(flags),
one_byte_(one_byte),
reg_exp_too_big_(false),
limiting_recursion_(false),
@@ -1090,7 +1082,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
- work_list_ = NULL;
+ work_list_ = nullptr;
#if defined(ENABLE_DISASSEMBLER) && !defined(V8_INTERPRETED_REGEXP)
if (FLAG_print_code) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
@@ -1118,8 +1110,7 @@ bool Trace::DeferredAction::Mentions(int that) {
bool Trace::mentions_reg(int reg) {
- for (DeferredAction* action = actions_;
- action != NULL;
+ for (DeferredAction* action = actions_; action != nullptr;
action = action->next()) {
if (action->Mentions(reg))
return true;
@@ -1130,8 +1121,7 @@ bool Trace::mentions_reg(int reg) {
bool Trace::GetStoredPosition(int reg, int* cp_offset) {
DCHECK_EQ(0, *cp_offset);
- for (DeferredAction* action = actions_;
- action != NULL;
+ for (DeferredAction* action = actions_; action != nullptr;
action = action->next()) {
if (action->Mentions(reg)) {
if (action->action_type() == ActionNode::STORE_POSITION) {
@@ -1149,8 +1139,7 @@ bool Trace::GetStoredPosition(int reg, int* cp_offset) {
int Trace::FindAffectedRegisters(OutSet* affected_registers,
Zone* zone) {
int max_register = RegExpCompiler::kNoRegister;
- for (DeferredAction* action = actions_;
- action != NULL;
+ for (DeferredAction* action = actions_; action != nullptr;
action = action->next()) {
if (action->action_type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(action)->range();
@@ -1214,8 +1203,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
int store_position = kNoStore;
// This is a little tricky because we are scanning the actions in reverse
// historical order (newest first).
- for (DeferredAction* action = actions_;
- action != NULL;
+ for (DeferredAction* action = actions_; action != nullptr;
action = action->next()) {
if (action->Mentions(reg)) {
switch (action->action_type()) {
@@ -1323,7 +1311,7 @@ void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
DCHECK(!is_trivial());
- if (actions_ == NULL && backtrack() == NULL) {
+ if (actions_ == nullptr && backtrack() == nullptr) {
// Here we just have some deferred cp advances to fix and we are back to
// a normal situation. We may also have to forget some information gained
// through a quick check that was already performed.
@@ -1337,7 +1325,7 @@ void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
// Generate deferred actions here along with code to undo them again.
OutSet affected_registers;
- if (backtrack() != NULL) {
+ if (backtrack() != nullptr) {
// Here we have a concrete backtrack location. These are set up by choice
// nodes and so they indicate that we have a deferred save of the current
// position which we may need to emit here.
@@ -1375,7 +1363,7 @@ void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
max_register,
registers_to_pop,
registers_to_clear);
- if (backtrack() == NULL) {
+ if (backtrack() == nullptr) {
assembler->Backtrack();
} else {
assembler->PopCurrentPosition();
@@ -1436,8 +1424,7 @@ void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
- if (guards_ == NULL)
- guards_ = new(zone) ZoneList<Guard*>(1, zone);
+ if (guards_ == nullptr) guards_ = new (zone) ZoneList<Guard*>(1, zone);
guards_->Add(guard, zone);
}
@@ -1929,13 +1916,12 @@ static void SplitSearchSpace(ZoneList<int>* ranges,
}
}
-
// Gets a series of segment boundaries representing a character class. If the
// character is in the range between an even and an odd boundary (counting from
// start_index) then go to even_label, otherwise go to odd_label. We already
// know that the character is in the range of min_char to max_char inclusive.
-// Either label can be NULL indicating backtracking. Either label can also be
-// equal to the fall_through label.
+// Either label can be nullptr indicating backtracking. Either label can also
+// be equal to the fall_through label.
static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
int start_index, int end_index, uc32 min_char,
uc32 max_char, Label* fall_through,
@@ -2192,7 +2178,7 @@ RegExpNode::~RegExpNode() {
RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
Trace* trace) {
// If we are generating a greedy loop then don't stop and don't reuse code.
- if (trace->stop_node() != NULL) {
+ if (trace->stop_node() != nullptr) {
return CONTINUE;
}
@@ -2360,10 +2346,7 @@ int LoopChoiceNode::EatsAtLeast(int still_to_find,
int ChoiceNode::EatsAtLeast(int still_to_find,
int budget,
bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- budget,
- NULL,
- not_at_start);
+ return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start);
}
@@ -2510,7 +2493,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
uc16 c = quarks[i];
- if (compiler->ignore_case()) {
+ if (elm.atom()->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(isolate, c,
compiler->one_byte(), chars);
@@ -2718,19 +2701,17 @@ class VisitMarker {
NodeInfo* info_;
};
-
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth, bool ignore_case) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
VisitMarker marker(info());
- return FilterSuccessor(depth - 1, ignore_case);
+ return FilterSuccessor(depth - 1);
}
-
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) {
- RegExpNode* next = on_success_->FilterOneByte(depth - 1, ignore_case);
- if (next == NULL) return set_replacement(NULL);
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
+ RegExpNode* next = on_success_->FilterOneByte(depth - 1);
+ if (next == nullptr) return set_replacement(nullptr);
on_success_ = next;
return set_replacement(this);
}
@@ -2752,8 +2733,7 @@ static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
return false;
}
-
-RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
+RegExpNode* TextNode::FilterOneByte(int depth) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -2766,12 +2746,12 @@ RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
for (int j = 0; j < quarks.length(); j++) {
uint16_t c = quarks[j];
if (c <= String::kMaxOneByteCharCode) continue;
- if (!ignore_case) return set_replacement(NULL);
+ if (!IgnoreCase(elm.atom()->flags())) return set_replacement(nullptr);
// Here, we need to check for characters whose upper and lower cases
// are outside the Latin-1 range.
uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
// Character is outside Latin-1 completely
- if (converted == 0) return set_replacement(NULL);
+ if (converted == 0) return set_replacement(nullptr);
// Convert quark to Latin-1 in place.
uint16_t* copy = const_cast<uint16_t*>(quarks.start());
copy[j] = converted;
@@ -2788,42 +2768,41 @@ RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
ranges->at(0).from() == 0 &&
ranges->at(0).to() >= String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
- return set_replacement(NULL);
+ if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
+ continue;
+ return set_replacement(nullptr);
}
} else {
if (range_count == 0 ||
ranges->at(0).from() > String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
- return set_replacement(NULL);
+ if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
+ continue;
+ return set_replacement(nullptr);
}
}
}
}
- return FilterSuccessor(depth - 1, ignore_case);
+ return FilterSuccessor(depth - 1);
}
-
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth, bool ignore_case) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
{
VisitMarker marker(info());
- RegExpNode* continue_replacement =
- continue_node_->FilterOneByte(depth - 1, ignore_case);
+ RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1);
// If we can't continue after the loop then there is no sense in doing the
// loop.
- if (continue_replacement == NULL) return set_replacement(NULL);
+ if (continue_replacement == nullptr) return set_replacement(nullptr);
}
- return ChoiceNode::FilterOneByte(depth - 1, ignore_case);
+ return ChoiceNode::FilterOneByte(depth - 1);
}
-
-RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2832,20 +2811,20 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
for (int i = 0; i < choice_count; i++) {
GuardedAlternative alternative = alternatives_->at(i);
- if (alternative.guards() != NULL && alternative.guards()->length() != 0) {
+ if (alternative.guards() != nullptr &&
+ alternative.guards()->length() != 0) {
set_replacement(this);
return this;
}
}
int surviving = 0;
- RegExpNode* survivor = NULL;
+ RegExpNode* survivor = nullptr;
for (int i = 0; i < choice_count; i++) {
GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement =
- alternative.node()->FilterOneByte(depth - 1, ignore_case);
+ RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1);
DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK.
- if (replacement != NULL) {
+ if (replacement != nullptr) {
alternatives_->at(i).set_node(replacement);
surviving++;
survivor = replacement;
@@ -2863,8 +2842,8 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
- alternatives_->at(i).node()->FilterOneByte(depth - 1, ignore_case);
- if (replacement != NULL) {
+ alternatives_->at(i).node()->FilterOneByte(depth - 1);
+ if (replacement != nullptr) {
alternatives_->at(i).set_node(replacement);
new_alternatives->Add(alternatives_->at(i), zone());
}
@@ -2873,9 +2852,7 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
return this;
}
-
-RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
- bool ignore_case) {
+RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2883,15 +2860,15 @@ RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterOneByte(depth - 1, ignore_case);
- if (replacement == NULL) return set_replacement(NULL);
+ RegExpNode* replacement = node->FilterOneByte(depth - 1);
+ if (replacement == nullptr) return set_replacement(nullptr);
alternatives_->at(1).set_node(replacement);
RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1, ignore_case);
+ RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
// If the negative lookahead is always going to fail then
// we don't need to check it.
- if (neg_replacement == NULL) return set_replacement(replacement);
+ if (neg_replacement == nullptr) return set_replacement(replacement);
alternatives_->at(0).set_node(neg_replacement);
return set_replacement(this);
}
@@ -3015,7 +2992,7 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
Trace::TriBool next_is_word_character = Trace::UNKNOWN;
bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
- if (lookahead == NULL) {
+ if (lookahead == nullptr) {
int eats_at_least =
Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore,
kRecursionBudget,
@@ -3146,7 +3123,7 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
- if (quick_check == NULL) return false;
+ if (quick_check == nullptr) return false;
if (offset >= quick_check->characters()) return false;
return quick_check->positions(offset)->determines_perfectly;
}
@@ -3205,11 +3182,12 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
TextElement elm = elements()->at(i);
int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
+ if (SkipPass(pass, elm.atom()->ignore_case())) continue;
Vector<const uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
- EmitCharacterFunction* emit_function = NULL;
+ EmitCharacterFunction* emit_function = nullptr;
switch (pass) {
case NON_LATIN1_MATCH:
DCHECK(one_byte);
@@ -3230,7 +3208,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
default:
break;
}
- if (emit_function != NULL) {
+ if (emit_function != nullptr) {
bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
bool bound_checked =
emit_function(isolate, compiler, quarks[j], backtrack,
@@ -3260,9 +3238,7 @@ int TextNode::Length() {
return elm.cp_offset() + elm.length();
}
-
-bool TextNode::SkipPass(int int_pass, bool ignore_case) {
- TextEmitPassType pass = static_cast<TextEmitPassType>(int_pass);
+bool TextNode::SkipPass(TextEmitPassType pass, bool ignore_case) {
if (ignore_case) {
return pass == SIMPLE_CHARACTER_MATCH;
} else {
@@ -3270,32 +3246,33 @@ bool TextNode::SkipPass(int int_pass, bool ignore_case) {
}
}
-
TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
ZoneList<CharacterRange>* ranges,
bool read_backward,
- RegExpNode* on_success) {
+ RegExpNode* on_success,
+ JSRegExp::Flags flags) {
DCHECK_NOT_NULL(ranges);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
- elms->Add(TextElement::CharClass(new (zone) RegExpCharacterClass(ranges)),
- zone);
+ elms->Add(
+ TextElement::CharClass(new (zone) RegExpCharacterClass(ranges, flags)),
+ zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
-
TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
CharacterRange trail,
bool read_backward,
- RegExpNode* on_success) {
+ RegExpNode* on_success,
+ JSRegExp::Flags flags) {
ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
- elms->Add(
- TextElement::CharClass(new (zone) RegExpCharacterClass(lead_ranges)),
- zone);
- elms->Add(
- TextElement::CharClass(new (zone) RegExpCharacterClass(trail_ranges)),
- zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(lead_ranges, flags)),
+ zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(trail_ranges, flags)),
+ zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -3329,27 +3306,15 @@ void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// check that now.
if (trace->characters_preloaded() == 1) {
for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- if (!SkipPass(pass, compiler->ignore_case())) {
- TextEmitPass(compiler,
- static_cast<TextEmitPassType>(pass),
- true,
- trace,
- false,
- &bound_checked_to);
- }
+ TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), true, trace,
+ false, &bound_checked_to);
}
first_elt_done = true;
}
for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- if (!SkipPass(pass, compiler->ignore_case())) {
- TextEmitPass(compiler,
- static_cast<TextEmitPassType>(pass),
- false,
- trace,
- first_elt_done,
- &bound_checked_to);
- }
+ TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), false, trace,
+ first_elt_done, &bound_checked_to);
}
Trace successor_trace(*trace);
@@ -3392,11 +3357,20 @@ void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
TextElement elm = elements()->at(i);
if (elm.text_type() == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.char_class();
- // None of the standard character classes is different in the case
- // independent case and it slows us down if we don't know that.
- if (cc->is_standard(zone())) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
+#ifdef V8_INTL_SUPPORT
+ bool case_equivalents_already_added =
+ NeedsUnicodeCaseEquivalents(cc->flags());
+#else
+ bool case_equivalents_already_added = false;
+#endif
+ if (IgnoreCase(cc->flags()) && !case_equivalents_already_added) {
+ // None of the standard character classes is different in the case
+ // independent case and it slows us down if we don't know that.
+ if (cc->is_standard(zone())) continue;
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+ CharacterRange::AddCaseEquivalents(isolate, zone(), ranges,
+ is_one_byte);
+ }
}
}
}
@@ -3407,24 +3381,24 @@ int TextNode::GreedyLoopTextLength() { return Length(); }
RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
- if (read_backward()) return NULL;
- if (elements()->length() != 1) return NULL;
+ if (read_backward()) return nullptr;
+ if (elements()->length() != 1) return nullptr;
TextElement elm = elements()->at(0);
- if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
+ if (elm.text_type() != TextElement::CHAR_CLASS) return nullptr;
RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
CharacterRange::Canonicalize(ranges);
if (node->is_negated()) {
- return ranges->length() == 0 ? on_success() : NULL;
+ return ranges->length() == 0 ? on_success() : nullptr;
}
- if (ranges->length() != 1) return NULL;
+ if (ranges->length() != 1) return nullptr;
uint32_t max_char;
if (compiler->one_byte()) {
max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
}
- return ranges->at(0).IsEverything(max_char) ? on_success() : NULL;
+ return ranges->at(0).IsEverything(max_char) ? on_success() : nullptr;
}
@@ -3495,10 +3469,10 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
int eats_at_least) {
int preload_characters = Min(4, eats_at_least);
+ DCHECK_LE(preload_characters, 4);
if (compiler->macro_assembler()->CanReadUnaligned()) {
bool one_byte = compiler->one_byte();
if (one_byte) {
- if (preload_characters > 4) preload_characters = 4;
// We can't preload 3 characters because there is no machine instruction
// to do that. We can't just load 4 because we could be reading
// beyond the end of the string, which could cause a memory fault.
@@ -3545,7 +3519,7 @@ class AlternativeGenerationList {
~AlternativeGenerationList() {
for (int i = kAFew; i < alt_gens_.length(); i++) {
delete alt_gens_[i];
- alt_gens_[i] = NULL;
+ alt_gens_[i] = nullptr;
}
}
@@ -3894,7 +3868,7 @@ void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
for (int i = 0; i < choice_count - 1; i++) {
GuardedAlternative alternative = alternatives_->at(i);
ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
for (int j = 0; j < guard_count; j++) {
DCHECK(!trace->mentions_reg(guards->at(j)->reg()));
}
@@ -3924,7 +3898,7 @@ void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int choice_count = alternatives_->length();
- if (choice_count == 1 && alternatives_->at(0).guards() == NULL) {
+ if (choice_count == 1 && alternatives_->at(0).guards() == nullptr) {
alternatives_->at(0).node()->Emit(compiler, trace);
return;
}
@@ -3937,7 +3911,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
// other choice nodes we only flush if we are out of code size budget.
- if (trace->flush_budget() == 0 && trace->actions() != NULL) {
+ if (trace->flush_budget() == 0 && trace->actions() != nullptr) {
trace->Flush(compiler, this);
return;
}
@@ -3983,7 +3957,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// If there are actions to be flushed we have to limit how many times
// they are flushed. Take the budget of the parent trace and distribute
// it fairly amongst the children.
- if (new_trace.actions() != NULL) {
+ if (new_trace.actions() != nullptr) {
new_trace.set_flush_budget(new_flush_budget);
}
bool next_expects_preload =
@@ -4012,7 +3986,7 @@ Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
// and check it against the pushed value. This avoids pushing backtrack
// information for each iteration of the loop, which could take up a lot of
// space.
- DCHECK(trace->stop_node() == NULL);
+ DCHECK(trace->stop_node() == nullptr);
macro_assembler->PushCurrentPosition();
Label greedy_match_failed;
Trace greedy_match_trace;
@@ -4051,7 +4025,7 @@ int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
if (alternatives_->length() != 2) return eats_at_least;
GuardedAlternative alt1 = alternatives_->at(1);
- if (alt1.guards() != NULL && alt1.guards()->length() != 0) {
+ if (alt1.guards() != nullptr && alt1.guards()->length() != 0) {
return eats_at_least;
}
RegExpNode* eats_anything_node = alt1.node();
@@ -4077,7 +4051,7 @@ int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
// not be atoms, they can be any reasonably limited character class or
// small alternation.
BoyerMooreLookahead* bm = bm_info(false);
- if (bm == NULL) {
+ if (bm == nullptr) {
eats_at_least = Min(kMaxLookaheadForBoyerMoore,
EatsAtLeast(kMaxLookaheadForBoyerMoore,
kRecursionBudget,
@@ -4090,7 +4064,7 @@ int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
alt0.node()->FillInBMInfo(isolate, 0, kRecursionBudget, bm, false);
}
}
- if (bm != NULL) {
+ if (bm != nullptr) {
bm->EmitSkipInstructions(macro_assembler);
}
return eats_at_least;
@@ -4118,7 +4092,7 @@ void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
AlternativeGeneration* alt_gen = alt_gens->at(i);
alt_gen->quick_check_details.set_characters(preload->preload_characters_);
ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
Trace new_trace(*trace);
new_trace.set_characters_preloaded(preload->preload_is_current_ ?
preload->preload_characters_ :
@@ -4169,7 +4143,7 @@ void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
generate_full_check_inline = true;
}
if (generate_full_check_inline) {
- if (new_trace.actions() != NULL) {
+ if (new_trace.actions() != nullptr) {
new_trace.set_flush_budget(new_flush_budget);
}
for (int j = 0; j < guard_count; j++) {
@@ -4198,7 +4172,7 @@ void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == NULL) ? 0 : guards->length();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
if (next_expects_preload) {
Label reload_current_char;
out_of_line_trace.set_backtrack(&reload_current_char);
@@ -4210,9 +4184,7 @@ void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
// Reload the current character, since the next quick check expects that.
// We don't need to check bounds here because we only get into this
// code through a quick check which already did the checked load.
- macro_assembler->LoadCurrentCharacter(trace->cp_offset(),
- NULL,
- false,
+ macro_assembler->LoadCurrentCharacter(trace->cp_offset(), nullptr, false,
preload_characters);
macro_assembler->GoTo(&(alt_gen->after));
} else {
@@ -4337,7 +4309,7 @@ void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int clear_registers_to = clear_registers_from + clear_register_count - 1;
assembler->ClearRegisters(clear_registers_from, clear_registers_to);
- DCHECK(trace->backtrack() == NULL);
+ DCHECK(trace->backtrack() == nullptr);
assembler->Backtrack();
return;
}
@@ -4361,9 +4333,9 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (compiler->ignore_case()) {
+ if (IgnoreCase(flags_)) {
assembler->CheckNotBackReferenceIgnoreCase(
- start_reg_, read_backward(), compiler->unicode(), trace->backtrack());
+ start_reg_, read_backward(), IsUnicode(flags_), trace->backtrack());
} else {
assembler->CheckNotBackReference(start_reg_, read_backward(),
trace->backtrack());
@@ -4372,7 +4344,7 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
// Check that the back reference does not end inside a surrogate pair.
- if (compiler->unicode() && !compiler->one_byte()) {
+ if (IsUnicode(flags_) && !compiler->one_byte()) {
assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
}
on_success()->Emit(compiler, trace);
@@ -4879,7 +4851,7 @@ UnicodeRangeSplitter::UnicodeRangeSplitter(Zone* zone,
void UnicodeRangeSplitter::Call(uc32 from, DispatchTable::Entry entry) {
OutSet* outset = entry.out_set();
if (!outset->Get(kBase)) return;
- ZoneList<CharacterRange>** target = NULL;
+ ZoneList<CharacterRange>** target = nullptr;
if (outset->Get(kBmpCodePoints)) {
target = &bmp_;
} else if (outset->Get(kLeadSurrogates)) {
@@ -4890,28 +4862,29 @@ void UnicodeRangeSplitter::Call(uc32 from, DispatchTable::Entry entry) {
DCHECK(outset->Get(kNonBmpCodePoints));
target = &non_bmp_;
}
- if (*target == NULL) *target = new (zone_) ZoneList<CharacterRange>(2, zone_);
+ if (*target == nullptr)
+ *target = new (zone_) ZoneList<CharacterRange>(2, zone_);
(*target)->Add(CharacterRange::Range(entry.from(), entry.to()), zone_);
}
-
void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
ZoneList<CharacterRange>* bmp = splitter->bmp();
if (bmp == nullptr) return;
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
- compiler->zone(), bmp, compiler->read_backward(), on_success)));
+ compiler->zone(), bmp, compiler->read_backward(), on_success,
+ default_flags)));
}
-
void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success,
UnicodeRangeSplitter* splitter) {
ZoneList<CharacterRange>* non_bmp = splitter->non_bmp();
if (non_bmp == nullptr) return;
- DCHECK(compiler->unicode());
DCHECK(!compiler->one_byte());
Zone* zone = compiler->zone();
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
CharacterRange::Canonicalize(non_bmp);
for (int i = 0; i < non_bmp->length(); i++) {
// Match surrogate pair.
@@ -4931,7 +4904,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Singleton(from_l),
CharacterRange::Range(from_t, to_t), compiler->read_backward(),
- on_success)));
+ on_success, default_flags)));
} else {
if (from_t != kTrailSurrogateStart) {
// Add [from_l][from_t-\udfff]
@@ -4939,7 +4912,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Singleton(from_l),
CharacterRange::Range(from_t, kTrailSurrogateEnd),
- compiler->read_backward(), on_success)));
+ compiler->read_backward(), on_success, default_flags)));
from_l++;
}
if (to_t != kTrailSurrogateEnd) {
@@ -4948,7 +4921,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Singleton(to_l),
CharacterRange::Range(kTrailSurrogateStart, to_t),
- compiler->read_backward(), on_success)));
+ compiler->read_backward(), on_success, default_flags)));
to_l--;
}
if (from_l <= to_l) {
@@ -4957,49 +4930,47 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Range(from_l, to_l),
CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
- compiler->read_backward(), on_success)));
+ compiler->read_backward(), on_success, default_flags)));
}
}
}
}
-
RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
- ZoneList<CharacterRange>* match, RegExpNode* on_success,
- bool read_backward) {
+ ZoneList<CharacterRange>* match, RegExpNode* on_success, bool read_backward,
+ JSRegExp::Flags flags) {
Zone* zone = compiler->zone();
RegExpNode* match_node = TextNode::CreateForCharacterRanges(
- zone, match, read_backward, on_success);
+ zone, match, read_backward, on_success, flags);
int stack_register = compiler->UnicodeLookaroundStackRegister();
int position_register = compiler->UnicodeLookaroundPositionRegister();
RegExpLookaround::Builder lookaround(false, match_node, stack_register,
position_register);
RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookbehind, !read_backward, lookaround.on_match_success());
+ zone, lookbehind, !read_backward, lookaround.on_match_success(), flags);
return lookaround.ForMatch(negative_match);
}
-
RegExpNode* MatchAndNegativeLookaroundInReadDirection(
RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
- bool read_backward) {
+ bool read_backward, JSRegExp::Flags flags) {
Zone* zone = compiler->zone();
int stack_register = compiler->UnicodeLookaroundStackRegister();
int position_register = compiler->UnicodeLookaroundPositionRegister();
RegExpLookaround::Builder lookaround(false, on_success, stack_register,
position_register);
RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookahead, read_backward, lookaround.on_match_success());
+ zone, lookahead, read_backward, lookaround.on_match_success(), flags);
return TextNode::CreateForCharacterRanges(
- zone, match, read_backward, lookaround.ForMatch(negative_match));
+ zone, match, read_backward, lookaround.ForMatch(negative_match), flags);
}
-
void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success,
UnicodeRangeSplitter* splitter) {
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
ZoneList<CharacterRange>* lead_surrogates = splitter->lead_surrogates();
if (lead_surrogates == nullptr) return;
Zone* zone = compiler->zone();
@@ -5012,20 +4983,22 @@ void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
// Reading backward. Assert that reading forward, there is no trail
// surrogate, and then backward match the lead surrogate.
match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, trail_surrogates, lead_surrogates, on_success, true);
+ compiler, trail_surrogates, lead_surrogates, on_success, true,
+ default_flags);
} else {
// Reading forward. Forward match the lead surrogate and assert that
// no trail surrogate follows.
match = MatchAndNegativeLookaroundInReadDirection(
- compiler, lead_surrogates, trail_surrogates, on_success, false);
+ compiler, lead_surrogates, trail_surrogates, on_success, false,
+ default_flags);
}
result->AddAlternative(GuardedAlternative(match));
}
-
void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success,
UnicodeRangeSplitter* splitter) {
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
ZoneList<CharacterRange>* trail_surrogates = splitter->trail_surrogates();
if (trail_surrogates == nullptr) return;
Zone* zone = compiler->zone();
@@ -5038,12 +5011,14 @@ void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
// Reading backward. Backward match the trail surrogate and assert that no
// lead surrogate precedes it.
match = MatchAndNegativeLookaroundInReadDirection(
- compiler, trail_surrogates, lead_surrogates, on_success, true);
+ compiler, trail_surrogates, lead_surrogates, on_success, true,
+ default_flags);
} else {
// Reading forward. Assert that reading backward, there is no lead
// surrogate, and then forward match the trail surrogate.
match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, lead_surrogates, trail_surrogates, on_success, false);
+ compiler, lead_surrogates, trail_surrogates, on_success, false,
+ default_flags);
}
result->AddAlternative(GuardedAlternative(match));
}
@@ -5059,7 +5034,9 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
// the associated trail surrogate.
ZoneList<CharacterRange>* range = CharacterRange::List(
zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
- return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ return TextNode::CreateForCharacterRanges(zone, range, false, on_success,
+ default_flags);
}
void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
@@ -5100,10 +5077,10 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
set_.Canonicalize();
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* ranges = this->ranges(zone);
- if (compiler->needs_unicode_case_equivalents()) {
+ if (NeedsUnicodeCaseEquivalents(flags_)) {
AddUnicodeCaseEquivalents(ranges, zone);
}
- if (compiler->unicode() && !compiler->one_byte() &&
+ if (IsUnicode(flags_) && !compiler->one_byte() &&
!contains_split_surrogate()) {
if (is_negated()) {
ZoneList<CharacterRange>* negated =
@@ -5112,9 +5089,10 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
ranges = negated;
}
if (ranges->length() == 0) {
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
ranges->Add(CharacterRange::Everything(), zone);
RegExpCharacterClass* fail =
- new (zone) RegExpCharacterClass(ranges, NEGATED);
+ new (zone) RegExpCharacterClass(ranges, default_flags, NEGATED);
return new (zone) TextNode(fail, compiler->read_backward(), on_success);
}
if (standard_type() == '*') {
@@ -5189,10 +5167,12 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
// i is length or it is the index of an atom.
if (i == length) break;
int first_atom = i;
+ JSRegExp::Flags flags = alternatives->at(i)->AsAtom()->flags();
i++;
while (i < length) {
RegExpTree* alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
+ if (alternative->AsAtom()->flags() != flags) break;
i++;
}
// Sort atoms to get ones with common prefixes together.
@@ -5204,7 +5184,7 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
DCHECK_LT(first_atom, alternatives->length());
DCHECK_LE(i, alternatives->length());
DCHECK_LE(first_atom, i);
- if (compiler->ignore_case()) {
+ if (IgnoreCase(flags)) {
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
auto compare_closure =
@@ -5236,7 +5216,8 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
i++;
continue;
}
- RegExpAtom* atom = alternative->AsAtom();
+ RegExpAtom* const atom = alternative->AsAtom();
+ JSRegExp::Flags flags = atom->flags();
unibrow::uchar common_prefix = atom->data().at(0);
int first_with_prefix = i;
int prefix_length = atom->length();
@@ -5244,10 +5225,11 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
while (i < length) {
alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
- atom = alternative->AsAtom();
+ RegExpAtom* const atom = alternative->AsAtom();
+ if (atom->flags() != flags) break;
unibrow::uchar new_prefix = atom->data().at(0);
if (new_prefix != common_prefix) {
- if (!compiler->ignore_case()) break;
+ if (!IgnoreCase(flags)) break;
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
new_prefix = Canonical(canonicalize, new_prefix);
@@ -5264,7 +5246,7 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
// common prefix if the terms were similar or presorted in the input.
// Find out how long the common prefix is.
int run_length = i - first_with_prefix;
- atom = alternatives->at(first_with_prefix)->AsAtom();
+ RegExpAtom* const atom = alternatives->at(first_with_prefix)->AsAtom();
for (int j = 1; j < run_length && prefix_length > 1; j++) {
RegExpAtom* old_atom =
alternatives->at(j + first_with_prefix)->AsAtom();
@@ -5275,8 +5257,8 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
}
}
}
- RegExpAtom* prefix =
- new (zone) RegExpAtom(atom->data().SubVector(0, prefix_length));
+ RegExpAtom* prefix = new (zone)
+ RegExpAtom(atom->data().SubVector(0, prefix_length), flags);
ZoneList<RegExpTree*>* pair = new (zone) ZoneList<RegExpTree*>(2, zone);
pair->Add(prefix, zone);
ZoneList<RegExpTree*>* suffixes =
@@ -5289,7 +5271,8 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
suffixes->Add(new (zone) RegExpEmpty(), zone);
} else {
RegExpTree* suffix = new (zone) RegExpAtom(
- old_atom->data().SubVector(prefix_length, old_atom->length()));
+ old_atom->data().SubVector(prefix_length, old_atom->length()),
+ flags);
suffixes->Add(suffix, zone);
}
}
@@ -5312,7 +5295,6 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
Zone* zone = compiler->zone();
ZoneList<RegExpTree*>* alternatives = this->alternatives();
int length = alternatives->length();
- const bool unicode = compiler->unicode();
int write_posn = 0;
int i = 0;
@@ -5323,24 +5305,28 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
i++;
continue;
}
- RegExpAtom* atom = alternative->AsAtom();
+ RegExpAtom* const atom = alternative->AsAtom();
if (atom->length() != 1) {
alternatives->at(write_posn++) = alternatives->at(i);
i++;
continue;
}
- DCHECK_IMPLIES(unicode,
+ JSRegExp::Flags flags = atom->flags();
+ DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
bool contains_trail_surrogate =
unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
int first_in_run = i;
i++;
+ // Find a run of single-character atom alternatives that have identical
+ // flags (case independence and unicode-ness).
while (i < length) {
alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
- atom = alternative->AsAtom();
+ RegExpAtom* const atom = alternative->AsAtom();
if (atom->length() != 1) break;
- DCHECK_IMPLIES(unicode,
+ if (atom->flags() != flags) break;
+ DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
contains_trail_surrogate |=
unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
@@ -5356,12 +5342,12 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
DCHECK_EQ(old_atom->length(), 1);
ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
}
- RegExpCharacterClass::Flags flags;
- if (unicode && contains_trail_surrogate) {
- flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
+ RegExpCharacterClass::CharacterClassFlags character_class_flags;
+ if (IsUnicode(flags) && contains_trail_surrogate) {
+ character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
}
alternatives->at(write_posn++) =
- new (zone) RegExpCharacterClass(ranges, flags);
+ new (zone) RegExpCharacterClass(ranges, flags, character_class_flags);
} else {
// Just copy any trivial alternatives.
for (int j = first_in_run; j < i; j++) {
@@ -5593,8 +5579,9 @@ namespace {
// \B to (?<=\w)(?=\w)|(?<=\W)(?=\W)
RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
- RegExpAssertion::AssertionType type) {
- DCHECK(compiler->needs_unicode_case_equivalents());
+ RegExpAssertion::AssertionType type,
+ JSRegExp::Flags flags) {
+ DCHECK(NeedsUnicodeCaseEquivalents(flags));
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* word_range =
new (zone) ZoneList<CharacterRange>(2, zone);
@@ -5612,13 +5599,13 @@ RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
stack_register, position_register);
RegExpNode* backward = TextNode::CreateForCharacterRanges(
- zone, word_range, true, lookbehind.on_match_success());
+ zone, word_range, true, lookbehind.on_match_success(), flags);
// Look to the right.
RegExpLookaround::Builder lookahead(lookahead_for_word,
lookbehind.ForMatch(backward),
stack_register, position_register);
RegExpNode* forward = TextNode::CreateForCharacterRanges(
- zone, word_range, false, lookahead.on_match_success());
+ zone, word_range, false, lookahead.on_match_success(), flags);
result->AddAlternative(GuardedAlternative(lookahead.ForMatch(forward)));
}
return result;
@@ -5636,13 +5623,14 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
case START_OF_INPUT:
return AssertionNode::AtStart(on_success);
case BOUNDARY:
- return compiler->needs_unicode_case_equivalents()
- ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY)
+ return NeedsUnicodeCaseEquivalents(flags_)
+ ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
+ flags_)
: AssertionNode::AtBoundary(on_success);
case NON_BOUNDARY:
- return compiler->needs_unicode_case_equivalents()
+ return NeedsUnicodeCaseEquivalents(flags_)
? BoundaryAssertionAsLookaround(compiler, on_success,
- NON_BOUNDARY)
+ NON_BOUNDARY, flags_)
: AssertionNode::AtNonBoundary(on_success);
case END_OF_INPUT:
return AssertionNode::AtEnd(on_success);
@@ -5658,7 +5646,9 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
ZoneList<CharacterRange>* newline_ranges =
new(zone) ZoneList<CharacterRange>(3, zone);
CharacterRange::AddClassEscape('n', newline_ranges, false, zone);
- RegExpCharacterClass* newline_atom = new (zone) RegExpCharacterClass('n');
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ RegExpCharacterClass* newline_atom =
+ new (zone) RegExpCharacterClass('n', default_flags);
TextNode* newline_matcher = new (zone) TextNode(
newline_atom, false, ActionNode::PositiveSubmatchSuccess(
stack_pointer_register, position_register,
@@ -5688,7 +5678,7 @@ RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
return new (compiler->zone())
BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()),
+ RegExpCapture::EndRegister(index()), flags_,
compiler->read_backward(), on_success);
}
@@ -5994,7 +5984,7 @@ bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
- if (ranges_ == NULL) {
+ if (ranges_ == nullptr) {
ranges_ = new(zone) ZoneList<CharacterRange>(2, zone);
CharacterRange::AddClassEscape(standard_set_type_, ranges_, false, zone);
}
@@ -6082,7 +6072,7 @@ static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
void CharacterSet::Canonicalize() {
// Special/default classes are always considered canonical. The result
// of calling ranges() will be sorted.
- if (ranges_ == NULL) return;
+ if (ranges_ == nullptr) return;
CharacterRange::Canonicalize(ranges_);
}
@@ -6156,7 +6146,7 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
OutSet* OutSet::Extend(unsigned value, Zone* zone) {
if (Get(value))
return this;
- if (successors(zone) != NULL) {
+ if (successors(zone) != nullptr) {
for (int i = 0; i < successors(zone)->length(); i++) {
OutSet* successor = successors(zone)->at(i);
if (successor->Get(value))
@@ -6176,7 +6166,7 @@ void OutSet::Set(unsigned value, Zone *zone) {
if (value < kFirstLimit) {
first_ |= (1 << value);
} else {
- if (remaining_ == NULL)
+ if (remaining_ == nullptr)
remaining_ = new(zone) ZoneList<unsigned>(1, zone);
if (remaining_->is_empty() || !remaining_->Contains(value))
remaining_->Add(value, zone);
@@ -6187,7 +6177,7 @@ void OutSet::Set(unsigned value, Zone *zone) {
bool OutSet::Get(unsigned value) const {
if (value < kFirstLimit) {
return (first_ & (1 << value)) != 0;
- } else if (remaining_ == NULL) {
+ } else if (remaining_ == nullptr) {
return false;
} else {
return remaining_->Contains(value);
@@ -6344,9 +6334,7 @@ void TextNode::CalculateOffsets() {
void Analysis::VisitText(TextNode* that) {
- if (ignore_case()) {
- that->MakeCaseIndependent(isolate(), is_one_byte_);
- }
+ that->MakeCaseIndependent(isolate(), is_one_byte_);
EnsureAnalyzed(that->on_success());
if (!has_failed()) {
that->CalculateOffsets();
@@ -6427,7 +6415,7 @@ void ChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
budget = (budget - 1) / alts->length();
for (int i = 0; i < alts->length(); i++) {
GuardedAlternative& alt = alts->at(i);
- if (alt.guards() != NULL && alt.guards()->length() != 0) {
+ if (alt.guards() != nullptr && alt.guards()->length() != 0) {
bm->SetRest(offset); // Give up trying to fill in info.
SaveBMInfo(bm, not_at_start, offset);
return;
@@ -6457,7 +6445,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
return;
}
uc16 character = atom->data()[j];
- if (bm->compiler()->ignore_case()) {
+ if (IgnoreCase(atom->flags())) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(
isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
@@ -6609,9 +6597,9 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
target->Accept(this);
}
-
RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
- RegExpNode* on_success) {
+ RegExpNode* on_success,
+ JSRegExp::Flags flags) {
// If the regexp matching starts within a surrogate pair, step back
// to the lead surrogate and start matching from there.
DCHECK(!compiler->read_backward());
@@ -6626,11 +6614,11 @@ RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
int stack_register = compiler->UnicodeLookaroundStackRegister();
int position_register = compiler->UnicodeLookaroundPositionRegister();
RegExpNode* step_back = TextNode::CreateForCharacterRanges(
- zone, lead_surrogates, true, on_success);
+ zone, lead_surrogates, true, on_success, flags);
RegExpLookaround::Builder builder(true, step_back, stack_register,
position_register);
RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
- zone, trail_surrogates, false, builder.on_match_success());
+ zone, trail_surrogates, false, builder.on_match_success(), flags);
optional_step_back->AddAlternative(
GuardedAlternative(builder.ForMatch(match_trail)));
@@ -6647,12 +6635,10 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
return IrregexpRegExpTooBig(isolate);
}
- bool ignore_case = flags & JSRegExp::kIgnoreCase;
- bool is_sticky = flags & JSRegExp::kSticky;
- bool is_global = flags & JSRegExp::kGlobal;
- bool is_unicode = flags & JSRegExp::kUnicode;
- RegExpCompiler compiler(isolate, zone, data->capture_count, flags,
- is_one_byte);
+ bool is_sticky = IsSticky(flags);
+ bool is_global = IsGlobal(flags);
+ bool is_unicode = IsUnicode(flags);
+ RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
if (compiler.optimize()) compiler.set_optimize(!TooMuchRegExpCode(pattern));
@@ -6680,9 +6666,11 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (!is_start_anchored && !is_sticky) {
// Add a .*? at the beginning, outside the body capture, unless
// this expression is anchored at the beginning or sticky.
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
RegExpNode* loop_node = RegExpQuantifier::ToNode(
- 0, RegExpTree::kInfinity, false, new (zone) RegExpCharacterClass('*'),
- &compiler, captured_body, data->contains_anchor);
+ 0, RegExpTree::kInfinity, false,
+ new (zone) RegExpCharacterClass('*', default_flags), &compiler,
+ captured_body, data->contains_anchor);
if (data->contains_anchor) {
// Unroll loop once, to take care of the case that might start
@@ -6690,26 +6678,27 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
first_step_node->AddAlternative(GuardedAlternative(captured_body));
first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
- new (zone) RegExpCharacterClass('*'), false, loop_node)));
+ new (zone) RegExpCharacterClass('*', default_flags), false,
+ loop_node)));
node = first_step_node;
} else {
node = loop_node;
}
}
if (is_one_byte) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
// Do it again to propagate the new nodes to places where they were not
// put because they had not been calculated yet.
- if (node != NULL) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
+ if (node != nullptr) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
}
- } else if (compiler.unicode() && (is_global || is_sticky)) {
- node = OptionallyStepBackToLeadSurrogate(&compiler, node);
+ } else if (is_unicode && (is_global || is_sticky)) {
+ node = OptionallyStepBackToLeadSurrogate(&compiler, node, flags);
}
- if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
+ if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
- Analysis analysis(isolate, flags, is_one_byte);
+ Analysis analysis(isolate, is_one_byte);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 23dc8fac4b..021c59d3e4 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
+#include "src/objects/js-regexp.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -20,6 +21,36 @@ class RegExpNode;
class RegExpTree;
class BoyerMooreLookahead;
+inline bool IgnoreCase(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kIgnoreCase) != 0;
+}
+
+inline bool IsUnicode(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kUnicode) != 0;
+}
+
+inline bool IsSticky(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kSticky) != 0;
+}
+
+inline bool IsGlobal(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kGlobal) != 0;
+}
+
+inline bool DotAll(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kDotAll) != 0;
+}
+
+inline bool Multiline(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kMultiline) != 0;
+}
+
+inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
+ // Both unicode and ignore_case flags are set. We need to use ICU to find
+ // the closure over case equivalents.
+ return IsUnicode(flags) && IgnoreCase(flags);
+}
+
class RegExpImpl {
public:
// Whether V8 is compiled with native regexp support or not.
@@ -105,7 +136,8 @@ class RegExpImpl {
Handle<JSRegExp> regexp, Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info);
- // Set last match info. If match is NULL, then setting captures is omitted.
+ // Set last match info. If match is nullptr, then setting captures is
+ // omitted.
static Handle<RegExpMatchInfo> SetLastMatchInfo(
Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
int capture_count, int32_t* match);
@@ -119,9 +151,9 @@ class RegExpImpl {
INLINE(~GlobalCache());
// Fetch the next entry in the cache for global regexp match results.
- // This does not set the last match info. Upon failure, NULL is returned.
- // The cause can be checked with Result(). The previous
- // result is still in available in memory when a failure happens.
+ // This does not set the last match info. Upon failure, nullptr is
+ // returned. The cause can be checked with Result(). The previous result is
+ // still in available in memory when a failure happens.
INLINE(int32_t* FetchNext());
INLINE(int32_t* LastSuccessfulMatch());
@@ -184,7 +216,7 @@ enum ElementInSetsRelation {
// integers (< 32). May do zone-allocation.
class OutSet: public ZoneObject {
public:
- OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
+ OutSet() : first_(0), remaining_(nullptr), successors_(nullptr) {}
OutSet* Extend(unsigned value, Zone* zone);
bool Get(unsigned value) const;
static const unsigned kFirstLimit = 32;
@@ -201,7 +233,7 @@ class OutSet: public ZoneObject {
ZoneList<OutSet*>* successors(Zone* zone) { return successors_; }
OutSet(uint32_t first, ZoneList<unsigned>* remaining)
- : first_(first), remaining_(remaining), successors_(NULL) { }
+ : first_(first), remaining_(remaining), successors_(nullptr) {}
uint32_t first_;
ZoneList<unsigned>* remaining_;
ZoneList<OutSet*>* successors_;
@@ -217,7 +249,7 @@ class DispatchTable : public ZoneObject {
class Entry {
public:
- Entry() : from_(0), to_(0), out_set_(NULL) { }
+ Entry() : from_(0), to_(0), out_set_(nullptr) {}
Entry(uc32 from, uc32 to, OutSet* out_set)
: from_(from), to_(to), out_set_(out_set) {
DCHECK(from <= to);
@@ -433,8 +465,11 @@ extern int kUninitializedRegExpNodePlaceHolder;
class RegExpNode: public ZoneObject {
public:
explicit RegExpNode(Zone* zone)
- : replacement_(NULL), on_work_list_(false), trace_count_(0), zone_(zone) {
- bm_info_[0] = bm_info_[1] = NULL;
+ : replacement_(nullptr),
+ on_work_list_(false),
+ trace_count_(0),
+ zone_(zone) {
+ bm_info_[0] = bm_info_[1] = nullptr;
}
virtual ~RegExpNode();
virtual void Accept(NodeVisitor* visitor) = 0;
@@ -472,7 +507,7 @@ class RegExpNode: public ZoneObject {
// character and that has no guards on it.
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
- return NULL;
+ return nullptr;
}
// Collects information on the possible code units (mod 128) that can match if
@@ -489,10 +524,8 @@ class RegExpNode: public ZoneObject {
// If we know that the input is one-byte then there are some nodes that can
// never match. This method returns a node that can be substituted for
- // itself, or NULL if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth, bool ignore_case) {
- return this;
- }
+ // itself, or nullptr if the node can never match.
+ virtual RegExpNode* FilterOneByte(int depth) { return this; }
// Helper for FilterOneByte.
RegExpNode* replacement() {
DCHECK(info()->replacement_calculated);
@@ -564,7 +597,7 @@ class SeqRegExpNode: public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth);
virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) {
on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
@@ -572,7 +605,7 @@ class SeqRegExpNode: public RegExpNode {
}
protected:
- RegExpNode* FilterSuccessor(int depth, bool ignore_case);
+ RegExpNode* FilterSuccessor(int depth);
private:
RegExpNode* on_success_;
@@ -677,13 +710,15 @@ class TextNode: public SeqRegExpNode {
static TextNode* CreateForCharacterRanges(Zone* zone,
ZoneList<CharacterRange>* ranges,
bool read_backward,
- RegExpNode* on_success);
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
// Create TextNode for a surrogate pair with a range given for the
// lead and the trail surrogate each.
static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
CharacterRange trail,
bool read_backward,
- RegExpNode* on_success);
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
@@ -700,7 +735,7 @@ class TextNode: public SeqRegExpNode {
virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start);
void CalculateOffsets();
- virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth);
private:
enum TextEmitPassType {
@@ -710,7 +745,7 @@ class TextNode: public SeqRegExpNode {
CASE_CHARACTER_MATCH, // Case-independent single character check.
CHARACTER_CLASS_MATCH // Character class.
};
- static bool SkipPass(int pass, bool ignore_case);
+ static bool SkipPass(TextEmitPassType pass, bool ignore_case);
static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
static const int kLastPass = CHARACTER_CLASS_MATCH;
void TextEmitPass(RegExpCompiler* compiler,
@@ -774,11 +809,12 @@ class AssertionNode: public SeqRegExpNode {
class BackReferenceNode: public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg, int end_reg, bool read_backward,
- RegExpNode* on_success)
+ BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
+ bool read_backward, RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
end_reg_(end_reg),
+ flags_(flags),
read_backward_(read_backward) {}
virtual void Accept(NodeVisitor* visitor);
int start_register() { return start_reg_; }
@@ -800,6 +836,7 @@ class BackReferenceNode: public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
+ JSRegExp::Flags flags_;
bool read_backward_;
};
@@ -873,7 +910,8 @@ class Guard: public ZoneObject {
class GuardedAlternative {
public:
- explicit GuardedAlternative(RegExpNode* node) : node_(node), guards_(NULL) { }
+ explicit GuardedAlternative(RegExpNode* node)
+ : node_(node), guards_(nullptr) {}
void AddGuard(Guard* guard, Zone* zone);
RegExpNode* node() { return node_; }
void set_node(RegExpNode* node) { node_ = node; }
@@ -892,11 +930,11 @@ class ChoiceNode: public RegExpNode {
public:
explicit ChoiceNode(int expected_size, Zone* zone)
: RegExpNode(zone),
- alternatives_(new(zone)
- ZoneList<GuardedAlternative>(expected_size, zone)),
- table_(NULL),
+ alternatives_(new (zone)
+ ZoneList<GuardedAlternative>(expected_size, zone)),
+ table_(nullptr),
not_at_start_(false),
- being_calculated_(false) { }
+ being_calculated_(false) {}
virtual void Accept(NodeVisitor* visitor);
void AddAlternative(GuardedAlternative node) {
alternatives()->Add(node, zone());
@@ -923,7 +961,7 @@ class ChoiceNode: public RegExpNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return true;
}
- virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth);
virtual bool read_backward() { return false; }
protected:
@@ -995,7 +1033,7 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return !is_first;
}
- virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth);
};
@@ -1003,8 +1041,8 @@ class LoopChoiceNode: public ChoiceNode {
public:
LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
: ChoiceNode(2, zone),
- loop_node_(NULL),
- continue_node_(NULL),
+ loop_node_(nullptr),
+ continue_node_(nullptr),
body_can_be_zero_length_(body_can_be_zero_length),
read_backward_(read_backward) {}
void AddLoopAlternative(GuardedAlternative alt);
@@ -1022,7 +1060,7 @@ class LoopChoiceNode: public ChoiceNode {
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
virtual bool read_backward() { return read_backward_; }
virtual void Accept(NodeVisitor* visitor);
- virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth);
private:
// AddAlternative is made private for loop nodes because alternatives
@@ -1201,7 +1239,7 @@ class Trace {
class DeferredAction {
public:
DeferredAction(ActionNode::ActionType action_type, int reg)
- : action_type_(action_type), reg_(reg), next_(NULL) { }
+ : action_type_(action_type), reg_(reg), next_(nullptr) {}
DeferredAction* next() { return next_; }
bool Mentions(int reg);
int reg() { return reg_; }
@@ -1255,14 +1293,14 @@ class Trace {
Trace()
: cp_offset_(0),
- actions_(NULL),
- backtrack_(NULL),
- stop_node_(NULL),
- loop_label_(NULL),
+ actions_(nullptr),
+ backtrack_(nullptr),
+ stop_node_(nullptr),
+ loop_label_(nullptr),
characters_preloaded_(0),
bound_checked_up_to_(0),
flush_budget_(100),
- at_start_(UNKNOWN) { }
+ at_start_(UNKNOWN) {}
// End the trace. This involves flushing the deferred actions in the trace
// and pushing a backtrack location onto the backtrack stack. Once this is
@@ -1282,13 +1320,9 @@ class Trace {
// a trivial trace is recorded in a label in the node so that gotos can be
// generated to that code.
bool is_trivial() {
- return backtrack_ == NULL &&
- actions_ == NULL &&
- cp_offset_ == 0 &&
- characters_preloaded_ == 0 &&
- bound_checked_up_to_ == 0 &&
- quick_check_performed_.characters() == 0 &&
- at_start_ == UNKNOWN;
+ return backtrack_ == nullptr && actions_ == nullptr && cp_offset_ == 0 &&
+ characters_preloaded_ == 0 && bound_checked_up_to_ == 0 &&
+ quick_check_performed_.characters() == 0 && at_start_ == UNKNOWN;
}
TriBool at_start() { return at_start_; }
void set_at_start(TriBool at_start) { at_start_ = at_start; }
@@ -1307,7 +1341,7 @@ class Trace {
// These set methods and AdvanceCurrentPositionInTrace should be used only on
// new traces - the intention is that traces are immutable after creation.
void add_action(DeferredAction* new_action) {
- DCHECK(new_action->next_ == NULL);
+ DCHECK(new_action->next_ == nullptr);
new_action->next_ = actions_;
actions_ = new_action;
}
@@ -1433,11 +1467,8 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
- Analysis(Isolate* isolate, JSRegExp::Flags flags, bool is_one_byte)
- : isolate_(isolate),
- flags_(flags),
- is_one_byte_(is_one_byte),
- error_message_(NULL) {}
+ Analysis(Isolate* isolate, bool is_one_byte)
+ : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@@ -1446,9 +1477,9 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
#undef DECLARE_VISIT
virtual void VisitLoopChoice(LoopChoiceNode* that);
- bool has_failed() { return error_message_ != NULL; }
+ bool has_failed() { return error_message_ != nullptr; }
const char* error_message() {
- DCHECK(error_message_ != NULL);
+ DCHECK(error_message_ != nullptr);
return error_message_;
}
void fail(const char* error_message) {
@@ -1457,12 +1488,8 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
Isolate* isolate() const { return isolate_; }
- bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
- bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
-
private:
Isolate* isolate_;
- JSRegExp::Flags flags_;
bool is_one_byte_;
const char* error_message_;
@@ -1472,11 +1499,11 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
struct RegExpCompileData {
RegExpCompileData()
- : tree(NULL),
- node(NULL),
- simple(true),
- contains_anchor(false),
- capture_count(0) { }
+ : tree(nullptr),
+ node(nullptr),
+ simple(true),
+ contains_anchor(false),
+ capture_count(0) {}
RegExpTree* tree;
RegExpNode* node;
bool simple;
@@ -1495,7 +1522,7 @@ class RegExpEngine: public AllStatic {
code(isolate->heap()->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
- : error_message(NULL), code(code), num_registers(registers) {}
+ : error_message(nullptr), code(code), num_registers(registers) {}
const char* error_message;
Object* code;
int num_registers;
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 3f8fbfc7c8..3fce7dd688 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 03ceb0ee75..e45eeeb492 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -95,7 +95,7 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -879,7 +879,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
@@ -910,7 +910,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
@@ -1195,14 +1195,14 @@ void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
Register rs,
const Operand& rt) {
if (condition == al) { // Unconditional.
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ jmp(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ Branch(&backtrack_label_, condition, rs, rt);
return;
}
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 6c1ba64c51..6d61601a40 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -170,7 +170,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 3b73f0bb56..68a7f87843 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -132,7 +132,7 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -917,7 +917,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
@@ -948,7 +948,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
@@ -1233,14 +1233,14 @@ void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
Register rs,
const Operand& rt) {
if (condition == al) { // Unconditional.
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ jmp(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ Branch(&backtrack_label_, condition, rs, rt);
return;
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 722ca01ab5..c0023f409b 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -174,7 +174,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
diff --git a/deps/v8/src/regexp/ppc/OWNERS b/deps/v8/src/regexp/ppc/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/regexp/ppc/OWNERS
+++ b/deps/v8/src/regexp/ppc/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index f8f5a0d2a3..bc3e643369 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -96,7 +96,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -913,7 +913,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ cmpi(r3, Operand::Zero());
__ beq(&exit_with_exception);
@@ -1192,14 +1192,14 @@ void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
void RegExpMacroAssemblerPPC::BranchOrBacktrack(Condition condition, Label* to,
CRegister cr) {
if (condition == al) { // Unconditional.
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ b(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ b(condition, &backtrack_label_, cr);
return;
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 775bc72d59..1e65600ecf 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -160,7 +160,7 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to, CRegister cr = cr7);
// Call and return internally in the generated code in a way that
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index 7755593fbf..782c9c9037 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -15,8 +15,8 @@ namespace internal {
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
#undef MAKE_ACCEPT
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExpTree::As##Name() { return NULL; } \
+#define MAKE_TYPE_CASE(Name) \
+ RegExp##Name* RegExpTree::As##Name() { return nullptr; } \
bool RegExpTree::Is##Name() { return false; }
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
#undef MAKE_TYPE_CASE
@@ -156,7 +156,7 @@ void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
that->alternatives()->at(i)->Accept(this, data);
}
os_ << ")";
- return NULL;
+ return nullptr;
}
@@ -167,7 +167,7 @@ void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
that->nodes()->at(i)->Accept(this, data);
}
os_ << ")";
- return NULL;
+ return nullptr;
}
@@ -188,7 +188,7 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
VisitCharacterRange(that->ranges(zone_)->at(i));
}
os_ << "]";
- return NULL;
+ return nullptr;
}
@@ -213,7 +213,7 @@ void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
os_ << "@B";
break;
}
- return NULL;
+ return nullptr;
}
@@ -224,7 +224,7 @@ void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
os_ << AsUC16(chardata[i]);
}
os_ << "'";
- return NULL;
+ return nullptr;
}
@@ -239,7 +239,7 @@ void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
}
os_ << ")";
}
- return NULL;
+ return nullptr;
}
@@ -253,7 +253,7 @@ void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
os_ << (that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
that->body()->Accept(this, data);
os_ << ")";
- return NULL;
+ return nullptr;
}
@@ -261,14 +261,14 @@ void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
os_ << "(^ ";
that->body()->Accept(this, data);
os_ << ")";
- return NULL;
+ return nullptr;
}
void* RegExpUnparser::VisitGroup(RegExpGroup* that, void* data) {
os_ << "(?: ";
that->body()->Accept(this, data);
os_ << ")";
- return NULL;
+ return nullptr;
}
void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
@@ -277,26 +277,26 @@ void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
os_ << (that->is_positive() ? " + " : " - ");
that->body()->Accept(this, data);
os_ << ")";
- return NULL;
+ return nullptr;
}
void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
void* data) {
os_ << "(<- " << that->index() << ")";
- return NULL;
+ return nullptr;
}
void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
os_ << '%';
- return NULL;
+ return nullptr;
}
std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
RegExpUnparser unparser(os, zone);
- Accept(&unparser, NULL);
+ Accept(&unparser, nullptr);
return os;
}
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 29fa3e0b1f..e60621f8b6 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_AST_H_
#include "src/objects.h"
+#include "src/objects/js-regexp.h"
#include "src/objects/string.h"
#include "src/utils.h"
#include "src/zone/zone-containers.h"
@@ -140,11 +141,11 @@ class CharacterRange {
class CharacterSet final BASE_EMBEDDED {
public:
explicit CharacterSet(uc16 standard_set_type)
- : ranges_(NULL), standard_set_type_(standard_set_type) {}
+ : ranges_(nullptr), standard_set_type_(standard_set_type) {}
explicit CharacterSet(ZoneList<CharacterRange>* ranges)
: ranges_(ranges), standard_set_type_(0) {}
ZoneList<CharacterRange>* ranges(Zone* zone);
- uc16 standard_set_type() { return standard_set_type_; }
+ uc16 standard_set_type() const { return standard_set_type_; }
void set_standard_set_type(uc16 special_set_type) {
standard_set_type_ = special_set_type;
}
@@ -274,7 +275,8 @@ class RegExpAssertion final : public RegExpTree {
BOUNDARY,
NON_BOUNDARY
};
- explicit RegExpAssertion(AssertionType type) : assertion_type_(type) {}
+ RegExpAssertion(AssertionType type, JSRegExp::Flags flags)
+ : assertion_type_(type), flags_(flags) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAssertion* AsAssertion() override;
@@ -286,7 +288,8 @@ class RegExpAssertion final : public RegExpTree {
AssertionType assertion_type() { return assertion_type_; }
private:
- AssertionType assertion_type_;
+ const AssertionType assertion_type_;
+ const JSRegExp::Flags flags_;
};
@@ -300,12 +303,18 @@ class RegExpCharacterClass final : public RegExpTree {
NEGATED = 1 << 0,
CONTAINS_SPLIT_SURROGATE = 1 << 1,
};
- typedef base::Flags<Flag> Flags;
-
- explicit RegExpCharacterClass(ZoneList<CharacterRange>* ranges,
- Flags flags = Flags())
- : set_(ranges), flags_(flags) {}
- explicit RegExpCharacterClass(uc16 type) : set_(type), flags_(0) {}
+ typedef base::Flags<Flag> CharacterClassFlags;
+
+ RegExpCharacterClass(
+ ZoneList<CharacterRange>* ranges, JSRegExp::Flags flags,
+ CharacterClassFlags character_class_flags = CharacterClassFlags())
+ : set_(ranges),
+ flags_(flags),
+ character_class_flags_(character_class_flags) {}
+ RegExpCharacterClass(uc16 type, JSRegExp::Flags flags)
+ : set_(type),
+ flags_(flags),
+ character_class_flags_(CharacterClassFlags()) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpCharacterClass* AsCharacterClass() override;
@@ -332,22 +341,25 @@ class RegExpCharacterClass final : public RegExpTree {
// D : non-ASCII digit
// . : non-newline
// * : All characters, for advancing unanchored regexp
- uc16 standard_type() { return set_.standard_set_type(); }
+ uc16 standard_type() const { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
- bool is_negated() const { return (flags_ & NEGATED) != 0; }
+ bool is_negated() const { return (character_class_flags_ & NEGATED) != 0; }
+ JSRegExp::Flags flags() const { return flags_; }
bool contains_split_surrogate() const {
- return (flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
+ return (character_class_flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
}
private:
CharacterSet set_;
- const Flags flags_;
+ const JSRegExp::Flags flags_;
+ const CharacterClassFlags character_class_flags_;
};
class RegExpAtom final : public RegExpTree {
public:
- explicit RegExpAtom(Vector<const uc16> data) : data_(data) {}
+ explicit RegExpAtom(Vector<const uc16> data, JSRegExp::Flags flags)
+ : data_(data), flags_(flags) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAtom* AsAtom() override;
@@ -358,9 +370,12 @@ class RegExpAtom final : public RegExpTree {
void AppendToText(RegExpText* text, Zone* zone) override;
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
+ JSRegExp::Flags flags() const { return flags_; }
+ bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
private:
Vector<const uc16> data_;
+ const JSRegExp::Flags flags_;
};
@@ -432,7 +447,7 @@ class RegExpQuantifier final : public RegExpTree {
class RegExpCapture final : public RegExpTree {
public:
explicit RegExpCapture(int index)
- : body_(NULL), index_(index), name_(nullptr) {}
+ : body_(nullptr), index_(index), name_(nullptr) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
static RegExpNode* ToNode(RegExpTree* body, int index,
@@ -532,9 +547,10 @@ class RegExpLookaround final : public RegExpTree {
class RegExpBackReference final : public RegExpTree {
public:
- RegExpBackReference() : capture_(nullptr), name_(nullptr) {}
- explicit RegExpBackReference(RegExpCapture* capture)
- : capture_(capture), name_(nullptr) {}
+ explicit RegExpBackReference(JSRegExp::Flags flags)
+ : capture_(nullptr), name_(nullptr), flags_(flags) {}
+ RegExpBackReference(RegExpCapture* capture, JSRegExp::Flags flags)
+ : capture_(capture), name_(nullptr), flags_(flags) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpBackReference* AsBackReference() override;
@@ -552,6 +568,7 @@ class RegExpBackReference final : public RegExpTree {
private:
RegExpCapture* capture_;
const ZoneVector<uc16>* name_;
+ const JSRegExp::Flags flags_;
};
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index 749393b782..076197c940 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -54,7 +54,7 @@ void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
- if (l == NULL) l = &backtrack_;
+ if (l == nullptr) l = &backtrack_;
if (l->is_bound()) {
Emit32(l->pos());
} else {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
index dad2e9ac73..8063ebbc2d 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -20,14 +20,15 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
RegExpMacroAssemblerIrregexp(Isolate* isolate, Vector<byte> buffer,
Zone* zone);
virtual ~RegExpMacroAssemblerIrregexp();
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index cb2199bf94..600757a72b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -353,8 +353,8 @@ Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
DCHECK(stack_pointer <= old_stack_base);
DCHECK(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
- if (new_stack_base == NULL) {
- return NULL;
+ if (new_stack_base == nullptr) {
+ return nullptr;
}
*stack_base = new_stack_base;
intptr_t stack_content_size = old_stack_base - stack_pointer;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 76efdf910f..6fcd5ec482 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -88,8 +88,8 @@ class RegExpMacroAssembler {
Label* on_no_match) = 0;
// Check the current character for a match with a literal character. If we
// fail to match then goto the on_failure label. End of input always
- // matches. If the label is NULL then we should pop a backtrack address off
- // the stack and go to that.
+ // matches. If the label is nullptr then we should pop a backtrack address
+ // off the stack and go to that.
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal) = 0;
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned and_with,
@@ -123,10 +123,10 @@ class RegExpMacroAssembler {
virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
// Check whether a register is >= a given constant and go to a label if it
- // is. Backtracks instead if the label is NULL.
+ // is. Backtracks instead if the label is nullptr.
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge) = 0;
// Check whether a register is < a given constant and go to a label if it is.
- // Backtracks instead if the label is NULL.
+ // Backtracks instead if the label is nullptr.
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt) = 0;
// Check whether a register is == to the current position and go to a
// label if it is.
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index a7da50de20..2c6aa5b23a 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -16,6 +16,9 @@
#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
+// TODO(mathias): Remove this when we no longer need to check
+// `U_ICU_VERSION_MAJOR_NUM`.
+#include "unicode/uvernum.h"
#endif // V8_INTL_SUPPORT
namespace v8 {
@@ -26,15 +29,12 @@ RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
: isolate_(isolate),
zone_(zone),
error_(error),
- captures_(NULL),
- named_captures_(NULL),
- named_back_references_(NULL),
+ captures_(nullptr),
+ named_captures_(nullptr),
+ named_back_references_(nullptr),
in_(in),
current_(kEndMarker),
- dotall_(flags & JSRegExp::kDotAll),
- ignore_case_(flags & JSRegExp::kIgnoreCase),
- multiline_(flags & JSRegExp::kMultiline),
- unicode_(flags & JSRegExp::kUnicode),
+ top_level_flags_(flags),
next_pos_(0),
captures_started_(0),
capture_count_(0),
@@ -44,7 +44,6 @@ RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
is_scanned_for_captures_(false),
has_named_captures_(false),
failed_(false) {
- DCHECK_IMPLIES(dotall(), FLAG_harmony_regexp_dotall);
Advance();
}
@@ -139,7 +138,7 @@ bool RegExpParser::IsSyntaxCharacterOrSlash(uc32 c) {
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
- if (failed_) return NULL; // Do not overwrite any existing error.
+ if (failed_) return nullptr; // Do not overwrite any existing error.
failed_ = true;
*error_ = isolate()
->factory()
@@ -148,15 +147,13 @@ RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
// Zip to the end to make sure the no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
- return NULL;
+ return nullptr;
}
-
-#define CHECK_FAILED /**/); \
- if (failed_) return NULL; \
+#define CHECK_FAILED /**/); \
+ if (failed_) return nullptr; \
((void)0
-
// Pattern ::
// Disjunction
RegExpTree* RegExpParser::ParsePattern() {
@@ -184,8 +181,8 @@ RegExpTree* RegExpParser::ParsePattern() {
// Atom Quantifier
RegExpTree* RegExpParser::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
- RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
- nullptr, ignore_case(), unicode(), zone());
+ RegExpParserState initial_state(nullptr, INITIAL, RegExpLookaround::LOOKAHEAD,
+ 0, nullptr, top_level_flags_, zone());
RegExpParserState* state = &initial_state;
// Cache the builder in a local variable for quick access.
RegExpBuilder* builder = initial_state.builder();
@@ -255,12 +252,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(CStrVector("Nothing to repeat"));
case '^': {
Advance();
- if (multiline()) {
- builder->AddAssertion(
- new (zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
+ if (builder->multiline()) {
+ builder->AddAssertion(new (zone()) RegExpAssertion(
+ RegExpAssertion::START_OF_LINE, builder->flags()));
} else {
- builder->AddAssertion(
- new (zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+ builder->AddAssertion(new (zone()) RegExpAssertion(
+ RegExpAssertion::START_OF_INPUT, builder->flags()));
set_contains_anchor();
}
continue;
@@ -268,9 +265,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '$': {
Advance();
RegExpAssertion::AssertionType assertion_type =
- multiline() ? RegExpAssertion::END_OF_LINE
- : RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new (zone()) RegExpAssertion(assertion_type));
+ builder->multiline() ? RegExpAssertion::END_OF_LINE
+ : RegExpAssertion::END_OF_INPUT;
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(assertion_type, builder->flags()));
continue;
}
case '.': {
@@ -278,87 +276,26 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
- if (dotall()) {
+ if (builder->dotall()) {
// Everything.
- DCHECK(FLAG_harmony_regexp_dotall);
CharacterRange::AddClassEscape('*', ranges, false, zone());
} else {
// Everything except \x0a, \x0d, \u2028 and \u2029
CharacterRange::AddClassEscape('.', ranges, false, zone());
}
- RegExpCharacterClass* cc = new (zone()) RegExpCharacterClass(ranges);
+ RegExpCharacterClass* cc =
+ new (zone()) RegExpCharacterClass(ranges, builder->flags());
builder->AddCharacterClass(cc);
break;
}
case '(': {
- SubexpressionType subexpr_type = CAPTURE;
- RegExpLookaround::Type lookaround_type = state->lookaround_type();
- bool is_named_capture = false;
- Advance();
- if (current() == '?') {
- switch (Next()) {
- case ':':
- subexpr_type = GROUPING;
- Advance(2);
- break;
- case '=':
- lookaround_type = RegExpLookaround::LOOKAHEAD;
- subexpr_type = POSITIVE_LOOKAROUND;
- Advance(2);
- break;
- case '!':
- lookaround_type = RegExpLookaround::LOOKAHEAD;
- subexpr_type = NEGATIVE_LOOKAROUND;
- Advance(2);
- break;
- case '<':
- Advance();
- if (FLAG_harmony_regexp_lookbehind) {
- if (Next() == '=') {
- subexpr_type = POSITIVE_LOOKAROUND;
- lookaround_type = RegExpLookaround::LOOKBEHIND;
- Advance(2);
- break;
- } else if (Next() == '!') {
- subexpr_type = NEGATIVE_LOOKAROUND;
- lookaround_type = RegExpLookaround::LOOKBEHIND;
- Advance(2);
- break;
- }
- }
- if (FLAG_harmony_regexp_named_captures) {
- has_named_captures_ = true;
- is_named_capture = true;
- Advance();
- break;
- }
- // Fall through.
- default:
- return ReportError(CStrVector("Invalid group"));
- }
- }
-
- const ZoneVector<uc16>* capture_name = nullptr;
- if (subexpr_type == CAPTURE) {
- if (captures_started_ >= kMaxCaptures) {
- return ReportError(CStrVector("Too many captures"));
- }
- captures_started_++;
-
- if (is_named_capture) {
- capture_name = ParseCaptureGroupName(CHECK_FAILED);
- }
- }
- // Store current state and begin new disjunction parsing.
- state = new (zone()) RegExpParserState(
- state, subexpr_type, lookaround_type, captures_started_,
- capture_name, ignore_case(), unicode(), zone());
+ state = ParseOpenParenthesis(state CHECK_FAILED);
builder = state->builder();
continue;
}
case '[': {
- RegExpTree* cc = ParseCharacterClass(CHECK_FAILED);
+ RegExpTree* cc = ParseCharacterClass(builder CHECK_FAILED);
builder->AddCharacterClass(cc->AsCharacterClass());
break;
}
@@ -370,13 +307,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(CStrVector("\\ at end of pattern"));
case 'b':
Advance(2);
- builder->AddAssertion(
- new (zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
+ builder->AddAssertion(new (zone()) RegExpAssertion(
+ RegExpAssertion::BOUNDARY, builder->flags()));
continue;
case 'B':
Advance(2);
- builder->AddAssertion(
- new (zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+ builder->AddAssertion(new (zone()) RegExpAssertion(
+ RegExpAssertion::NON_BOUNDARY, builder->flags()));
continue;
// AtomEscape ::
// CharacterClassEscape
@@ -393,10 +330,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape(c, ranges,
- unicode() && ignore_case(), zone());
+ CharacterRange::AddClassEscape(
+ c, ranges, unicode() && builder->ignore_case(), zone());
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges);
+ new (zone()) RegExpCharacterClass(ranges, builder->flags());
builder->AddCharacterClass(cc);
break;
}
@@ -412,7 +349,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(CStrVector("Invalid property name"));
}
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges);
+ new (zone()) RegExpCharacterClass(ranges, builder->flags());
builder->AddCharacterClass(cc);
} else {
// With /u, no identity escapes except for syntax characters
@@ -445,7 +382,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddEmpty();
} else {
RegExpCapture* capture = GetCapture(index);
- RegExpTree* atom = new (zone()) RegExpBackReference(capture);
+ RegExpTree* atom =
+ new (zone()) RegExpBackReference(capture, builder->flags());
builder->AddAtom(atom);
}
break;
@@ -640,6 +578,135 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
}
+RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
+ RegExpParserState* state) {
+ RegExpLookaround::Type lookaround_type = state->lookaround_type();
+ bool is_named_capture = false;
+ JSRegExp::Flags switch_on = JSRegExp::kNone;
+ JSRegExp::Flags switch_off = JSRegExp::kNone;
+ const ZoneVector<uc16>* capture_name = nullptr;
+ SubexpressionType subexpr_type = CAPTURE;
+ Advance();
+ if (current() == '?') {
+ switch (Next()) {
+ case ':':
+ Advance(2);
+ subexpr_type = GROUPING;
+ break;
+ case '=':
+ Advance(2);
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ case '!':
+ Advance(2);
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ case '-':
+ case 'i':
+ case 's':
+ case 'm': {
+ if (!FLAG_regexp_mode_modifiers) {
+ ReportError(CStrVector("Invalid group"));
+ return nullptr;
+ }
+ Advance();
+ bool flags_sense = true; // Switching on flags.
+ while (subexpr_type != GROUPING) {
+ switch (current()) {
+ case '-':
+ if (!flags_sense) {
+ ReportError(CStrVector("Multiple dashes in flag group"));
+ return nullptr;
+ }
+ flags_sense = false;
+ Advance();
+ continue;
+ case 's':
+ case 'i':
+ case 'm': {
+ JSRegExp::Flags bit = JSRegExp::kUnicode;
+ if (current() == 'i') bit = JSRegExp::kIgnoreCase;
+ if (current() == 'm') bit = JSRegExp::kMultiline;
+ if (current() == 's') bit = JSRegExp::kDotAll;
+ if (((switch_on | switch_off) & bit) != 0) {
+ ReportError(CStrVector("Repeated flag in flag group"));
+ return nullptr;
+ }
+ if (flags_sense) {
+ switch_on |= bit;
+ } else {
+ switch_off |= bit;
+ }
+ Advance();
+ continue;
+ }
+ case ')': {
+ Advance();
+ state->builder()
+ ->FlushText(); // Flush pending text using old flags.
+ // These (?i)-style flag switches don't put us in a subexpression
+ // at all, they just modify the flags in the rest of the current
+ // subexpression.
+ JSRegExp::Flags flags =
+ (state->builder()->flags() | switch_on) & ~switch_off;
+ state->builder()->set_flags(flags);
+ return state;
+ }
+ case ':':
+ Advance();
+ subexpr_type = GROUPING; // Will break us out of the outer loop.
+ continue;
+ default:
+ ReportError(CStrVector("Invalid flag group"));
+ return nullptr;
+ }
+ }
+ break;
+ }
+ case '<':
+ Advance();
+ if (Next() == '=') {
+ Advance(2);
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ } else if (Next() == '!') {
+ Advance(2);
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ }
+ if (FLAG_harmony_regexp_named_captures) {
+ is_named_capture = true;
+ has_named_captures_ = true;
+ Advance();
+ break;
+ }
+ // Fall through.
+ default:
+ ReportError(CStrVector("Invalid group"));
+ return nullptr;
+ }
+ }
+ if (subexpr_type == CAPTURE) {
+ if (captures_started_ >= kMaxCaptures) {
+ ReportError(CStrVector("Too many captures"));
+ return nullptr;
+ }
+ captures_started_++;
+
+ if (is_named_capture) {
+ capture_name = ParseCaptureGroupName(CHECK_FAILED);
+ }
+ }
+ JSRegExp::Flags flags = (state->builder()->flags() | switch_on) & ~switch_off;
+ // Store current state and begin new disjunction parsing.
+ return new (zone())
+ RegExpParserState(state, subexpr_type, lookaround_type, captures_started_,
+ capture_name, flags, zone());
+}
#ifdef DEBUG
// Currently only used in an DCHECK.
@@ -703,10 +770,8 @@ void RegExpParser::ScanForCaptures() {
Advance();
if (current() != '<') break;
- if (FLAG_harmony_regexp_lookbehind) {
- Advance();
- if (current() == '=' || current() == '!') break;
- }
+ Advance();
+ if (current() == '=' || current() == '!') break;
// Found a possible named capture. It could turn out to be a syntax
// error (e.g. an unterminated or invalid name), but that distinction
@@ -857,7 +922,8 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
if (state->IsInsideCaptureGroup(name)) {
builder->AddEmpty();
} else {
- RegExpBackReference* atom = new (zone()) RegExpBackReference();
+ RegExpBackReference* atom =
+ new (zone()) RegExpBackReference(builder->flags());
atom->set_name(name);
builder->AddAtom(atom);
@@ -909,7 +975,7 @@ RegExpCapture* RegExpParser::GetCapture(int index) {
int know_captures =
is_scanned_for_captures_ ? capture_count_ : captures_started_;
DCHECK(index <= know_captures);
- if (captures_ == NULL) {
+ if (captures_ == nullptr) {
captures_ = new (zone()) ZoneList<RegExpCapture*>(know_captures, zone());
}
while (captures_->length() < know_captures) {
@@ -948,7 +1014,7 @@ bool RegExpParser::HasNamedCaptures() {
}
bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
- for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
+ for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
if (s->group_type() != CAPTURE) continue;
// Return true if we found the matching capture index.
if (index == s->capture_index()) return true;
@@ -961,7 +1027,7 @@ bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
const ZoneVector<uc16>* name) {
DCHECK_NOT_NULL(name);
- for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
+ for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
if (s->capture_name() == nullptr) continue;
if (*s->capture_name() == *name) return true;
}
@@ -1115,11 +1181,12 @@ namespace {
bool IsExactPropertyAlias(const char* property_name, UProperty property) {
const char* short_name = u_getPropertyName(property, U_SHORT_PROPERTY_NAME);
- if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
+ if (short_name != nullptr && strcmp(property_name, short_name) == 0)
+ return true;
for (int i = 0;; i++) {
const char* long_name = u_getPropertyName(
property, static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
- if (long_name == NULL) break;
+ if (long_name == nullptr) break;
if (strcmp(property_name, long_name) == 0) return true;
}
return false;
@@ -1129,14 +1196,14 @@ bool IsExactPropertyValueAlias(const char* property_value_name,
UProperty property, int32_t property_value) {
const char* short_name =
u_getPropertyValueName(property, property_value, U_SHORT_PROPERTY_NAME);
- if (short_name != NULL && strcmp(property_value_name, short_name) == 0) {
+ if (short_name != nullptr && strcmp(property_value_name, short_name) == 0) {
return true;
}
for (int i = 0;; i++) {
const char* long_name = u_getPropertyValueName(
property, property_value,
static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
- if (long_name == NULL) break;
+ if (long_name == nullptr) break;
if (strcmp(property_value_name, long_name) == 0) return true;
}
return false;
@@ -1226,9 +1293,9 @@ bool IsSupportedBinaryProperty(UProperty property) {
case UCHAR_DEPRECATED:
case UCHAR_DIACRITIC:
case UCHAR_EMOJI:
- // TODO(yangguo): Uncomment this once we upgrade to ICU 60.
- // See https://ssl.icu-project.org/trac/ticket/13062
- // case UCHAR_EMOJI_COMPONENT:
+#if U_ICU_VERSION_MAJOR_NUM >= 60
+ case UCHAR_EMOJI_COMPONENT:
+#endif
case UCHAR_EMOJI_MODIFIER_BASE:
case UCHAR_EMOJI_MODIFIER:
case UCHAR_EMOJI_PRESENTATION:
@@ -1250,6 +1317,9 @@ bool IsSupportedBinaryProperty(UProperty property) {
case UCHAR_PATTERN_WHITE_SPACE:
case UCHAR_QUOTATION_MARK:
case UCHAR_RADICAL:
+#if U_ICU_VERSION_MAJOR_NUM >= 60
+ case UCHAR_REGIONAL_INDICATOR:
+#endif
case UCHAR_S_TERM:
case UCHAR_SOFT_DOTTED:
case UCHAR_TERMINAL_PUNCTUATION:
@@ -1266,6 +1336,19 @@ bool IsSupportedBinaryProperty(UProperty property) {
return false;
}
+bool IsUnicodePropertyValueCharacter(char c) {
+ // https://tc39.github.io/proposal-regexp-unicode-property-escapes/
+ //
+ // Note that using this to validate each parsed char is quite conservative.
+ // A possible alternative solution would be to only ensure the parsed
+ // property name/value candidate string does not contain '\0' characters and
+ // let ICU lookups trigger the final failure.
+ if ('a' <= c && c <= 'z') return true;
+ if ('A' <= c && c <= 'Z') return true;
+ if ('0' <= c && c <= '9') return true;
+ return (c == '_');
+}
+
} // anonymous namespace
bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
@@ -1283,11 +1366,13 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
if (current() == '{') {
// Parse \p{[PropertyName=]PropertyNameValue}
for (Advance(); current() != '}' && current() != '='; Advance()) {
+ if (!IsUnicodePropertyValueCharacter(current())) return false;
if (!has_next()) return false;
first_part.push_back(static_cast<char>(current()));
}
if (current() == '=') {
for (Advance(); current() != '}'; Advance()) {
+ if (!IsUnicodePropertyValueCharacter(current())) return false;
if (!has_next()) return false;
second_part.push_back(static_cast<char>(current()));
}
@@ -1299,6 +1384,10 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
Advance();
first_part.push_back(0); // null-terminate string.
+ DCHECK(first_part.size() - 1 == std::strlen(first_part.data()));
+ DCHECK(second_part.empty() ||
+ second_part.size() - 1 == std::strlen(second_part.data()));
+
if (second_part.empty()) {
// First attempt to interpret as general category property value name.
const char* name = first_part.data();
@@ -1526,7 +1615,7 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
}
}
-RegExpTree* RegExpParser::ParseCharacterClass() {
+RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
static const char* kUnterminated = "Unterminated character class";
static const char* kRangeInvalid = "Invalid character class";
static const char* kRangeOutOfOrder = "Range out of order in character class";
@@ -1540,7 +1629,7 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
}
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
- bool add_unicode_case_equivalents = unicode() && ignore_case();
+ bool add_unicode_case_equivalents = unicode() && builder->ignore_case();
while (has_more() && current() != ']') {
uc32 char_1, char_2;
bool is_class_1, is_class_2;
@@ -1587,9 +1676,10 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
ranges->Add(CharacterRange::Everything(), zone());
is_negated = !is_negated;
}
- RegExpCharacterClass::Flags flags;
- if (is_negated) flags = RegExpCharacterClass::NEGATED;
- return new (zone()) RegExpCharacterClass(ranges, flags);
+ RegExpCharacterClass::CharacterClassFlags character_class_flags;
+ if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
+ return new (zone())
+ RegExpCharacterClass(ranges, builder->flags(), character_class_flags);
}
@@ -1599,14 +1689,14 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
FlatStringReader* input, JSRegExp::Flags flags,
RegExpCompileData* result) {
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
RegExpParser parser(input, &result->error, flags, isolate, zone);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
- DCHECK(tree == NULL);
+ DCHECK(tree == nullptr);
DCHECK(!result->error.is_null());
} else {
- DCHECK(tree != NULL);
+ DCHECK(tree != nullptr);
DCHECK(result->error.is_null());
if (FLAG_trace_regexp_parser) {
OFStream os(stdout);
@@ -1623,12 +1713,11 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
return !parser.failed();
}
-RegExpBuilder::RegExpBuilder(Zone* zone, bool ignore_case, bool unicode)
+RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
: zone_(zone),
pending_empty_(false),
- ignore_case_(ignore_case),
- unicode_(unicode),
- characters_(NULL),
+ flags_(flags),
+ characters_(nullptr),
pending_surrogate_(kNoPendingSurrogate),
terms_(),
alternatives_()
@@ -1663,7 +1752,7 @@ void RegExpBuilder::AddTrailSurrogate(uc16 trail_surrogate) {
surrogate_pair.Add(lead_surrogate, zone());
surrogate_pair.Add(trail_surrogate, zone());
RegExpAtom* atom =
- new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
+ new (zone()) RegExpAtom(surrogate_pair.ToConstVector(), flags_);
AddAtom(atom);
}
} else {
@@ -1686,9 +1775,10 @@ void RegExpBuilder::FlushPendingSurrogate() {
void RegExpBuilder::FlushCharacters() {
FlushPendingSurrogate();
pending_empty_ = false;
- if (characters_ != NULL) {
- RegExpTree* atom = new (zone()) RegExpAtom(characters_->ToConstVector());
- characters_ = NULL;
+ if (characters_ != nullptr) {
+ RegExpTree* atom =
+ new (zone()) RegExpAtom(characters_->ToConstVector(), flags_);
+ characters_ = nullptr;
text_.Add(atom, zone());
LAST(ADD_ATOM);
}
@@ -1717,7 +1807,7 @@ void RegExpBuilder::AddCharacter(uc16 c) {
if (NeedsDesugaringForIgnoreCase(c)) {
AddCharacterClassForDesugaring(c);
} else {
- if (characters_ == NULL) {
+ if (characters_ == nullptr) {
characters_ = new (zone()) ZoneList<uc16>(4, zone());
}
characters_->Add(c, zone());
@@ -1763,7 +1853,7 @@ void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
void RegExpBuilder::AddCharacterClassForDesugaring(uc32 c) {
AddTerm(new (zone()) RegExpCharacterClass(
- CharacterRange::List(zone(), CharacterRange::Singleton(c))));
+ CharacterRange::List(zone(), CharacterRange::Singleton(c)), flags_));
}
@@ -1874,18 +1964,18 @@ bool RegExpBuilder::AddQuantifierToAtom(
return true;
}
RegExpTree* atom;
- if (characters_ != NULL) {
+ if (characters_ != nullptr) {
DCHECK(last_added_ == ADD_CHAR);
// Last atom was character.
Vector<const uc16> char_vector = characters_->ToConstVector();
int num_chars = char_vector.length();
if (num_chars > 1) {
Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new (zone()) RegExpAtom(prefix), zone());
+ text_.Add(new (zone()) RegExpAtom(prefix, flags_), zone());
char_vector = char_vector.SubVector(num_chars - 1, num_chars);
}
- characters_ = NULL;
- atom = new (zone()) RegExpAtom(char_vector);
+ characters_ = nullptr;
+ atom = new (zone()) RegExpAtom(char_vector, flags_);
FlushText();
} else if (text_.length() > 0) {
DCHECK(last_added_ == ADD_ATOM);
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index e26be624b4..56d4ac8599 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_PARSER_H_
#include "src/objects.h"
+#include "src/objects/js-regexp.h"
#include "src/regexp/regexp-ast.h"
#include "src/zone/zone.h"
@@ -14,24 +15,23 @@ namespace internal {
struct RegExpCompileData;
-
// A BufferedZoneList is an automatically growing list, just like (and backed
// by) a ZoneList, that is optimized for the case of adding and removing
// a single element. The last element added is stored outside the backing list,
// and if no more than one element is ever added, the ZoneList isn't even
// allocated.
-// Elements must not be NULL pointers.
+// Elements must not be nullptr pointers.
template <typename T, int initial_size>
class BufferedZoneList {
public:
- BufferedZoneList() : list_(NULL), last_(NULL) {}
+ BufferedZoneList() : list_(nullptr), last_(nullptr) {}
// Adds element at end of list. This element is buffered and can
// be read using last() or removed using RemoveLast until a new Add or until
// RemoveLast or GetList has been called.
void Add(T* value, Zone* zone) {
- if (last_ != NULL) {
- if (list_ == NULL) {
+ if (last_ != nullptr) {
+ if (list_ == nullptr) {
list_ = new (zone) ZoneList<T*>(initial_size, zone);
}
list_->Add(last_, zone);
@@ -40,28 +40,28 @@ class BufferedZoneList {
}
T* last() {
- DCHECK(last_ != NULL);
+ DCHECK(last_ != nullptr);
return last_;
}
T* RemoveLast() {
- DCHECK(last_ != NULL);
+ DCHECK(last_ != nullptr);
T* result = last_;
- if ((list_ != NULL) && (list_->length() > 0))
+ if ((list_ != nullptr) && (list_->length() > 0))
last_ = list_->RemoveLast();
else
- last_ = NULL;
+ last_ = nullptr;
return result;
}
T* Get(int i) {
DCHECK((0 <= i) && (i < length()));
- if (list_ == NULL) {
+ if (list_ == nullptr) {
DCHECK_EQ(0, i);
return last_;
} else {
if (i == list_->length()) {
- DCHECK(last_ != NULL);
+ DCHECK(last_ != nullptr);
return last_;
} else {
return list_->at(i);
@@ -70,22 +70,22 @@ class BufferedZoneList {
}
void Clear() {
- list_ = NULL;
- last_ = NULL;
+ list_ = nullptr;
+ last_ = nullptr;
}
int length() {
- int length = (list_ == NULL) ? 0 : list_->length();
- return length + ((last_ == NULL) ? 0 : 1);
+ int length = (list_ == nullptr) ? 0 : list_->length();
+ return length + ((last_ == nullptr) ? 0 : 1);
}
ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == NULL) {
+ if (list_ == nullptr) {
list_ = new (zone) ZoneList<T*>(initial_size, zone);
}
- if (last_ != NULL) {
+ if (last_ != nullptr) {
list_->Add(last_, zone);
- last_ = NULL;
+ last_ = nullptr;
}
return list_;
}
@@ -99,7 +99,7 @@ class BufferedZoneList {
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder : public ZoneObject {
public:
- RegExpBuilder(Zone* zone, bool ignore_case, bool unicode);
+ RegExpBuilder(Zone* zone, JSRegExp::Flags flags);
void AddCharacter(uc16 character);
void AddUnicodeCharacter(uc32 character);
void AddEscapedUnicodeCharacter(uc32 character);
@@ -114,7 +114,14 @@ class RegExpBuilder : public ZoneObject {
void NewAlternative(); // '|'
bool AddQuantifierToAtom(int min, int max,
RegExpQuantifier::QuantifierType type);
+ void FlushText();
RegExpTree* ToRegExp();
+ JSRegExp::Flags flags() const { return flags_; }
+ void set_flags(JSRegExp::Flags flags) { flags_ = flags; }
+
+ bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
+ bool multiline() const { return (flags_ & JSRegExp::kMultiline) != 0; }
+ bool dotall() const { return (flags_ & JSRegExp::kDotAll) != 0; }
private:
static const uc16 kNoPendingSurrogate = 0;
@@ -122,18 +129,15 @@ class RegExpBuilder : public ZoneObject {
void AddTrailSurrogate(uc16 trail_surrogate);
void FlushPendingSurrogate();
void FlushCharacters();
- void FlushText();
void FlushTerms();
bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
bool NeedsDesugaringForIgnoreCase(uc32 c);
Zone* zone() const { return zone_; }
- bool ignore_case() const { return ignore_case_; }
- bool unicode() const { return unicode_; }
+ bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
Zone* zone_;
bool pending_empty_;
- bool ignore_case_;
- bool unicode_;
+ JSRegExp::Flags flags_;
ZoneList<uc16>* characters_;
uc16 pending_surrogate_;
BufferedZoneList<RegExpTree, 2> terms_;
@@ -159,7 +163,6 @@ class RegExpParser BASE_EMBEDDED {
RegExpTree* ParsePattern();
RegExpTree* ParseDisjunction();
RegExpTree* ParseGroup();
- RegExpTree* ParseCharacterClass();
// Parses a {...,...} quantifier and stores the range in the given
// out parameters.
@@ -175,6 +178,7 @@ class RegExpParser BASE_EMBEDDED {
bool ParseUnicodeEscape(uc32* value);
bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
bool ParsePropertyClass(ZoneList<CharacterRange>* result, bool negate);
+ RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
uc32 ParseOctalLiteral();
@@ -205,10 +209,9 @@ class RegExpParser BASE_EMBEDDED {
int captures_started() { return captures_started_; }
int position() { return next_pos_ - 1; }
bool failed() { return failed_; }
- bool dotall() const { return dotall_; }
- bool ignore_case() const { return ignore_case_; }
- bool multiline() const { return multiline_; }
- bool unicode() const { return unicode_; }
+ // The Unicode flag can't be changed using in-regexp syntax, so it's OK to
+ // just read the initial flag value here.
+ bool unicode() const { return (top_level_flags_ & JSRegExp::kUnicode) != 0; }
static bool IsSyntaxCharacterOrSlash(uc32 c);
@@ -226,34 +229,35 @@ class RegExpParser BASE_EMBEDDED {
class RegExpParserState : public ZoneObject {
public:
+ // Push a state on the stack.
RegExpParserState(RegExpParserState* previous_state,
SubexpressionType group_type,
RegExpLookaround::Type lookaround_type,
int disjunction_capture_index,
- const ZoneVector<uc16>* capture_name, bool ignore_case,
- bool unicode, Zone* zone)
+ const ZoneVector<uc16>* capture_name,
+ JSRegExp::Flags flags, Zone* zone)
: previous_state_(previous_state),
- builder_(new (zone) RegExpBuilder(zone, ignore_case, unicode)),
+ builder_(new (zone) RegExpBuilder(zone, flags)),
group_type_(group_type),
lookaround_type_(lookaround_type),
disjunction_capture_index_(disjunction_capture_index),
capture_name_(capture_name) {}
// Parser state of containing expression, if any.
- RegExpParserState* previous_state() { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != NULL; }
+ RegExpParserState* previous_state() const { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != nullptr; }
// RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() { return builder_; }
+ RegExpBuilder* builder() const { return builder_; }
// Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() { return group_type_; }
+ SubexpressionType group_type() const { return group_type_; }
// Lookahead or Lookbehind.
- RegExpLookaround::Type lookaround_type() { return lookaround_type_; }
+ RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
// Index in captures array of first capture in this sub-expression, if any.
// Also the capture index of this sub-expression itself, if group_type
// is CAPTURE.
- int capture_index() { return disjunction_capture_index_; }
+ int capture_index() const { return disjunction_capture_index_; }
// The name of the current sub-expression, if group_type is CAPTURE. Only
// used for named captures.
- const ZoneVector<uc16>* capture_name() { return capture_name_; }
+ const ZoneVector<uc16>* capture_name() const { return capture_name_; }
bool IsNamedCapture() const { return capture_name_ != nullptr; }
@@ -264,17 +268,17 @@ class RegExpParser BASE_EMBEDDED {
private:
// Linked list implementation of stack of states.
- RegExpParserState* previous_state_;
+ RegExpParserState* const previous_state_;
// Builder for the stored disjunction.
- RegExpBuilder* builder_;
+ RegExpBuilder* const builder_;
// Stored disjunction type (capture, look-ahead or grouping), if any.
- SubexpressionType group_type_;
+ const SubexpressionType group_type_;
// Stored read direction.
- RegExpLookaround::Type lookaround_type_;
+ const RegExpLookaround::Type lookaround_type_;
// Stored disjunction's capture index (if any).
- int disjunction_capture_index_;
+ const int disjunction_capture_index_;
// Stored capture name (if any).
- const ZoneVector<uc16>* capture_name_;
+ const ZoneVector<uc16>* const capture_name_;
};
// Return the 1-indexed RegExpCapture object, allocate if necessary.
@@ -291,6 +295,7 @@ class RegExpParser BASE_EMBEDDED {
bool ParseNamedBackReference(RegExpBuilder* builder,
RegExpParserState* state);
+ RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
// After the initial parsing pass, patch corresponding RegExpCapture objects
// into all RegExpBackReferences. This is done after initial parsing in order
@@ -323,10 +328,10 @@ class RegExpParser BASE_EMBEDDED {
ZoneList<RegExpBackReference*>* named_back_references_;
FlatStringReader* in_;
uc32 current_;
- bool dotall_;
- bool ignore_case_;
- bool multiline_;
- bool unicode_;
+ // These are the flags specified outside the regexp syntax ie after the
+ // terminating '/' or in the second argument to the constructor. The current
+ // flags are stored on the RegExpBuilder.
+ JSRegExp::Flags top_level_flags_;
int next_pos_;
int captures_started_;
int capture_count_; // Only valid after we have scanned for captures.
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 34f9127f2f..64a694cd42 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -21,11 +21,7 @@ RegExpStackScope::~RegExpStackScope() {
regexp_stack_->Reset();
}
-
-RegExpStack::RegExpStack()
- : isolate_(NULL) {
-}
-
+RegExpStack::RegExpStack() : isolate_(nullptr) {}
RegExpStack::~RegExpStack() {
thread_local_.Free();
@@ -64,7 +60,7 @@ void RegExpStack::ThreadLocal::Free() {
Address RegExpStack::EnsureCapacity(size_t size) {
- if (size > kMaximumStackSize) return NULL;
+ if (size > kMaximumStackSize) return nullptr;
if (size < kMinimumStackSize) size = kMinimumStackSize;
if (thread_local_.memory_size_ < size) {
Address new_memory = NewArray<byte>(static_cast<int>(size));
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index 03df6cd15e..760f16f24f 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -87,12 +87,12 @@ class RegExpStack {
// Structure holding the allocated memory, size and limit.
struct ThreadLocal {
ThreadLocal() { Clear(); }
- // If memory_size_ > 0 then memory_ must be non-NULL.
+ // If memory_size_ > 0 then memory_ must be non-nullptr.
Address memory_;
size_t memory_size_;
Address limit_;
void Clear() {
- memory_ = NULL;
+ memory_ = nullptr;
memory_size_ = 0;
limit_ = reinterpret_cast<Address>(kMemoryTop);
}
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index b683db27f3..16427e2933 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -43,14 +43,15 @@ V8_INLINE bool HasInitialRegExpMap(Isolate* isolate, Handle<JSReceiver> recv) {
MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
Handle<JSReceiver> recv,
- int value) {
+ uint64_t value) {
+ Handle<Object> value_as_object =
+ isolate->factory()->NewNumberFromInt64(value);
if (HasInitialRegExpMap(isolate, recv)) {
- JSRegExp::cast(*recv)->set_last_index(Smi::FromInt(value),
- SKIP_WRITE_BARRIER);
+ JSRegExp::cast(*recv)->set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
return recv;
} else {
return Object::SetProperty(recv, isolate->factory()->lastIndex_string(),
- handle(Smi::FromInt(value), isolate), STRICT);
+ value_as_object, LanguageMode::kStrict);
}
}
@@ -133,6 +134,10 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// TODO(ishell): Update this check once map changes for constant field
// tracking are landing.
+#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+ if (isolate->force_slow_path()) return false;
+#endif
+
if (!obj->IsJSReceiver()) return false;
JSReceiver* recv = JSReceiver::cast(*obj);
@@ -156,12 +161,16 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
return last_index->IsSmi() && Smi::ToInt(last_index) >= 0;
}
-int RegExpUtils::AdvanceStringIndex(Isolate* isolate, Handle<String> string,
- int index, bool unicode) {
- if (unicode && index < string->length()) {
- const uint16_t first = string->Get(index);
- if (first >= 0xD800 && first <= 0xDBFF && string->length() > index + 1) {
- const uint16_t second = string->Get(index + 1);
+uint64_t RegExpUtils::AdvanceStringIndex(Isolate* isolate,
+ Handle<String> string, uint64_t index,
+ bool unicode) {
+ DCHECK_LE(static_cast<double>(index), kMaxSafeInteger);
+ const uint64_t string_length = static_cast<uint64_t>(string->length());
+ if (unicode && index < string_length) {
+ const uint16_t first = string->Get(static_cast<uint32_t>(index));
+ if (first >= 0xD800 && first <= 0xDBFF && index + 1 < string_length) {
+ DCHECK_LT(index, std::numeric_limits<uint64_t>::max());
+ const uint16_t second = string->Get(static_cast<uint32_t>(index + 1));
if (second >= 0xDC00 && second <= 0xDFFF) {
return index + 2;
}
@@ -182,8 +191,8 @@ MaybeHandle<Object> RegExpUtils::SetAdvancedStringIndex(
ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
Object::ToLength(isolate, last_index_obj), Object);
- const int last_index = PositiveNumberToUint32(*last_index_obj);
- const int new_last_index =
+ const uint64_t last_index = PositiveNumberToUint64(*last_index_obj);
+ const uint64_t new_last_index =
AdvanceStringIndex(isolate, string, last_index, unicode);
return SetLastIndex(isolate, regexp, new_last_index);
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index eb5f85c0bd..4161337ad4 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -22,7 +22,7 @@ class RegExpUtils : public AllStatic {
// Last index (RegExp.lastIndex) accessors.
static MUST_USE_RESULT MaybeHandle<Object> SetLastIndex(
- Isolate* isolate, Handle<JSReceiver> regexp, int value);
+ Isolate* isolate, Handle<JSReceiver> regexp, uint64_t value);
static MUST_USE_RESULT MaybeHandle<Object> GetLastIndex(
Isolate* isolate, Handle<JSReceiver> recv);
@@ -41,8 +41,8 @@ class RegExpUtils : public AllStatic {
// ES#sec-advancestringindex
// AdvanceStringIndex ( S, index, unicode )
- static int AdvanceStringIndex(Isolate* isolate, Handle<String> string,
- int index, bool unicode);
+ static uint64_t AdvanceStringIndex(Isolate* isolate, Handle<String> string,
+ uint64_t index, bool unicode);
static MUST_USE_RESULT MaybeHandle<Object> SetAdvancedStringIndex(
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
bool unicode);
diff --git a/deps/v8/src/regexp/s390/OWNERS b/deps/v8/src/regexp/s390/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/regexp/s390/OWNERS
+++ b/deps/v8/src/regexp/s390/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 8449c631d3..fc9548fc78 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -98,7 +98,7 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
@@ -910,7 +910,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ CmpP(r2, Operand::Zero());
__ beq(&exit_with_exception);
@@ -1136,14 +1136,14 @@ void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
void RegExpMacroAssemblerS390::BranchOrBacktrack(Condition condition, Label* to,
CRegister cr) {
if (condition == al) { // Unconditional.
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ b(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ b(condition, &backtrack_label_);
return;
}
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 72d0cf627d..94603cd7c9 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -160,7 +160,7 @@ class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to, CRegister cr = cr7);
// Call and return internally in the generated code in a way that
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 5c03f65e3d..1e21182c35 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -97,7 +97,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(isolate, NULL, kRegExpCodeSize, CodeObjectRequired::kYes),
+ masm_(isolate, nullptr, kRegExpCodeSize, CodeObjectRequired::kYes),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),
@@ -237,7 +237,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
if (mode_ == LATIN1) {
Label loop_increment;
- if (on_no_match == NULL) {
+ if (on_no_match == nullptr) {
on_no_match = &backtrack_label_;
}
@@ -979,7 +979,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
+ // If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testp(rax, rax);
__ j(equal, &exit_with_exception);
@@ -1256,14 +1256,14 @@ void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
void RegExpMacroAssemblerX64::BranchOrBacktrack(Condition condition,
Label* to) {
if (condition < 0) { // No condition
- if (to == NULL) {
+ if (to == nullptr) {
Backtrack();
return;
}
__ jmp(to);
return;
}
- if (to == NULL) {
+ if (to == nullptr) {
__ j(condition, &backtrack_label_);
return;
}
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 4c37771d38..e0891f3876 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -211,7 +211,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
+ // is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 9c8869b1c2..6ecc5519f3 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -240,8 +240,9 @@ RegisterConfiguration::RegisterConfiguration(
float_register_names_(float_register_names),
double_register_names_(double_register_names),
simd128_register_names_(simd128_register_names) {
- DCHECK(num_general_registers_ <= RegisterConfiguration::kMaxGeneralRegisters);
- DCHECK(num_double_registers_ <= RegisterConfiguration::kMaxFPRegisters);
+ DCHECK_LE(num_general_registers_,
+ RegisterConfiguration::kMaxGeneralRegisters);
+ DCHECK_LE(num_double_registers_, RegisterConfiguration::kMaxFPRegisters);
for (int i = 0; i < num_allocatable_general_registers_; ++i) {
allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
}
diff --git a/deps/v8/src/reglist.h b/deps/v8/src/reglist.h
index f23c76e4b9..121fd2bea3 100644
--- a/deps/v8/src/reglist.h
+++ b/deps/v8/src/reglist.h
@@ -5,6 +5,11 @@
#ifndef V8_REGLIST_H_
#define V8_REGLIST_H_
+#include <cstdint>
+
+#include "src/base/bits.h"
+#include "src/base/template-utils.h"
+
namespace v8 {
namespace internal {
@@ -16,7 +21,25 @@ typedef uint32_t RegList;
#endif
// Get the number of registers in a given register list.
-inline int NumRegs(RegList list) { return base::bits::CountPopulation(list); }
+constexpr int NumRegs(RegList list) {
+ return base::bits::CountPopulation(list);
+}
+
+// Combine two RegLists by building the union of the contained registers.
+// Implemented as a Functor to pass it to base::fold even on gcc < 5 (see
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892).
+// TODO(clemensh): Remove this once we require gcc >= 5.0.
+struct CombineRegListsFunctor {
+ constexpr RegList operator()(RegList list1, RegList list2) const {
+ return list1 | list2;
+ }
+};
+
+// Combine several RegLists by building the union of the contained registers.
+template <typename... RegLists>
+constexpr RegList CombineRegLists(RegLists... lists) {
+ return base::fold(CombineRegListsFunctor{}, 0, lists...);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 815153a98a..58cf706d0c 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -123,11 +123,6 @@ void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
// If the code is not optimizable, don't try OSR.
if (shared->optimization_disabled()) return;
- // We are not prepared to do OSR for a function that already has an
- // allocated arguments object. The optimized code would bypass it for
- // arguments accesses, which is unsound. Don't try OSR.
- if (shared->uses_arguments()) return;
-
// We're using on-stack replacement: Store new loop nesting level in
// BytecodeArray header so that certain back edges in any interpreter frame
// for this bytecode will trigger on-stack replacement for that frame.
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 782acc72c5..f07c842bae 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -37,9 +37,9 @@ Object* PrepareSlowElementsForSort(Handle<JSObject> object, uint32_t limit) {
// Must stay in dictionary mode, either because of requires_slow_elements,
// or because we are not going to sort (and therefore compact) all of the
// elements.
- Handle<SeededNumberDictionary> dict(object->element_dictionary(), isolate);
- Handle<SeededNumberDictionary> new_dict =
- SeededNumberDictionary::New(isolate, dict->NumberOfElements());
+ Handle<NumberDictionary> dict(object->element_dictionary(), isolate);
+ Handle<NumberDictionary> new_dict =
+ NumberDictionary::New(isolate, dict->NumberOfElements());
uint32_t pos = 0;
uint32_t undefs = 0;
@@ -70,7 +70,7 @@ Object* PrepareSlowElementsForSort(Handle<JSObject> object, uint32_t limit) {
undefs++;
} else {
Handle<Object> result =
- SeededNumberDictionary::Add(new_dict, pos, value, details);
+ NumberDictionary::Add(new_dict, pos, value, details);
// Add should not grow the dictionary since we allocated the right size.
DCHECK(result.is_identical_to(new_dict));
USE(result);
@@ -78,7 +78,7 @@ Object* PrepareSlowElementsForSort(Handle<JSObject> object, uint32_t limit) {
}
} else {
Handle<Object> result =
- SeededNumberDictionary::Add(new_dict, key, value, details);
+ NumberDictionary::Add(new_dict, key, value, details);
// Add should not grow the dictionary since we allocated the right size.
DCHECK(result.is_identical_to(new_dict));
USE(result);
@@ -95,7 +95,7 @@ Object* PrepareSlowElementsForSort(Handle<JSObject> object, uint32_t limit) {
return bailout;
}
HandleScope scope(isolate);
- Handle<Object> result = SeededNumberDictionary::Add(
+ Handle<Object> result = NumberDictionary::Add(
new_dict, pos, isolate->factory()->undefined_value(), no_details);
// Add should not grow the dictionary since we allocated the right size.
DCHECK(result.is_identical_to(new_dict));
@@ -130,7 +130,7 @@ Object* PrepareElementsForSort(Handle<JSObject> object, uint32_t limit) {
if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- Handle<SeededNumberDictionary> dict(object->element_dictionary());
+ Handle<NumberDictionary> dict(object->element_dictionary());
if (object->IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
return PrepareSlowElementsForSort(object, limit);
@@ -294,7 +294,7 @@ RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
FixedArrayBase* elements = array->elements();
SealHandleScope shs(isolate);
if (elements->IsDictionary()) {
- int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
+ int result = NumberDictionary::cast(elements)->NumberOfElements();
return Smi::FromInt(result);
} else {
DCHECK(array->length()->IsSmi());
@@ -378,6 +378,44 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
return *isolate->factory()->NewJSArrayWithElements(keys);
}
+RUNTIME_FUNCTION(Runtime_TrySliceSimpleNonFastElements) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_SMI_ARG_CHECKED(first, 1);
+ CONVERT_SMI_ARG_CHECKED(count, 2);
+ uint32_t length = first + count;
+
+ // Only handle elements kinds that have a ElementsAccessor Slice
+ // implementation.
+ if (receiver->IsJSArray()) {
+ // This "fastish" path must make sure the destination array is a JSArray.
+ if (!isolate->IsArraySpeciesLookupChainIntact() ||
+ !JSArray::cast(*receiver)->HasArrayPrototype(isolate)) {
+ return Smi::FromInt(0);
+ }
+ } else {
+ int len;
+ if (!receiver->IsJSObject() ||
+ !JSSloppyArgumentsObject::GetSloppyArgumentsLength(
+ isolate, Handle<JSObject>::cast(receiver), &len) ||
+ (length > static_cast<uint32_t>(len))) {
+ return Smi::FromInt(0);
+ }
+ }
+
+ // This "fastish" path must also ensure that elements are simple (no
+ // geters/setters), no elements on prototype chain.
+ Handle<JSObject> object(Handle<JSObject>::cast(receiver));
+ if (!JSObject::PrototypeHasNoElements(isolate, *object) ||
+ object->HasComplexElements()) {
+ return Smi::FromInt(0);
+ }
+
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ return *accessor->Slice(object, first, length);
+}
+
RUNTIME_FUNCTION(Runtime_NewArray) {
HandleScope scope(isolate);
DCHECK_LE(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index d6b7dfb550..ce513d2f92 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -8,18 +8,57 @@
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
-#include "src/parsing/token.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_BigIntEqual) {
+RUNTIME_FUNCTION(Runtime_BigIntCompareToBigInt) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, rhs, 2);
+ bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
+ BigInt::CompareToBigInt(lhs, rhs));
+ return *isolate->factory()->ToBoolean(result);
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntCompareToNumber) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 2);
+ bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
+ BigInt::CompareToNumber(lhs, rhs));
+ return *isolate->factory()->ToBoolean(result);
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntEqualToBigInt) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, rhs, 1);
+ bool result = BigInt::EqualToBigInt(*lhs, *rhs);
+ return *isolate->factory()->ToBoolean(result);
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntEqualToNumber) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- bool result = lhs->IsBigInt() && rhs->IsBigInt() &&
- BigInt::Equal(BigInt::cast(*lhs), BigInt::cast(*rhs));
+ bool result = BigInt::EqualToNumber(lhs, rhs);
+ return *isolate->factory()->ToBoolean(result);
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, rhs, 1);
+ rhs = String::Flatten(rhs);
+ bool result = BigInt::EqualToString(lhs, rhs);
return *isolate->factory()->ToBoolean(result);
}
@@ -30,12 +69,20 @@ RUNTIME_FUNCTION(Runtime_BigIntToBoolean) {
return *isolate->factory()->ToBoolean(bigint->ToBoolean());
}
+RUNTIME_FUNCTION(Runtime_BigIntToNumber) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
+ return *BigInt::ToNumber(x);
+}
+
RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, left_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, right_obj, 1);
CONVERT_SMI_ARG_CHECKED(opcode, 2);
+ Operation op = static_cast<Operation>(opcode);
if (!left_obj->IsBigInt() || !right_obj->IsBigInt()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -44,22 +91,70 @@ RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
Handle<BigInt> left(Handle<BigInt>::cast(left_obj));
Handle<BigInt> right(Handle<BigInt>::cast(right_obj));
MaybeHandle<BigInt> result;
- switch (opcode) {
- case Token::ADD:
+ switch (op) {
+ case Operation::kAdd:
result = BigInt::Add(left, right);
break;
- case Token::SUB:
+ case Operation::kSubtract:
result = BigInt::Subtract(left, right);
break;
- case Token::MUL:
+ case Operation::kMultiply:
result = BigInt::Multiply(left, right);
break;
- case Token::DIV:
+ case Operation::kDivide:
result = BigInt::Divide(left, right);
break;
- case Token::MOD:
+ case Operation::kModulus:
result = BigInt::Remainder(left, right);
break;
+ case Operation::kExponentiate:
+ UNIMPLEMENTED();
+ break;
+ case Operation::kBitwiseAnd:
+ result = BigInt::BitwiseAnd(left, right);
+ break;
+ case Operation::kBitwiseOr:
+ result = BigInt::BitwiseOr(left, right);
+ break;
+ case Operation::kBitwiseXor:
+ result = BigInt::BitwiseXor(left, right);
+ break;
+ case Operation::kShiftLeft:
+ result = BigInt::LeftShift(left, right);
+ break;
+ case Operation::kShiftRight:
+ result = BigInt::SignedRightShift(left, right);
+ break;
+ case Operation::kShiftRightLogical:
+ result = BigInt::UnsignedRightShift(left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ RETURN_RESULT_OR_FAILURE(isolate, result);
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntUnaryOp) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
+ CONVERT_SMI_ARG_CHECKED(opcode, 1);
+ Operation op = static_cast<Operation>(opcode);
+
+ MaybeHandle<BigInt> result;
+ switch (op) {
+ case Operation::kBitwiseNot:
+ result = BigInt::BitwiseNot(x);
+ break;
+ case Operation::kNegate:
+ result = BigInt::UnaryMinus(x);
+ break;
+ case Operation::kIncrement:
+ result = BigInt::Increment(x);
+ break;
+ case Operation::kDecrement:
+ result = BigInt::Decrement(x);
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 815501bcfa..37e647c7dd 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -13,6 +13,7 @@
#include "src/elements.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -104,10 +105,449 @@ RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
return isolate->heap()->home_object_symbol();
}
-static MaybeHandle<Object> DefineClass(Isolate* isolate,
- Handle<Object> super_class,
- Handle<JSFunction> constructor,
- int start_position, int end_position) {
+namespace {
+
+template <typename Dictionary>
+Handle<Name> KeyToName(Isolate* isolate, Handle<Object> key);
+
+template <>
+Handle<Name> KeyToName<NameDictionary>(Isolate* isolate, Handle<Object> key) {
+ DCHECK(key->IsName());
+ return Handle<Name>::cast(key);
+}
+
+template <>
+Handle<Name> KeyToName<NumberDictionary>(Isolate* isolate, Handle<Object> key) {
+ DCHECK(key->IsNumber());
+ return isolate->factory()->NumberToString(key);
+}
+
+inline void SetHomeObject(Isolate* isolate, JSFunction* method,
+ JSObject* home_object) {
+ if (method->shared()->needs_home_object()) {
+ const int kPropertyIndex = JSFunction::kMaybeHomeObjectDescriptorIndex;
+ CHECK_EQ(method->map()->instance_descriptors()->GetKey(kPropertyIndex),
+ isolate->heap()->home_object_symbol());
+
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(method->map(), kPropertyIndex);
+ method->RawFastPropertyAtPut(field_index, home_object);
+ }
+}
+
+// Gets |index|'th argument which may be a class constructor object, a class
+// prototype object or a class method. In the latter case the following
+// post-processing may be required:
+// 1) set [[HomeObject]] slot to given |home_object| value if the method's
+// shared function info indicates that the method requires that;
+// 2) set method's name to a concatenation of |name_prefix| and |key| if the
+// method's shared function info indicates that method does not have a
+// shared name.
+template <typename Dictionary>
+MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
+ Isolate* isolate, Arguments& args, Smi* index, Handle<JSObject> home_object,
+ Handle<String> name_prefix, Handle<Object> key) {
+ int int_index = Smi::ToInt(index);
+
+ // Class constructor and prototype values do not require post processing.
+ if (int_index < ClassBoilerplate::kFirstDynamicArgumentIndex) {
+ return args.at<Object>(int_index);
+ }
+
+ Handle<JSFunction> method = args.at<JSFunction>(int_index);
+
+ SetHomeObject(isolate, *method, *home_object);
+
+ if (!method->shared()->has_shared_name()) {
+ // TODO(ishell): method does not have a shared name at this point only if
+ // the key is a computed property name. However, the bytecode generator
+ // explicitly generates ToName bytecodes to ensure that the computed
+ // property name is properly converted to Name. So, we can actually be smart
+ // here and avoid converting Smi keys back to Name.
+ Handle<Name> name = KeyToName<Dictionary>(isolate, key);
+ if (!JSFunction::SetName(method, name, name_prefix)) {
+ return MaybeHandle<Object>();
+ }
+ }
+ return method;
+}
+
+// Gets |index|'th argument which may be a class constructor object, a class
+// prototype object or a class method. In the latter case the following
+// post-processing may be required:
+// 1) set [[HomeObject]] slot to given |home_object| value if the method's
+// shared function info indicates that the method requires that;
+// This is a simplified version of GetMethodWithSharedNameAndSetHomeObject()
+// function above that is used when it's guaranteed that the method has
+// shared name.
+Object* GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
+ Arguments& args, Object* index,
+ JSObject* home_object) {
+ DisallowHeapAllocation no_gc;
+ int int_index = Smi::ToInt(index);
+
+ // Class constructor and prototype values do not require post processing.
+ if (int_index < ClassBoilerplate::kFirstDynamicArgumentIndex) {
+ return args[int_index];
+ }
+
+ Handle<JSFunction> method = args.at<JSFunction>(int_index);
+
+ SetHomeObject(isolate, *method, home_object);
+
+ DCHECK(method->shared()->has_shared_name());
+ return *method;
+}
+
+template <typename Dictionary>
+Handle<Dictionary> ShallowCopyDictionaryTemplate(
+ Isolate* isolate, Handle<Dictionary> dictionary_template) {
+ Handle<Map> dictionary_map(dictionary_template->map(), isolate);
+ Handle<Dictionary> dictionary =
+ Handle<Dictionary>::cast(isolate->factory()->CopyFixedArrayWithMap(
+ dictionary_template, dictionary_map));
+ // Clone all AccessorPairs in the dictionary.
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* value = dictionary->ValueAt(i);
+ if (value->IsAccessorPair()) {
+ Handle<AccessorPair> pair(AccessorPair::cast(value), isolate);
+ pair = AccessorPair::Copy(pair);
+ dictionary->ValueAtPut(i, *pair);
+ }
+ }
+ return dictionary;
+}
+
+template <typename Dictionary>
+bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
+ Handle<JSObject> receiver, Arguments& args,
+ bool* install_name_accessor = nullptr) {
+ Handle<Name> name_string = isolate->factory()->name_string();
+
+ // Replace all indices with proper methods.
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* maybe_key = dictionary->KeyAt(i);
+ if (!Dictionary::IsKey(isolate, maybe_key)) continue;
+ if (install_name_accessor && *install_name_accessor &&
+ (maybe_key == *name_string)) {
+ *install_name_accessor = false;
+ }
+ Handle<Object> key(maybe_key, isolate);
+ Handle<Object> value(dictionary->ValueAt(i), isolate);
+ if (value->IsAccessorPair()) {
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(value);
+ Object* tmp = pair->getter();
+ if (tmp->IsSmi()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result,
+ GetMethodAndSetHomeObjectAndName<Dictionary>(
+ isolate, args, Smi::cast(tmp), receiver,
+ isolate->factory()->get_string(), key),
+ false);
+ pair->set_getter(*result);
+ }
+ tmp = pair->setter();
+ if (tmp->IsSmi()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result,
+ GetMethodAndSetHomeObjectAndName<Dictionary>(
+ isolate, args, Smi::cast(tmp), receiver,
+ isolate->factory()->set_string(), key),
+ false);
+ pair->set_setter(*result);
+ }
+ } else if (value->IsSmi()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result,
+ GetMethodAndSetHomeObjectAndName<Dictionary>(
+ isolate, args, Smi::cast(*value), receiver,
+ isolate->factory()->empty_string(), key),
+ false);
+ dictionary->ValueAtPut(i, *result);
+ }
+ }
+ return true;
+}
+
+bool AddDescriptorsByTemplate(
+ Isolate* isolate, Handle<Map> map,
+ Handle<DescriptorArray> descriptors_template,
+ Handle<NumberDictionary> elements_dictionary_template,
+ Handle<JSObject> receiver, Arguments& args) {
+ int nof_descriptors = descriptors_template->number_of_descriptors();
+
+ Handle<DescriptorArray> descriptors =
+ DescriptorArray::Allocate(isolate, nof_descriptors, 0);
+
+ Handle<NumberDictionary> elements_dictionary =
+ *elements_dictionary_template ==
+ isolate->heap()->empty_slow_element_dictionary()
+ ? elements_dictionary_template
+ : ShallowCopyDictionaryTemplate(isolate,
+ elements_dictionary_template);
+
+ // Read values from |descriptors_template| and store possibly post-processed
+ // values into "instantiated" |descriptors| array.
+ for (int i = 0; i < nof_descriptors; i++) {
+ Object* value = descriptors_template->GetValue(i);
+ if (value->IsAccessorPair()) {
+ Handle<AccessorPair> pair =
+ AccessorPair::Copy(handle(AccessorPair::cast(value), isolate));
+ value = *pair;
+ }
+ DisallowHeapAllocation no_gc;
+ Name* name = descriptors_template->GetKey(i);
+ DCHECK(name->IsUniqueName());
+ PropertyDetails details = descriptors_template->GetDetails(i);
+ if (details.location() == kDescriptor) {
+ if (details.kind() == kData) {
+ if (value->IsSmi()) {
+ value = GetMethodWithSharedNameAndSetHomeObject(isolate, args, value,
+ *receiver);
+ }
+ details =
+ details.CopyWithRepresentation(value->OptimalRepresentation());
+
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ if (value->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(value);
+ Object* tmp = pair->getter();
+ if (tmp->IsSmi()) {
+ pair->set_getter(GetMethodWithSharedNameAndSetHomeObject(
+ isolate, args, tmp, *receiver));
+ }
+ tmp = pair->setter();
+ if (tmp->IsSmi()) {
+ pair->set_setter(GetMethodWithSharedNameAndSetHomeObject(
+ isolate, args, tmp, *receiver));
+ }
+ }
+ }
+ } else {
+ DCHECK_EQ(kField, details.location());
+ DCHECK(!details.representation().IsDouble());
+ }
+ DCHECK(value->FitsRepresentation(details.representation()));
+ descriptors->Set(i, name, value, details);
+ }
+
+ map->InitializeDescriptors(*descriptors,
+ LayoutDescriptor::FastPointerLayout());
+
+ if (elements_dictionary->NumberOfElements() > 0) {
+ if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
+ receiver, args)) {
+ return false;
+ }
+ map->set_elements_kind(DICTIONARY_ELEMENTS);
+ }
+
+ // Atomically commit the changes.
+ receiver->synchronized_set_map(*map);
+ if (elements_dictionary->NumberOfElements() > 0) {
+ receiver->set_elements(*elements_dictionary);
+ }
+ return true;
+}
+
+bool AddDescriptorsByTemplate(
+ Isolate* isolate, Handle<Map> map,
+ Handle<NameDictionary> properties_dictionary_template,
+ Handle<NumberDictionary> elements_dictionary_template,
+ Handle<FixedArray> computed_properties, Handle<JSObject> receiver,
+ bool install_name_accessor, Arguments& args) {
+ int computed_properties_length = computed_properties->length();
+
+ // Shallow-copy properties template.
+ Handle<NameDictionary> properties_dictionary =
+ ShallowCopyDictionaryTemplate(isolate, properties_dictionary_template);
+ Handle<NumberDictionary> elements_dictionary =
+ ShallowCopyDictionaryTemplate(isolate, elements_dictionary_template);
+
+ typedef ClassBoilerplate::ValueKind ValueKind;
+ typedef ClassBoilerplate::ComputedEntryFlags ComputedEntryFlags;
+
+ // Merge computed properties with properties and elements dictionary
+ // templates.
+ int i = 0;
+ while (i < computed_properties_length) {
+ int flags = Smi::ToInt(computed_properties->get(i++));
+
+ ValueKind value_kind = ComputedEntryFlags::ValueKindBits::decode(flags);
+ int key_index = ComputedEntryFlags::KeyIndexBits::decode(flags);
+ Object* value = Smi::FromInt(key_index + 1); // Value follows name.
+
+ Handle<Object> key = args.at<Object>(key_index);
+ DCHECK(key->IsName());
+ uint32_t element;
+ Handle<Name> name = Handle<Name>::cast(key);
+ if (name->AsArrayIndex(&element)) {
+ ClassBoilerplate::AddToElementsTemplate(
+ isolate, elements_dictionary, element, key_index, value_kind, value);
+
+ } else {
+ name = isolate->factory()->InternalizeName(name);
+ ClassBoilerplate::AddToPropertiesTemplate(
+ isolate, properties_dictionary, name, key_index, value_kind, value);
+ }
+ }
+
+ // Replace all indices with proper methods.
+ if (!SubstituteValues<NameDictionary>(isolate, properties_dictionary,
+ receiver, args,
+ &install_name_accessor)) {
+ return false;
+ }
+ if (install_name_accessor) {
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ PropertyDetails details(kAccessor, attribs, PropertyCellType::kNoCell);
+ Handle<NameDictionary> dict = NameDictionary::Add(
+ properties_dictionary, isolate->factory()->name_string(),
+ isolate->factory()->function_name_accessor(), details);
+ CHECK_EQ(*dict, *properties_dictionary);
+ }
+
+ if (elements_dictionary->NumberOfElements() > 0) {
+ if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
+ receiver, args)) {
+ return false;
+ }
+ map->set_elements_kind(DICTIONARY_ELEMENTS);
+ }
+
+ // Atomically commit the changes.
+ receiver->synchronized_set_map(*map);
+ receiver->set_raw_properties_or_hash(*properties_dictionary);
+ if (elements_dictionary->NumberOfElements() > 0) {
+ receiver->set_elements(*elements_dictionary);
+ }
+ return true;
+}
+
+Handle<JSObject> CreateClassPrototype(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+
+ const int kInobjectFields = 0;
+
+ // Just use some JSObject map of certain size.
+ Handle<Map> map = factory->ObjectLiteralMapFromCache(
+ isolate->native_context(), kInobjectFields);
+
+ return factory->NewJSObjectFromMap(map);
+}
+
+bool InitClassPrototype(Isolate* isolate,
+ Handle<ClassBoilerplate> class_boilerplate,
+ Handle<JSObject> prototype,
+ Handle<Object> prototype_parent,
+ Handle<JSFunction> constructor, Arguments& args) {
+ Handle<Map> map(prototype->map(), isolate);
+ map = Map::CopyDropDescriptors(map);
+ map->set_is_prototype_map(true);
+ Map::SetPrototype(map, prototype_parent);
+ constructor->set_prototype_or_initial_map(*prototype);
+ map->SetConstructor(*constructor);
+
+ Handle<FixedArray> computed_properties(
+ class_boilerplate->instance_computed_properties(), isolate);
+ Handle<NumberDictionary> elements_dictionary_template(
+ NumberDictionary::cast(class_boilerplate->instance_elements_template()),
+ isolate);
+
+ Handle<Object> properties_template(
+ class_boilerplate->instance_properties_template(), isolate);
+ if (properties_template->IsDictionary()) {
+ Handle<NameDictionary> properties_dictionary_template =
+ Handle<NameDictionary>::cast(properties_template);
+
+ map->set_dictionary_map(true);
+ map->set_migration_target(false);
+ map->set_may_have_interesting_symbols(true);
+ map->set_construction_counter(Map::kNoSlackTracking);
+
+ // We care about name property only for class constructor.
+ const bool install_name_accessor = false;
+
+ return AddDescriptorsByTemplate(
+ isolate, map, properties_dictionary_template,
+ elements_dictionary_template, computed_properties, prototype,
+ install_name_accessor, args);
+ } else {
+ Handle<DescriptorArray> descriptors_template =
+ Handle<DescriptorArray>::cast(properties_template);
+
+ // The size of the prototype object is known at this point.
+ // So we can create it now and then add the rest instance methods to the
+ // map.
+ return AddDescriptorsByTemplate(isolate, map, descriptors_template,
+ elements_dictionary_template, prototype,
+ args);
+ }
+}
+
+bool InitClassConstructor(Isolate* isolate,
+ Handle<ClassBoilerplate> class_boilerplate,
+ Handle<Object> constructor_parent,
+ Handle<JSFunction> constructor, Arguments& args) {
+ Handle<Map> map(constructor->map(), isolate);
+ map = Map::CopyDropDescriptors(map);
+ DCHECK(map->is_prototype_map());
+
+ if (!constructor_parent.is_null()) {
+ // Set map's prototype without enabling prototype setup mode for superclass
+ // because it does not make sense.
+ Map::SetPrototype(map, constructor_parent, false);
+ }
+
+ Handle<NumberDictionary> elements_dictionary_template(
+ NumberDictionary::cast(class_boilerplate->static_elements_template()),
+ isolate);
+ Handle<FixedArray> computed_properties(
+ class_boilerplate->static_computed_properties(), isolate);
+
+ Handle<Object> properties_template(
+ class_boilerplate->static_properties_template(), isolate);
+
+ if (properties_template->IsDictionary()) {
+ Handle<NameDictionary> properties_dictionary_template =
+ Handle<NameDictionary>::cast(properties_template);
+
+ map->set_dictionary_map(true);
+ map->InitializeDescriptors(isolate->heap()->empty_descriptor_array(),
+ LayoutDescriptor::FastPointerLayout());
+ map->set_migration_target(false);
+ map->set_may_have_interesting_symbols(true);
+ map->set_construction_counter(Map::kNoSlackTracking);
+
+ bool install_name_accessor =
+ class_boilerplate->install_class_name_accessor() != 0;
+
+ return AddDescriptorsByTemplate(
+ isolate, map, properties_dictionary_template,
+ elements_dictionary_template, computed_properties, constructor,
+ install_name_accessor, args);
+ } else {
+ Handle<DescriptorArray> descriptors_template =
+ Handle<DescriptorArray>::cast(properties_template);
+
+ return AddDescriptorsByTemplate(isolate, map, descriptors_template,
+ elements_dictionary_template, constructor,
+ args);
+ }
+}
+
+MaybeHandle<Object> DefineClass(Isolate* isolate,
+ Handle<ClassBoilerplate> class_boilerplate,
+ Handle<Object> super_class,
+ Handle<JSFunction> constructor,
+ Arguments& args) {
Handle<Object> prototype_parent;
Handle<Object> constructor_parent;
@@ -132,7 +572,10 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
prototype_parent),
Object);
}
- constructor_parent = super_class;
+ // Create new handle to avoid |constructor_parent| corruption because of
+ // |super_class| handle value overwriting via storing to
+ // args[ClassBoilerplate::kPrototypeArgumentIndex] below.
+ constructor_parent = handle(*super_class, isolate);
} else {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kExtendsValueNotConstructor,
@@ -141,95 +584,33 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
}
}
- Handle<Map> map =
- isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- map->set_is_prototype_map(true);
- Map::SetPrototype(map, prototype_parent);
- map->SetConstructor(*constructor);
- Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
-
- JSFunction::SetPrototype(constructor, prototype);
- PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- RETURN_ON_EXCEPTION(isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- constructor, isolate->factory()->prototype_string(),
- prototype, attribs),
- Object);
+ Handle<JSObject> prototype = CreateClassPrototype(isolate);
+ DCHECK_EQ(*constructor, args[ClassBoilerplate::kConstructorArgumentIndex]);
+ args[ClassBoilerplate::kPrototypeArgumentIndex] = *prototype;
- if (!constructor_parent.is_null()) {
- MAYBE_RETURN_NULL(JSObject::SetPrototype(constructor, constructor_parent,
- false, Object::THROW_ON_ERROR));
+ if (!InitClassConstructor(isolate, class_boilerplate, constructor_parent,
+ constructor, args) ||
+ !InitClassPrototype(isolate, class_boilerplate, prototype,
+ prototype_parent, constructor, args)) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
}
-
- JSObject::AddProperty(prototype, isolate->factory()->constructor_string(),
- constructor, DONT_ENUM);
-
- // Install private properties that are used to construct the FunctionToString.
- RETURN_ON_EXCEPTION(
- isolate,
- Object::SetProperty(
- constructor, isolate->factory()->class_start_position_symbol(),
- handle(Smi::FromInt(start_position), isolate), STRICT),
- Object);
- RETURN_ON_EXCEPTION(
- isolate, Object::SetProperty(
- constructor, isolate->factory()->class_end_position_symbol(),
- handle(Smi::FromInt(end_position), isolate), STRICT),
- Object);
-
- // Caller already has access to constructor, so return the prototype.
return prototype;
}
+} // namespace
RUNTIME_FUNCTION(Runtime_DefineClass) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 0);
+ DCHECK_LE(ClassBoilerplate::kFirstDynamicArgumentIndex, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(ClassBoilerplate, class_boilerplate, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
- CONVERT_SMI_ARG_CHECKED(start_position, 2);
- CONVERT_SMI_ARG_CHECKED(end_position, 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 2);
+ DCHECK_EQ(class_boilerplate->arguments_count(), args.length());
RETURN_RESULT_OR_FAILURE(
- isolate, DefineClass(isolate, super_class, constructor, start_position,
- end_position));
-}
-
-namespace {
-void InstallClassNameAccessor(Isolate* isolate, Handle<JSObject> object) {
- PropertyAttributes attrs =
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- // Cannot fail since this should only be called when creating an object
- // literal.
- CHECK(!JSObject::SetAccessor(
- object, Accessors::FunctionNameInfo(object->GetIsolate(), attrs))
- .is_null());
-}
-} // anonymous namespace
-
-RUNTIME_FUNCTION(Runtime_InstallClassNameAccessor) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- InstallClassNameAccessor(isolate, object);
- return *object;
-}
-
-RUNTIME_FUNCTION(Runtime_InstallClassNameAccessorWithCheck) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- // If a property named "name" is already defined, exit.
- Handle<Name> key = isolate->factory()->name_string();
- if (JSObject::HasRealNamedProperty(object, key).FromMaybe(false)) {
- return *object;
- }
-
- // Define the "name" accessor.
- InstallClassNameAccessor(isolate, object);
- return *object;
+ isolate,
+ DefineClass(isolate, class_boilerplate, super_class, constructor, args));
}
namespace {
@@ -376,8 +757,9 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
- RETURN_RESULT_OR_FAILURE(isolate, StoreToSuper(isolate, home_object, receiver,
- name, value, STRICT));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, StoreToSuper(isolate, home_object, receiver, name, value,
+ LanguageMode::kStrict));
}
@@ -389,8 +771,9 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
- RETURN_RESULT_OR_FAILURE(isolate, StoreToSuper(isolate, home_object, receiver,
- name, value, SLOPPY));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, StoreToSuper(isolate, home_object, receiver, name, value,
+ LanguageMode::kSloppy));
}
static MaybeHandle<Object> StoreKeyedToSuper(
@@ -424,8 +807,8 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Strict) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
RETURN_RESULT_OR_FAILURE(
- isolate,
- StoreKeyedToSuper(isolate, home_object, receiver, key, value, STRICT));
+ isolate, StoreKeyedToSuper(isolate, home_object, receiver, key, value,
+ LanguageMode::kStrict));
}
@@ -438,8 +821,8 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
RETURN_RESULT_OR_FAILURE(
- isolate,
- StoreKeyedToSuper(isolate, home_object, receiver, key, value, SLOPPY));
+ isolate, StoreKeyedToSuper(isolate, home_object, receiver, key, value,
+ LanguageMode::kSloppy));
}
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 06f4a89346..44e947aafe 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -97,7 +97,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
- CHECK(max_entries >= 0);
+ CHECK_GE(max_entries, 0);
return *JSWeakCollection::GetEntries(holder, max_entries);
}
@@ -116,10 +116,18 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
- CHECK(key->IsJSReceiver() || key->IsSymbol());
+
+#ifdef DEBUG
+ DCHECK(key->IsJSReceiver());
+ DCHECK(ObjectHashTableShape::IsLive(isolate, *key));
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- CHECK(table->IsKey(isolate, *key));
+ // Should only be called when shrinking the table is necessary. See
+ // HashTable::Shrink().
+ DCHECK(table->NumberOfElements() - 1 <= (table->Capacity() >> 2) &&
+ table->NumberOfElements() - 1 >= 16);
+#endif
+
bool was_present = JSWeakCollection::Delete(weak_collection, key, hash);
return isolate->heap()->ToBoolean(was_present);
}
@@ -130,12 +138,20 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CHECK(key->IsJSReceiver() || key->IsSymbol());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(hash, 3)
+
+#ifdef DEBUG
+ DCHECK(key->IsJSReceiver());
+ DCHECK(ObjectHashTableShape::IsLive(isolate, *key));
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- CHECK(table->IsKey(isolate, *key));
+ // Should only be called when rehashing or resizing the table is necessary.
+ // See ObjectHashTable::Put() and HashTable::HasSufficientCapacityToAdd().
+ DCHECK((table->NumberOfDeletedElements() << 1) > table->NumberOfElements() ||
+ !table->HasSufficientCapacityToAdd(1));
+#endif
+
JSWeakCollection::Set(weak_collection, key, value, hash);
return *weak_collection;
}
@@ -146,7 +162,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
- CHECK(max_values >= 0);
+ CHECK_GE(max_values, 0);
return *JSWeakCollection::GetEntries(holder, max_values);
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index b445037d08..92ba3e6c3f 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -128,12 +128,21 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
return Smi::kZero;
}
-namespace {
-
-void MaterializeHeapObjectsAndDeleteDeoptimizer(Isolate* isolate,
- Deoptimizer* deoptimizer) {
+RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ DCHECK(deoptimizer->compiled_code()->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(deoptimizer->compiled_code()->is_turbofanned());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_NULL(isolate->context());
+
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+ Handle<JSFunction> function = deoptimizer->function();
+ Deoptimizer::BailoutType type = deoptimizer->bailout_type();
+ bool preserve_optimized_code = deoptimizer->preserve_optimized();
+
// TODO(turbofan): We currently need the native context to materialize
// the arguments object, but only to get to its map.
isolate->set_context(deoptimizer->function()->native_context());
@@ -146,39 +155,9 @@ void MaterializeHeapObjectsAndDeleteDeoptimizer(Isolate* isolate,
JavaScriptFrameIterator top_it(isolate);
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
-}
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- DCHECK(deoptimizer->compiled_code()->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(deoptimizer->compiled_code()->is_turbofanned());
- MaterializeHeapObjectsAndDeleteDeoptimizer(isolate, deoptimizer);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- DCHECK(deoptimizer->compiled_code()->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(deoptimizer->compiled_code()->is_turbofanned());
-
- TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.DeoptimizeCode");
- Handle<JSFunction> function = deoptimizer->function();
- Deoptimizer::BailoutType type = deoptimizer->bailout_type();
-
- MaterializeHeapObjectsAndDeleteDeoptimizer(isolate, deoptimizer);
-
- // TODO(mstarzinger): The marking of the function for deoptimization is the
- // only difference to {Runtime_NotifyStubFailure} by now and we should also
- // do this if the top-most frame is a builtin stub to avoid deoptimization
- // loops. This would also unify the two runtime functions.
- if (type != Deoptimizer::LAZY) {
+ // Invalidate the underlying optimized code on non-lazy deopts.
+ if (type != Deoptimizer::LAZY && !preserve_optimized_code) {
Deoptimizer::DeoptimizeFunction(*function);
}
@@ -232,9 +211,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // We're not prepared to handle a function with arguments object.
- DCHECK(!function->shared()->uses_arguments());
-
// Only reachable when OST is enabled.
CHECK(FLAG_use_osr);
@@ -263,8 +239,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
Handle<Code> result;
if (maybe_result.ToHandle(&result) &&
result->kind() == Code::OPTIMIZED_FUNCTION) {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(result->deoptimization_data());
+ DeoptimizationData* data =
+ DeoptimizationData::cast(result->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
DCHECK(BailoutId(data->OsrBytecodeOffset()->value()) == ast_id);
@@ -299,7 +275,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
if (!function->IsOptimized()) {
function->set_code(function->shared()->code());
}
- return NULL;
+ return nullptr;
}
static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 9251fa3a7f..d7395c7a7f 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -21,16 +21,23 @@
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
+ using interpreter::Bytecode;
+ using interpreter::Bytecodes;
+ using interpreter::OperandScale;
+
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
HandleScope scope(isolate);
+ // Return value can be changed by debugger. Last set value will be used as
+ // return value.
ReturnValueScope result_scope(isolate->debug());
isolate->debug()->set_return_value(*value);
@@ -45,16 +52,22 @@ RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
SharedFunctionInfo* shared = interpreted_frame->function()->shared();
BytecodeArray* bytecode_array = shared->bytecode_array();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
- if (bytecode == interpreter::Bytecode::kReturn) {
+ Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+ if (bytecode == Bytecode::kReturn) {
// If we are returning, reset the bytecode array on the interpreted stack
// frame to the non-debug variant so that the interpreter entry trampoline
// sees the return bytecode rather than the DebugBreak.
interpreted_frame->PatchBytecodeArray(bytecode_array);
}
- return isolate->interpreter()->GetBytecodeHandler(
- bytecode, interpreter::OperandScale::kSingle);
+
+ // We do not have to deal with operand scale here. If the bytecode at the
+ // break is prefixed by operand scaling, we would have patched over the
+ // scaling prefix. We now simply dispatch to the handler for the prefix.
+ OperandScale operand_scale = OperandScale::kSingle;
+ Code* code = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
+ bytecode, operand_scale);
+
+ return MakePair(isolate->debug()->return_value(), code);
}
@@ -96,9 +109,8 @@ RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
return isolate->heap()->undefined_value();
}
-
static Handle<Object> DebugGetProperty(LookupIterator* it,
- bool* has_caught = NULL) {
+ bool* has_caught = nullptr) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
@@ -122,7 +134,7 @@ static Handle<Object> DebugGetProperty(LookupIterator* it,
if (!maybe_result.ToHandle(&result)) {
result = handle(it->isolate()->pending_exception(), it->isolate());
it->isolate()->clear_pending_exception();
- if (has_caught != NULL) *has_caught = true;
+ if (has_caught != nullptr) *has_caught = true;
}
return result;
}
@@ -140,7 +152,7 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
Isolate* isolate, Handle<IteratorType> object) {
Factory* factory = isolate->factory();
Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
- const char* kind = NULL;
+ const char* kind = nullptr;
switch (iterator->map()->instance_type()) {
case JS_MAP_KEY_ITERATOR_TYPE:
kind = "keys";
@@ -435,7 +447,6 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
}
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
frames.clear();
it.frame()->Summarize(&frames);
@@ -1106,7 +1117,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- CHECK(source_position >= 0);
+ CHECK_GE(source_position, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
// Get the script from the script wrapper.
@@ -1300,7 +1311,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
CHECK(filter->IsUndefined(isolate) || filter->IsJSObject());
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
- CHECK(max_references >= 0);
+ CHECK_GE(max_references, 0);
std::vector<Handle<JSObject>> instances;
Heap* heap = isolate->heap();
@@ -1356,7 +1367,7 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
- CHECK(max_references >= 0);
+ CHECK_GE(max_references, 0);
std::vector<Handle<JSObject>> instances;
Heap* heap = isolate->heap();
@@ -1469,7 +1480,7 @@ RUNTIME_FUNCTION(Runtime_GetDebugContext) {
RUNTIME_FUNCTION(Runtime_CollectGarbage) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask,
GarbageCollectionReason::kRuntime);
return isolate->heap()->undefined_value();
}
@@ -1501,8 +1512,8 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
Handle<Script> found;
{
Script::Iterator iterator(isolate);
- Script* script = NULL;
- while ((script = iterator.Next()) != NULL) {
+ Script* script = nullptr;
+ while ((script = iterator.Next()) != nullptr) {
if (!script->name()->IsString()) continue;
String* name = String::cast(script->name());
if (name->Equals(*script_name)) {
@@ -1678,8 +1689,8 @@ Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
// Slow traversal over all scripts on the heap.
bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
Script::Iterator iterator(isolate);
- Script* script = NULL;
- while ((script = iterator.Next()) != NULL) {
+ Script* script = nullptr;
+ while ((script = iterator.Next()) != nullptr) {
if (script->id() == needle) {
*result = handle(script);
return true;
@@ -1858,9 +1869,9 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
Handle<Symbol> async_stack_id_symbol =
isolate->factory()->promise_async_stack_id_symbol();
JSObject::SetProperty(promise, async_stack_id_symbol,
- handle(Smi::FromInt(id), isolate), STRICT)
+ handle(Smi::FromInt(id), isolate),
+ LanguageMode::kStrict)
.Assert();
- isolate->debug()->OnAsyncTaskEvent(debug::kDebugEnqueueAsyncFunction, id, 0);
return isolate->heap()->undefined_value();
}
@@ -1874,20 +1885,6 @@ RUNTIME_FUNCTION(Runtime_DebugPromiseReject) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugAsyncEventEnqueueRecurring) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_SMI_ARG_CHECKED(status, 1);
- if (isolate->debug()->is_active()) {
- isolate->debug()->OnAsyncTaskEvent(
- status == v8::Promise::kFulfilled ? debug::kDebugEnqueuePromiseResolve
- : debug::kDebugEnqueuePromiseReject,
- isolate->debug()->NextAsyncTaskId(promise), 0);
- }
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DebugIsActive) {
SealHandleScope shs(isolate);
return Smi::FromInt(isolate->debug()->is_active());
@@ -1895,7 +1892,7 @@ RUNTIME_FUNCTION(Runtime_DebugIsActive) {
RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
- return NULL;
+ return nullptr;
}
namespace {
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index bbb54404a1..c78ac8f6b1 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -97,18 +97,6 @@ RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
}
-RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- CHECK(fun->IsConstructor());
- JSFunction::SetPrototype(fun, value);
- return args[0]; // return TOS
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -220,15 +208,6 @@ RUNTIME_FUNCTION(Runtime_Call) {
}
-// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
-RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- return *Object::ConvertReceiver(isolate, receiver).ToHandleChecked();
-}
-
-
RUNTIME_FUNCTION(Runtime_IsFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 813a774611..6d0e2b8439 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -59,7 +59,7 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
if (index == Context::kNotFound) {
index = Context::IntrinsicIndexForName(name);
}
- CHECK(index != Context::kNotFound);
+ CHECK_NE(index, Context::kNotFound);
native_context->set(index, *object);
}
return isolate->heap()->undefined_value();
@@ -317,8 +317,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CHECK(IsAligned(size, kPointerSize));
- CHECK(size > 0);
- CHECK(size <= kMaxRegularHeapObjectSize);
+ CHECK_GT(size, 0);
+ CHECK_LE(size, kMaxRegularHeapObjectSize);
return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
}
@@ -328,7 +328,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
CHECK(IsAligned(size, kPointerSize));
- CHECK(size > 0);
+ CHECK_GT(size, 0);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
CHECK(size <= kMaxRegularHeapObjectSize || space == LO_SPACE);
@@ -370,7 +370,6 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
it.frame()->Summarize(&frames);
auto& summary = frames.back().AsJavaScript();
Handle<SharedFunctionInfo> shared(summary.function()->shared());
@@ -527,7 +526,7 @@ RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing %s\n", Builtins::name(builtin_id));
+ PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
}
Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 75e211bf24..b65a2327a3 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -13,12 +13,34 @@
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
+ HandleScope scope(isolate);
+
+ DCHECK(FLAG_lazy_handler_deserialization);
+ DCHECK(FLAG_lazy_deserialization);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(bytecode_int, 0);
+ CONVERT_SMI_ARG_CHECKED(operand_scale_int, 1);
+
+ using interpreter::Bytecode;
+ using interpreter::Bytecodes;
+ using interpreter::OperandScale;
+
+ Bytecode bytecode = Bytecodes::FromByte(bytecode_int);
+ OperandScale operand_scale = static_cast<OperandScale>(operand_scale_int);
+
+ return isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
+ bytecode, operand_scale);
+}
+
RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -173,5 +195,40 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
#endif
+#ifdef V8_TRACE_FEEDBACK_UPDATES
+
+RUNTIME_FUNCTION(Runtime_InterpreterTraceUpdateFeedback) {
+ if (!FLAG_trace_feedback_updates) {
+ return isolate->heap()->undefined_value();
+ }
+
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_SMI_ARG_CHECKED(slot, 1);
+ CONVERT_ARG_CHECKED(String, reason, 2);
+
+ int slot_count = function->feedback_vector()->metadata()->slot_count();
+
+ OFStream os(stdout);
+ os << "[Feedback slot " << slot << "/" << slot_count << " in ";
+ function->shared()->ShortPrint(os);
+ os << " updated to ";
+ function->feedback_vector()->FeedbackSlotPrint(os, FeedbackSlot(slot));
+ os << " - ";
+
+ StringCharacterStream stream(reason);
+ while (stream.HasMore()) {
+ uint16_t character = stream.GetNext();
+ PrintF("%c", character);
+ }
+
+ os << "]" << std::endl;
+
+ return isolate->heap()->undefined_value();
+}
+
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index 783450c8ef..c4f132b134 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -8,6 +8,7 @@
#include "src/runtime/runtime-utils.h"
+#include <cmath>
#include <memory>
#include "src/api-natives.h"
@@ -45,12 +46,8 @@
#include "unicode/uloc.h"
#include "unicode/unistr.h"
#include "unicode/unum.h"
-#include "unicode/uvernum.h"
#include "unicode/uversion.h"
-#if U_ICU_VERSION_MAJOR_NUM >= 59
-#include "unicode/char16ptr.h"
-#endif
namespace v8 {
namespace internal {
@@ -105,7 +102,7 @@ RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
- const icu::Locale* available_locales = NULL;
+ const icu::Locale* available_locales = nullptr;
int32_t count = 0;
if (service->IsUtf8EqualTo(CStrVector("collator"))) {
@@ -217,7 +214,7 @@ RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- JSObject::SetProperty(input, marker, type, STRICT).Assert();
+ JSObject::SetProperty(input, marker, type, LanguageMode::kStrict).Assert();
return isolate->heap()->undefined_value();
}
@@ -260,17 +257,21 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(date, 1);
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(date));
+ double date_value = date->Number();
+ // Check for +-Infinity and NaN
+ if (!std::isfinite(date_value)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
+ }
icu::SimpleDateFormat* date_format =
DateFormat::UnpackDateFormat(isolate, date_format_holder);
CHECK_NOT_NULL(date_format);
icu::UnicodeString result;
- date_format->format(value->Number(), result);
+ date_format->format(date_value, result);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
@@ -362,10 +363,13 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(date, 1);
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(date));
+ double date_value = date->Number();
+ if (!std::isfinite(date_value)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
+ }
icu::SimpleDateFormat* date_format =
DateFormat::UnpackDateFormat(isolate, date_format_holder);
@@ -375,7 +379,7 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
icu::FieldPositionIterator fp_iter;
icu::FieldPosition fp;
UErrorCode status = U_ZERO_ERROR;
- date_format->format(value->Number(), formatted, &fp_iter, status);
+ date_format->format(date_value, formatted, &fp_iter, status);
if (U_FAILURE(status)) return isolate->heap()->undefined_value();
Handle<JSArray> result = factory->NewJSArray(0);
@@ -473,21 +477,12 @@ RUNTIME_FUNCTION(Runtime_CurrencyDigits) {
CONVERT_ARG_HANDLE_CHECKED(String, currency, 0);
- // TODO(littledan): Avoid transcoding the string twice
- v8::String::Utf8Value currency_string(v8_isolate,
- v8::Utils::ToLocal(currency));
- icu::UnicodeString currency_icu =
- icu::UnicodeString::fromUTF8(*currency_string);
+ v8::String::Value currency_string(v8_isolate, v8::Utils::ToLocal(currency));
DisallowHeapAllocation no_gc;
UErrorCode status = U_ZERO_ERROR;
-#if U_ICU_VERSION_MAJOR_NUM >= 59
- uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
- icu::toUCharPtr(currency_icu.getTerminatedBuffer()), &status);
-#else
uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
- currency_icu.getTerminatedBuffer(), &status);
-#endif
+ reinterpret_cast<const UChar*>(*currency_string), &status);
// For missing currency codes, default to the most common, 2
if (!U_SUCCESS(status)) fraction_digits = 2;
return Smi::FromInt(fraction_digits);
@@ -660,7 +655,7 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
if (!break_iterator) return isolate->ThrowIllegalOperation();
local_object->SetEmbedderField(0, reinterpret_cast<Smi*>(break_iterator));
- // Make sure that the pointer to adopted text is NULL.
+ // Make sure that the pointer to adopted text is nullptr.
local_object->SetEmbedderField(1, static_cast<Smi*>(nullptr));
// Make object handle weak so we can delete the break iterator once GC kicks
@@ -800,7 +795,7 @@ RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
// Primary language tag can be up to 8 characters long in theory.
// https://tools.ietf.org/html/bcp47#section-2.2.1
- DCHECK(lang_arg->length() <= 8);
+ DCHECK_LE(lang_arg->length(), 8);
lang_arg = String::Flatten(lang_arg);
s = String::Flatten(s);
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 0dc15793a9..c82a449bda 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -165,8 +165,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> element_dictionary(
- copy->element_dictionary());
+ Handle<NumberDictionary> element_dictionary(copy->element_dictionary());
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw = element_dictionary->ValueAt(i);
@@ -373,7 +372,7 @@ struct ObjectBoilerplate {
// TODO(cbruni): avoid making the boilerplate fast again, the clone stub
// supports dict-mode objects directly.
JSObject::MigrateSlowToFast(boilerplate,
- boilerplate->map()->unused_property_fields(),
+ boilerplate->map()->UnusedPropertyFields(),
"FastLiteral");
}
return boilerplate;
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index bb8436cd11..1804f93229 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -8,7 +8,6 @@
#include "src/assembler.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
#include "src/counters.h"
#include "src/double.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 6d83443ea8..bb16a772c0 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -57,5 +57,12 @@ RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ Handle<Module> module(isolate->context()->module());
+ return *isolate->RunHostInitializeImportMetaObjectCallback(module);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index e8c1d00573..8e351b3c74 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -7,7 +7,6 @@
#include "src/arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
#include "src/isolate-inl.h"
namespace v8 {
@@ -108,9 +107,11 @@ RUNTIME_FUNCTION(Runtime_NumberToSmi) {
return isolate->heap()->nan_value();
}
-
-// Compare two Smis as if they were converted to strings and then
-// compared lexicographically.
+// Compare two Smis x, y as if they were converted to strings and then
+// compared lexicographically. Returns:
+// -1 if x < y
+// 0 if x == y
+// 1 if x > y
RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -118,12 +119,12 @@ RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
CONVERT_SMI_ARG_CHECKED(y_value, 1);
// If the integers are equal so are the string representations.
- if (x_value == y_value) return Smi::FromInt(EQUAL);
+ if (x_value == y_value) return Smi::FromInt(0);
// If one of the integers is zero the normal integer order is the
// same as the lexicographic order of the string representations.
if (x_value == 0 || y_value == 0)
- return Smi::FromInt(x_value < y_value ? LESS : GREATER);
+ return Smi::FromInt(x_value < y_value ? -1 : 1);
// If only one of the integers is negative the negative number is
// smallest because the char code of '-' is less than the char code
@@ -134,8 +135,8 @@ RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
uint32_t x_scaled = x_value;
uint32_t y_scaled = y_value;
if (x_value < 0 || y_value < 0) {
- if (y_value >= 0) return Smi::FromInt(LESS);
- if (x_value >= 0) return Smi::FromInt(GREATER);
+ if (y_value >= 0) return Smi::FromInt(-1);
+ if (x_value >= 0) return Smi::FromInt(1);
x_scaled = -x_value;
y_scaled = -y_value;
}
@@ -153,15 +154,15 @@ RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
// integer comes first in the lexicographic order.
// From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
- int x_log2 = 31 - base::bits::CountLeadingZeros32(x_scaled);
+ int x_log2 = 31 - base::bits::CountLeadingZeros(x_scaled);
int x_log10 = ((x_log2 + 1) * 1233) >> 12;
x_log10 -= x_scaled < kPowersOf10[x_log10];
- int y_log2 = 31 - base::bits::CountLeadingZeros32(y_scaled);
+ int y_log2 = 31 - base::bits::CountLeadingZeros(y_scaled);
int y_log10 = ((y_log2 + 1) * 1233) >> 12;
y_log10 -= y_scaled < kPowersOf10[y_log10];
- int tie = EQUAL;
+ int tie = 0;
if (x_log10 < y_log10) {
// X has fewer digits. We would like to simply scale up X but that
@@ -172,15 +173,15 @@ RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
// past the length of the shorter integer.
x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
y_scaled /= 10;
- tie = LESS;
+ tie = -1;
} else if (y_log10 < x_log10) {
y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
x_scaled /= 10;
- tie = GREATER;
+ tie = 1;
}
- if (x_scaled < y_scaled) return Smi::FromInt(LESS);
- if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
+ if (x_scaled < y_scaled) return Smi::FromInt(-1);
+ if (x_scaled > y_scaled) return Smi::FromInt(1);
return Smi::FromInt(tie);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 4c8805eb25..057ead9407 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -305,7 +305,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
Maybe<bool> result =
JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result.FromJust());
} else if (object->IsString()) {
@@ -420,9 +420,8 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
CHECK_EQ(*function_map, function->map());
}
}
- MAYBE_RETURN(
- JSReceiver::SetPrototype(obj, prototype, false, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetPrototype(obj, prototype, false, kThrowOnError),
+ isolate->heap()->exception());
return *obj;
}
@@ -478,7 +477,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
DCHECK(!name->ToArrayIndex(&index));
LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return isolate->heap()->exception();
DCHECK(!it.IsFound());
#endif
@@ -504,7 +503,7 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
LookupIterator it(isolate, object, index, object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return isolate->heap()->exception();
DCHECK(!it.IsFound());
if (object->IsJSArray()) {
@@ -609,7 +608,7 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
// Lookup the {name} on {receiver}.
Maybe<bool> maybe = JSReceiver::HasProperty(receiver, name);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(maybe.FromJust());
}
@@ -676,8 +675,8 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
RETURN_RESULT_OR_FAILURE(isolate, JSObject::New(target, new_target));
}
-
-RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
+RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
+ DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -693,7 +692,7 @@ RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
- CHECK((index->value() & 1) == 1);
+ CHECK_EQ(index->value() & 1, 1);
FieldIndex field_index =
FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
if (field_index.is_inobject()) {
@@ -810,9 +809,9 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
isolate, object, name, object, LookupIterator::OWN);
// Cannot fail since this should only be called when
// creating an object literal.
- CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
- Object::DONT_THROW)
- .IsJust());
+ CHECK(
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs, kDontThrow)
+ .IsJust());
return *object;
}
@@ -1020,7 +1019,7 @@ RUNTIME_FUNCTION(Runtime_DefineMethodsInternal) {
}
Maybe<bool> success = JSReceiver::DefineOwnProperty(
- isolate, target, key, &descriptor, Object::DONT_THROW);
+ isolate, target, key, &descriptor, kDontThrow);
CHECK(success.FromJust());
}
return isolate->heap()->undefined_value();
@@ -1049,15 +1048,12 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
return isolate->heap()->undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_ToObject) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ToObject(isolate, object));
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering.
+ UNREACHABLE();
}
-
RUNTIME_FUNCTION(Runtime_ToPrimitive) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1081,6 +1077,12 @@ RUNTIME_FUNCTION(Runtime_ToNumber) {
RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumber(input));
}
+RUNTIME_FUNCTION(Runtime_ToNumeric) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumeric(input));
+}
RUNTIME_FUNCTION(Runtime_ToInteger) {
HandleScope scope(isolate);
@@ -1131,32 +1133,6 @@ RUNTIME_FUNCTION(Runtime_SameValueZero) {
return isolate->heap()->ToBoolean(x->SameValueZero(y));
}
-
-// TODO(bmeurer): Kill this special wrapper and use TF compatible LessThan,
-// GreaterThan, etc. which return true or false.
-RUNTIME_FUNCTION(Runtime_Compare) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, ncr, 2);
- Maybe<ComparisonResult> result = Object::Compare(x, y);
- if (result.IsJust()) {
- switch (result.FromJust()) {
- case ComparisonResult::kLessThan:
- return Smi::FromInt(LESS);
- case ComparisonResult::kEqual:
- return Smi::FromInt(EQUAL);
- case ComparisonResult::kGreaterThan:
- return Smi::FromInt(GREATER);
- case ComparisonResult::kUndefined:
- return *ncr;
- }
- UNREACHABLE();
- }
- return isolate->heap()->exception();
-}
-
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1189,9 +1165,8 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, o, key, &success, LookupIterator::OWN);
if (!success) return isolate->heap()->exception();
- MAYBE_RETURN(
- JSReceiver::CreateDataProperty(&it, value, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::CreateDataProperty(&it, value, kThrowOnError),
+ isolate->heap()->exception());
return *value;
}
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index 2a9255b77e..42a7e21b82 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -114,7 +114,7 @@ RUNTIME_FUNCTION(Runtime_Equal) {
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
Maybe<bool> result = Object::Equals(x, y);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -124,7 +124,7 @@ RUNTIME_FUNCTION(Runtime_NotEqual) {
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
Maybe<bool> result = Object::Equals(x, y);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(!result.FromJust());
}
@@ -150,7 +150,7 @@ RUNTIME_FUNCTION(Runtime_LessThan) {
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
Maybe<bool> result = Object::LessThan(x, y);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -160,7 +160,7 @@ RUNTIME_FUNCTION(Runtime_GreaterThan) {
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
Maybe<bool> result = Object::GreaterThan(x, y);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -170,7 +170,7 @@ RUNTIME_FUNCTION(Runtime_LessThanOrEqual) {
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
Maybe<bool> result = Object::LessThanOrEqual(x, y);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -180,7 +180,7 @@ RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
Maybe<bool> result = Object::GreaterThanOrEqual(x, y);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.IsNothing()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 855f5360fe..1d8ca623e1 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -45,9 +45,6 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
// undefined, which will be interpreted by PromiseRejectEvent
// as being a caught exception event.
rejected_promise = isolate->GetPromiseOnStackOnThrow();
- isolate->debug()->OnAsyncTaskEvent(
- debug::kDebugEnqueuePromiseReject,
- isolate->debug()->NextAsyncTaskId(promise), 0);
}
PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
return isolate->heap()->undefined_value();
@@ -83,7 +80,7 @@ RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(PromiseResolveThenableJobInfo, info, 0);
isolate->EnqueueMicrotask(info);
return isolate->heap()->undefined_value();
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index bb4a6457c8..2ba760b847 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -21,6 +21,20 @@ namespace internal {
namespace {
+// Returns -1 (which wraps to the maximum uint32_t value) for failure.
+uint32_t GetArgcForReplaceCallable(uint32_t num_captures,
+ bool has_named_captures) {
+ const uint32_t kAdditionalArgsWithoutNamedCaptures = 2;
+ const uint32_t kAdditionalArgsWithNamedCaptures = 3;
+ if (num_captures > Code::kMaxArguments) return -1;
+ uint32_t argc = has_named_captures
+ ? num_captures + kAdditionalArgsWithNamedCaptures
+ : num_captures + kAdditionalArgsWithoutNamedCaptures;
+ STATIC_ASSERT(Code::kMaxArguments < std::numeric_limits<uint32_t>::max() -
+ kAdditionalArgsWithNamedCaptures);
+ return (argc > Code::kMaxArguments) ? -1 : argc;
+}
+
// Looks up the capture of the given name. Returns the (1-based) numbered
// capture index or -1 on failure.
int LookupNamedCapture(std::function<bool(String*)> name_matches,
@@ -404,7 +418,7 @@ void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
while (limit > 0) {
pos = reinterpret_cast<const uint8_t*>(
memchr(pos, pattern, subject_end - pos));
- if (pos == NULL) return;
+ if (pos == nullptr) return;
indices->push_back(static_cast<int>(pos - subject_start));
pos++;
limit--;
@@ -633,7 +647,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
if (global_cache.HasException()) return isolate->heap()->exception();
int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) {
+ if (current_match == nullptr) {
if (global_cache.HasException()) return isolate->heap()->exception();
return *subject;
}
@@ -669,7 +683,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
prev = end;
current_match = global_cache.FetchNext();
- } while (current_match != NULL);
+ } while (current_match != nullptr);
if (global_cache.HasException()) return isolate->heap()->exception();
@@ -706,7 +720,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
if (global_cache.HasException()) return isolate->heap()->exception();
int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) {
+ if (current_match == nullptr) {
if (global_cache.HasException()) return isolate->heap()->exception();
return *subject;
}
@@ -742,7 +756,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
prev = end;
current_match = global_cache.FetchNext();
- } while (current_match != NULL);
+ } while (current_match != nullptr);
if (global_cache.HasException()) return isolate->heap()->exception();
@@ -808,19 +822,6 @@ Object* StringReplaceGlobalRegExpWithStringHelper(
} // namespace
-RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
-
- return StringReplaceGlobalRegExpWithStringHelper(
- isolate, regexp, subject, replacement, last_match_info);
-}
-
RUNTIME_FUNCTION(Runtime_StringSplit) {
HandleScope handle_scope(isolate);
DCHECK_EQ(3, args.length());
@@ -902,33 +903,6 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
return *result;
}
-// ES##sec-regexpcreate
-// RegExpCreate ( P, F )
-RUNTIME_FUNCTION(Runtime_RegExpCreate) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, source_object, 0);
-
- Handle<String> source;
- if (source_object->IsUndefined(isolate)) {
- source = isolate->factory()->empty_string();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, source, Object::ToString(isolate, source_object));
- }
-
- Handle<Map> map(isolate->regexp_function()->initial_map());
- Handle<JSRegExp> regexp =
- Handle<JSRegExp>::cast(isolate->factory()->NewJSObjectFromMap(map));
-
- JSRegExp::Flags flags = JSRegExp::kNone;
-
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSRegExp::Initialize(regexp, source, flags));
-
- return *regexp;
-}
-
RUNTIME_FUNCTION(Runtime_RegExpExec) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -1216,7 +1190,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
while (true) {
int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) break;
+ if (current_match == nullptr) break;
match_start = current_match[0];
builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
if (match_end < match_start) {
@@ -1358,7 +1332,7 @@ MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
String);
last_index = PositiveNumberToUint32(*last_index_obj);
- if (static_cast<int>(last_index) > string->length()) last_index = 0;
+ if (last_index > static_cast<uint32_t>(string->length())) last_index = 0;
}
Handle<Object> match_indices_obj;
@@ -1477,7 +1451,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
last_index = PositiveNumberToUint32(*last_index_obj);
- if (static_cast<int>(last_index) > subject->length()) last_index = 0;
+ if (last_index > static_cast<uint32_t>(subject->length())) last_index = 0;
}
Handle<Object> match_indices_obj;
@@ -1523,7 +1497,11 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
}
DCHECK_IMPLIES(has_named_captures, FLAG_harmony_regexp_named_captures);
- const int argc = has_named_captures ? m + 3 : m + 2;
+ const uint32_t argc = GetArgcForReplaceCallable(m, has_named_captures);
+ if (argc == static_cast<uint32_t>(-1)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kTooManyArguments));
+ }
ScopedVector<Handle<Object>> argv(argc);
int cursor = 0;
@@ -1668,7 +1646,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
static const int kInitialArraySize = 8;
Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
- int num_elems = 0;
+ uint32_t num_elems = 0;
uint32_t string_index = 0;
uint32_t prev_string_index = 0;
@@ -1682,8 +1660,8 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
factory->undefined_value()));
if (result->IsNull(isolate)) {
- string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
- string_index, unicode);
+ string_index = static_cast<uint32_t>(RegExpUtils::AdvanceStringIndex(
+ isolate, string, string_index, unicode));
continue;
}
@@ -1697,8 +1675,8 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
const uint32_t end =
std::min(PositiveNumberToUint32(*last_index_obj), length);
if (end == prev_string_index) {
- string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
- string_index, unicode);
+ string_index = static_cast<uint32_t>(RegExpUtils::AdvanceStringIndex(
+ isolate, string, string_index, unicode));
continue;
}
@@ -1706,7 +1684,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
Handle<String> substr =
factory->NewSubString(string, prev_string_index, string_index);
elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- if (static_cast<uint32_t>(num_elems) == limit) {
+ if (num_elems == limit) {
return *NewJSArrayWithElements(isolate, elems, num_elems);
}
}
@@ -1720,14 +1698,14 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
- const int num_captures = PositiveNumberToUint32(*num_captures_obj);
+ const uint32_t num_captures = PositiveNumberToUint32(*num_captures_obj);
- for (int i = 1; i < num_captures; i++) {
+ for (uint32_t i = 1; i < num_captures; i++) {
Handle<Object> capture;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, capture, Object::GetElement(isolate, result, i));
elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
- if (static_cast<uint32_t>(num_elems) == limit) {
+ if (num_elems == limit) {
return *NewJSArrayWithElements(isolate, elems, num_elems);
}
}
@@ -1834,7 +1812,8 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, captures_length_obj,
Object::ToLength(isolate, captures_length_obj));
- const int captures_length = PositiveNumberToUint32(*captures_length_obj);
+ const uint32_t captures_length =
+ PositiveNumberToUint32(*captures_length_obj);
Handle<Object> match_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
@@ -1859,7 +1838,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
// Do not reserve capacity since captures_length is user-controlled.
ZoneVector<Handle<Object>> captures(&zone);
- for (int n = 0; n < captures_length; n++) {
+ for (uint32_t n = 0; n < captures_length; n++) {
Handle<Object> capture;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, capture, Object::GetElement(isolate, result, n));
@@ -1883,12 +1862,17 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
Handle<String> replacement;
if (functional_replace) {
- const int argc =
- has_named_captures ? captures_length + 3 : captures_length + 2;
+ const uint32_t argc =
+ GetArgcForReplaceCallable(captures_length, has_named_captures);
+ if (argc == static_cast<uint32_t>(-1)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kTooManyArguments));
+ }
+
ScopedVector<Handle<Object>> argv(argc);
int cursor = 0;
- for (int j = 0; j < captures_length; j++) {
+ for (uint32_t j = 0; j < captures_length; j++) {
argv[cursor++] = captures[j];
}
@@ -1946,6 +1930,9 @@ RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
+ // TODO(pwong): To follow the spec more closely and simplify calling code,
+ // this could handle the canonicalization of pattern and flags. See
+ // https://tc39.github.io/ecma262/#sec-regexpinitialize
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index d4bfceb257..61795fc6cb 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -69,7 +69,7 @@ Object* DeclareGlobal(
}
LookupIterator it(global, name, global, lookup_config);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return isolate->heap()->exception();
if (it.IsFound()) {
PropertyAttributes old_attributes = maybe.FromJust();
@@ -82,7 +82,7 @@ Object* DeclareGlobal(
if ((old_attributes & DONT_DELETE) != 0) {
// Only allow reconfiguring globals to functions in user code (no
// natives, which are marked as read-only).
- DCHECK((attr & READ_ONLY) == 0);
+ DCHECK_EQ(attr & READ_ONLY, 0);
// Check whether we can reconfigure the existing property into a
// function.
@@ -470,7 +470,7 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
}
}
- DCHECK(context_index >= 0);
+ DCHECK_GE(context_index, 0);
arguments->set_the_hole(index);
parameter_map->set(
index + 2,
@@ -595,11 +595,14 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
iterator.Advance();
JavaScriptFrame* function_frame = JavaScriptFrame::cast(iterator.frame());
DCHECK(function_frame->is_java_script());
- int argc = function_frame->GetArgumentsLength();
+ int argc = function_frame->ComputeParametersCount();
Address fp = function_frame->fp();
if (function_frame->has_adapted_arguments()) {
iterator.Advance();
- fp = iterator.frame()->fp();
+ ArgumentsAdaptorFrame* adaptor_frame =
+ ArgumentsAdaptorFrame::cast(iterator.frame());
+ argc = adaptor_frame->ComputeParametersCount();
+ fp = adaptor_frame->fp();
}
Object** parameters = reinterpret_cast<Object**>(
@@ -684,7 +687,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
LookupIterator it(global_object, name, global_object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return isolate->heap()->exception();
if ((maybe.FromJust() & DONT_DELETE) != 0) {
// ES#sec-globaldeclarationinstantiation 5.a:
// If envRec.HasVarDeclaration(name) is true, throw a SyntaxError
@@ -841,7 +844,7 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
namespace {
MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
- Object::ShouldThrow should_throw,
+ ShouldThrow should_throw,
Handle<Object>* receiver_return = nullptr) {
Isolate* const isolate = name->GetIsolate();
@@ -892,7 +895,7 @@ MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
return value;
}
- if (should_throw == Object::THROW_ON_ERROR) {
+ if (should_throw == kThrowOnError) {
// The property doesn't exist - throw exception.
THROW_NEW_ERROR(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
@@ -910,8 +913,7 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- RETURN_RESULT_OR_FAILURE(isolate,
- LoadLookupSlot(name, Object::THROW_ON_ERROR));
+ RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, kThrowOnError));
}
@@ -919,7 +921,7 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, Object::DONT_THROW));
+ RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, kDontThrow));
}
@@ -931,7 +933,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
Handle<Object> value;
Handle<Object> receiver;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR, &receiver),
+ isolate, value, LoadLookupSlot(name, kThrowOnError, &receiver),
MakePair(isolate->heap()->exception(), nullptr));
return MakePair(*value, *receiver);
}
@@ -1012,7 +1014,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, SLOPPY));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ StoreLookupSlot(name, value, LanguageMode::kSloppy));
}
// Store into a dynamic context for sloppy-mode block-scoped function hoisting
@@ -1025,8 +1028,9 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
const ContextLookupFlags lookup_flags = static_cast<ContextLookupFlags>(
FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT);
- RETURN_RESULT_OR_FAILURE(isolate,
- StoreLookupSlot(name, value, SLOPPY, lookup_flags));
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ StoreLookupSlot(name, value, LanguageMode::kSloppy, lookup_flags));
}
RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
@@ -1034,7 +1038,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, STRICT));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ StoreLookupSlot(name, value, LanguageMode::kStrict));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 1382362cce..1e2d1f5a56 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -237,8 +237,9 @@ RUNTIME_FUNCTION(Runtime_SubString) {
} else {
return isolate->ThrowIllegalOperation();
}
- // The following condition is intentionally robust because the SubStringStub
- // delegates here and we test this in cctest/test-strings/RobustSubStringStub.
+ // The following condition is intentionally robust because the SubString
+ // builtin delegates here and we test this in
+ // cctest/test-strings/RobustSubStringStub.
if (end < start || start < 0 || end > string->length()) {
return isolate->ThrowIllegalOperation();
}
@@ -284,25 +285,6 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
return Smi::FromInt(subject->Get(i));
}
-RUNTIME_FUNCTION(Runtime_StringCompare) {
- HandleScope handle_scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- isolate->counters()->string_compare_runtime()->Increment();
- switch (String::Compare(x, y)) {
- case ComparisonResult::kLessThan:
- return Smi::FromInt(LESS);
- case ComparisonResult::kEqual:
- return Smi::FromInt(EQUAL);
- case ComparisonResult::kGreaterThan:
- return Smi::FromInt(GREATER);
- case ComparisonResult::kUndefined:
- break;
- }
- UNREACHABLE();
-}
-
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -315,11 +297,11 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
size_t actual_array_length = 0;
CHECK(TryNumberToSize(array->length(), &actual_array_length));
- CHECK(array_length >= 0);
+ CHECK_GE(array_length, 0);
CHECK(static_cast<size_t>(array_length) <= actual_array_length);
// This assumption is used by the slice encoding in one or two smis.
- DCHECK(Smi::kMaxValue >= String::kMaxLength);
+ DCHECK_GE(Smi::kMaxValue, String::kMaxLength);
CHECK(array->HasFastElements());
JSObject::EnsureCanContainHeapObjectElements(array);
@@ -385,7 +367,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
}
CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
CHECK(array->HasObjectElements());
- CHECK(array_length >= 0);
+ CHECK_GE(array_length, 0);
Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
if (fixed_array->length() < array_length) {
@@ -401,7 +383,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
}
int separator_length = separator->length();
- CHECK(separator_length > 0);
+ CHECK_GT(separator_length, 0);
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
@@ -508,7 +490,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
int last_array_index = static_cast<int>(array_length - 1);
// Array length must be representable as a signed 32-bit number,
// otherwise the total string length would have been too large.
- DCHECK(array_length <= 0x7fffffff); // Is int32_t.
+ DCHECK_LE(array_length, 0x7fffffff); // Is int32_t.
int repeat = last_array_index - previous_separator_position;
WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat, separator_length);
cursor += repeat * separator_length;
@@ -526,7 +508,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
CHECK(elements_array->HasSmiOrObjectElements());
// array_length is length of original array (used to add separators);
// separator is string to put between elements. Assumed to be non-empty.
- CHECK(array_length > 0);
+ CHECK_GT(array_length, 0);
// Find total length of join result.
int string_length = 0;
@@ -534,7 +516,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
bool overflow = false;
CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
CHECK(elements_length <= elements_array->elements()->length());
- CHECK((elements_length & 1) == 0); // Even length.
+ CHECK_EQ(elements_length & 1, 0); // Even length.
FixedArray* elements = FixedArray::cast(elements_array->elements());
{
DisallowHeapAllocation no_gc;
@@ -616,7 +598,7 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
elements->set(i, value, mode);
}
if (i < length) {
- DCHECK(Smi::kZero == 0);
+ static_assert(Smi::kZero == 0, "Can use memset since Smi::kZero is 0");
memset(elements->data_start() + i, 0, kPointerSize * (length - i));
}
#ifdef DEBUG
@@ -669,7 +651,7 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
#ifdef DEBUG
for (int i = 0; i < length; ++i) {
- DCHECK(String::cast(elements->get(i))->length() == 1);
+ DCHECK_EQ(String::cast(elements->get(i))->length(), 1);
}
#endif
@@ -681,16 +663,10 @@ RUNTIME_FUNCTION(Runtime_StringLessThan) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- switch (String::Compare(x, y)) {
- case ComparisonResult::kLessThan:
- return isolate->heap()->true_value();
- case ComparisonResult::kEqual:
- case ComparisonResult::kGreaterThan:
- return isolate->heap()->false_value();
- case ComparisonResult::kUndefined:
- break;
- }
- UNREACHABLE();
+ ComparisonResult result = String::Compare(x, y);
+ DCHECK_NE(result, ComparisonResult::kUndefined);
+ return isolate->heap()->ToBoolean(
+ ComparisonResultToBool(Operation::kLessThan, result));
}
RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
@@ -698,16 +674,10 @@ RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- switch (String::Compare(x, y)) {
- case ComparisonResult::kEqual:
- case ComparisonResult::kLessThan:
- return isolate->heap()->true_value();
- case ComparisonResult::kGreaterThan:
- return isolate->heap()->false_value();
- case ComparisonResult::kUndefined:
- break;
- }
- UNREACHABLE();
+ ComparisonResult result = String::Compare(x, y);
+ DCHECK_NE(result, ComparisonResult::kUndefined);
+ return isolate->heap()->ToBoolean(
+ ComparisonResultToBool(Operation::kLessThanOrEqual, result));
}
RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
@@ -715,16 +685,10 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- switch (String::Compare(x, y)) {
- case ComparisonResult::kGreaterThan:
- return isolate->heap()->true_value();
- case ComparisonResult::kEqual:
- case ComparisonResult::kLessThan:
- return isolate->heap()->false_value();
- case ComparisonResult::kUndefined:
- break;
- }
- UNREACHABLE();
+ ComparisonResult result = String::Compare(x, y);
+ DCHECK_NE(result, ComparisonResult::kUndefined);
+ return isolate->heap()->ToBoolean(
+ ComparisonResultToBool(Operation::kGreaterThan, result));
}
RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
@@ -732,16 +696,10 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- switch (String::Compare(x, y)) {
- case ComparisonResult::kEqual:
- case ComparisonResult::kGreaterThan:
- return isolate->heap()->true_value();
- case ComparisonResult::kLessThan:
- return isolate->heap()->false_value();
- case ComparisonResult::kUndefined:
- break;
- }
- UNREACHABLE();
+ ComparisonResult result = String::Compare(x, y);
+ DCHECK_NE(result, ComparisonResult::kUndefined);
+ return isolate->heap()->ToBoolean(
+ ComparisonResultToBool(Operation::kGreaterThanOrEqual, result));
}
RUNTIME_FUNCTION(Runtime_StringEqual) {
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 19a4af50d1..b3cdf3fe67 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -17,9 +17,11 @@
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-serialization.h"
namespace {
struct WasmCompileControls {
@@ -127,12 +129,6 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
- // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
- if (function->code()->is_turbofanned() &&
- !function->shared()->HasBytecodeArray()) {
- return isolate->heap()->undefined_value();
- }
-
Deoptimizer::DeoptimizeFunction(*function);
return isolate->heap()->undefined_value();
@@ -153,12 +149,6 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
- // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
- if (function->code()->is_turbofanned() &&
- !function->shared()->HasBytecodeArray()) {
- return isolate->heap()->undefined_value();
- }
-
Deoptimizer::DeoptimizeFunction(*function);
return isolate->heap()->undefined_value();
@@ -482,7 +472,7 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
// calls an intermediate function, and the intermediate function
// calls exactly one imported function
HandleScope scope(isolate);
- CHECK(args.length() == 2);
+ CHECK_EQ(args.length(), 2);
// It takes two parameters, the first one is the JSFunction,
// The second one is the type
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -492,48 +482,100 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
Handle<Code> export_code = handle(function->code());
CHECK(export_code->kind() == Code::JS_TO_WASM_FUNCTION);
- int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ int const mask =
+ RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
+ : RelocInfo::CODE_TARGET);
// check the type of the $export_fct
- Handle<Code> export_fct;
+ wasm::WasmCode* export_fct = nullptr;
+ Handle<Code> export_fct_handle;
+ wasm::WasmCode* intermediate_fct = nullptr;
+ Handle<Code> intermediate_fct_handle;
+
int count = 0;
for (RelocIterator it(*export_code, mask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++count;
- export_fct = handle(target);
+ Address target_address = FLAG_wasm_jit_to_native
+ ? rinfo->js_to_wasm_address()
+ : rinfo->target_address();
+ if (FLAG_wasm_jit_to_native) {
+ wasm::WasmCode* target =
+ isolate->wasm_code_manager()->LookupCode(target_address);
+ if (target->kind() == wasm::WasmCode::Function) {
+ ++count;
+ export_fct = target;
+ }
+ } else {
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION) {
+ ++count;
+ export_fct_handle = handle(target);
+ }
}
}
- CHECK(count == 1);
+ CHECK_EQ(count, 1);
// check the type of the intermediate_fct
- Handle<Code> intermediate_fct;
count = 0;
- for (RelocIterator it(*export_fct, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++count;
- intermediate_fct = handle(target);
+ if (FLAG_wasm_jit_to_native) {
+ for (RelocIterator it(export_fct->instructions(), export_fct->reloc_info(),
+ export_fct->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL));
+ !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ wasm::WasmCode* target =
+ isolate->wasm_code_manager()->LookupCode(target_address);
+ if (target->kind() == wasm::WasmCode::Function) {
+ ++count;
+ intermediate_fct = target;
+ }
+ }
+ } else {
+ count = 0;
+ for (RelocIterator it(*export_fct_handle, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION) {
+ ++count;
+ intermediate_fct_handle = handle(target);
+ }
}
}
- CHECK(count == 1);
+ CHECK_EQ(count, 1);
// Check the type of the imported exported function, it should be also a wasm
// function in our case.
- Handle<Code> imported_fct;
CHECK(type->value() == 0 || type->value() == 1);
- Code::Kind target_kind =
- type->value() == 0 ? Code::WASM_FUNCTION : Code::WASM_TO_JS_FUNCTION;
count = 0;
- for (RelocIterator it(*intermediate_fct, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == target_kind) {
- ++count;
- imported_fct = handle(target);
+ if (FLAG_wasm_jit_to_native) {
+ wasm::WasmCode::Kind target_kind = type->value() == 0
+ ? wasm::WasmCode::WasmToWasmWrapper
+ : wasm::WasmCode::WasmToJsWrapper;
+ for (RelocIterator it(intermediate_fct->instructions(),
+ intermediate_fct->reloc_info(),
+ intermediate_fct->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL));
+ !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ wasm::WasmCode* target =
+ isolate->wasm_code_manager()->LookupCode(target_address);
+ if (target->kind() == target_kind) {
+ ++count;
+ }
+ }
+ } else {
+ Code::Kind target_kind = type->value() == 0 ? Code::WASM_TO_WASM_FUNCTION
+ : Code::WASM_TO_JS_FUNCTION;
+ count = 0;
+ for (RelocIterator it(*intermediate_fct_handle, mask); !it.done();
+ it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == target_kind) {
+ ++count;
+ }
}
}
CHECK_LE(count, 1);
@@ -543,7 +585,7 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
HandleScope scope(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- CHECK(args.length() == 2);
+ CHECK_EQ(args.length(), 2);
CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
@@ -556,7 +598,7 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
HandleScope scope(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- CHECK(args.length() == 0);
+ CHECK_EQ(args.length(), 0);
v8_isolate->SetWasmInstanceCallback(WasmInstanceOverride);
return isolate->heap()->undefined_value();
}
@@ -650,12 +692,25 @@ RUNTIME_FUNCTION(Runtime_DebugTrace) {
RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_LE(1, args.length());
+ DCHECK_GE(2, args.length());
if (!FLAG_track_retaining_path) {
PrintF("DebugTrackRetainingPath requires --track-retaining-path flag.\n");
} else {
CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
- isolate->heap()->AddRetainingPathTarget(object);
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (args.length() == 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, str, 1);
+ const char track_ephemeral_path[] = "track-ephemeral-path";
+ if (str->IsOneByteEqualTo(STATIC_CHAR_VECTOR(track_ephemeral_path))) {
+ option = RetainingPathOption::kTrackEphemeralPath;
+ } else if (str->length() != 0) {
+ PrintF("Unexpected second argument of DebugTrackRetainingPath.\n");
+ PrintF("Expected an empty string or '%s', got '%s'.\n",
+ track_ephemeral_path, str->ToCString().get());
+ }
+ }
+ isolate->heap()->AddRetainingPathTarget(object, option);
}
return isolate->heap()->undefined_value();
}
@@ -802,11 +857,11 @@ RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
key = factory->NewStringFromAsciiChecked("start_pos");
value = handle(Smi::FromInt(message_obj->start_position()), isolate);
- JSObject::SetProperty(message, key, value, STRICT).Assert();
+ JSObject::SetProperty(message, key, value, LanguageMode::kStrict).Assert();
key = factory->NewStringFromAsciiChecked("end_pos");
value = handle(Smi::FromInt(message_obj->end_position()), isolate);
- JSObject::SetProperty(message, key, value, STRICT).Assert();
+ JSObject::SetProperty(message, key, value, LanguageMode::kStrict).Assert();
return *message;
}
@@ -855,12 +910,18 @@ RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
DCHECK_EQ(1, args.length());
CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- if (flag) {
- v8_isolate->SetAllowCodeGenerationFromStringsCallback(
- DisallowCodegenFromStringsCallback);
- } else {
- v8_isolate->SetAllowCodeGenerationFromStringsCallback(nullptr);
- }
+ v8_isolate->SetAllowCodeGenerationFromStringsCallback(
+ flag ? DisallowCodegenFromStringsCallback : nullptr);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8_isolate->SetAllowWasmCodeGenerationCallback(
+ flag ? DisallowCodegenFromStringsCallback : nullptr);
return isolate->heap()->undefined_value();
}
@@ -932,13 +993,24 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
Handle<WasmCompiledModule> orig(module_obj->compiled_module());
- std::unique_ptr<ScriptData> data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
- void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
- Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(ret, isolate, false, buff, data->length());
- memcpy(buff, data->data(), data->length());
- return *ret;
+ if (FLAG_wasm_jit_to_native) {
+ std::pair<std::unique_ptr<byte[]>, size_t> serialized_module =
+ wasm::NativeModuleSerializer::SerializeWholeModule(isolate, orig);
+ int data_size = static_cast<int>(serialized_module.second);
+ void* buff = isolate->array_buffer_allocator()->Allocate(data_size);
+ Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(ret, isolate, false, buff, data_size);
+ memcpy(buff, serialized_module.first.get(), data_size);
+ return *ret;
+ } else {
+ std::unique_ptr<ScriptData> data =
+ WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
+ void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
+ Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(ret, isolate, false, buff, data->length());
+ memcpy(buff, data->data(), data->length());
+ return *ret;
+ }
}
// Take an array buffer and attempt to reconstruct a compiled wasm module.
@@ -950,22 +1022,31 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, wire_bytes, 1);
Address mem_start = static_cast<Address>(buffer->backing_store());
- int mem_size = static_cast<int>(buffer->byte_length()->Number());
+ size_t mem_size = static_cast<size_t>(buffer->byte_length()->Number());
// DeserializeWasmModule will allocate. We assume JSArrayBuffer doesn't
// get relocated.
- ScriptData sc(mem_start, mem_size);
bool already_external = wire_bytes->is_external();
if (!already_external) {
wire_bytes->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*wire_bytes);
}
- MaybeHandle<FixedArray> maybe_compiled_module =
- WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate, &sc,
- Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<int>(wire_bytes->byte_length()->Number())));
+ MaybeHandle<FixedArray> maybe_compiled_module;
+ if (FLAG_wasm_jit_to_native) {
+ maybe_compiled_module =
+ wasm::NativeModuleDeserializer::DeserializeFullBuffer(
+ isolate, {mem_start, mem_size},
+ Vector<const uint8_t>(
+ reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+ static_cast<int>(wire_bytes->byte_length()->Number())));
+ } else {
+ ScriptData sc(mem_start, static_cast<int>(mem_size));
+ maybe_compiled_module = WasmCompiledModuleSerializer::DeserializeWasmModule(
+ isolate, &sc,
+ Vector<const uint8_t>(
+ reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+ static_cast<int>(wire_bytes->byte_length()->Number())));
+ }
if (!already_external) {
wire_bytes->set_is_external(false);
isolate->heap()->RegisterNewArrayBuffer(*wire_bytes);
@@ -1058,8 +1139,10 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
uint32_t addr = (static_cast<uint32_t>(addr_low) & 0xffff) |
(static_cast<uint32_t>(addr_high) << 16);
- uint8_t* mem_start = reinterpret_cast<uint8_t*>(
- frame->wasm_instance()->memory_buffer()->allocation_base());
+ uint8_t* mem_start = reinterpret_cast<uint8_t*>(frame->wasm_instance()
+ ->memory_object()
+ ->array_buffer()
+ ->allocation_base());
int func_index = frame->function_index();
int pos = frame->position();
// TODO(titzer): eliminate dependency on WasmModule definition here.
@@ -1071,6 +1154,22 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+ WasmCodeWrapper wrapper =
+ WasmExportedFunction::cast(*function)->GetWasmCode();
+ if (!wrapper.IsCodeObject()) {
+ const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
+ return isolate->heap()->ToBoolean(wasm_code->is_liftoff());
+ } else {
+ Handle<Code> wasm_code = wrapper.GetCode();
+ return isolate->heap()->ToBoolean(!wasm_code->is_turbofanned());
+ }
+}
+
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1081,6 +1180,5 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
return isolate->heap()->undefined_value();
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 8dfa8f166c..5820c4b6a4 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -36,8 +36,8 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
if (!array_buffer->is_neuterable()) {
return isolate->heap()->undefined_value();
}
- if (array_buffer->backing_store() == NULL) {
- CHECK(Smi::kZero == array_buffer->byte_length());
+ if (array_buffer->backing_store() == nullptr) {
+ CHECK_EQ(Smi::kZero, array_buffer->byte_length());
return isolate->heap()->undefined_value();
}
// Shared array buffers should never be neutered.
@@ -200,7 +200,7 @@ RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(args.length(), 2);
Handle<JSTypedArray> exemplar = args.at<JSTypedArray>(0);
Handle<Object> length = args.at(1);
int argc = 1;
@@ -230,8 +230,8 @@ Object* TypedArraySetFromOverlapping(Isolate* isolate,
size_t source_byte_length = NumberToSize(source->byte_length());
size_t target_byte_length = NumberToSize(target->byte_length());
- CHECK_LE(offset + source->length(), target->length());
- CHECK_GE(target->length(), source->length());
+ CHECK_LE(offset, target->length_value());
+ CHECK_LE(source->length_value(), target->length_value() - offset);
CHECK(source->length()->IsSmi());
CHECK(!target->WasNeutered());
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 4218510a26..4a80ff5d40 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -94,9 +94,9 @@ namespace internal {
// Cast the given argument to PropertyAttributes and store its value in a
// variable with the given name. If the argument is not a Smi or the
// enum value is out of range, we crash safely.
-#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
- CHECK(args[index]->IsSmi()); \
- CHECK((args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); \
+#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
+ CHECK(args[index]->IsSmi()); \
+ CHECK_EQ(args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE), 0); \
PropertyAttributes name = static_cast<PropertyAttributes>(args.smi_at(index));
// A mechanism to return a pair of Object pointers in registers (if possible).
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 8ed4e7c57d..e8aef3fa97 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -16,6 +16,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8memory.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -29,18 +30,41 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
Address pc =
Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
- Code* code = isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
- DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
- WasmInstanceObject* owning_instance =
- WasmInstanceObject::GetOwningInstance(code);
+ WasmInstanceObject* owning_instance = nullptr;
+ if (FLAG_wasm_jit_to_native) {
+ owning_instance = WasmInstanceObject::GetOwningInstance(
+ isolate->wasm_code_manager()->LookupCode(pc));
+ } else {
+ owning_instance = WasmInstanceObject::GetOwningInstanceGC(
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code);
+ }
CHECK_NOT_NULL(owning_instance);
return owning_instance;
}
+
Context* GetWasmContextOnStackTop(Isolate* isolate) {
return GetWasmInstanceOnStackTop(isolate)
->compiled_module()
->ptr_to_native_context();
}
+
+class ClearThreadInWasmScope {
+ public:
+ explicit ClearThreadInWasmScope(bool coming_from_wasm)
+ : coming_from_wasm_(coming_from_wasm) {
+ DCHECK_EQ(trap_handler::UseTrapHandler() && coming_from_wasm,
+ trap_handler::IsThreadInWasm());
+ if (coming_from_wasm) trap_handler::ClearThreadInWasm();
+ }
+ ~ClearThreadInWasmScope() {
+ DCHECK(!trap_handler::IsThreadInWasm());
+ if (coming_from_wasm_) trap_handler::SetThreadInWasm();
+ }
+
+ private:
+ const bool coming_from_wasm_;
+};
+
} // namespace
RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
@@ -50,6 +74,9 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
isolate);
+ // This runtime function is always being called from wasm code.
+ ClearThreadInWasmScope flag_scope(true);
+
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
isolate->set_context(instance->compiled_module()->ptr_to_native_context());
@@ -58,68 +85,19 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages));
}
-Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
- bool patch_source_position) {
+RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ ClearThreadInWasmScope clear_wasm_flag(isolate->context() == nullptr);
+
HandleScope scope(isolate);
DCHECK_NULL(isolate->context());
isolate->set_context(GetWasmContextOnStackTop(isolate));
Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
static_cast<MessageTemplate::Template>(message_id));
-
- if (!patch_source_position) {
- return isolate->Throw(*error_obj);
- }
-
- // For wasm traps, the byte offset (a.k.a source position) can not be
- // determined from relocation info, since the explicit checks for traps
- // converge in one singe block which calls this runtime function.
- // We hence pass the byte offset explicitely, and patch it into the top-most
- // frame (a wasm frame) on the collected stack trace.
- // TODO(wasm): This implementation is temporary, see bug #5007:
- // https://bugs.chromium.org/p/v8/issues/detail?id=5007
- Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
- Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
- error, isolate->factory()->stack_trace_symbol());
- // Patch the stack trace (array of <receiver, function, code, position>).
- if (stack_trace_obj->IsJSArray()) {
- Handle<FrameArray> stack_elements(
- FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
- DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
- DCHECK_LE(0, stack_elements->Offset(0)->value());
- stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
- }
-
- // Patch the detailed stack trace (array of JSObjects with various
- // properties).
- Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
- error, isolate->factory()->detailed_stack_trace_symbol());
- if (detailed_stack_trace_obj->IsFixedArray()) {
- Handle<FixedArray> stack_elements(
- FixedArray::cast(*detailed_stack_trace_obj));
- DCHECK_GE(stack_elements->length(), 1);
- Handle<StackFrameInfo> top_frame(
- StackFrameInfo::cast(stack_elements->get(0)));
- if (top_frame->column_number()) {
- top_frame->set_column_number(byte_offset + 1);
- }
- }
-
return isolate->Throw(*error_obj);
}
-RUNTIME_FUNCTION(Runtime_ThrowWasmErrorFromTrapIf) {
- DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
- return ThrowRuntimeError(isolate, message_id, 0, false);
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
- CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
- return ThrowRuntimeError(isolate, message_id, byte_offset, true);
-}
-
RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
@@ -149,7 +127,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
CHECK(!JSReceiver::SetProperty(exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeIdStr),
- id, STRICT)
+ id, LanguageMode::kStrict)
.is_null());
CONVERT_SMI_ARG_CHECKED(size, 1);
Handle<JSTypedArray> values =
@@ -157,7 +135,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
CHECK(!JSReceiver::SetProperty(exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeValuesStr),
- values, STRICT)
+ values, LanguageMode::kStrict)
.is_null());
return isolate->heap()->undefined_value();
}
@@ -253,11 +231,11 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
}
RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[1]);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 2);
+ CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 1);
+ Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate));
// The arg buffer is the raw pointer to the caller's stack. It looks like a
// Smi (lowest bit not set, as checked by IsSmi), but is no valid Smi. We just
@@ -266,6 +244,8 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CHECK(arg_buffer_obj->IsSmi());
uint8_t* arg_buffer = reinterpret_cast<uint8_t*>(*arg_buffer_obj);
+ ClearThreadInWasmScope wasm_flag(true);
+
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
isolate->set_context(instance->compiled_module()->ptr_to_native_context());
@@ -297,11 +277,7 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
DCHECK_EQ(0, args.length());
DCHECK(!trap_handler::UseTrapHandler() || trap_handler::IsThreadInWasm());
- struct ClearAndRestoreThreadInWasm {
- ClearAndRestoreThreadInWasm() { trap_handler::ClearThreadInWasm(); }
-
- ~ClearAndRestoreThreadInWasm() { trap_handler::SetThreadInWasm(); }
- } restore_thread_in_wasm;
+ ClearThreadInWasmScope wasm_flag(true);
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
@@ -318,7 +294,15 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
DCHECK_EQ(0, args.length());
HandleScope scope(isolate);
- return *wasm::CompileLazy(isolate);
+ if (FLAG_wasm_jit_to_native) {
+ Address new_func = wasm::CompileLazy(isolate);
+ // The alternative to this is having 2 lazy compile builtins. The builtins
+ // are part of the snapshot, so the flag has no impact on the codegen there.
+ return reinterpret_cast<Object*>(new_func - Code::kHeaderSize +
+ kHeapObjectTag);
+ } else {
+ return *wasm::CompileLazyOnGCHeap(isolate);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index e6fa5a19cf..26880cdafa 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -108,7 +108,7 @@ const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
if (entry) {
return reinterpret_cast<Function*>(entry->value);
}
- return NULL;
+ return nullptr;
}
@@ -118,7 +118,7 @@ const Runtime::Function* Runtime::FunctionForEntry(Address entry) {
return &(kIntrinsicFunctions[i]);
}
}
- return NULL;
+ return nullptr;
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index a11d274d25..da16ee5fc8 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -36,22 +36,23 @@ namespace internal {
// A variable number of arguments is specified by a -1, additional restrictions
// are specified by inline comments
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(TransitionElementsKind, 2, 1) \
- F(RemoveArrayHoles, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(NewArray, -1 /* >= 3 */, 1) \
- F(FunctionBind, -1, 1) \
- F(NormalizeElements, 1, 1) \
- F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
- F(IsArray, 1, 1) \
- F(ArrayIsArray, 1, 1) \
- F(ArraySpeciesConstructor, 1, 1) \
- F(ArrayIncludes_Slow, 3, 1) \
- F(ArrayIndexOf, 3, 1) \
+#define FOR_EACH_INTRINSIC_ARRAY(F) \
+ F(TransitionElementsKind, 2, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(TrySliceSimpleNonFastElements, 3, 1) \
+ F(NewArray, -1 /* >= 3 */, 1) \
+ F(FunctionBind, -1, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(ArrayIsArray, 1, 1) \
+ F(ArraySpeciesConstructor, 1, 1) \
+ F(ArrayIncludes_Slow, 3, 1) \
+ F(ArrayIndexOf, 3, 1) \
F(SpreadIterablePrepare, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
@@ -69,27 +70,31 @@ namespace internal {
F(SetAllowAtomicsWait, 1, 1)
#define FOR_EACH_INTRINSIC_BIGINT(F) \
- F(BigIntEqual, 2, 1) \
+ F(BigIntBinaryOp, 3, 1) \
+ F(BigIntCompareToBigInt, 3, 1) \
+ F(BigIntCompareToNumber, 3, 1) \
+ F(BigIntEqualToBigInt, 2, 1) \
+ F(BigIntEqualToNumber, 2, 1) \
+ F(BigIntEqualToString, 2, 1) \
F(BigIntToBoolean, 1, 1) \
- F(BigIntBinaryOp, 3, 1)
-
-#define FOR_EACH_INTRINSIC_CLASSES(F) \
- F(ThrowUnsupportedSuperError, 0, 1) \
- F(ThrowConstructorNonCallableError, 1, 1) \
- F(ThrowStaticPrototypeError, 0, 1) \
- F(ThrowSuperAlreadyCalledError, 0, 1) \
- F(ThrowSuperNotCalled, 0, 1) \
- F(ThrowNotSuperConstructor, 2, 1) \
- F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 4, 1) \
- F(InstallClassNameAccessor, 1, 1) \
- F(InstallClassNameAccessorWithCheck, 1, 1) \
- F(LoadFromSuper, 3, 1) \
- F(LoadKeyedFromSuper, 3, 1) \
- F(StoreToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1) \
+ F(BigIntToNumber, 1, 1) \
+ F(BigIntUnaryOp, 2, 1)
+
+#define FOR_EACH_INTRINSIC_CLASSES(F) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(ThrowConstructorNonCallableError, 1, 1) \
+ F(ThrowStaticPrototypeError, 0, 1) \
+ F(ThrowSuperAlreadyCalledError, 0, 1) \
+ F(ThrowSuperNotCalled, 0, 1) \
+ F(ThrowNotSuperConstructor, 2, 1) \
+ F(HomeObjectSymbol, 0, 1) \
+ F(DefineClass, -1 /* >= 3 */, 1) \
+ F(LoadFromSuper, 3, 1) \
+ F(LoadKeyedFromSuper, 3, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1) \
+ F(StoreKeyedToSuper_Strict, 4, 1) \
+ F(StoreKeyedToSuper_Sloppy, 4, 1) \
F(GetSuperConstructor, 1, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
@@ -117,7 +122,6 @@ namespace internal {
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
F(EvictOptimizedCodeSlot, 1, 1) \
- F(NotifyStubFailure, 0, 1) \
F(NotifyDeoptimized, 0, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(ResolvePossiblyDirectEval, 6, 1) \
@@ -130,7 +134,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_DEBUG(F) \
F(HandleDebuggerStatement, 0, 1) \
- F(DebugBreakOnBytecode, 1, 1) \
F(SetDebugEventListener, 2, 1) \
F(ScheduleBreak, 0, 1) \
F(DebugGetInternalProperties, 1, 1) \
@@ -186,7 +189,6 @@ namespace internal {
F(DebugPushPromise, 1, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPromiseReject, 2, 1) \
- F(DebugAsyncEventEnqueueRecurring, 2, 1) \
F(DebugAsyncFunctionPromiseCreated, 1, 1) \
F(DebugIsActive, 0, 1) \
F(DebugBreakInOptimizedCode, 0, 1) \
@@ -209,8 +211,17 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F)
#endif
-#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
+#ifdef V8_TRACE_FEEDBACK_UPDATES
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
+ F(InterpreterTraceUpdateFeedback, 3, 1)
+#else
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F)
+#endif
+
+#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
+ F(InterpreterDeserializeLazy, 2, 1) \
F(InterpreterNewClosure, 4, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
@@ -221,13 +232,11 @@ namespace internal {
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetContextData, 1, 1) \
F(FunctionSetLength, 2, 1) \
- F(FunctionSetPrototype, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
F(IsConstructor, 1, 1) \
F(Call, -1 /* >= 2 */, 1) \
- F(ConvertReceiver, 1, 1) \
F(IsFunction, 1, 1) \
F(FunctionToString, 1, 1)
@@ -354,6 +363,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_MODULE(F) \
F(DynamicImportCall, 2, 1) \
+ F(GetImportMetaObject, 0, 1) \
F(GetModuleNamespace, 1, 1) \
F(LoadModuleVariable, 1, 1) \
F(StoreModuleVariable, 2, 1)
@@ -394,7 +404,7 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
- F(FinalizeInstanceSize, 1, 1) \
+ F(CompleteInobjectSlackTrackingForMap, 1, 1) \
F(LoadMutableDouble, 2, 1) \
F(TryMigrateInstance, 1, 1) \
F(IsJSGlobalProxy, 1, 1) \
@@ -416,13 +426,13 @@ namespace internal {
F(ToPrimitive, 1, 1) \
F(ToPrimitive_Number, 1, 1) \
F(ToNumber, 1, 1) \
+ F(ToNumeric, 1, 1) \
F(ToInteger, 1, 1) \
F(ToLength, 1, 1) \
F(ToString, 1, 1) \
F(ToName, 1, 1) \
F(SameValue, 2, 1) \
F(SameValueZero, 2, 1) \
- F(Compare, 3, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(CreateDataProperty, 3, 1) \
@@ -478,7 +488,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_REGEXP(F) \
F(IsRegExp, 1, 1) \
- F(RegExpCreate, 1, 1) \
F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpExecReThrow, 0, 1) \
@@ -486,7 +495,6 @@ namespace internal {
F(RegExpInternalReplace, 3, 1) \
F(RegExpReplace, 3, 1) \
F(RegExpSplit, 3, 1) \
- F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
F(StringSplit, 3, 1)
@@ -528,7 +536,6 @@ namespace internal {
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
F(StringCharCodeAt, 2, 1) \
- F(StringCompare, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
F(SparseJoinWithSeparator, 3, 1) \
@@ -572,7 +579,7 @@ namespace internal {
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
- F(DebugTrackRetainingPath, 1, 1) \
+ F(DebugTrackRetainingPath, -1, 1) \
F(PrintWithNameForAssert, 2, 1) \
F(GetExceptionDetails, 1, 1) \
F(GlobalPrint, 1, 1) \
@@ -613,6 +620,7 @@ namespace internal {
F(IsWasmTrapHandlerEnabled, 0, 1) \
F(GetWasmRecoveredTrapCount, 0, 1) \
F(DisallowCodegenFromStrings, 1, 1) \
+ F(DisallowWasmCodegen, 1, 1) \
F(ValidateWasmInstancesChain, 2, 1) \
F(ValidateWasmModuleState, 1, 1) \
F(ValidateWasmOrphanedInstance, 1, 1) \
@@ -622,7 +630,8 @@ namespace internal {
F(WasmNumInterpretedCalls, 1, 1) \
F(RedirectToWasmInterpreter, 2, 1) \
F(WasmTraceMemory, 4, 1) \
- F(CompleteInobjectSlackTracking, 1, 1)
+ F(CompleteInobjectSlackTracking, 1, 1) \
+ F(IsLiftoffFunction, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -643,8 +652,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_WASM(F) \
F(WasmGrowMemory, 1, 1) \
- F(ThrowWasmError, 2, 1) \
- F(ThrowWasmErrorFromTrapIf, 1, 1) \
+ F(ThrowWasmError, 1, 1) \
F(ThrowWasmStackOverflow, 0, 1) \
F(WasmThrowTypeError, 0, 1) \
F(WasmThrowCreate, 2, 1) \
@@ -652,12 +660,13 @@ namespace internal {
F(WasmGetExceptionRuntimeId, 0, 1) \
F(WasmExceptionSetElement, 2, 1) \
F(WasmExceptionGetElement, 1, 1) \
- F(WasmRunInterpreter, 3, 1) \
+ F(WasmRunInterpreter, 2, 1) \
F(WasmStackGuard, 0, 1) \
F(WasmCompileLazy, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- F(LoadLookupSlotForCall, 1, 2)
+ F(LoadLookupSlotForCall, 1, 2) \
+ F(DebugBreakOnBytecode, 1, 2)
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
@@ -672,6 +681,7 @@ namespace internal {
F(LoadIC_Miss, 4, 1) \
F(LoadPropertyWithInterceptor, 5, 1) \
F(StoreCallbackProperty, 6, 1) \
+ F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
F(Unreachable, 0, 1)
diff --git a/deps/v8/src/s390/OWNERS b/deps/v8/src/s390/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/s390/OWNERS
+++ b/deps/v8/src/s390/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index bd364b33d5..d8d7ce4256 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -92,12 +92,12 @@ Address RelocInfo::target_internal_reference_address() {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
@@ -119,14 +119,14 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -174,7 +174,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -205,14 +205,14 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, NULL,
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@@ -300,7 +300,7 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = NULL;
+ Code* code = nullptr;
set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index d33fc7144a..70701beb72 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -227,7 +227,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
void CpuFeatures::PrintTarget() {
- const char* s390_arch = NULL;
+ const char* s390_arch = nullptr;
#if V8_TARGET_ARCH_S390X
s390_arch = "s390x";
@@ -290,6 +290,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-s390-inl.h for inlined constructors
@@ -324,7 +335,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
- set_target_address_at(nullptr, pc, static_cast<Address>(NULL),
+ set_target_address_at(nullptr, pc, static_cast<Address>(nullptr),
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
break;
@@ -526,7 +537,7 @@ void Assembler::next(Label* L) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK(link >= 0);
+ DCHECK_GE(link, 0);
L->link_to(link);
}
}
@@ -1697,7 +1708,7 @@ void Assembler::sllg(Register r1, Register r3, const Operand& opnd) {
// Shift Left Double Logical (64)
void Assembler::sldl(Register r1, Register b2, const Operand& opnd) {
- DCHECK(r1.code() % 2 == 0);
+ DCHECK_EQ(r1.code() % 2, 0);
rs_form(SLDL, r1, r0, b2, opnd.immediate());
}
@@ -1709,13 +1720,13 @@ void Assembler::srl(Register r1, Register opnd) {
// Shift Right Double Arith (64)
void Assembler::srda(Register r1, Register b2, const Operand& opnd) {
- DCHECK(r1.code() % 2 == 0);
+ DCHECK_EQ(r1.code() % 2, 0);
rs_form(SRDA, r1, r0, b2, opnd.immediate());
}
// Shift Right Double Logical (64)
void Assembler::srdl(Register r1, Register b2, const Operand& opnd) {
- DCHECK(r1.code() % 2 == 0);
+ DCHECK_EQ(r1.code() % 2, 0);
rs_form(SRDL, r1, r0, b2, opnd.immediate());
}
@@ -1813,13 +1824,13 @@ void Assembler::srag(Register r1, Register r3, const Operand& opnd) {
// Shift Right Double
void Assembler::srda(Register r1, const Operand& opnd) {
- DCHECK(r1.code() % 2 == 0);
+ DCHECK_EQ(r1.code() % 2, 0);
rs_form(SRDA, r1, r0, r0, opnd.immediate());
}
// Shift Right Double Logical
void Assembler::srdl(Register r1, const Operand& opnd) {
- DCHECK(r1.code() % 2 == 0);
+ DCHECK_EQ(r1.code() % 2, 0);
rs_form(SRDL, r1, r0, r0, opnd.immediate());
}
@@ -2202,7 +2213,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = NULL;
+ Code* code = nullptr;
RelocInfo rinfo(pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index f022b41072..e9863197a7 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -358,7 +358,7 @@ struct Mask {
uint8_t mask;
uint8_t value() { return mask; }
static Mask from_value(uint8_t input) {
- DCHECK(input <= 0x0F);
+ DCHECK_LE(input, 0x0F);
Mask m = {input};
return m;
}
@@ -496,14 +496,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -623,7 +624,7 @@ class Assembler : public AssemblerBase {
template <class T, int size, int lo, int hi>
inline T getfield(T value) {
DCHECK(lo < hi);
- DCHECK(size > 0);
+ DCHECK_GT(size, 0);
int mask = hi - lo;
int shift = size * 8 - hi;
uint32_t mask_value = (mask == 32) ? 0xffffffff : (1 << mask) - 1;
@@ -1592,7 +1593,6 @@ class Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
- friend class CodePatcher;
std::vector<Handle<Code>> code_targets_;
friend class EnsureSpace;
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 753dd2b77a..d33d09c657 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -9,10 +9,8 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -38,44 +36,37 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
- Register input_reg = source();
Register result_reg = destination();
- DCHECK(is_truncating());
-
- int double_offset = offset();
// Immediate values for this stub fit in instructions, so it's safe to use ip.
- Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
- Register scratch_low =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
Register scratch_high =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
DoubleRegister double_scratch = kScratchDoubleReg;
__ push(scratch);
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += kPointerSize;
+ // Account for saved regs.
+ int argument_offset = 1 * kPointerSize;
- if (!skip_fastpath()) {
- // Load double input.
- __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset));
+ // Load double input.
+ __ LoadDouble(double_scratch, MemOperand(sp, argument_offset));
- // Do fast-path convert from double to int.
- __ ConvertDoubleToInt64(result_reg, double_scratch);
+ // Do fast-path convert from double to int.
+ __ ConvertDoubleToInt64(result_reg, double_scratch);
- // Test for overflow
- __ TestIfInt32(result_reg);
- __ beq(&fastpath_done, Label::kNear);
- }
+ // Test for overflow
+ __ TestIfInt32(result_reg);
+ __ beq(&fastpath_done, Label::kNear);
__ Push(scratch_high, scratch_low);
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += 2 * kPointerSize;
+ // Account for saved regs.
+ argument_offset += 2 * kPointerSize;
__ LoadlW(scratch_high,
- MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ MemOperand(sp, argument_offset + Register::kExponentOffset));
__ LoadlW(scratch_low,
- MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ MemOperand(sp, argument_offset + Register::kMantissaOffset));
__ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
// Load scratch with exponent - 1. This is faster than loading
@@ -151,40 +142,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ MultiPush(kJSCallerSaved | r14.bit());
- if (save_doubles()) {
- __ MultiPushDoubles(kCallerSavedDoubles);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = r3;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- __ MultiPopDoubles(kCallerSavedDoubles);
- }
- __ MultiPop(kJSCallerSaved | r14.bit());
- __ Ret();
-}
-
-void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ PushSafepointRegisters();
- __ b(r14);
-}
-
-void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ PopSafepointRegisters();
- __ b(r14);
-}
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == r4);
@@ -292,31 +249,17 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-bool CEntryStub::NeedsImmovableCode() { return true; }
+Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreRegistersStateStub::GenerateAheadOfTime(isolate);
- RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
-void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
- StoreBufferOverflowStub(isolate, mode).GetCode();
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -448,7 +391,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// r6: still holds argc (callee-saved).
: r6;
- __ LeaveExitFrame(save_doubles(), argc, true);
+ __ LeaveExitFrame(save_doubles(), argc);
__ b(r14);
// Handling of exception.
@@ -456,10 +399,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -495,13 +436,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&skip);
// Compute the handler entry address and jump to it.
- __ mov(r3, Operand(pending_handler_code_address));
+ __ mov(r3, Operand(pending_handler_entrypoint_address));
__ LoadP(r3, MemOperand(r3));
- __ mov(r4, Operand(pending_handler_offset_address));
- __ LoadP(r4, MemOperand(r4));
- __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
- __ AddP(ip, r3, r4);
- __ Jump(ip);
+ __ Jump(r3);
}
void JSEntryStub::Generate(MacroAssembler* masm) {
@@ -684,110 +621,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(r14);
}
-void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
- __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ CmpP(length, scratch2);
- __ beq(&check_zero_length);
- __ bind(&strings_not_equal);
- __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ CmpP(length, Operand::Zero());
- __ bne(&compare_chars);
- __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal);
-
- // Characters are equal.
- __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
- __ Ret();
-}
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Label skip, result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ SubP(scratch3, scratch1, scratch2 /*, LeaveOE, SetRC*/);
- // Removing RC looks okay here.
- Register length_delta = scratch3;
- __ ble(&skip, Label::kNear);
- __ LoadRR(scratch1, scratch2);
- __ bind(&skip);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ CmpP(min_length, Operand::Zero());
- __ beq(&compare_lengths);
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ LoadRR(r2, length_delta);
- __ CmpP(length_delta, Operand::Zero());
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- Label less_equal, equal;
- __ ble(&less_equal);
- __ LoadSmiLiteral(r2, Smi::FromInt(GREATER));
- __ Ret();
- __ bind(&less_equal);
- __ beq(&equal);
- __ LoadSmiLiteral(r2, Smi::FromInt(LESS));
- __ bind(&equal);
- __ Ret();
-}
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ AddP(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ AddP(left, scratch1);
- __ AddP(right, scratch1);
- __ LoadComplementRR(length, length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ LoadlB(scratch1, MemOperand(left, index));
- __ LoadlB(r0, MemOperand(right, index));
- __ CmpP(scratch1, r0);
- __ bne(chars_not_equal);
- __ AddP(index, Operand(1));
- __ CmpP(index, Operand::Zero());
- __ bne(&loop);
-}
-
// This stub is paired with DirectCEntryStub::GenerateCall
void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ CleanseP(r14);
@@ -809,398 +642,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
__ call(GetCode(), RelocInfo::CODE_TARGET); // Call the stub.
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm, Label* miss, Label* done, Register receiver,
- Register properties, Handle<Name> name, Register scratch0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
- __ SubP(index, Operand(1));
- __ LoadSmiLiteral(
- ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
- __ AndP(index, ip);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ ShiftLeftP(ip, index, Operand(1));
- __ AddP(index, ip); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- Register tmp = properties;
- __ SmiToPtrArrayOffset(ip, index);
- __ AddP(tmp, properties, ip);
- __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- DCHECK(tmp != entity_name);
- __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
- __ beq(done);
-
- // Stop if found the property.
- __ CmpP(entity_name, Operand(Handle<Name>(name)));
- __ beq(miss);
-
- Label good;
- __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
- __ beq(&good);
-
- // Check if the entry name is not a unique name.
- __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ LoadlB(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ bind(&good);
-
- // Restore the properties.
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- }
-
- const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
- r4.bit() | r3.bit() | r2.bit());
-
- __ LoadRR(r0, r14);
- __ MultiPush(spill_mask);
-
- __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ mov(r3, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ CmpP(r2, Operand::Zero());
-
- __ MultiPop(spill_mask); // MultiPop does not touch condition flags
- __ LoadRR(r14, r0);
-
- __ beq(done);
- __ bne(miss);
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: NameDictionary to probe
- // r3: key
- // dictionary: NameDictionary to probe.
- // index: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = r2;
- Register dictionary = r2;
- Register key = r3;
- Register index = r4;
- Register mask = r5;
- Register hash = r6;
- Register undefined = r7;
- Register entry_key = r8;
- Register scratch = r8;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ SmiUntag(mask);
- __ SubP(mask, Operand(1));
-
- __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ AddP(index, hash,
- Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- } else {
- __ LoadRR(index, hash);
- }
- __ ShiftRight(r0, index, Operand(String::kHashShift));
- __ AndP(index, r0, mask);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ ShiftLeftP(scratch, index, Operand(1));
- __ AddP(index, scratch); // index *= 3.
-
- __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2));
- __ AddP(index, dictionary, scratch);
- __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ CmpP(entry_key, undefined);
- __ beq(&not_in_dictionary);
-
- // Stop if found the property.
- __ CmpP(entry_key, key);
- __ beq(&in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ LoadlB(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ LoadImmP(result, Operand::Zero());
- __ Ret();
- }
-
- __ bind(&in_dictionary);
- __ LoadImmP(result, Operand(1));
- __ Ret();
-
- __ bind(&not_in_dictionary);
- __ LoadImmP(result, Operand::Zero());
- __ Ret();
-}
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- // Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- int32_t first_instr_length =
- Instruction::InstructionLength(stub->instruction_start());
- int32_t second_instr_length = Instruction::InstructionLength(
- stub->instruction_start() + first_instr_length);
-
- uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
- uint64_t second_instr =
- Assembler::instr_at(stub->instruction_start() + first_instr_length);
-
- DCHECK(first_instr_length == 4 || first_instr_length == 6);
- DCHECK(second_instr_length == 4 || second_instr_length == 6);
-
- bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
- bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
-
- // STORE_BUFFER_ONLY has NOP on both branches
- if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
- // INCREMENTAL_COMPACTION has NOP on second branch.
- else if (isFirstInstrNOP && !isSecondInstrNOP)
- return INCREMENTAL_COMPACTION;
- // INCREMENTAL has NOP on first branch.
- else if (!isFirstInstrNOP && isSecondInstrNOP)
- return INCREMENTAL;
-
- DCHECK(false);
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
-
- // Get instruction lengths of two branches
- int32_t first_instr_length = masm.instr_length_at(0);
- int32_t second_instr_length = masm.instr_length_at(first_instr_length);
-
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
-
- PatchBranchCondMask(&masm, 0, CC_NOP);
- PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchBranchCondMask(&masm, 0, CC_ALWAYS);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- first_instr_length + second_instr_length);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch
- // it back and forth between branch condition True and False
- // when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
-
- // Clear the bit, branch on True for NOP action initially
- __ b(CC_NOP, &skip_to_incremental_noncompacting);
- __ b(CC_NOP, &skip_to_incremental_compacting);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- // patching not required on S390 as the initial path is effectively NOP
-}
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(), &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ Ret();
-}
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address = r2 == regs_.address() ? regs_.scratch0() : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != r2);
- __ LoadRR(address, regs_.address());
- __ LoadRR(r2, regs_.object());
- __ LoadRR(r3, address);
- __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask, eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_S390X
40);
@@ -1217,7 +661,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_S390X
40);
@@ -1471,7 +915,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
@@ -1550,7 +994,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ TestIfSmi(r5);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r5, r5, r6, MAP_TYPE);
@@ -1595,8 +1039,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
int stack_space,
MemOperand* stack_space_operand,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
+ MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@@ -1684,17 +1127,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ LoadP(cp, *context_restore_operand);
- }
// LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != NULL) {
+ if (stack_space_operand != nullptr) {
__ l(r6, *stack_space_operand);
} else {
__ mov(r6, Operand(stack_space));
}
- __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL);
+ __ LeaveExitFrame(false, r6, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
@@ -1722,7 +1161,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r2 : callee
// -- r6 : call_data
// -- r4 : holder
// -- r3 : api_function_address
@@ -1732,21 +1170,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 4] : first argument
// -- sp[argc * 4] : receiver
- // -- sp[(argc + 1) * 4] : accessor_holder
// -----------------------------------
- Register callee = r2;
Register call_data = r6;
Register holder = r4;
Register api_function_address = r3;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1756,12 +1189,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // context save
- __ push(context);
-
- // callee
- __ push(callee);
-
// call data
__ push(call_data);
@@ -1777,38 +1204,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ push(holder);
- // Enter a new context
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 4] : new_target
- // -- sp[FCA::kArgsLength * 4] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 4] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ LoadP(accessor_holder,
- MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ LoadP(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ LoadlB(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ AndP(scratch2, Operand(1 << Map::kIsConstructor));
- __ bne(&skip_looking_for_constructor, Label::kNear);
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ LoadP(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee
- __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
__ LoadRR(scratch, sp);
@@ -1843,21 +1238,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
MemOperand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand,
- &context_restore_operand);
+ stack_space_operand, return_value_operand);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
@@ -1949,7 +1336,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL, return_value_operand, NULL);
+ kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
diff --git a/deps/v8/src/s390/code-stubs-s390.h b/deps/v8/src/s390/code-stubs-s390.h
index b40991d6e9..269d25ffb4 100644
--- a/deps/v8/src/s390/code-stubs-s390.h
+++ b/deps/v8/src/s390/code-stubs-s390.h
@@ -8,221 +8,6 @@
namespace v8 {
namespace internal {
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in r0.
- static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat one-byte strings for equality and returns result in r0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
- Register left, Register right,
- Register length,
- Register scratch1,
- Label* chars_not_equal);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-class StoreRegistersStateStub : public PlatformCodeStub {
- public:
- explicit StoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
-};
-
-class RestoreRegistersStateStub : public PlatformCodeStub {
- public:
- explicit RestoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
-};
-
-class RecordWriteStub : public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate, Register object, Register value,
- Register address, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- // Patch an always taken branch into a NOP branch
- static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
- int32_t instrLen = masm->instr_length_at(pos);
- DCHECK(instrLen == 4 || instrLen == 6);
-
- if (instrLen == 4) {
- // BRC - Branch Mask @ Bits 23-20
- FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
- masm->instr_at_put<FourByteInstr>(
- pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
- } else {
- // BRCL - Branch Mask @ Bits 39-36
- SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
- masm->instr_at_put<SixByteInstr>(
- pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
- }
- }
-
- static bool isBranchNop(SixByteInstr instr, int instrLength) {
- if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
- // BRC - Check for 0x0 mask condition.
- (6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
- // BRCL - Check for 0x0 mask condition
- return true;
- }
- return false;
- }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->push(r14);
- masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- // Save all volatile FP registers except d0.
- masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Restore all volatile FP registers except d0.
- masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
- }
- masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
- masm->pop(r14);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits : public BitField<int, 0, 4> {};
- class ValueBits : public BitField<int, 4, 4> {};
- class AddressBits : public BitField<int, 8, 4> {};
- class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
- };
- class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
@@ -234,162 +19,12 @@ class DirectCEntryStub : public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-class NameDictionaryLookupStub : public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
- Label* done, Register receiver,
- Register properties, Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits : public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination { kFPRegisters, kCoreRegisters };
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will be scratched.
- static void LoadSmis(MacroAssembler* masm, Register scratch1,
- Register scratch2);
-
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
- Register scratch1, Register scratch2,
- Label* not_number);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
- Register dst, Register heap_number_map,
- Register scratch1, Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |src| to a double, storing
- // the result to |double_dst|
- static void ConvertIntToDouble(MacroAssembler* masm, Register src,
- DoubleRegister double_dst);
-
- // Converts the unsigned integer (untagged smi) in |src| to
- // a double, storing the result to |double_dst|
- static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
- DoubleRegister double_dst);
-
- // Converts the integer (untagged smi) in |src| to
- // a float, storing the result in |dst|
- static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
- const Register src);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
- DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register heap_number_map,
- Register scratch1, Register scratch2,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
- Register dst, Register heap_number_map,
- Register scratch1, Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
- Label* not_int32);
-
- // Generate non VFP3 code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
- Register src2, Register dst,
- Register scratch, Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Register heap_number_result,
- Register scratch);
-
- private:
- static void LoadNumber(MacroAssembler* masm, Register object,
- DoubleRegister dst, Register heap_number_map,
- Register scratch1, Register scratch2,
- Label* not_number);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index 0430ff17d8..e6c627da3a 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/s390/codegen-s390.h"
-
#if V8_TARGET_ARCH_S390
#include <memory>
@@ -21,12 +19,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@@ -39,120 +37,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-// assume ip can be used as a scratch register below
-void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
- Register index, Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ mov(r0, Operand(kIsIndirectStringMask));
- __ AndP(r0, result);
- __ beq(&check_sequential, Label::kNear /*, cr0*/);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ LoadRR(ip, result);
- __ nilf(ip, Operand(kStringRepresentationMask));
- __ CmpP(ip, Operand(kConsStringTag));
- __ beq(&cons_string);
- __ CmpP(ip, Operand(kThinStringTag));
- __ beq(&thin_string);
-
- // Handle slices.
- __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ SmiUntag(ip, result);
- __ AddP(index, ip);
- __ b(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ b(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kempty_stringRootIndex);
- __ bne(call_runtime);
- // Get the first of the two strings and load its instance type.
- __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ b(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ mov(r0, Operand(kStringRepresentationMask));
- __ AndP(r0, result);
- __ bne(&external_string, Label::kNear);
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ b(&check_encoding, Label::kNear);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ mov(r0, Operand(kIsIndirectStringMask));
- __ AndP(r0, result);
- __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ mov(r0, Operand(kShortExternalStringMask));
- __ AndP(r0, result);
- __ bne(call_runtime /*, cr0*/);
- __ LoadP(string,
- FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ mov(r0, Operand(kStringEncodingMask));
- __ AndP(r0, result);
- __ bne(&one_byte, Label::kNear);
- // Two-byte string.
- __ ShiftLeftP(result, index, Operand(1));
- __ LoadLogicalHalfWordP(result, MemOperand(string, result));
- __ b(&done, Label::kNear);
- __ bind(&one_byte);
- // One-byte string.
- __ LoadlB(result, MemOperand(string, index));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/codegen-s390.h b/deps/v8/src/s390/codegen-s390.h
deleted file mode 100644
index 3001bc13f8..0000000000
--- a/deps/v8/src/s390/codegen-s390.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-//
-// Copyright IBM Corp. 2012, 2015. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_S390_CODEGEN_S390_H_
-#define V8_S390_CODEGEN_S390_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm, Register string, Register index,
- Register result, Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_S390_CODEGEN_S390_H_
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index e4f9c773e1..291d9d5a22 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -99,12 +98,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- // DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ // DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
// __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
// MemOperand(sp), kNumberOfRegisters * kPointerSize);
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using mvc instruction
- DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kPointerSize));
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index 15251ee3af..de4db00cf1 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -140,7 +140,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'r');
+ DCHECK_EQ(format[0], 'r');
if (format[1] == '1') { // 'r1: register resides in bit 8-11
RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
@@ -191,7 +191,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
}
int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'f');
+ DCHECK_EQ(format[0], 'f');
// reuse 1, 5 and 6 because it is coresponding
if (format[1] == '1') { // 'r1: register resides in bit 8-11
@@ -305,7 +305,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
int Decoder::FormatMask(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'm');
+ DCHECK_EQ(format[0], 'm');
int32_t value = 0;
if ((format[1] == '1')) { // prints the mask format in bits 8-12
value = reinterpret_cast<RRInstruction*>(instr)->R1Value();
@@ -326,7 +326,7 @@ int Decoder::FormatMask(Instruction* instr, const char* format) {
}
int Decoder::FormatDisplacement(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'd');
+ DCHECK_EQ(format[0], 'd');
if (format[1] == '1') { // displacement in 20-31
RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
@@ -357,7 +357,7 @@ int Decoder::FormatDisplacement(Instruction* instr, const char* format) {
}
int Decoder::FormatImmediate(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'i');
+ DCHECK_EQ(format[0], 'i');
if (format[1] == '1') { // immediate in 16-31
RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
diff --git a/deps/v8/src/s390/frame-constants-s390.cc b/deps/v8/src/s390/frame-constants-s390.cc
index c087c5d85e..ca4a191dde 100644
--- a/deps/v8/src/s390/frame-constants-s390.cc
+++ b/deps/v8/src/s390/frame-constants-s390.cc
@@ -24,6 +24,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 5e27f59226..a8eb807131 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -56,9 +56,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r6; }
const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
const Register StoreTransitionDescriptor::MapRegister() { return r7; }
-const Register StringCompareDescriptor::LeftRegister() { return r3; }
-const Register StringCompareDescriptor::RightRegister() { return r2; }
-
const Register ApiGetterDescriptor::HolderRegister() { return r2; }
const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
@@ -210,7 +207,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -230,7 +227,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// r3 -- function
// r4 -- allocation site with elements kind
Register registers[] = {r3, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -272,10 +269,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r2, // callee
- r6, // call_data
- r4, // holder
- r3, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ r6, // call_data
+ r4, // holder
+ r3, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -324,8 +321,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // the value to pass to the generator
- r3, // the JSGeneratorObject to resume
- r4 // the resume mode (tagged)
+ r3 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index ab95f503a8..44f1ba5abb 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -11,7 +11,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
@@ -330,12 +330,6 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
-void MacroAssembler::InNewSpace(Register object, Register scratch,
- Condition cond, Label* branch) {
- DCHECK(cond == eq || cond == ne);
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@@ -378,7 +372,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -389,7 +383,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -475,13 +469,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (lr_status == kLRHasNotBeenSaved) {
push(r14);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(r14);
}
@@ -501,39 +489,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch,
- SaveFPRegsMode fp_mode) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- LoadP(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- StoreP(address, MemOperand(scratch));
- AddP(scratch, Operand(kPointerSize));
- // Write back new top of buffer.
- StoreP(scratch, MemOperand(ip));
- // Call stub on end of buffer.
- // Check for end of buffer.
- AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
-
- bne(&done, Label::kNear);
- push(r14);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
- CallStub(&store_buffer_overflow);
- pop(r14);
- bind(&done);
- Ret();
-}
-
void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
CleanseP(r14);
@@ -584,7 +539,7 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
if (num_unsaved > 0) {
lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
}
@@ -980,20 +935,6 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
return frame_ends;
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- CleanseP(r14);
- Push(r14, fp, context, target);
- la(fp, MemOperand(sp, 2 * kPointerSize));
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(r14, fp, context, target);
-}
-
// ExitFrame layout (probably wrongish.. needs updating)
//
// SP -> previousSP
@@ -1026,7 +967,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- DCHECK(stack_space > 0);
+ DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
// all of the pushes that have happened inside of V8
@@ -1066,7 +1007,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// function.
const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
if (frame_alignment > 0) {
- DCHECK(frame_alignment == 8);
+ DCHECK_EQ(frame_alignment, 8);
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
}
@@ -1095,7 +1036,6 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length) {
// Optionally restore all double registers.
if (save_doubles) {
@@ -1112,11 +1052,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
// Restore current context from top and clear it in debug mode.
- if (restore_context) {
- mov(ip, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- LoadP(cp, MemOperand(ip));
- }
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ LoadP(cp, MemOperand(ip));
+
#ifdef DEBUG
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@@ -1453,8 +1392,8 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
- LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ LoadHalfWordP(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
CmpP(type_reg, Operand(type));
}
@@ -1462,35 +1401,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}
-void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
- SmiUntag(ip, smi);
- ConvertIntToDouble(value, ip);
-}
-
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- mov(value, Operand(cell));
- LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done);
- CompareObjectType(result, temp, temp2, MAP_TYPE);
- bne(&done);
- LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- b(&loop);
- bind(&done);
-}
-
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@@ -1539,7 +1449,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(double_input, MemOperand(sp));
- CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
la(sp, MemOperand(sp, kDoubleSize));
pop(r14);
@@ -1655,7 +1565,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -1794,18 +1704,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- beq(&succeed, Label::kNear);
- CmpP(reg, Operand(SYMBOL_TYPE));
- bne(not_unique_name);
-
- bind(&succeed);
-}
-
static const int kRegisterPassedArguments = 5;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -1958,95 +1856,6 @@ void TurboAssembler::CheckPageFlag(
}
}
-void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
- Register scratch1, Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
- Register mask_scratch, Label* has_color,
- int first_bit, int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- // Test the first bit
- AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
- b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
- // Shift left 1
- // May need to load the next cell
- sll(mask_scratch, Operand(1) /*, SetRC*/);
- LoadAndTest32(mask_scratch, mask_scratch);
- beq(&word_boundary, Label::kNear);
- // Test the second bit
- AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
- b(second_bit == 1 ? ne : eq, has_color);
- b(&other_color, Label::kNear);
-
- bind(&word_boundary);
- LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
- AndP(r0, ip, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color);
- bind(&other_color);
-}
-
-void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- LoadRR(bitmap_reg, addr_reg);
- nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
- ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
- ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
- AddP(bitmap_reg, ip);
- LoadRR(ip, mask_reg); // Have to do some funky reg shuffling as
- // 31-bit shift left clobbers on s390.
- LoadImmP(mask_reg, Operand(1));
- ShiftLeftP(mask_reg, mask_reg, ip);
-}
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Register load_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- LoadRR(r0, load_scratch);
- AndP(r0, mask_scratch);
- beq(value_is_white);
-}
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- LoadP(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- const int getterOffset = AccessorPair::kGetterOffset;
- const int setterOffset = AccessorPair::kSetterOffset;
- int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
- LoadP(dst, FieldMemOperand(dst, offset));
-}
-
////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for S390
@@ -2278,7 +2087,7 @@ void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
void TurboAssembler::DivP(Register dividend, Register divider) {
// have to make sure the src and dst are reg pairs
- DCHECK(dividend.code() % 2 == 0);
+ DCHECK_EQ(dividend.code() % 2, 0);
#if V8_TARGET_ARCH_S390X
dsgr(dividend, divider);
#else
@@ -3425,7 +3234,7 @@ void TurboAssembler::CmpLogical32(Register dst, const Operand& opnd) {
// Compare Logical Pointer Sized Register vs Immediate
void TurboAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
+ DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
clgfi(dst, opnd);
#else
CmpLogical32(dst, opnd);
@@ -3492,7 +3301,7 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
- DCHECK((value & 0xffffffff) == 0);
+ DCHECK_EQ(value & 0xffffffff, 0);
// The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
llihf(dst, Operand(value >> 32));
#else
@@ -3593,7 +3402,7 @@ void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
if (dst != src) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
- DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xffffffff, 0);
int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
nihf(dst, Operand(value));
#else
@@ -3606,25 +3415,27 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
- if (scratch != no_reg && !is_int20(offset)) {
- /* cannot use d-form */
- LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
- lg(dst, MemOperand(mem.rb(), scratch));
+ MemOperand src = mem;
+ if (!is_int20(offset)) {
+ DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
+ DCHECK(scratch != mem.rb());
+ LoadIntLiteral(scratch, offset);
+ src = MemOperand(mem.rb(), scratch);
+ }
+ lg(dst, src);
#else
- l(dst, MemOperand(mem.rb(), scratch));
-#endif
+ if (is_uint12(offset)) {
+ l(dst, mem);
+ } else if (is_int20(offset)) {
+ ly(dst, mem);
} else {
-#if V8_TARGET_ARCH_S390X
- lg(dst, mem);
-#else
- if (is_uint12(offset)) {
- l(dst, mem);
- } else {
- ly(dst, mem);
- }
-#endif
+ DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
+ DCHECK(scratch != mem.rb());
+ LoadIntLiteral(scratch, offset);
+ l(dst, MemOperand(mem.rb(), scratch));
}
+#endif
}
// Store a "pointer" sized value to the memory location
@@ -3654,7 +3465,7 @@ void TurboAssembler::StoreP(Register src, const MemOperand& mem,
void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
- DCHECK(opnd.rmode() == kRelocInfo_NONEPTR);
+ DCHECK_EQ(opnd.rmode(), kRelocInfo_NONEPTR);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
@@ -4319,6 +4130,90 @@ void TurboAssembler::Popcnt64(Register dst, Register src) {
}
#endif
+void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ LoadRR(scratch, src);
+ LoadRR(src, dst);
+ LoadRR(dst, scratch);
+}
+
+void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
+ if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
+ if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
+ DCHECK(!AreAliased(src, scratch));
+ LoadRR(scratch, src);
+ LoadP(src, dst);
+ StoreP(scratch, dst);
+}
+
+void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+ Register scratch_1) {
+ if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
+ if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
+ if (dst.rx() != r0) DCHECK(!AreAliased(dst.rx(), scratch_0, scratch_1));
+ if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadP(scratch_0, src);
+ LoadP(scratch_1, dst);
+ StoreP(scratch_0, dst);
+ StoreP(scratch_1, src);
+}
+
+void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ ldr(scratch, src);
+ ldr(src, dst);
+ ldr(dst, scratch);
+}
+
+void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
+ DoubleRegister scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ ldr(scratch, src);
+ LoadFloat32(src, dst);
+ StoreFloat32(scratch, dst);
+}
+
+void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
+ DoubleRegister scratch_0,
+ DoubleRegister scratch_1) {
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadFloat32(scratch_0, src);
+ LoadFloat32(scratch_1, dst);
+ StoreFloat32(scratch_0, dst);
+ StoreFloat32(scratch_1, src);
+}
+
+void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ ldr(scratch, src);
+ ldr(src, dst);
+ ldr(dst, scratch);
+}
+
+void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
+ DoubleRegister scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ ldr(scratch, src);
+ LoadDouble(src, dst);
+ StoreDouble(scratch, dst);
+}
+
+void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
+ DoubleRegister scratch_0,
+ DoubleRegister scratch_1) {
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadDouble(scratch_0, src);
+ LoadDouble(scratch_1, dst);
+ StoreDouble(scratch_0, dst);
+ StoreDouble(scratch_1, src);
+}
+
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
@@ -4343,30 +4238,31 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-#endif
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
- FlushICache flush_cache)
- : address_(address),
- size_(size),
- masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
- flush_cache_(flush_cache) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
+bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
+ DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
+ DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
+ DoubleRegister reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- if (flush_cache_ == FLUSH) {
- Assembler::FlushICache(masm_.isolate(), address_, size_);
- }
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 707b21f1bb..4076c171ad 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -71,6 +71,11 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg, Register reg9 = no_reg,
Register reg10 = no_reg);
+bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
+ DoubleRegister reg3 = no_dreg, DoubleRegister reg4 = no_dreg,
+ DoubleRegister reg5 = no_dreg, DoubleRegister reg6 = no_dreg,
+ DoubleRegister reg7 = no_dreg, DoubleRegister reg8 = no_dreg,
+ DoubleRegister reg9 = no_dreg, DoubleRegister reg10 = no_dreg);
#endif
// These exist to provide portability between 32 and 64bit
@@ -782,6 +787,21 @@ class TurboAssembler : public Assembler {
void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
+ void SwapP(Register src, Register dst, Register scratch);
+ void SwapP(Register src, MemOperand dst, Register scratch);
+ void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+ Register scratch_1);
+ void SwapFloat32(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch);
+ void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
+ void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
+ DoubleRegister scratch_1);
+ void SwapDouble(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch);
+ void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
+ void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
+ DoubleRegister scratch_1);
+
// Cleanse pointer address on 31bit by zero out top bit.
// This is a NOP on 64-bit.
void CleanseP(Register src) {
@@ -1004,17 +1024,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
@@ -1045,11 +1054,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1066,12 +1070,6 @@ class MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
@@ -1097,11 +1095,6 @@ class MacroAssembler : public TurboAssembler {
bne(if_not_equal);
}
- // Load the value of a smi object into a FP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDouble(DoubleRegister value, Register smi);
-
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1172,12 +1165,8 @@ class MacroAssembler : public TurboAssembler {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length = false);
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -1249,15 +1238,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // ---------------------------------------------------------------------------
- // String utilities
-
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template <typename Field>
void DecodeField(Register dst, Register src) {
ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
@@ -1287,30 +1267,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond = al);
void JumpToJSEntry(Register target);
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -1348,17 +1304,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object, Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -1367,29 +1312,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- enum FlushICache { FLUSH, DONT_FLUSH };
-
- CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache = FLUSH);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
- FlushICache flush_cache_; // Whether to flush the I cache after patching.
-};
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 5647e0f980..a130f359f0 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -13,7 +13,8 @@
#include "src/base/once.h"
#include "src/codegen.h"
#include "src/disasm.h"
-#include "src/frame-constants.h"
+#include "src/macro-assembler.h"
+#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
#include "src/s390/constants-s390.h"
#include "src/s390/simulator-s390.h"
@@ -132,7 +133,7 @@ bool S390Debugger::GetFPDoubleValue(const char* desc, double* value) {
bool S390Debugger::SetBreakpoint(Instruction* break_pc) {
// Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
return false;
}
@@ -145,23 +146,23 @@ bool S390Debugger::SetBreakpoint(Instruction* break_pc) {
}
bool S390Debugger::DeleteBreakpoint(Instruction* break_pc) {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
- sim_->break_pc_ = NULL;
+ sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void S390Debugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void S390Debugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@@ -204,11 +205,11 @@ void S390Debugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@@ -389,8 +390,8 @@ void S390Debugger::Debug() {
}
sim_->set_pc(value);
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- intptr_t* cur = NULL;
- intptr_t* end = NULL;
+ intptr_t* cur = nullptr;
+ intptr_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -438,8 +439,8 @@ void S390Debugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* prev = NULL;
- byte* cur = NULL;
+ byte* prev = nullptr;
+ byte* cur = nullptr;
// Default number of instructions to disassemble.
int32_t numInstructions = 10;
@@ -497,7 +498,7 @@ void S390Debugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
+ if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "cr") == 0) {
@@ -640,8 +641,8 @@ void S390Debugger::Debug() {
}
static bool ICacheMatch(void* one, void* two) {
- DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@@ -684,7 +685,7 @@ void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@@ -694,10 +695,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
- DCHECK(size <= CachePage::kPageSize);
+ DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
- DCHECK((start & CachePage::kLineMask) == 0);
- DCHECK((size & CachePage::kLineMask) == 0);
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -736,7 +737,7 @@ void Simulator::Initialize(Isolate* isolate) {
base::CallOnce(&once, &Simulator::EvalTableInit);
}
-Simulator::EvaluateFuncType Simulator::EvalTable[] = {NULL};
+Simulator::EvaluateFuncType Simulator::EvalTable[] = {nullptr};
void Simulator::EvalTableInit() {
for (int i = 0; i < MAX_NUM_OPCODES; i++) {
@@ -1488,7 +1489,7 @@ void Simulator::EvalTableInit() {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
+ if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@@ -1504,14 +1505,14 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
- break_pc_ = NULL;
+ break_pc_ = nullptr;
break_instr_ = 0;
// make sure our register type can hold exactly 4/8 bytes
#ifdef V8_TARGET_ARCH_S390X
- DCHECK(sizeof(intptr_t) == 8);
+ DCHECK_EQ(sizeof(intptr_t), 8);
#else
- DCHECK(sizeof(intptr_t) == 4);
+ DCHECK_EQ(sizeof(intptr_t), 4);
#endif
// Set up architecture state.
// All registers are initialized to zero to start with.
@@ -1532,7 +1533,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[sp] =
reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
- last_debugger_input_ = NULL;
+ last_debugger_input_ = nullptr;
}
Simulator::~Simulator() { free(stack_); }
@@ -1556,7 +1557,7 @@ class Redirection {
swi_instruction_(0xB2FF0000 | kCallRtRedirected),
#endif
type_(type),
- next_(NULL) {
+ next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->FlushICache(
isolate->simulator_i_cache(),
@@ -1583,9 +1584,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) {
- DCHECK_EQ(current->type(), type);
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
return current;
}
}
@@ -1654,10 +1655,10 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@@ -2313,12 +2314,12 @@ bool Simulator::isStopInstruction(Instruction* instr) {
}
bool Simulator::isWatchedStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@@ -2339,7 +2340,7 @@ void Simulator::DisableStop(uint32_t code) {
}
void Simulator::IncreaseStopCounter(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF(
@@ -2355,7 +2356,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@@ -2434,7 +2435,7 @@ int64_t Simulator::ByteReverse(int64_t dword) {
int Simulator::DecodeInstruction(Instruction* instr) {
Opcode op = instr->S390OpcodeValue();
- DCHECK(EvalTable[op] != NULL);
+ DCHECK_NOT_NULL(EvalTable[op]);
return (this->*EvalTable[op])(instr);
}
@@ -2930,8 +2931,8 @@ EVALUATE(VFA) {
USE(m6);
USE(m5);
USE(m4);
- DCHECK(m5 == 8);
- DCHECK(m4 == 3);
+ DCHECK_EQ(m5, 8);
+ DCHECK_EQ(m4, 3);
double r2_val = get_double_from_d_register(r2);
double r3_val = get_double_from_d_register(r3);
double r1_val = r2_val + r3_val;
@@ -2945,8 +2946,8 @@ EVALUATE(VFS) {
USE(m6);
USE(m5);
USE(m4);
- DCHECK(m5 == 8);
- DCHECK(m4 == 3);
+ DCHECK_EQ(m5, 8);
+ DCHECK_EQ(m4, 3);
double r2_val = get_double_from_d_register(r2);
double r3_val = get_double_from_d_register(r3);
double r1_val = r2_val - r3_val;
@@ -2960,8 +2961,8 @@ EVALUATE(VFM) {
USE(m6);
USE(m5);
USE(m4);
- DCHECK(m5 == 8);
- DCHECK(m4 == 3);
+ DCHECK_EQ(m5, 8);
+ DCHECK_EQ(m4, 3);
double r2_val = get_double_from_d_register(r2);
double r3_val = get_double_from_d_register(r3);
double r1_val = r2_val * r3_val;
@@ -2975,8 +2976,8 @@ EVALUATE(VFD) {
USE(m6);
USE(m5);
USE(m4);
- DCHECK(m5 == 8);
- DCHECK(m4 == 3);
+ DCHECK_EQ(m5, 8);
+ DCHECK_EQ(m4, 3);
double r2_val = get_double_from_d_register(r2);
double r3_val = get_double_from_d_register(r3);
double r1_val = r2_val / r3_val;
@@ -3526,7 +3527,7 @@ EVALUATE(MR) {
DECODE_RR_INSTRUCTION(r1, r2);
int32_t r1_val = get_low_register<int32_t>(r1);
int32_t r2_val = get_low_register<int32_t>(r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product = static_cast<int64_t>(r1_val) * static_cast<int64_t>(r2_val);
int32_t high_bits = product >> 32;
@@ -3543,7 +3544,7 @@ EVALUATE(DR) {
int32_t r1_val = get_low_register<int32_t>(r1);
int32_t r2_val = get_low_register<int32_t>(r2);
// reg-reg pair should be even-odd pair, assert r1 is an even register
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
// leftmost 32 bits of the dividend are in r1
// rightmost 32 bits of the dividend are in r1+1
// get the signed value from r1
@@ -3894,7 +3895,7 @@ EVALUATE(M) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int32_t mem_val = ReadW(addr, instr);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
@@ -4094,7 +4095,7 @@ EVALUATE(SLA) {
EVALUATE(SRDL) {
DCHECK_OPCODE(SRDL);
DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
- DCHECK(r1 % 2 == 0); // must be a reg pair
+ DCHECK_EQ(r1 % 2, 0); // must be a reg pair
// only takes rightmost 6bits
int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
@@ -4115,7 +4116,7 @@ EVALUATE(SLDL) {
int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
uint32_t r1_val = get_low_register<uint32_t>(r1);
uint32_t r1_next_val = get_low_register<uint32_t>(r1 + 1);
uint64_t alu_out = (static_cast<uint64_t>(r1_val) << 32) |
@@ -4129,7 +4130,7 @@ EVALUATE(SLDL) {
EVALUATE(SRDA) {
DCHECK_OPCODE(SRDA);
DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
- DCHECK(r1 % 2 == 0); // must be a reg pair
+ DCHECK_EQ(r1 % 2, 0); // must be a reg pair
// only takes rightmost 6bits
int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
@@ -4634,7 +4635,7 @@ EVALUATE(TMLL) {
return length; // Done!
}
- DCHECK(mask != 0);
+ DCHECK_NE(mask, 0);
// Test if all selected bits are one
if (mask == (mask & r1_val)) {
condition_reg_ = 0x1;
@@ -5895,7 +5896,7 @@ EVALUATE(FIEBRA) {
DCHECK_OPCODE(FIEBRA);
DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
float r2_val = get_float32_from_d_register(r2);
- CHECK(m4 == 0);
+ CHECK_EQ(m4, 0);
switch (m3) {
case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
set_d_register_from_float32(r1, round(r2_val));
@@ -5938,7 +5939,7 @@ EVALUATE(FIDBRA) {
DCHECK_OPCODE(FIDBRA);
DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
double r2_val = get_double_from_d_register(r2);
- CHECK(m4 == 0);
+ CHECK_EQ(m4, 0);
switch (m3) {
case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
set_d_register_from_double(r1, round(r2_val));
@@ -6883,7 +6884,7 @@ EVALUATE(DSGR) {
DCHECK_OPCODE(DSGR);
DECODE_RRE_INSTRUCTION(r1, r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int64_t dividend = get_register(r1 + 1);
int64_t divisor = get_register(r2);
@@ -7012,7 +7013,7 @@ EVALUATE(MSGFR) {
EVALUATE(DSGFR) {
DCHECK_OPCODE(DSGFR);
DECODE_RRE_INSTRUCTION(r1, r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int64_t r1_val = get_register(r1 + 1);
int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
int64_t quotient = r1_val / r2_val;
@@ -7231,7 +7232,7 @@ EVALUATE(FLOGR) {
DCHECK_OPCODE(FLOGR);
DECODE_RRE_INSTRUCTION(r1, r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int64_t r2_val = get_register(r2);
@@ -7277,7 +7278,7 @@ EVALUATE(DLGR) {
DECODE_RRE_INSTRUCTION(r1, r2);
uint64_t r1_val = get_register(r1);
uint64_t r2_val = get_register(r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
unsigned __int128 dividend = static_cast<unsigned __int128>(r1_val) << 64;
dividend += get_register(r1 + 1);
uint64_t remainder = dividend % r2_val;
@@ -7357,7 +7358,7 @@ EVALUATE(LLHR) {
EVALUATE(MLR) {
DCHECK_OPCODE(MLR);
DECODE_RRE_INSTRUCTION(r1, r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint32_t r2_val = get_low_register<uint32_t>(r2);
@@ -7375,7 +7376,7 @@ EVALUATE(DLR) {
DECODE_RRE_INSTRUCTION(r1, r2);
uint32_t r1_val = get_low_register<uint32_t>(r1);
uint32_t r2_val = get_low_register<uint32_t>(r2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
uint64_t dividend = static_cast<uint64_t>(r1_val) << 32;
dividend += get_low_register<uint32_t>(r1 + 1);
uint32_t remainder = dividend % r2_val;
@@ -7852,7 +7853,7 @@ EVALUATE(MSG) {
EVALUATE(DSG) {
DCHECK_OPCODE(DSG);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
@@ -7972,7 +7973,7 @@ EVALUATE(MSGF) {
EVALUATE(DSGF) {
DCHECK_OPCODE(DSGF);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
@@ -8263,7 +8264,7 @@ EVALUATE(MFY) {
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
@@ -8495,7 +8496,7 @@ EVALUATE(DLG) {
uint64_t r1_val = get_register(r1);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
unsigned __int128 dividend = static_cast<unsigned __int128>(r1_val) << 64;
dividend += get_register(r1 + 1);
int64_t mem_val = ReadDW(b2_val + x2_val + d2);
@@ -8564,7 +8565,7 @@ EVALUATE(ML) {
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t product =
@@ -8582,7 +8583,7 @@ EVALUATE(DL) {
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- DCHECK(r1 % 2 == 0);
+ DCHECK_EQ(r1 % 2, 0);
uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t quotient =
@@ -8857,7 +8858,7 @@ EVALUATE(CS) {
int32_t r1_val = get_low_register<int32_t>(r1);
int32_t r3_val = get_low_register<int32_t>(r3);
- DCHECK((target_addr & 0x3) == 0);
+ DCHECK_EQ(target_addr & 0x3, 0);
bool is_success = __atomic_compare_exchange_n(
reinterpret_cast<int32_t*>(target_addr), &r1_val, r3_val, true,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
@@ -8880,7 +8881,7 @@ EVALUATE(CSY) {
int32_t r1_val = get_low_register<int32_t>(r1);
int32_t r3_val = get_low_register<int32_t>(r3);
- DCHECK((target_addr & 0x3) == 0);
+ DCHECK_EQ(target_addr & 0x3, 0);
bool is_success = __atomic_compare_exchange_n(
reinterpret_cast<int32_t*>(target_addr), &r1_val, r3_val, true,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 694cc88cbe..06a6888465 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -34,20 +34,28 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
return (bits_[byte_index] & (1 << bit_index)) != 0;
}
-
-SafepointTable::SafepointTable(Code* code) {
- DCHECK(code->is_turbofanned());
- code_ = code;
- Address header = code->instruction_start() + code->safepoint_table_offset();
+SafepointTable::SafepointTable(Address instruction_start,
+ size_t safepoint_table_offset,
+ uint32_t stack_slots, bool has_deopt)
+ : instruction_start_(instruction_start),
+ stack_slots_(stack_slots),
+ has_deopt_(has_deopt) {
+ Address header = instruction_start_ + safepoint_table_offset;
length_ = Memory::uint32_at(header + kLengthOffset);
entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
entries_ = pc_and_deoptimization_indexes_ + (length_ * kFixedEntrySize);
- DCHECK(entry_size_ > 0);
+ DCHECK_GT(entry_size_, 0);
STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
Safepoint::kNoDeoptimizationIndex);
}
+SafepointTable::SafepointTable(Code* code)
+ : SafepointTable(code->instruction_start(), code->safepoint_table_offset(),
+ code->stack_slots(), true) {
+ DCHECK(code->is_turbofanned());
+}
+
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
for (unsigned i = 0; i < length(); i++) {
if (GetTrampolinePcOffset(i) == static_cast<int>(pc_offset)) {
@@ -61,7 +69,7 @@ unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
}
SafepointEntry SafepointTable::FindEntry(Address pc) const {
- unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
+ unsigned pc_offset = static_cast<unsigned>(pc - instruction_start_);
// We use kMaxUInt32 as sentinel value, so check that we don't hit that.
DCHECK_NE(kMaxUInt32, pc_offset);
unsigned len = length();
@@ -70,7 +78,8 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
for (unsigned i = 0; i < len; i++) {
// TODO(kasperl): Replace the linear search with binary search.
if (GetPcOffset(i) == pc_offset ||
- GetTrampolinePcOffset(i) == static_cast<int>(pc_offset)) {
+ (has_deopt_ &&
+ GetTrampolinePcOffset(i) == static_cast<int>(pc_offset))) {
return GetEntry(i);
}
}
@@ -91,7 +100,7 @@ void SafepointTable::PrintEntry(unsigned index,
const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
int last = entry_size_ - 1;
for (int i = first; i < last; i++) PrintBits(os, bits[i], kBitsPerByte);
- int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
+ int last_bits = stack_slots_ - ((last - first) * kBitsPerByte);
PrintBits(os, bits[last], last_bits);
// Print the registers (if any).
@@ -124,7 +133,7 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
- DCHECK(arguments >= 0);
+ DCHECK_GE(arguments, 0);
DeoptimizationInfo info;
info.pc = assembler->pc_offset();
info.arguments = arguments;
@@ -137,9 +146,9 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
}
indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
registers_.Add((kind & Safepoint::kWithRegisters)
- ? new(zone_) ZoneList<int>(4, zone_)
- : NULL,
- zone_);
+ ? new (zone_) ZoneList<int>(4, zone_)
+ : nullptr,
+ zone_);
return Safepoint(indexes_.last(), registers_.last());
}
@@ -164,7 +173,7 @@ int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
break;
}
}
- CHECK(index >= 0);
+ CHECK_GE(index, 0);
DCHECK(index < deoptimization_info_.length());
deoptimization_info_[index].trampoline = trampoline;
return index;
@@ -208,7 +217,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// Run through the registers (if any).
DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- if (registers == NULL) {
+ if (registers == nullptr) {
const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
for (int j = 0; j < num_reg_bytes; j++) {
bits[j] = SafepointTable::kNoRegisters;
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 057d5d8c5e..9f063bac20 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -18,14 +18,14 @@ class Register;
class SafepointEntry BASE_EMBEDDED {
public:
- SafepointEntry() : info_(0), bits_(NULL), trampoline_pc_(-1) {}
+ SafepointEntry() : info_(0), bits_(nullptr), trampoline_pc_(-1) {}
SafepointEntry(unsigned info, uint8_t* bits, int trampoline_pc)
: info_(info), bits_(bits), trampoline_pc_(trampoline_pc) {
DCHECK(is_valid());
}
- bool is_valid() const { return bits_ != NULL; }
+ bool is_valid() const { return bits_ != nullptr; }
bool Equals(const SafepointEntry& other) const {
return info_ == other.info_ && bits_ == other.bits_;
@@ -33,7 +33,7 @@ class SafepointEntry BASE_EMBEDDED {
void Reset() {
info_ = 0;
- bits_ = NULL;
+ bits_ = nullptr;
}
int deoptimization_index() const {
@@ -90,6 +90,9 @@ class SafepointEntry BASE_EMBEDDED {
class SafepointTable BASE_EMBEDDED {
public:
explicit SafepointTable(Code* code);
+ explicit SafepointTable(Address instruction_start,
+ size_t safepoint_table_offset, uint32_t stack_slots,
+ bool has_deopt = false);
int size() const {
return kHeaderSize + (length_ * (kFixedEntrySize + entry_size_));
@@ -113,7 +116,8 @@ class SafepointTable BASE_EMBEDDED {
DCHECK(index < length_);
unsigned info = Memory::uint32_at(GetInfoLocation(index));
uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
- int trampoline_pc = Memory::int_at(GetTrampolineLocation(index));
+ int trampoline_pc =
+ has_deopt_ ? Memory::int_at(GetTrampolineLocation(index)) : -1;
return SafepointEntry(info, bits, trampoline_pc);
}
@@ -151,12 +155,14 @@ class SafepointTable BASE_EMBEDDED {
uint8_t byte, int digits);
DisallowHeapAllocation no_allocation_;
- Code* code_;
+ Address instruction_start_;
+ uint32_t stack_slots_;
unsigned length_;
unsigned entry_size_;
Address pc_and_deoptimization_indexes_;
Address entries_;
+ bool has_deopt_;
friend class SafepointTableBuilder;
friend class SafepointEntry;
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/setup-isolate-deserialize.cc
index a97b77fac6..e232b47148 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/setup-isolate-deserialize.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
- DCHECK(!create_heap_objects_);
+ CHECK(!create_heap_objects_);
// No actual work to be done; builtins will be deserialized from the snapshot.
}
@@ -27,11 +27,11 @@ void SetupIsolateDelegate::SetupInterpreter(
<< std::endl;
}
#endif
- DCHECK(interpreter->IsDispatchTableInitialized());
+ CHECK(interpreter->IsDispatchTableInitialized());
}
bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
- DCHECK(!create_heap_objects_);
+ CHECK(!create_heap_objects_);
// No actual work to be done; heap will be deserialized from the snapshot.
return true;
}
diff --git a/deps/v8/src/setup-isolate-full.cc b/deps/v8/src/setup-isolate-full.cc
index 14ef318b67..c3a367986c 100644
--- a/deps/v8/src/setup-isolate-full.cc
+++ b/deps/v8/src/setup-isolate-full.cc
@@ -17,7 +17,7 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
if (create_heap_objects_) {
SetupBuiltinsInternal(isolate);
} else {
- DCHECK(isolate->snapshot_available());
+ CHECK(isolate->snapshot_available());
}
}
@@ -26,7 +26,7 @@ void SetupIsolateDelegate::SetupInterpreter(
if (create_heap_objects_) {
interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
} else {
- DCHECK(interpreter->IsDispatchTableInitialized());
+ CHECK(interpreter->IsDispatchTableInitialized());
}
}
@@ -34,7 +34,7 @@ bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
if (create_heap_objects_) {
return SetupHeapInternal(heap);
} else {
- DCHECK(heap->isolate()->snapshot_available());
+ CHECK(heap->isolate()->snapshot_available());
return true;
}
}
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 5729fbfba2..e158e4d92b 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,6 +1,7 @@
set noparent
jgruber@chromium.org
+petermarshall@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.cc b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
new file mode 100644
index 0000000000..59cab6d40a
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
@@ -0,0 +1,289 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-deserializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/deserializer.h"
+
+namespace v8 {
+namespace internal {
+
+using interpreter::Bytecodes;
+using interpreter::Interpreter;
+
+BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
+ Deserializer<BuiltinDeserializerAllocator>* deserializer)
+ : deserializer_(deserializer) {}
+
+BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
+ delete handler_allocations_;
+}
+
+namespace {
+int HandlerAllocationIndex(int code_object_id) {
+ return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
+}
+} // namespace
+
+Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
+ int size) {
+ const int code_object_id = deserializer()->CurrentCodeObjectId();
+ DCHECK_NE(BuiltinDeserializer::kNoCodeObjectId, code_object_id);
+ DCHECK_EQ(CODE_SPACE, space);
+ DCHECK_EQ(deserializer()->ExtractCodeObjectSize(code_object_id), size);
+#ifdef DEBUG
+ RegisterCodeObjectAllocation(code_object_id);
+#endif
+
+ if (BSU::IsBuiltinIndex(code_object_id)) {
+ Object* obj = isolate()->builtins()->builtin(code_object_id);
+ DCHECK(Internals::HasHeapObjectTag(obj));
+ return HeapObject::cast(obj)->address();
+ } else if (BSU::IsHandlerIndex(code_object_id)) {
+ if (handler_allocation_ != nullptr) {
+ // Lazy deserialization.
+ DCHECK_NULL(handler_allocations_);
+ return handler_allocation_;
+ } else {
+ // Eager deserialization.
+ DCHECK_NULL(handler_allocation_);
+ DCHECK_NOT_NULL(handler_allocations_);
+ int index = HandlerAllocationIndex(code_object_id);
+ DCHECK_NOT_NULL(handler_allocations_->at(index));
+ return handler_allocations_->at(index);
+ }
+ }
+
+ UNREACHABLE();
+}
+
+Heap::Reservation
+BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
+ Heap::Reservation result;
+
+ // Reservations for builtins.
+
+ // DeserializeLazy is always the first builtin reservation (to simplify logic
+ // in InitializeBuiltinsTable).
+ {
+ DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+ uint32_t builtin_size =
+ deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({builtin_size, nullptr, nullptr});
+ }
+
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ if (i == Builtins::kDeserializeLazy) continue;
+
+ // Skip lazy builtins. These will be replaced by the DeserializeLazy code
+ // object in InitializeFromReservations and thus require no reserved space.
+ if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
+ continue;
+ }
+
+ uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({builtin_size, nullptr, nullptr});
+ }
+
+ // Reservations for bytecode handlers.
+
+ BSU::ForEachBytecode(
+ [=, &result](Bytecode bytecode, OperandScale operand_scale) {
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ // Bytecodes without a handler don't require a reservation.
+ return;
+ } else if (FLAG_lazy_handler_deserialization &&
+ deserializer()->IsLazyDeserializationEnabled() &&
+ Bytecodes::IsLazy(bytecode)) {
+ // Skip lazy handlers. These will be replaced by the DeserializeLazy
+ // code object in InitializeFromReservations and thus require no
+ // reserved space.
+ return;
+ }
+
+ const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
+ uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
+ DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({handler_size, nullptr, nullptr});
+ });
+
+ return result;
+}
+
+void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
+ const Heap::Chunk& chunk, int builtin_id) {
+ DCHECK_EQ(deserializer()->ExtractCodeObjectSize(builtin_id), chunk.size);
+ DCHECK_EQ(chunk.size, chunk.end - chunk.start);
+
+ SkipList::Update(chunk.start, chunk.size);
+ isolate()->builtins()->set_builtin(builtin_id,
+ HeapObject::FromAddress(chunk.start));
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(builtin_id);
+#endif
+}
+
+void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
+ const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale) {
+ DCHECK_EQ(deserializer()->ExtractCodeObjectSize(
+ BSU::BytecodeToIndex(bytecode, operand_scale)),
+ chunk.size);
+ DCHECK_EQ(chunk.size, chunk.end - chunk.start);
+
+ SkipList::Update(chunk.start, chunk.size);
+
+ DCHECK_NOT_NULL(handler_allocations_);
+ const int index =
+ HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
+ handler_allocations_->at(index) = chunk.start;
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
+#endif
+}
+
+void BuiltinDeserializerAllocator::InitializeFromReservations(
+ const Heap::Reservation& reservation) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+
+ // Initialize the builtins table.
+
+ Builtins* builtins = isolate()->builtins();
+ int reservation_index = 0;
+
+ // Other builtins can be replaced by DeserializeLazy so it may not be lazy.
+ // It always occupies the first reservation slot.
+ {
+ DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+ InitializeBuiltinFromReservation(reservation[reservation_index],
+ Builtins::kDeserializeLazy);
+ reservation_index++;
+ }
+
+ Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
+
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ if (i == Builtins::kDeserializeLazy) continue;
+
+ if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
+ builtins->set_builtin(i, deserialize_lazy);
+ } else {
+ InitializeBuiltinFromReservation(reservation[reservation_index], i);
+ reservation_index++;
+ }
+ }
+
+ // Initialize interpreter bytecode handler reservations.
+
+ DCHECK_NULL(handler_allocations_);
+ handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
+
+ BSU::ForEachBytecode(
+ [=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ // Bytecodes without a handler don't have a reservation.
+ return;
+ } else if (FLAG_lazy_handler_deserialization &&
+ deserializer()->IsLazyDeserializationEnabled() &&
+ Bytecodes::IsLazy(bytecode)) {
+ // Likewise, bytecodes with lazy handlers don't either.
+ return;
+ }
+
+ InitializeHandlerFromReservation(reservation[reservation_index],
+ bytecode, operand_scale);
+ reservation_index++;
+ });
+
+ DCHECK_EQ(reservation.size(), reservation_index);
+}
+
+void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
+ int builtin_id) {
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK(isolate()->builtins()->is_initialized());
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
+ DCHECK_EQ(Builtins::kDeserializeLazy,
+ isolate()->builtins()->builtin(builtin_id)->builtin_index());
+
+ const uint32_t builtin_size =
+ deserializer()->ExtractCodeObjectSize(builtin_id);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+
+ Handle<HeapObject> o =
+ isolate()->factory()->NewCodeForDeserialization(builtin_size);
+
+ // Note: After this point and until deserialization finishes, heap allocation
+ // is disallowed. We currently can't safely assert this since we'd need to
+ // pass the DisallowHeapAllocation scope out of this function.
+
+ // Write the allocated filler object into the builtins table. It will be
+ // returned by our custom Allocate method below once needed.
+
+ isolate()->builtins()->set_builtin(builtin_id, *o);
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(builtin_id);
+#endif
+}
+
+void BuiltinDeserializerAllocator::ReserveForHandler(
+ Bytecode bytecode, OperandScale operand_scale) {
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
+
+ const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
+ const uint32_t handler_size =
+ deserializer()->ExtractCodeObjectSize(code_object_id);
+ DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+
+ handler_allocation_ =
+ isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
+
+// Note: After this point and until deserialization finishes, heap allocation
+// is disallowed. We currently can't safely assert this since we'd need to
+// pass the DisallowHeapAllocation scope out of this function.
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(code_object_id);
+#endif
+}
+
+#ifdef DEBUG
+void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
+ int code_object_id) {
+ const auto result = unused_reservations_.emplace(code_object_id);
+ CHECK(result.second); // False, iff builtin_id was already present in set.
+}
+
+void BuiltinDeserializerAllocator::RegisterCodeObjectAllocation(
+ int code_object_id) {
+ const size_t removed_elems = unused_reservations_.erase(code_object_id);
+ CHECK_EQ(removed_elems, 1);
+}
+
+bool BuiltinDeserializerAllocator::ReservationsAreFullyUsed() const {
+ // Not 100% precise but should be good enough.
+ return unused_reservations_.empty();
+}
+#endif // DEBUG
+
+Isolate* BuiltinDeserializerAllocator::isolate() const {
+ return deserializer()->isolate();
+}
+
+BuiltinDeserializer* BuiltinDeserializerAllocator::deserializer() const {
+ return static_cast<BuiltinDeserializer*>(deserializer_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
new file mode 100644
index 0000000000..6fc7bfaf6b
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -0,0 +1,132 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
+
+#include <unordered_set>
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Deserializer;
+
+class BuiltinDeserializer;
+class BuiltinSnapshotUtils;
+
+class BuiltinDeserializerAllocator final {
+ using BSU = BuiltinSnapshotUtils;
+ using Bytecode = interpreter::Bytecode;
+ using OperandScale = interpreter::OperandScale;
+
+ public:
+ BuiltinDeserializerAllocator(
+ Deserializer<BuiltinDeserializerAllocator>* deserializer);
+
+ ~BuiltinDeserializerAllocator();
+
+ // ------- Allocation Methods -------
+ // Methods related to memory allocation during deserialization.
+
+ // Allocation works differently here than in other deserializers. Instead of
+ // a statically-known memory area determined at serialization-time, our
+ // memory requirements here are determined at runtime. Another major
+ // difference is that we create builtin Code objects up-front (before
+ // deserialization) in order to avoid having to patch builtin references
+ // later on. See also the kBuiltin case in deserializer.cc.
+ //
+ // There are three ways that we use to reserve / allocate space. In all
+ // cases, required objects are requested from the GC prior to
+ // deserialization. 1. pre-allocated builtin code objects are written into
+ // the builtins table (this is to make deserialization of builtin references
+ // easier). Pre-allocated handler code objects are 2. stored in the
+ // {handler_allocations_} vector (at eager-deserialization time) and 3.
+ // stored in {handler_allocation_} (at lazy-deserialization time).
+ //
+ // Allocate simply returns the pre-allocated object prepared by
+ // InitializeFromReservations.
+ Address Allocate(AllocationSpace space, int size);
+
+ void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
+ void SetAlignment(AllocationAlignment alignment) { UNREACHABLE(); }
+
+ HeapObject* GetMap(uint32_t index) { UNREACHABLE(); }
+ HeapObject* GetLargeObject(uint32_t index) { UNREACHABLE(); }
+ HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ UNREACHABLE();
+ }
+
+ // ------- Reservation Methods -------
+ // Methods related to memory reservations (prior to deserialization).
+
+ // Builtin deserialization does not bake reservations into the snapshot, hence
+ // this is a nop.
+ void DecodeReservation(Vector<const SerializedData::Reservation> res) {}
+
+ // These methods are used to pre-allocate builtin objects prior to
+ // deserialization.
+ // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
+ // make this less messy.
+ Heap::Reservation CreateReservationsForEagerBuiltinsAndHandlers();
+ void InitializeFromReservations(const Heap::Reservation& reservation);
+
+ // Creates reservations and initializes the builtins table in preparation for
+ // lazily deserializing a single builtin.
+ void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
+
+ // Pre-allocates a code object preparation for lazily deserializing a single
+ // handler.
+ void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
+
+#ifdef DEBUG
+ bool ReservationsAreFullyUsed() const;
+#endif
+
+ private:
+ Isolate* isolate() const;
+ BuiltinDeserializer* deserializer() const;
+
+ // Used after memory allocation prior to isolate initialization, to register
+ // the newly created object in code space and add it to the builtins table.
+ void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
+ int builtin_id);
+
+ // As above, but for interpreter bytecode handlers.
+ void InitializeHandlerFromReservation(
+ const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale);
+
+#ifdef DEBUG
+ void RegisterCodeObjectReservation(int code_object_id);
+ void RegisterCodeObjectAllocation(int code_object_id);
+ std::unordered_set<int> unused_reservations_;
+#endif
+
+ private:
+ // The current deserializer. Note that this always points to a
+ // BuiltinDeserializer instance, but we can't perform the cast during
+ // construction since that makes vtable-based checks fail.
+ Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
+
+ // Stores allocated space for bytecode handlers during eager deserialization.
+ std::vector<Address>* handler_allocations_ = nullptr;
+
+ // Stores the allocated space for a single handler during lazy
+ // deserialization.
+ Address handler_allocation_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index fb41a9fec9..53a0f30612 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -5,80 +5,128 @@
#include "src/snapshot/builtin-deserializer.h"
#include "src/assembler-inl.h"
+#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-// Tracks the builtin currently being deserialized (required for allocation).
-class DeserializingBuiltinScope {
+using interpreter::Bytecodes;
+using interpreter::Interpreter;
+
+// Tracks the code object currently being deserialized (required for
+// allocation).
+class DeserializingCodeObjectScope {
public:
- DeserializingBuiltinScope(BuiltinDeserializer* builtin_deserializer,
- int builtin_id)
+ DeserializingCodeObjectScope(BuiltinDeserializer* builtin_deserializer,
+ int code_object_id)
: builtin_deserializer_(builtin_deserializer) {
- DCHECK_EQ(BuiltinDeserializer::kNoBuiltinId,
- builtin_deserializer->current_builtin_id_);
- builtin_deserializer->current_builtin_id_ = builtin_id;
+ DCHECK_EQ(BuiltinDeserializer::kNoCodeObjectId,
+ builtin_deserializer->current_code_object_id_);
+ builtin_deserializer->current_code_object_id_ = code_object_id;
}
- ~DeserializingBuiltinScope() {
- builtin_deserializer_->current_builtin_id_ =
- BuiltinDeserializer::kNoBuiltinId;
+ ~DeserializingCodeObjectScope() {
+ builtin_deserializer_->current_code_object_id_ =
+ BuiltinDeserializer::kNoCodeObjectId;
}
private:
BuiltinDeserializer* builtin_deserializer_;
- DISALLOW_COPY_AND_ASSIGN(DeserializingBuiltinScope)
+ DISALLOW_COPY_AND_ASSIGN(DeserializingCodeObjectScope)
};
BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
- // We may have to relax this at some point to pack reloc infos and handler
- // tables into the builtin blob (instead of the partial snapshot cache).
- DCHECK(ReservesOnlyCodeSpace());
-
- builtin_offsets_ = data->BuiltinOffsets();
- DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
- DCHECK(std::is_sorted(builtin_offsets_.begin(), builtin_offsets_.end()));
+ code_offsets_ = data->BuiltinOffsets();
+ DCHECK_EQ(BSU::kNumberOfCodeObjects, code_offsets_.length());
+ DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
-void BuiltinDeserializer::DeserializeEagerBuiltins() {
+void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
+ // Deserialize builtins.
+
Builtins* builtins = isolate()->builtins();
- for (int i = 0; i < Builtins::builtin_count; i++) {
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
- // InitializeBuiltinsTable.
+ // InitializeFromReservations.
DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
builtins->builtin(i));
} else {
- builtins->set_builtin(i, DeserializeBuiltin(i));
+ builtins->set_builtin(i, DeserializeBuiltinRaw(i));
}
}
#ifdef DEBUG
- for (int i = 0; i < Builtins::builtin_count; i++) {
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
Object* o = builtins->builtin(i);
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
}
#endif
+
+ // Deserialize bytecode handlers.
+
+ Interpreter* interpreter = isolate()->interpreter();
+ DCHECK(!isolate()->interpreter()->IsDispatchTableInitialized());
+
+ BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ // Bytecodes without a dedicated handler are patched up in a second pass.
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+ // If lazy-deserialization is enabled and the current bytecode is lazy,
+ // we write the generic LazyDeserialization handler into the dispatch table
+ // and deserialize later upon first use.
+ Code* code = (FLAG_lazy_handler_deserialization &&
+ IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
+ ? GetDeserializeLazyHandler(operand_scale)
+ : DeserializeHandlerRaw(bytecode, operand_scale);
+
+ interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
+ });
+
+ // Patch up holes in the dispatch table.
+
+ Code* illegal_handler = interpreter->GetBytecodeHandler(
+ Bytecode::kIllegal, OperandScale::kSingle);
+
+ BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+ interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
+ });
+
+ DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
}
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
+ allocator()->ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
+ DisallowHeapAllocation no_gc;
+ return DeserializeBuiltinRaw(builtin_id);
+}
+
+Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
+ allocator()->ReserveForHandler(bytecode, operand_scale);
+ DisallowHeapAllocation no_gc;
+ return DeserializeHandlerRaw(bytecode, operand_scale);
+}
+
+Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DeserializingBuiltinScope scope(this, builtin_id);
+ DeserializingCodeObjectScope scope(this, builtin_id);
const int initial_position = source()->position();
- SetPositionToBuiltin(builtin_id);
+ source()->set_position(code_offsets_[builtin_id]);
Object* o = ReadDataSingle();
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
@@ -94,35 +142,38 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return code;
}
-void BuiltinDeserializer::SetPositionToBuiltin(int builtin_id) {
- DCHECK(Builtins::IsBuiltinId(builtin_id));
+Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
+ OperandScale operand_scale) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
- const uint32_t offset = builtin_offsets_[builtin_id];
- source()->set_position(offset);
+ const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
+ DeserializingCodeObjectScope scope(this, code_object_id);
- // Grab the size of the code object.
- byte data = source()->Get();
+ const int initial_position = source()->position();
+ source()->set_position(code_offsets_[code_object_id]);
- // The first bytecode can either be kNewObject, or kNextChunk if the current
- // chunk has been exhausted. Since we do allocations differently here, we
- // don't care about kNextChunk and can simply skip over it.
- // TODO(jgruber): When refactoring (de)serializer allocations, ensure we don't
- // generate kNextChunk bytecodes anymore for the builtins snapshot. In fact,
- // the entire reservations mechanism is unused for the builtins snapshot.
- if (data == kNextChunk) {
- source()->Get(); // Skip over kNextChunk's {space} parameter.
- } else {
- source()->set_position(offset); // Rewind.
- }
+ Object* o = ReadDataSingle();
+ DCHECK(o->IsCode() && Code::cast(o)->kind() == Code::BYTECODE_HANDLER);
+
+ // Rewind.
+ source()->set_position(initial_position);
+
+ // Flush the instruction cache.
+ Code* code = Code::cast(o);
+ Assembler::FlushICache(isolate(), code->instruction_start(),
+ code->instruction_size());
+
+ return code;
}
-uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
- DCHECK(Builtins::IsBuiltinId(builtin_id));
+uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
+ DCHECK_LT(code_object_id, BSU::kNumberOfCodeObjects);
const int initial_position = source()->position();
// Grab the size of the code object.
- SetPositionToBuiltin(builtin_id);
+ source()->set_position(code_offsets_[code_object_id]);
byte data = source()->Get();
USE(data);
@@ -135,108 +186,19 @@ uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
return result;
}
-Heap::Reservation BuiltinDeserializer::CreateReservationsForEagerBuiltins() {
- DCHECK(ReservesOnlyCodeSpace());
-
- Heap::Reservation result;
-
- // DeserializeLazy is always the first reservation (to simplify logic in
- // InitializeBuiltinsTable).
- {
- DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
- uint32_t builtin_size = ExtractBuiltinSize(Builtins::kDeserializeLazy);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({builtin_size, nullptr, nullptr});
- }
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (i == Builtins::kDeserializeLazy) continue;
-
- // Skip lazy builtins. These will be replaced by the DeserializeLazy code
- // object in InitializeBuiltinsTable and thus require no reserved space.
- if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) continue;
-
- uint32_t builtin_size = ExtractBuiltinSize(i);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({builtin_size, nullptr, nullptr});
- }
-
- return result;
-}
-
-void BuiltinDeserializer::InitializeBuiltinFromReservation(
- const Heap::Chunk& chunk, int builtin_id) {
- DCHECK_EQ(ExtractBuiltinSize(builtin_id), chunk.size);
- DCHECK_EQ(chunk.size, chunk.end - chunk.start);
-
- SkipList::Update(chunk.start, chunk.size);
- isolate()->builtins()->set_builtin(builtin_id,
- HeapObject::FromAddress(chunk.start));
-}
-
-void BuiltinDeserializer::InitializeBuiltinsTable(
- const Heap::Reservation& reservation) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
-
- Builtins* builtins = isolate()->builtins();
- int reservation_index = 0;
-
- // Other builtins can be replaced by DeserializeLazy so it may not be lazy.
- // It always occupies the first reservation slot.
- {
- DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
- InitializeBuiltinFromReservation(reservation[reservation_index],
- Builtins::kDeserializeLazy);
- reservation_index++;
- }
-
- Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (i == Builtins::kDeserializeLazy) continue;
-
- if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- builtins->set_builtin(i, deserialize_lazy);
- } else {
- InitializeBuiltinFromReservation(reservation[reservation_index], i);
- reservation_index++;
- }
+Code* BuiltinDeserializer::GetDeserializeLazyHandler(
+ interpreter::OperandScale operand_scale) const {
+ STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ return Code::cast(isolate()->heap()->deserialize_lazy_handler());
+ case OperandScale::kDouble:
+ return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
+ case OperandScale::kQuadruple:
+ return Code::cast(
+ isolate()->heap()->deserialize_lazy_handler_extra_wide());
}
-
- DCHECK_EQ(reservation.size(), reservation_index);
-}
-
-void BuiltinDeserializer::ReserveAndInitializeBuiltinsTableForBuiltin(
- int builtin_id) {
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(isolate()->builtins()->is_initialized());
- DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
- DCHECK_EQ(Builtins::kDeserializeLazy,
- isolate()->builtins()->builtin(builtin_id)->builtin_index());
-
- const uint32_t builtin_size = ExtractBuiltinSize(builtin_id);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
-
- Handle<HeapObject> o =
- isolate()->factory()->NewCodeForDeserialization(builtin_size);
-
- // Note: After this point and until deserialization finishes, heap allocation
- // is disallowed. We currently can't safely assert this since we'd need to
- // pass the DisallowHeapAllocation scope out of this function.
-
- // Write the allocated filler object into the builtins table. It will be
- // returned by our custom Allocate method below once needed.
-
- isolate()->builtins()->set_builtin(builtin_id, *o);
-}
-
-Address BuiltinDeserializer::Allocate(int space_index, int size) {
- DCHECK_EQ(CODE_SPACE, space_index);
- DCHECK_EQ(ExtractBuiltinSize(current_builtin_id_), size);
- Object* obj = isolate()->builtins()->builtin(current_builtin_id_);
- DCHECK(Internals::HasHeapObjectTag(obj));
- return HeapObject::cast(obj)->address();
+ UNREACHABLE();
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
index a73c68ed34..38ba2fecea 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.h
+++ b/deps/v8/src/snapshot/builtin-deserializer.h
@@ -5,7 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
-#include "src/heap/heap.h"
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/builtin-deserializer-allocator.h"
+#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
@@ -14,7 +16,12 @@ namespace internal {
class BuiltinSnapshotData;
// Deserializes the builtins blob.
-class BuiltinDeserializer final : public Deserializer {
+class BuiltinDeserializer final
+ : public Deserializer<BuiltinDeserializerAllocator> {
+ using BSU = BuiltinSnapshotUtils;
+ using Bytecode = interpreter::Bytecode;
+ using OperandScale = interpreter::OperandScale;
+
public:
BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
@@ -25,45 +32,27 @@ class BuiltinDeserializer final : public Deserializer {
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
- void DeserializeEagerBuiltins();
+ void DeserializeEagerBuiltinsAndHandlers();
- // Deserializes the single given builtin. Assumes that reservations have
- // already been allocated.
+ // Deserializes the single given builtin. This is used whenever a builtin is
+ // lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
- // These methods are used to pre-allocate builtin objects prior to
- // deserialization.
- // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
- // make this less messy.
- Heap::Reservation CreateReservationsForEagerBuiltins();
- void InitializeBuiltinsTable(const Heap::Reservation& reservation);
-
- // Creates reservations and initializes the builtins table in preparation for
- // lazily deserializing a single builtin.
- void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
+ // Deserializes the single given handler. This is used whenever a handler is
+ // lazily deserialized at runtime.
+ Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
private:
- // TODO(jgruber): Remove once allocations have been refactored.
- void SetPositionToBuiltin(int builtin_id);
+ // Deserializes the single given builtin. Assumes that reservations have
+ // already been allocated.
+ Code* DeserializeBuiltinRaw(int builtin_id);
+
+ // Deserializes the single given bytecode handler. Assumes that reservations
+ // have already been allocated.
+ Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
// Extracts the size builtin Code objects (baked into the snapshot).
- uint32_t ExtractBuiltinSize(int builtin_id);
-
- // Used after memory allocation prior to isolate initialization, to register
- // the newly created object in code space and add it to the builtins table.
- void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
- int builtin_id);
-
- // Allocation works differently here than in other deserializers. Instead of
- // a statically-known memory area determined at serialization-time, our
- // memory requirements here are determined at runtime. Another major
- // difference is that we create builtin Code objects up-front (before
- // deserialization) in order to avoid having to patch builtin references
- // later on. See also the kBuiltin case in deserializer.cc.
- //
- // Allocate simply returns the pre-allocated object prepared by
- // InitializeBuiltinsTable.
- Address Allocate(int space_index, int size) override;
+ uint32_t ExtractCodeObjectSize(int builtin_id);
// BuiltinDeserializer implements its own builtin iteration logic. Make sure
// the RootVisitor API is not used accidentally.
@@ -71,16 +60,29 @@ class BuiltinDeserializer final : public Deserializer {
UNREACHABLE();
}
- // Stores the builtin currently being deserialized. We need this to determine
- // where to 'allocate' from during deserialization.
- static const int kNoBuiltinId = -1;
- int current_builtin_id_ = kNoBuiltinId;
+ int CurrentCodeObjectId() const { return current_code_object_id_; }
+
+ // Convenience function to grab the handler off the heap's strong root list.
+ Code* GetDeserializeLazyHandler(OperandScale operand_scale) const;
+
+ private:
+ // Stores the code object currently being deserialized. The
+ // {current_code_object_id} stores the index of the currently-deserialized
+ // code object within the snapshot (and within {code_offsets_}). We need this
+ // to determine where to 'allocate' from during deserialization.
+ static const int kNoCodeObjectId = -1;
+ int current_code_object_id_ = kNoCodeObjectId;
// The offsets of each builtin within the serialized data. Equivalent to
// BuiltinSerializer::builtin_offsets_ but on the deserialization side.
- Vector<const uint32_t> builtin_offsets_;
+ Vector<const uint32_t> code_offsets_;
+
+ // For current_code_object_id_.
+ friend class DeserializingCodeObjectScope;
- friend class DeserializingBuiltinScope;
+ // For isolate(), IsLazyDeserializationEnabled(), CurrentCodeObjectId() and
+ // ExtractBuiltinSize().
+ friend class BuiltinDeserializerAllocator;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.cc b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
new file mode 100644
index 0000000000..dbb5789721
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-serializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+SerializerReference BuiltinSerializerAllocator::Allocate(AllocationSpace space,
+ uint32_t size) {
+ DCHECK_EQ(space, CODE_SPACE);
+ DCHECK_GT(size, 0);
+
+ // Builtin serialization & deserialization does not use the reservation
+ // system. Instead of worrying about chunk indices and offsets, we simply
+ // need to generate unique offsets here.
+
+ const uint32_t virtual_chunk_index = 0;
+ const auto ref = SerializerReference::BackReference(
+ CODE_SPACE, virtual_chunk_index, virtual_chunk_offset_);
+
+ virtual_chunk_size_ += size;
+ virtual_chunk_offset_ += kObjectAlignment; // Needs to be aligned.
+
+ return ref;
+}
+
+#ifdef DEBUG
+bool BuiltinSerializerAllocator::BackReferenceIsAlreadyAllocated(
+ SerializerReference reference) const {
+ DCHECK(reference.is_back_reference());
+ AllocationSpace space = reference.space();
+ DCHECK_EQ(space, CODE_SPACE);
+ DCHECK_EQ(reference.chunk_index(), 0);
+ return reference.chunk_offset() < virtual_chunk_offset_;
+}
+#endif // DEBUG
+
+std::vector<SerializedData::Reservation>
+BuiltinSerializerAllocator::EncodeReservations() const {
+ return std::vector<SerializedData::Reservation>();
+}
+
+void BuiltinSerializerAllocator::OutputStatistics() {
+ DCHECK(FLAG_serialization_statistics);
+
+ PrintF(" Spaces (bytes):\n");
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int space = 0; space < kNumberOfSpaces; space++) {
+ PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
+ }
+ PrintF("\n");
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int space = 0; space < kNumberOfSpaces; space++) {
+ uint32_t space_size = (space == CODE_SPACE) ? virtual_chunk_size_ : 0;
+ PrintF("%16d", space_size);
+ }
+ PrintF("\n");
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.h b/deps/v8/src/snapshot/builtin-serializer-allocator.h
new file mode 100644
index 0000000000..a2c9a036e4
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.h
@@ -0,0 +1,52 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
+
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Serializer;
+
+class BuiltinSerializerAllocator final {
+ public:
+ BuiltinSerializerAllocator(
+ Serializer<BuiltinSerializerAllocator>* serializer) {}
+
+ SerializerReference Allocate(AllocationSpace space, uint32_t size);
+ SerializerReference AllocateMap() { UNREACHABLE(); }
+ SerializerReference AllocateLargeObject(uint32_t size) { UNREACHABLE(); }
+ SerializerReference AllocateOffHeapBackingStore() { UNREACHABLE(); }
+
+#ifdef DEBUG
+ bool BackReferenceIsAlreadyAllocated(
+ SerializerReference back_reference) const;
+#endif
+
+ std::vector<SerializedData::Reservation> EncodeReservations() const;
+
+ void OutputStatistics();
+
+ private:
+ static constexpr int kNumberOfPreallocatedSpaces =
+ SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static constexpr int kNumberOfSpaces =
+ SerializerDeserializer::kNumberOfSpaces;
+
+ // We need to track a faked offset to create back-references. The size is
+ // kept simply to display statistics.
+ uint32_t virtual_chunk_size_ = 0;
+ uint32_t virtual_chunk_offset_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinSerializerAllocator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
index 6e90ea18be..893c79c05e 100644
--- a/deps/v8/src/snapshot/builtin-serializer.cc
+++ b/deps/v8/src/snapshot/builtin-serializer.cc
@@ -4,12 +4,17 @@
#include "src/snapshot/builtin-serializer.h"
+#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/startup-serializer.h"
namespace v8 {
namespace internal {
+using interpreter::Bytecode;
+using interpreter::Bytecodes;
+using interpreter::OperandScale;
+
BuiltinSerializer::BuiltinSerializer(Isolate* isolate,
StartupSerializer* startup_serializer)
: Serializer(isolate), startup_serializer_(startup_serializer) {}
@@ -18,17 +23,45 @@ BuiltinSerializer::~BuiltinSerializer() {
OutputStatistics("BuiltinSerializer");
}
-void BuiltinSerializer::SerializeBuiltins() {
- for (int i = 0; i < Builtins::builtin_count; i++) {
- builtin_offsets_[i] = sink_.Position();
+void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
+ // Serialize builtins.
+
+ STATIC_ASSERT(0 == BSU::kFirstBuiltinIndex);
+
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ SetBuiltinOffset(i, sink_.Position());
SerializeBuiltin(isolate()->builtins()->builtin(i));
}
- Pad(); // Pad with kNop since GetInt() might read too far.
+
+ // Serialize bytecode handlers.
+
+ STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kFirstHandlerIndex);
+
+ BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ SetHandlerOffset(bytecode, operand_scale, sink_.Position());
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+ SerializeHandler(
+ isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
+ });
+
+ STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
+ BSU::kNumberOfCodeObjects);
+
+ // The DeserializeLazy handlers are serialized by the StartupSerializer
+ // during strong root iteration.
+
+ DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
+ DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
+ DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
+
+ // Pad with kNop since GetInt() might read too far.
+ Pad();
// Append the offset table. During deserialization, the offset table is
// extracted by BuiltinSnapshotData.
- const byte* data = reinterpret_cast<const byte*>(&builtin_offsets_[0]);
- int data_length = static_cast<int>(sizeof(builtin_offsets_));
+ const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
+ int data_length = static_cast<int>(sizeof(code_offsets_));
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
@@ -50,6 +83,13 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
object_serializer.Serialize();
}
+void BuiltinSerializer::SerializeHandler(Code* code) {
+ DCHECK(ObjectIsBytecodeHandler(code));
+ ObjectSerializer object_serializer(this, code, &sink_, kPlain,
+ kStartOfObject);
+ object_serializer.Serialize();
+}
+
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
@@ -86,5 +126,19 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
sink_.PutInt(cache_index, "partial_snapshot_cache_index");
}
+void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK(BSU::IsBuiltinIndex(builtin_id));
+ code_offsets_[builtin_id] = offset;
+}
+
+void BuiltinSerializer::SetHandlerOffset(Bytecode bytecode,
+ OperandScale operand_scale,
+ uint32_t offset) {
+ const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
+ DCHECK(BSU::IsHandlerIndex(index));
+ code_offsets_[index] = offset;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
index 85c59f84c0..bb8bbdebfa 100644
--- a/deps/v8/src/snapshot/builtin-serializer.h
+++ b/deps/v8/src/snapshot/builtin-serializer.h
@@ -5,6 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/builtin-serializer-allocator.h"
+#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/serializer.h"
namespace v8 {
@@ -12,31 +15,45 @@ namespace internal {
class StartupSerializer;
-// Responsible for serializing all builtin objects during startup snapshot
-// creation. Builtins are serialized into a dedicated area of the snapshot.
+// Responsible for serializing builtin and bytecode handler objects during
+// startup snapshot creation into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
-class BuiltinSerializer : public Serializer<> {
+class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
+ using BSU = BuiltinSnapshotUtils;
+
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
- void SerializeBuiltins();
+ void SerializeBuiltinsAndHandlers();
private:
void VisitRootPointers(Root root, Object** start, Object** end) override;
void SerializeBuiltin(Code* code);
+ void SerializeHandler(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
+ void SetBuiltinOffset(int builtin_id, uint32_t offset);
+ void SetHandlerOffset(interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale,
+ uint32_t offset);
+
// The startup serializer is needed for access to the partial snapshot cache,
// which is used to serialize things like embedded constants.
StartupSerializer* startup_serializer_;
- // Stores the starting offset, within the serialized data, of each builtin.
- // This is later packed into the builtin snapshot, and used by the builtin
- // deserializer to deserialize individual builtins.
- uint32_t builtin_offsets_[Builtins::builtin_count];
+ // Stores the starting offset, within the serialized data, of each code
+ // object. This is later packed into the builtin snapshot, and used by the
+ // builtin deserializer to deserialize individual builtins and bytecode
+ // handlers.
+ //
+ // Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
+ // Builtin offsets.
+ // Indices [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers[:
+ // Bytecode handler offsets.
+ uint32_t code_offsets_[BuiltinSnapshotUtils::kNumberOfCodeObjects];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
diff --git a/deps/v8/src/snapshot/builtin-snapshot-utils.cc b/deps/v8/src/snapshot/builtin-snapshot-utils.cc
new file mode 100644
index 0000000000..e32a857c0b
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-snapshot-utils.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-snapshot-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+bool BuiltinSnapshotUtils::IsBuiltinIndex(int maybe_index) {
+ return (kFirstBuiltinIndex <= maybe_index &&
+ maybe_index < kFirstBuiltinIndex + kNumberOfBuiltins);
+}
+
+// static
+bool BuiltinSnapshotUtils::IsHandlerIndex(int maybe_index) {
+ return (kFirstHandlerIndex <= maybe_index &&
+ maybe_index < kFirstHandlerIndex + kNumberOfHandlers);
+}
+
+// static
+int BuiltinSnapshotUtils::BytecodeToIndex(Bytecode bytecode,
+ OperandScale operand_scale) {
+ int index =
+ BuiltinSnapshotUtils::kNumberOfBuiltins + static_cast<int>(bytecode);
+ switch (operand_scale) { // clang-format off
+ case OperandScale::kSingle: return index;
+ case OperandScale::kDouble: return index + Bytecodes::kBytecodeCount;
+ case OperandScale::kQuadruple: return index + 2 * Bytecodes::kBytecodeCount;
+ } // clang-format on
+ UNREACHABLE();
+}
+
+// static
+std::pair<interpreter::Bytecode, interpreter::OperandScale>
+BuiltinSnapshotUtils::BytecodeFromIndex(int index) {
+ DCHECK(IsHandlerIndex(index));
+
+ const int x = index - BuiltinSnapshotUtils::kNumberOfBuiltins;
+ Bytecode bytecode = Bytecodes::FromByte(x % Bytecodes::kBytecodeCount);
+ switch (x / Bytecodes::kBytecodeCount) { // clang-format off
+ case 0: return {bytecode, OperandScale::kSingle};
+ case 1: return {bytecode, OperandScale::kDouble};
+ case 2: return {bytecode, OperandScale::kQuadruple};
+ default: UNREACHABLE();
+ } // clang-format on
+}
+
+// static
+void BuiltinSnapshotUtils::ForEachBytecode(
+ std::function<void(Bytecode, OperandScale)> f) {
+ static const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
+ for (int i = 0; i < Bytecodes::kBytecodeCount; i++) {
+ f(Bytecodes::FromByte(i), operand_scale);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-snapshot-utils.h b/deps/v8/src/snapshot/builtin-snapshot-utils.h
new file mode 100644
index 0000000000..587b4a35b0
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-snapshot-utils.h
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
+#define V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
+
+#include <functional>
+
+#include "src/interpreter/interpreter.h"
+
+namespace v8 {
+namespace internal {
+
+// Constants and utility methods used by builtin and bytecode handler
+// (de)serialization.
+class BuiltinSnapshotUtils : public AllStatic {
+ using Bytecode = interpreter::Bytecode;
+ using BytecodeOperands = interpreter::BytecodeOperands;
+ using Bytecodes = interpreter::Bytecodes;
+ using Interpreter = interpreter::Interpreter;
+ using OperandScale = interpreter::OperandScale;
+
+ public:
+ static const int kFirstBuiltinIndex = 0;
+ static const int kNumberOfBuiltins = Builtins::builtin_count;
+
+ static const int kFirstHandlerIndex = kFirstBuiltinIndex + kNumberOfBuiltins;
+ static const int kNumberOfHandlers =
+ Bytecodes::kBytecodeCount * BytecodeOperands::kOperandScaleCount;
+
+ // The number of code objects in the builtin snapshot.
+ // TODO(jgruber): This could be reduced by a bit since not every
+ // {bytecode, operand_scale} combination has an associated handler
+ // (see Bytecodes::BytecodeHasHandler).
+ static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;
+
+ // Indexes into the offsets vector contained in snapshot.
+ // See e.g. BuiltinSerializer::code_offsets_.
+ static bool IsBuiltinIndex(int maybe_index);
+ static bool IsHandlerIndex(int maybe_index);
+ static int BytecodeToIndex(Bytecode bytecode, OperandScale operand_scale);
+
+ // Converts an index back into the {bytecode,operand_scale} tuple. This is the
+ // inverse operation of BytecodeToIndex().
+ static std::pair<Bytecode, OperandScale> BytecodeFromIndex(int index);
+
+ // Iteration over all {bytecode,operand_scale} pairs. Implemented here since
+ // (de)serialization depends on the iteration order.
+ static void ForEachBytecode(std::function<void(Bytecode, OperandScale)> f);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 29e1e783e4..3350ef3c0f 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -13,6 +13,7 @@
#include "src/objects-inl.h"
#include "src/snapshot/object-deserializer.h"
#include "src/snapshot/snapshot.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/version.h"
#include "src/visitors.h"
#include "src/wasm/wasm-module.h"
@@ -123,8 +124,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
- // There should be no hash table embedded. They would require rehashing.
- CHECK(!obj->IsHashTable());
+ // Embedded FixedArrays that need rehashing must support rehashing.
+ CHECK_IMPLIES(obj->NeedsRehashing(), obj->CanBeRehashed());
// We expect no instantiated function objects or contexts.
CHECK(!obj->IsJSFunction() && !obj->IsContext());
@@ -242,6 +243,8 @@ MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
return nothing;
}
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
MaybeHandle<WasmCompiledModule> maybe_result =
ObjectDeserializer::DeserializeWasmCompiledModule(isolate, &scd,
wire_bytes);
@@ -260,6 +263,8 @@ void WasmCompiledModuleSerializer::SerializeCodeObject(
switch (kind) {
case Code::WASM_FUNCTION:
case Code::JS_TO_WASM_FUNCTION: {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate()->heap());
// Because the trap handler index is not meaningful across copies and
// serializations, we need to serialize it as kInvalidIndex. We do this by
// saving the old value, setting the index to kInvalidIndex and then
@@ -276,6 +281,7 @@ void WasmCompiledModuleSerializer::SerializeCodeObject(
}
case Code::WASM_INTERPRETER_ENTRY:
case Code::WASM_TO_JS_FUNCTION:
+ case Code::WASM_TO_WASM_FUNCTION:
// Serialize the illegal builtin instead. On instantiation of a
// deserialized module, these will be replaced again.
SerializeBuiltinReference(*BUILTIN_CODE(isolate(), Illegal), how_to_code,
@@ -422,7 +428,7 @@ ScriptData* SerializedCodeData::GetScriptData() {
ScriptData* result = new ScriptData(data_, size_);
result->AcquireDataOwnership();
owns_data_ = false;
- data_ = NULL;
+ data_ = nullptr;
return result;
}
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
new file mode 100644
index 0000000000..b352409f7e
--- /dev/null
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -0,0 +1,246 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/default-deserializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/deserializer.h"
+#include "src/snapshot/startup-deserializer.h"
+
+namespace v8 {
+namespace internal {
+
+DefaultDeserializerAllocator::DefaultDeserializerAllocator(
+ Deserializer<DefaultDeserializerAllocator>* deserializer)
+ : deserializer_(deserializer) {}
+
+// We know the space requirements before deserialization and can
+// pre-allocate that reserved space. During deserialization, all we need
+// to do is to bump up the pointer for each space in the reserved
+// space. This is also used for fixing back references.
+// We may have to split up the pre-allocation into several chunks
+// because it would not fit onto a single page. We do not have to keep
+// track of when to move to the next chunk. An opcode will signal this.
+// Since multiple large objects cannot be folded into one large object
+// space allocation, we have to do an actual allocation when deserializing
+// each large object. Instead of tracking offset for back references, we
+// reference large objects by index.
+Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
+ int size) {
+ if (space == LO_SPACE) {
+ AlwaysAllocateScope scope(isolate());
+ LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
+ // TODO(jgruber): May be cleaner to pass in executability as an argument.
+ Executability exec =
+ static_cast<Executability>(deserializer_->source()->Get());
+ AllocationResult result = lo_space->AllocateRaw(size, exec);
+ HeapObject* obj = result.ToObjectChecked();
+ deserialized_large_objects_.push_back(obj);
+ return obj->address();
+ } else if (space == MAP_SPACE) {
+ DCHECK_EQ(Map::kSize, size);
+ return allocated_maps_[next_map_index_++];
+ } else {
+ DCHECK_LT(space, kNumberOfPreallocatedSpaces);
+ Address address = high_water_[space];
+ DCHECK_NOT_NULL(address);
+ high_water_[space] += size;
+#ifdef DEBUG
+ // Assert that the current reserved chunk is still big enough.
+ const Heap::Reservation& reservation = reservations_[space];
+ int chunk_index = current_chunk_[space];
+ DCHECK_LE(high_water_[space], reservation[chunk_index].end);
+#endif
+ if (space == CODE_SPACE) SkipList::Update(address, size);
+ return address;
+ }
+}
+
+Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
+ int size) {
+ Address address;
+ HeapObject* obj;
+
+ if (next_alignment_ != kWordAligned) {
+ const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
+ address = AllocateRaw(space, reserved);
+ obj = HeapObject::FromAddress(address);
+ // If one of the following assertions fails, then we are deserializing an
+ // aligned object when the filler maps have not been deserialized yet.
+ // We require filler maps as padding to align the object.
+ Heap* heap = isolate()->heap();
+ DCHECK(heap->free_space_map()->IsMap());
+ DCHECK(heap->one_pointer_filler_map()->IsMap());
+ DCHECK(heap->two_pointer_filler_map()->IsMap());
+ obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
+ address = obj->address();
+ next_alignment_ = kWordAligned;
+ return address;
+ } else {
+ return AllocateRaw(space, size);
+ }
+}
+
+void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
+ DCHECK_LT(space, kNumberOfPreallocatedSpaces);
+ uint32_t chunk_index = current_chunk_[space];
+ const Heap::Reservation& reservation = reservations_[space];
+ // Make sure the current chunk is indeed exhausted.
+ CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+ // Move to next reserved chunk.
+ chunk_index = ++current_chunk_[space];
+ CHECK_LT(chunk_index, reservation.size());
+ high_water_[space] = reservation[chunk_index].start;
+}
+
+HeapObject* DefaultDeserializerAllocator::GetMap(uint32_t index) {
+ DCHECK_LT(index, next_map_index_);
+ return HeapObject::FromAddress(allocated_maps_[index]);
+}
+
+HeapObject* DefaultDeserializerAllocator::GetLargeObject(uint32_t index) {
+ DCHECK_LT(index, deserialized_large_objects_.size());
+ return deserialized_large_objects_[index];
+}
+
+HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
+ uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK_LT(space, kNumberOfPreallocatedSpaces);
+ DCHECK_LE(chunk_index, current_chunk_[space]);
+ Address address = reservations_[space][chunk_index].start + chunk_offset;
+ if (next_alignment_ != kWordAligned) {
+ int padding = Heap::GetFillToAlign(address, next_alignment_);
+ next_alignment_ = kWordAligned;
+ DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
+ address += padding;
+ }
+ return HeapObject::FromAddress(address);
+}
+
+void DefaultDeserializerAllocator::DecodeReservation(
+ Vector<const SerializedData::Reservation> res) {
+ DCHECK_EQ(0, reservations_[NEW_SPACE].size());
+ STATIC_ASSERT(NEW_SPACE == 0);
+ int current_space = NEW_SPACE;
+ for (auto& r : res) {
+ reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
+ if (r.is_last()) current_space++;
+ }
+ DCHECK_EQ(kNumberOfSpaces, current_space);
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
+}
+
+bool DefaultDeserializerAllocator::ReserveSpace() {
+#ifdef DEBUG
+ for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
+ DCHECK_GT(reservations_[i].size(), 0);
+ }
+#endif // DEBUG
+ DCHECK(allocated_maps_.empty());
+ if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
+ return false;
+ }
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ high_water_[i] = reservations_[i][0].start;
+ }
+ return true;
+}
+
+// static
+bool DefaultDeserializerAllocator::ReserveSpace(
+ StartupDeserializer* startup_deserializer,
+ BuiltinDeserializer* builtin_deserializer) {
+ const int first_space = NEW_SPACE;
+ const int last_space = SerializerDeserializer::kNumberOfSpaces;
+ Isolate* isolate = startup_deserializer->isolate();
+
+ // Create a set of merged reservations to reserve space in one go.
+ // The BuiltinDeserializer's reservations are ignored, since our actual
+ // requirements vary based on whether lazy deserialization is enabled.
+ // Instead, we manually determine the required code-space.
+
+ Heap::Reservation merged_reservations[kNumberOfSpaces];
+ for (int i = first_space; i < last_space; i++) {
+ merged_reservations[i] =
+ startup_deserializer->allocator()->reservations_[i];
+ }
+
+ Heap::Reservation builtin_reservations =
+ builtin_deserializer->allocator()
+ ->CreateReservationsForEagerBuiltinsAndHandlers();
+ DCHECK(!builtin_reservations.empty());
+
+ for (const auto& c : builtin_reservations) {
+ merged_reservations[CODE_SPACE].push_back(c);
+ }
+
+ if (!isolate->heap()->ReserveSpace(
+ merged_reservations,
+ &startup_deserializer->allocator()->allocated_maps_)) {
+ return false;
+ }
+
+ DisallowHeapAllocation no_allocation;
+
+ // Distribute the successful allocations between both deserializers.
+ // There's nothing to be done here except for code space.
+
+ {
+ const int num_builtin_reservations =
+ static_cast<int>(builtin_reservations.size());
+ for (int i = num_builtin_reservations - 1; i >= 0; i--) {
+ const auto& c = merged_reservations[CODE_SPACE].back();
+ DCHECK_EQ(c.size, builtin_reservations[i].size);
+ DCHECK_EQ(c.size, c.end - c.start);
+ builtin_reservations[i].start = c.start;
+ builtin_reservations[i].end = c.end;
+ merged_reservations[CODE_SPACE].pop_back();
+ }
+
+ builtin_deserializer->allocator()->InitializeFromReservations(
+ builtin_reservations);
+ }
+
+ // Write back startup reservations.
+
+ for (int i = first_space; i < last_space; i++) {
+ startup_deserializer->allocator()->reservations_[i].swap(
+ merged_reservations[i]);
+ }
+
+ for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
+ startup_deserializer->allocator()->high_water_[i] =
+ startup_deserializer->allocator()->reservations_[i][0].start;
+ }
+
+ return true;
+}
+
+bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+ const uint32_t chunk_index = current_chunk_[space];
+ if (reservations_[space].size() != chunk_index + 1) {
+ return false;
+ }
+ if (reservations_[space][chunk_index].end != high_water_[space]) {
+ return false;
+ }
+ }
+ return (allocated_maps_.size() == next_map_index_);
+}
+
+void DefaultDeserializerAllocator::
+ RegisterDeserializedObjectsForBlackAllocation() {
+ isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
+ reservations_, deserialized_large_objects_, allocated_maps_);
+}
+
+Isolate* DefaultDeserializerAllocator::isolate() const {
+ return deserializer_->isolate();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/default-deserializer-allocator.h
new file mode 100644
index 0000000000..08d9f48cec
--- /dev/null
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.h
@@ -0,0 +1,102 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Deserializer;
+
+class BuiltinDeserializer;
+class StartupDeserializer;
+
+class DefaultDeserializerAllocator final {
+ public:
+ DefaultDeserializerAllocator(
+ Deserializer<DefaultDeserializerAllocator>* deserializer);
+
+ // ------- Allocation Methods -------
+ // Methods related to memory allocation during deserialization.
+
+ Address Allocate(AllocationSpace space, int size);
+
+ void MoveToNextChunk(AllocationSpace space);
+ void SetAlignment(AllocationAlignment alignment) {
+ DCHECK_EQ(kWordAligned, next_alignment_);
+ DCHECK_LE(kWordAligned, alignment);
+ DCHECK_LE(alignment, kDoubleUnaligned);
+ next_alignment_ = static_cast<AllocationAlignment>(alignment);
+ }
+
+ HeapObject* GetMap(uint32_t index);
+ HeapObject* GetLargeObject(uint32_t index);
+ HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset);
+
+ // ------- Reservation Methods -------
+ // Methods related to memory reservations (prior to deserialization).
+
+ void DecodeReservation(Vector<const SerializedData::Reservation> res);
+ bool ReserveSpace();
+
+ // Atomically reserves space for the two given deserializers. Guarantees
+ // reservation for both without garbage collection in-between.
+ static bool ReserveSpace(StartupDeserializer* startup_deserializer,
+ BuiltinDeserializer* builtin_deserializer);
+
+ bool ReservationsAreFullyUsed() const;
+
+ // ------- Misc Utility Methods -------
+
+ void RegisterDeserializedObjectsForBlackAllocation();
+
+ private:
+ Isolate* isolate() const;
+
+ // Raw allocation without considering alignment.
+ Address AllocateRaw(AllocationSpace space, int size);
+
+ private:
+ static constexpr int kNumberOfPreallocatedSpaces =
+ SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static constexpr int kNumberOfSpaces =
+ SerializerDeserializer::kNumberOfSpaces;
+
+ // The address of the next object that will be allocated in each space.
+ // Each space has a number of chunks reserved by the GC, with each chunk
+ // fitting into a page. Deserialized objects are allocated into the
+ // current chunk of the target space by bumping up high water mark.
+ Heap::Reservation reservations_[kNumberOfSpaces];
+ uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
+ Address high_water_[kNumberOfPreallocatedSpaces];
+
+ // The alignment of the next allocation.
+ AllocationAlignment next_alignment_ = kWordAligned;
+
+ // All required maps are pre-allocated during reservation. {next_map_index_}
+ // stores the index of the next map to return from allocation.
+ uint32_t next_map_index_ = 0;
+ std::vector<Address> allocated_maps_;
+
+ // Allocated large objects are kept in this map and may be fetched later as
+ // back-references.
+ std::vector<HeapObject*> deserialized_large_objects_;
+
+ // The current deserializer.
+ Deserializer<DefaultDeserializerAllocator>* const deserializer_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultDeserializerAllocator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.cc b/deps/v8/src/snapshot/default-serializer-allocator.cc
index 1dfa21ad2b..b8cc55ff2b 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-serializer-allocator.cc
@@ -80,13 +80,6 @@ bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
}
}
}
-
-bool DefaultSerializerAllocator::HasNotExceededFirstPageOfEachSpace() const {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- if (!completed_chunks_[i].empty()) return false;
- }
- return true;
-}
#endif
std::vector<SerializedData::Reservation>
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.h b/deps/v8/src/snapshot/default-serializer-allocator.h
index 7bd247aaf1..b01532752a 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.h
+++ b/deps/v8/src/snapshot/default-serializer-allocator.h
@@ -26,7 +26,6 @@ class DefaultSerializerAllocator final {
#ifdef DEBUG
bool BackReferenceIsAlreadyAllocated(
SerializerReference back_reference) const;
- bool HasNotExceededFirstPageOfEachSpace() const;
#endif
std::vector<SerializedData::Reservation> EncodeReservations() const;
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 1eb15d6c38..5d7d551c98 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -4,134 +4,18 @@
#include "src/snapshot/deserializer.h"
-#include "src/api.h"
#include "src/assembler-inl.h"
-#include "src/bootstrapper.h"
-#include "src/deoptimizer.h"
-#include "src/external-reference-table.h"
-#include "src/heap/heap-inl.h"
#include "src/isolate.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/builtin-deserializer.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/string.h"
+#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/startup-deserializer.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
-void Deserializer::DecodeReservation(
- Vector<const SerializedData::Reservation> res) {
- DCHECK_EQ(0, reservations_[NEW_SPACE].size());
- STATIC_ASSERT(NEW_SPACE == 0);
- int current_space = NEW_SPACE;
- for (auto& r : res) {
- reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
- if (r.is_last()) current_space++;
- }
- DCHECK_EQ(kNumberOfSpaces, current_space);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-void Deserializer::RegisterDeserializedObjectsForBlackAllocation() {
- isolate_->heap()->RegisterDeserializedObjectsForBlackAllocation(
- reservations_, deserialized_large_objects_, allocated_maps_);
-}
-
-bool Deserializer::ReserveSpace() {
-#ifdef DEBUG
- for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
- DCHECK(reservations_[i].size() > 0);
- }
-#endif // DEBUG
- DCHECK(allocated_maps_.empty());
- if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
- return false;
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- high_water_[i] = reservations_[i][0].start;
- }
- return true;
-}
-
-// static
-bool Deserializer::ReserveSpace(StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer) {
- const int first_space = NEW_SPACE;
- const int last_space = SerializerDeserializer::kNumberOfSpaces;
- Isolate* isolate = startup_deserializer->isolate();
-
- // Create a set of merged reservations to reserve space in one go.
- // The BuiltinDeserializer's reservations are ignored, since our actual
- // requirements vary based on whether lazy deserialization is enabled.
- // Instead, we manually determine the required code-space.
-
- DCHECK(builtin_deserializer->ReservesOnlyCodeSpace());
- Heap::Reservation merged_reservations[kNumberOfSpaces];
- for (int i = first_space; i < last_space; i++) {
- merged_reservations[i] = startup_deserializer->reservations_[i];
- }
-
- Heap::Reservation builtin_reservations =
- builtin_deserializer->CreateReservationsForEagerBuiltins();
- DCHECK(!builtin_reservations.empty());
-
- for (const auto& c : builtin_reservations) {
- merged_reservations[CODE_SPACE].push_back(c);
- }
-
- if (!isolate->heap()->ReserveSpace(merged_reservations,
- &startup_deserializer->allocated_maps_)) {
- return false;
- }
-
- DisallowHeapAllocation no_allocation;
-
- // Distribute the successful allocations between both deserializers.
- // There's nothing to be done here except for code space.
-
- {
- const int num_builtin_reservations =
- static_cast<int>(builtin_reservations.size());
- for (int i = num_builtin_reservations - 1; i >= 0; i--) {
- const auto& c = merged_reservations[CODE_SPACE].back();
- DCHECK_EQ(c.size, builtin_reservations[i].size);
- DCHECK_EQ(c.size, c.end - c.start);
- builtin_reservations[i].start = c.start;
- builtin_reservations[i].end = c.end;
- merged_reservations[CODE_SPACE].pop_back();
- }
-
- builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
- }
-
- // Write back startup reservations.
-
- for (int i = first_space; i < last_space; i++) {
- startup_deserializer->reservations_[i].swap(merged_reservations[i]);
- }
-
- for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
- startup_deserializer->high_water_[i] =
- startup_deserializer->reservations_[i][0].start;
- builtin_deserializer->high_water_[i] = nullptr;
- }
-
- return true;
-}
-
-bool Deserializer::ReservesOnlyCodeSpace() const {
- for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
- if (space == CODE_SPACE) continue;
- const auto& r = reservations_[space];
- for (const Heap::Chunk& c : r)
- if (c.size != 0) return false;
- }
- return true;
-}
-
-void Deserializer::Initialize(Isolate* isolate) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
@@ -150,60 +34,65 @@ void Deserializer::Initialize(Isolate* isolate) {
SerializedData::ComputeMagicNumber(external_reference_table_));
}
-void Deserializer::SortMapDescriptors() {
- for (const auto& address : allocated_maps_) {
- Map* map = Map::cast(HeapObject::FromAddress(address));
- if (map->instance_descriptors()->number_of_descriptors() > 1) {
- map->instance_descriptors()->Sort();
- }
- }
+template <class AllocatorT>
+bool Deserializer<AllocatorT>::IsLazyDeserializationEnabled() const {
+ return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
}
-bool Deserializer::IsLazyDeserializationEnabled() const {
- return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Rehash() {
+ DCHECK(can_rehash() || deserializing_user_code());
+ for (const auto& item : to_rehash_) item->RehashBasedOnMap();
}
-Deserializer::~Deserializer() {
+template <class AllocatorT>
+Deserializer<AllocatorT>::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
// Check that we only have padding bytes remaining.
while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- int chunk_index = current_chunk_[space];
- DCHECK_EQ(reservations_[space].size(), chunk_index + 1);
- DCHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
- }
- DCHECK_EQ(allocated_maps_.size(), next_map_index_);
+ // Check that we've fully used all reserved space.
+ DCHECK(allocator()->ReservationsAreFullyUsed());
#endif // DEBUG
}
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
-void Deserializer::VisitRootPointers(Root root, Object** start, Object** end) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ // Builtins and bytecode handlers are deserialized in a separate pass by the
+ // BuiltinDeserializer.
+ if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
+
// The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using NULL as the address.
- ReadData(start, end, NEW_SPACE, NULL);
+ // to update the remembered using nullptr as the address.
+ ReadData(start, end, NEW_SPACE, nullptr);
}
-void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Synchronize(
+ VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
- deserializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
}
-void Deserializer::DeserializeDeferredObjects() {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
switch (code) {
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(code);
+ case kAlignmentPrefix + 2: {
+ int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
+ allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
break;
+ }
default: {
int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
+ DCHECK_LE(space, kNumberOfSpaces);
+ DCHECK_EQ(code - space, kNewObject);
HeapObject* object = GetBackReferencedObject(space);
int size = source_.GetInt() << kPointerSizeLog2;
Address obj_address = object->address();
@@ -241,24 +130,33 @@ uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
return string->hash_field();
}
-HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
- if (deserializing_user_code()) {
+template <class AllocatorT>
+HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
+ int space) {
+ if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj->IsString()) {
+ // Uninitialize hash field as we need to recompute the hash.
String* string = String::cast(obj);
- // Uninitialize hash field as the hash seed may have changed.
string->set_hash_field(String::kEmptyHashField);
+ } else if (obj->NeedsRehashing()) {
+ to_rehash_.push_back(obj);
+ }
+ }
+
+ if (deserializing_user_code()) {
+ if (obj->IsString()) {
+ String* string = String::cast(obj);
if (string->IsInternalizedString()) {
// Canonicalize the internalized string. If it already exists in the
// string table, set it to forward to the existing one.
StringTableInsertionKey key(string);
- String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
- if (canonical == NULL) {
- new_internalized_strings_.push_back(handle(string));
- return string;
- } else {
- string->SetForwardedInternalizedString(canonical);
- return canonical;
- }
+ String* canonical =
+ StringTable::ForwardStringIfExists(isolate_, &key, string);
+
+ if (canonical != nullptr) return canonical;
+
+ new_internalized_strings_.push_back(handle(string));
+ return string;
}
} else if (obj->IsScript()) {
new_scripts_.push_back(handle(Script::cast(obj)));
@@ -290,6 +188,10 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (isolate_->external_reference_redirector()) {
accessor_infos_.push_back(AccessorInfo::cast(obj));
}
+ } else if (obj->IsCallHandlerInfo()) {
+ if (isolate_->external_reference_redirector()) {
+ call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
+ }
} else if (obj->IsExternalOneByteString()) {
DCHECK(obj->map() == isolate_->heap()->native_source_string_map());
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
@@ -298,6 +200,21 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
NativesExternalStringResource::DecodeForDeserialization(
string->resource()));
isolate_->heap()->RegisterExternalString(string);
+ } else if (obj->IsJSTypedArray()) {
+ JSTypedArray* typed_array = JSTypedArray::cast(obj);
+ CHECK(typed_array->byte_offset()->IsSmi());
+ int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ if (byte_offset > 0) {
+ FixedTypedArrayBase* elements =
+ FixedTypedArrayBase::cast(typed_array->elements());
+ // Must be off-heap layout.
+ DCHECK_NULL(elements->base_pointer());
+
+ void* pointer_with_offset = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(elements->external_pointer()) +
+ byte_offset);
+ elements->set_external_pointer(pointer_with_offset);
+ }
} else if (obj->IsJSArrayBuffer()) {
JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
@@ -315,61 +232,46 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (fta->base_pointer() == nullptr) {
Smi* store_index = reinterpret_cast<Smi*>(fta->external_pointer());
void* backing_store = off_heap_backing_stores_[store_index->value()];
-
fta->set_external_pointer(backing_store);
}
}
- if (FLAG_rehash_snapshot && can_rehash_ && !deserializing_user_code()) {
- if (obj->IsString()) {
- // Uninitialize hash field as we are going to reinitialize the hash seed.
- String* string = String::cast(obj);
- string->set_hash_field(String::kEmptyHashField);
- } else if (obj->IsTransitionArray() &&
- TransitionArray::cast(obj)->number_of_entries() > 1) {
- transition_arrays_.push_back(TransitionArray::cast(obj));
- }
- }
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
return obj;
}
-int Deserializer::MaybeReplaceWithDeserializeLazy(int builtin_id) {
+template <class AllocatorT>
+int Deserializer<AllocatorT>::MaybeReplaceWithDeserializeLazy(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- return (IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id) &&
- !deserializing_builtins_)
+ return IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id)
? Builtins::kDeserializeLazy
: builtin_id;
}
-HeapObject* Deserializer::GetBackReferencedObject(int space) {
+template <class AllocatorT>
+HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
HeapObject* obj;
SerializerReference back_reference =
SerializerReference::FromBitfield(source_.GetInt());
- if (space == LO_SPACE) {
- uint32_t index = back_reference.large_object_index();
- obj = deserialized_large_objects_[index];
- } else if (space == MAP_SPACE) {
- int index = back_reference.map_index();
- DCHECK(index < next_map_index_);
- obj = HeapObject::FromAddress(allocated_maps_[index]);
- } else {
- DCHECK(space < kNumberOfPreallocatedSpaces);
- uint32_t chunk_index = back_reference.chunk_index();
- DCHECK_LE(chunk_index, current_chunk_[space]);
- uint32_t chunk_offset = back_reference.chunk_offset();
- Address address = reservations_[space][chunk_index].start + chunk_offset;
- if (next_alignment_ != kWordAligned) {
- int padding = Heap::GetFillToAlign(address, next_alignment_);
- next_alignment_ = kWordAligned;
- DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
- address += padding;
- }
- obj = HeapObject::FromAddress(address);
+
+ switch (space) {
+ case LO_SPACE:
+ obj = allocator()->GetLargeObject(back_reference.large_object_index());
+ break;
+ case MAP_SPACE:
+ obj = allocator()->GetMap(back_reference.map_index());
+ break;
+ default:
+ obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
+ back_reference.chunk_index(),
+ back_reference.chunk_offset());
+ break;
}
- if (deserializing_user_code() && obj->IsInternalizedString()) {
- obj = String::cast(obj)->GetForwardedInternalizedString();
+
+ if (deserializing_user_code() && obj->IsThinString()) {
+ obj = ThinString::cast(obj)->actual();
}
+
hot_objects_.Add(obj);
return obj;
}
@@ -379,29 +281,14 @@ HeapObject* Deserializer::GetBackReferencedObject(int space) {
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number, Object** write_back) {
- Address address;
- HeapObject* obj;
- int size = source_.GetInt() << kObjectAlignmentBits;
-
- if (next_alignment_ != kWordAligned) {
- int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
- address = Allocate(space_number, reserved);
- obj = HeapObject::FromAddress(address);
- // If one of the following assertions fails, then we are deserializing an
- // aligned object when the filler maps have not been deserialized yet.
- // We require filler maps as padding to align the object.
- Heap* heap = isolate_->heap();
- DCHECK(heap->free_space_map()->IsMap());
- DCHECK(heap->one_pointer_filler_map()->IsMap());
- DCHECK(heap->two_pointer_filler_map()->IsMap());
- obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
- address = obj->address();
- next_alignment_ = kWordAligned;
- } else {
- address = Allocate(space_number, size);
- obj = HeapObject::FromAddress(address);
- }
+template <class AllocatorT>
+void Deserializer<AllocatorT>::ReadObject(int space_number,
+ Object** write_back) {
+ const int size = source_.GetInt() << kObjectAlignmentBits;
+
+ Address address =
+ allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
+ HeapObject* obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
Object** current = reinterpret_cast<Object**>(address);
@@ -423,46 +310,8 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
#endif // DEBUG
}
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address Deserializer::Allocate(int space_index, int size) {
- if (space_index == LO_SPACE) {
- AlwaysAllocateScope scope(isolate_);
- LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
- Executability exec = static_cast<Executability>(source_.Get());
- AllocationResult result = lo_space->AllocateRaw(size, exec);
- HeapObject* obj = result.ToObjectChecked();
- deserialized_large_objects_.push_back(obj);
- return obj->address();
- } else if (space_index == MAP_SPACE) {
- DCHECK_EQ(Map::kSize, size);
- return allocated_maps_[next_map_index_++];
- } else {
- DCHECK(space_index < kNumberOfPreallocatedSpaces);
- Address address = high_water_[space_index];
- DCHECK_NOT_NULL(address);
- high_water_[space_index] += size;
-#ifdef DEBUG
- // Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space_index];
- int chunk_index = current_chunk_[space_index];
- DCHECK_LE(high_water_[space_index], reservation[chunk_index].end);
-#endif
- if (space_index == CODE_SPACE) SkipList::Update(address, size);
- return address;
- }
-}
-
-Object* Deserializer::ReadDataSingle() {
+template <class AllocatorT>
+Object* Deserializer<AllocatorT>::ReadDataSingle() {
Object* o;
Object** start = &o;
Object** end = start + 1;
@@ -474,14 +323,24 @@ Object* Deserializer::ReadDataSingle() {
return o;
}
-bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
- Address current_object_address) {
+static void NoExternalReferencesCallback() {
+ // The following check will trigger if a function or object template
+ // with references to native functions have been deserialized from
+ // snapshot, but no actual external references were provided when the
+ // isolate was created.
+ CHECK_WITH_MSG(false, "No external references provided via API");
+}
+
+template <class AllocatorT>
+bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
+ int source_space,
+ Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
bool write_barrier_needed =
- (current_object_address != NULL && source_space != NEW_SPACE &&
+ (current_object_address != nullptr && source_space != NEW_SPACE &&
source_space != CODE_SPACE);
while (current < limit) {
byte data = source_.Get();
@@ -618,15 +477,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kNextChunk: {
int space = source_.Get();
- DCHECK(space < kNumberOfPreallocatedSpaces);
- int chunk_index = current_chunk_[space];
- const Heap::Reservation& reservation = reservations_[space];
- // Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
- // Move to next reserved chunk.
- chunk_index = ++current_chunk_[space];
- CHECK_LT(chunk_index, reservation.size());
- high_water_[space] = reservation[chunk_index].start;
+ allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
break;
}
@@ -691,10 +542,16 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
current = reinterpret_cast<Object**>(
reinterpret_cast<Address>(current) + skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- DCHECK_WITH_MSG(reference_id < num_api_references_,
- "too few external references provided through the API");
- Address address = reinterpret_cast<Address>(
- isolate->api_external_references()[reference_id]);
+ Address address;
+ if (isolate->api_external_references()) {
+ DCHECK_WITH_MSG(
+ reference_id < num_api_references_,
+ "too few external references provided through the API");
+ address = reinterpret_cast<Address>(
+ isolate->api_external_references()[reference_id]);
+ } else {
+ address = reinterpret_cast<Address>(NoExternalReferencesCallback);
+ }
memcpy(current, &address, kPointerSize);
current++;
break;
@@ -702,9 +559,11 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(data);
+ case kAlignmentPrefix + 2: {
+ int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
+ allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
break;
+ }
STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
@@ -783,10 +642,13 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
return true;
}
+template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
-Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
- Address current_object_address, byte data,
- bool write_barrier_needed) {
+Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
+ Object** current,
+ Address current_object_address,
+ byte data,
+ bool write_barrier_needed) {
bool emit_write_barrier = false;
bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
@@ -795,7 +657,7 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
ReadObject(space_number, current);
emit_write_barrier = (space_number == NEW_SPACE);
} else {
- Object* new_object = NULL; /* May not be a real Object pointer. */
+ Object* new_object = nullptr; /* May not be a real Object pointer. */
if (where == kNewObject) {
ReadObject(space_number, &new_object);
} else if (where == kBackref) {
@@ -829,13 +691,13 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
new_object = *attached_objects_[index];
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
} else {
- DCHECK(where == kBuiltin);
+ DCHECK_EQ(where, kBuiltin);
int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
if (within == kInnerPointer) {
- DCHECK(how == kFromCode);
+ DCHECK_EQ(how, kFromCode);
if (where == kBuiltin) {
// At this point, new_object may still be uninitialized, thus the
// unchecked Code cast.
@@ -877,5 +739,9 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
return current;
}
+// Explicit instantiation.
+template class Deserializer<BuiltinDeserializerAllocator>;
+template class Deserializer<DefaultDeserializerAllocator>;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 5aa2f8d656..5c9bda43ac 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -7,14 +7,17 @@
#include <vector>
-#include "src/heap/heap.h"
-#include "src/objects.h"
+#include "src/objects/js-array.h"
+#include "src/snapshot/default-deserializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
+class HeapObject;
+class Object;
+
// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
@@ -25,55 +28,33 @@ namespace internal {
#define V8_CODE_EMBEDS_OBJECT_POINTER 0
#endif
-class BuiltinDeserializer;
-class Heap;
-class StartupDeserializer;
-
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+template <class AllocatorT = DefaultDeserializerAllocator>
class Deserializer : public SerializerDeserializer {
public:
~Deserializer() override;
- // Add an object to back an attached reference. The order to add objects must
- // mirror the order they are added in the serializer.
- void AddAttachedObject(Handle<HeapObject> attached_object) {
- attached_objects_.push_back(attached_object);
- }
-
void SetRehashability(bool v) { can_rehash_ = v; }
protected:
// Create a deserializer from a snapshot byte source.
template <class Data>
Deserializer(Data* data, bool deserializing_user_code)
- : isolate_(NULL),
+ : isolate_(nullptr),
source_(data->Payload()),
magic_number_(data->GetMagicNumber()),
- next_map_index_(0),
- external_reference_table_(NULL),
- deserialized_large_objects_(0),
+ external_reference_table_(nullptr),
+ allocator_(this),
deserializing_user_code_(deserializing_user_code),
- next_alignment_(kWordAligned),
can_rehash_(false) {
- DecodeReservation(data->Reservations());
- // We start the indicies here at 1, so that we can distinguish between an
+ allocator()->DecodeReservation(data->Reservations());
+ // We start the indices here at 1, so that we can distinguish between an
// actual index and a nullptr in a deserialized object requiring fix-up.
off_heap_backing_stores_.push_back(nullptr);
}
- bool ReserveSpace();
-
- // Atomically reserves space for the two given deserializers. Guarantees
- // reservation for both without garbage collection in-between.
- static bool ReserveSpace(StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer);
- bool ReservesOnlyCodeSpace() const;
-
void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
- void RegisterDeserializedObjectsForBlackAllocation();
-
- virtual Address Allocate(int space_index, int size);
// Deserializes into a single pointer and returns the resulting object.
Object* ReadDataSingle();
@@ -82,8 +63,11 @@ class Deserializer : public SerializerDeserializer {
// snapshot by chunk index and offset.
HeapObject* GetBackReferencedObject(int space);
- // Sort descriptors of deserialized maps using new string hashes.
- void SortMapDescriptors();
+ // Add an object to back an attached reference. The order to add objects must
+ // mirror the order they are added in the serializer.
+ void AddAttachedObject(Handle<HeapObject> attached_object) {
+ attached_objects_.push_back(attached_object);
+ }
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
@@ -93,42 +77,36 @@ class Deserializer : public SerializerDeserializer {
const std::vector<AccessorInfo*>& accessor_infos() const {
return accessor_infos_;
}
+ const std::vector<CallHandlerInfo*>& call_handler_infos() const {
+ return call_handler_infos_;
+ }
const std::vector<Handle<String>>& new_internalized_strings() const {
return new_internalized_strings_;
}
const std::vector<Handle<Script>>& new_scripts() const {
return new_scripts_;
}
- const std::vector<TransitionArray*>& transition_arrays() const {
- return transition_arrays_;
- }
+
+ AllocatorT* allocator() { return &allocator_; }
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
bool IsLazyDeserializationEnabled() const;
+ void Rehash();
+
private:
void VisitRootPointers(Root root, Object** start, Object** end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
- void DecodeReservation(Vector<const SerializedData::Reservation> res);
-
void UnalignedCopy(Object** dest, Object** src) {
memcpy(dest, src, sizeof(*src));
}
- void SetAlignment(byte data) {
- DCHECK_EQ(kWordAligned, next_alignment_);
- int alignment = data - (kAlignmentPrefix - 1);
- DCHECK_LE(kWordAligned, alignment);
- DCHECK_LE(alignment, kDoubleUnaligned);
- next_alignment_ = static_cast<AllocationAlignment>(alignment);
- }
-
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
- // of the object we are writing into, or NULL if we are not writing into an
+ // of the object we are writing into, or nullptr if we are not writing into an
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
bool ReadData(Object** start, Object** end, int space,
@@ -159,41 +137,29 @@ class Deserializer : public SerializerDeserializer {
SnapshotByteSource source_;
uint32_t magic_number_;
- // The address of the next object that will be allocated in each space.
- // Each space has a number of chunks reserved by the GC, with each chunk
- // fitting into a page. Deserialized objects are allocated into the
- // current chunk of the target space by bumping up high water mark.
- Heap::Reservation reservations_[kNumberOfSpaces];
- uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
- Address high_water_[kNumberOfPreallocatedSpaces];
- int next_map_index_;
- std::vector<Address> allocated_maps_;
-
ExternalReferenceTable* external_reference_table_;
- std::vector<HeapObject*> deserialized_large_objects_;
std::vector<Code*> new_code_objects_;
std::vector<AccessorInfo*> accessor_infos_;
+ std::vector<CallHandlerInfo*> call_handler_infos_;
std::vector<Handle<String>> new_internalized_strings_;
std::vector<Handle<Script>> new_scripts_;
- std::vector<TransitionArray*> transition_arrays_;
std::vector<byte*> off_heap_backing_stores_;
+ AllocatorT allocator_;
const bool deserializing_user_code_;
- // TODO(jgruber): This workaround will no longer be necessary once builtin
- // reference patching has been removed (through advance allocation).
- bool deserializing_builtins_ = false;
-
- AllocationAlignment next_alignment_;
-
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
+ std::vector<HeapObject*> to_rehash_;
#ifdef DEBUG
uint32_t num_api_references_;
#endif // DEBUG
+ // For source(), isolate(), and allocator().
+ friend class DefaultDeserializerAllocator;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index a6d9862c10..b1ecd61f2f 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -17,7 +17,8 @@
class SnapshotWriter {
public:
- SnapshotWriter() : snapshot_cpp_path_(NULL), snapshot_blob_path_(NULL) {}
+ SnapshotWriter()
+ : snapshot_cpp_path_(nullptr), snapshot_blob_path_(nullptr) {}
void SetSnapshotFile(const char* snapshot_cpp_file) {
snapshot_cpp_path_ = snapshot_cpp_file;
@@ -102,7 +103,7 @@ class SnapshotWriter {
static FILE* GetFileDescriptorOrDie(const char* filename) {
FILE* fp = v8::base::OS::FOpen(filename, "wb");
- if (fp == NULL) {
+ if (fp == nullptr) {
i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
exit(1);
}
@@ -114,10 +115,10 @@ class SnapshotWriter {
};
char* GetExtraCode(char* filename, const char* description) {
- if (filename == NULL || strlen(filename) == 0) return NULL;
+ if (filename == nullptr || strlen(filename) == 0) return nullptr;
::printf("Loading script for %s: %s\n", description, filename);
FILE* file = v8::base::OS::FOpen(filename, "rb");
- if (file == NULL) {
+ if (file == nullptr) {
fprintf(stderr, "Failed to open '%s': errno %d\n", filename, errno);
exit(1);
}
@@ -155,8 +156,8 @@ int main(int argc, char** argv) {
i::CpuFeatures::Probe(true);
v8::V8::InitializeICUDefaultLocation(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
{
@@ -164,11 +165,13 @@ int main(int argc, char** argv) {
if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
- char* embed_script = GetExtraCode(argc >= 2 ? argv[1] : NULL, "embedding");
+ char* embed_script =
+ GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding");
v8::StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
delete[] embed_script;
- char* warmup_script = GetExtraCode(argc >= 3 ? argv[2] : NULL, "warm up");
+ char* warmup_script =
+ GetExtraCode(argc >= 3 ? argv[2] : nullptr, "warm up");
if (warmup_script) {
v8::StartupData cold = blob;
blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
@@ -183,6 +186,5 @@ int main(int argc, char** argv) {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
return 0;
}
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index 71f81ea971..e865498c7d 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -15,7 +15,7 @@ NativesExternalStringResource::NativesExternalStringResource(NativeType type,
int index)
: type_(type), index_(index) {
Vector<const char> source;
- DCHECK(0 <= index);
+ DCHECK_LE(0, index);
switch (type_) {
case CORE:
DCHECK(index < Natives::GetBuiltinsCount());
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index d8e74049f1..ea2a9e6f84 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -120,23 +120,21 @@ class NativesHolder {
CHECK(store);
holder_ = store;
}
- static bool empty() { return holder_ == NULL; }
+ static bool empty() { return holder_ == nullptr; }
static void Dispose() {
delete holder_;
- holder_ = NULL;
+ holder_ = nullptr;
}
private:
static NativesStore* holder_;
};
-template<NativeType type>
-NativesStore* NativesHolder<type>::holder_ = NULL;
-
+template <NativeType type>
+NativesStore* NativesHolder<type>::holder_ = nullptr;
// The natives blob. Memory is owned by caller.
-static StartupData* natives_blob_ = NULL;
-
+static StartupData* natives_blob_ = nullptr;
/**
* Read the Natives blob, as previously set by SetNativesFromFile.
@@ -161,7 +159,7 @@ void SetNativesFromFile(StartupData* natives_blob) {
DCHECK(!natives_blob_);
DCHECK(natives_blob);
DCHECK(natives_blob->data);
- DCHECK(natives_blob->raw_size > 0);
+ DCHECK_GT(natives_blob->raw_size, 0);
natives_blob_ = natives_blob;
ReadNatives();
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 253480535b..3f92e7757f 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -67,7 +67,8 @@ ObjectDeserializer::DeserializeWasmCompiledModule(
MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
Initialize(isolate);
- if (!ReserveSpace()) return MaybeHandle<HeapObject>();
+
+ if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();
DCHECK(deserializing_user_code());
HandleScope scope(isolate);
@@ -79,7 +80,8 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
DeserializeDeferredObjects();
FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
result = Handle<HeapObject>(HeapObject::cast(root));
- RegisterDeserializedObjectsForBlackAllocation();
+ Rehash();
+ allocator()->RegisterDeserializedObjectsForBlackAllocation();
}
CommitPostProcessedObjects();
return scope.CloseAndEscape(result);
@@ -97,12 +99,12 @@ void ObjectDeserializer::
}
void ObjectDeserializer::CommitPostProcessedObjects() {
- CHECK(new_internalized_strings().size() <= kMaxInt);
+ CHECK_LE(new_internalized_strings().size(), kMaxInt);
StringTable::EnsureCapacityForDeserialization(
isolate(), static_cast<int>(new_internalized_strings().size()));
for (Handle<String> string : new_internalized_strings()) {
StringTableInsertionKey key(*string);
- DCHECK_NULL(StringTable::LookupKeyIfExists(isolate(), &key));
+ DCHECK_NULL(StringTable::ForwardStringIfExists(isolate(), &key, *string));
StringTable::LookupKey(isolate(), &key);
}
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index 00e6a5b486..8f236f5f20 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -15,7 +15,7 @@ class SharedFunctionInfo;
class WasmCompiledModule;
// Deserializes the object graph rooted at a given object.
-class ObjectDeserializer final : public Deserializer {
+class ObjectDeserializer final : public Deserializer<> {
public:
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index f4786006f8..41df5dbba7 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -30,7 +30,9 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
Initialize(isolate);
- if (!ReserveSpace()) V8::FatalProcessOutOfMemory("PartialDeserializer");
+ if (!allocator()->ReserveSpace()) {
+ V8::FatalProcessOutOfMemory("PartialDeserializer");
+ }
AddAttachedObject(global_proxy);
@@ -44,14 +46,14 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
- RegisterDeserializedObjectsForBlackAllocation();
+ allocator()->RegisterDeserializedObjectsForBlackAllocation();
// There's no code deserialized here. If this assert fires then that's
// changed and logging should be added to notify the profiler et al of the
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
- if (FLAG_rehash_snapshot && can_rehash()) RehashContext(Context::cast(root));
+ if (FLAG_rehash_snapshot && can_rehash()) Rehash();
return Handle<Object>(root, isolate);
}
@@ -67,8 +69,8 @@ void PartialDeserializer::DeserializeEmbedderFields(
code = source()->Get()) {
HandleScope scope(isolate());
int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
+ DCHECK_LE(space, kNumberOfSpaces);
+ DCHECK_EQ(code - space, kNewObject);
Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
isolate());
int index = source()->GetInt();
@@ -82,13 +84,5 @@ void PartialDeserializer::DeserializeEmbedderFields(
delete[] data;
}
}
-
-void PartialDeserializer::RehashContext(Context* context) {
- DCHECK(can_rehash());
- for (const auto& array : transition_arrays()) array->Sort();
- context->global_object()->global_dictionary()->Rehash();
- SortMapDescriptors();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/partial-deserializer.h b/deps/v8/src/snapshot/partial-deserializer.h
index fec28ce0af..bbc55b7b51 100644
--- a/deps/v8/src/snapshot/partial-deserializer.h
+++ b/deps/v8/src/snapshot/partial-deserializer.h
@@ -15,7 +15,7 @@ class Context;
// Deserializes the context-dependent object graph rooted at a given object.
// The PartialDeserializer is not expected to deserialize any code objects.
-class PartialDeserializer final : public Deserializer {
+class PartialDeserializer final : public Deserializer<> {
public:
static MaybeHandle<Context> DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
@@ -33,9 +33,6 @@ class PartialDeserializer final : public Deserializer {
void DeserializeEmbedderFields(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
-
- // Rehash after deserializing a context.
- void RehashContext(Context* context);
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index cae28234c1..11b21a17b3 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -17,7 +17,6 @@ PartialSerializer::PartialSerializer(
: Serializer(isolate),
startup_serializer_(startup_serializer),
serialize_embedder_fields_(callback),
- rehashable_global_dictionary_(nullptr),
can_be_rehashed_(true) {
InitializeCodeAddressMap();
}
@@ -42,8 +41,6 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
// Reset math random cache to get fresh random numbers.
context->set_math_random_index(Smi::kZero);
context->set_math_random_cache(isolate()->heap()->undefined_value());
- DCHECK_NULL(rehashable_global_dictionary_);
- rehashable_global_dictionary_ = context->global_object()->global_dictionary();
VisitRootPointer(Root::kPartialSnapshotCache, o);
SerializeDeferredObjects();
@@ -53,6 +50,8 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
+ DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
+
BuiltinReferenceSerializationMode mode =
startup_serializer_->clear_function_code() ? kCanonicalizeCompileLazy
: kDefault;
@@ -102,7 +101,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
- if (obj->IsHashTable()) CheckRehashability(obj);
+ CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
@@ -153,17 +152,10 @@ void PartialSerializer::SerializeEmbedderFields() {
sink_.Put(kSynchronize, "Finished with embedder fields data");
}
-void PartialSerializer::CheckRehashability(HeapObject* table) {
- DCHECK(table->IsHashTable());
+void PartialSerializer::CheckRehashability(HeapObject* obj) {
if (!can_be_rehashed_) return;
- if (table->IsUnseededNumberDictionary()) return;
- if (table->IsOrderedHashMap() &&
- OrderedHashMap::cast(table)->NumberOfElements() == 0) {
- return;
- }
- // We can only correctly rehash if the global dictionary is the only hash
- // table that we deserialize.
- if (table == rehashable_global_dictionary_) return;
+ if (!obj->NeedsRehashing()) return;
+ if (obj->CanBeRehashed()) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 6eb8b91436..b436c40cbe 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -33,12 +33,11 @@ class PartialSerializer : public Serializer<> {
void SerializeEmbedderFields();
- void CheckRehashability(HeapObject* table);
+ void CheckRehashability(HeapObject* obj);
StartupSerializer* startup_serializer_;
std::vector<JSObject*> embedder_field_holders_;
v8::SerializeEmbedderFieldsCallback serialize_embedder_fields_;
- GlobalDictionary* rehashable_global_dictionary_;
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index ec7b7b25c7..f201342105 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -106,7 +106,7 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
}
bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
- return !o->IsString() && !o->IsScript();
+ return !o->IsString() && !o->IsScript() && !o->IsJSTypedArray();
}
void SerializerDeserializer::RestoreExternalReferenceRedirectors(
@@ -118,5 +118,13 @@ void SerializerDeserializer::RestoreExternalReferenceRedirectors(
}
}
+void SerializerDeserializer::RestoreExternalReferenceRedirectors(
+ const std::vector<CallHandlerInfo*>& call_handler_infos) {
+ for (CallHandlerInfo* info : call_handler_infos) {
+ Foreign::cast(info->js_callback())
+ ->set_foreign_address(info->redirected_callback());
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index f753402d15..6482c350f7 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -57,7 +57,7 @@ class ExternalReferenceEncoder {
class HotObjectsList {
public:
HotObjectsList() : index_(0) {
- for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
+ for (int i = 0; i < kSize; i++) circular_queue_[i] = nullptr;
}
void Add(HeapObject* object) {
@@ -111,6 +111,8 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<AccessorInfo*>& accessor_infos);
+ void RestoreExternalReferenceRedirectors(
+ const std::vector<CallHandlerInfo*>& call_handler_infos);
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
@@ -192,9 +194,8 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-allocated backing stores for TypedArrays.
static const int kOffHeapBackingStore = 0x1c;
- // Used to encode deoptimizer entry code.
- static const int kDeoptimizerEntryPlain = 0x1d;
- static const int kDeoptimizerEntryFromCode = 0x1e;
+ // 0x1d, 0x1e unused.
+
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
@@ -268,7 +269,7 @@ class SerializedData {
SerializedData(byte* data, int size)
: data_(data), size_(size), owns_data_(false) {}
- SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
+ SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
SerializedData(SerializedData&& other)
: data_(other.data_), size_(other.size_), owns_data_(other.owns_data_) {
// Ensure |other| will not attempt to destroy our data in destructor.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 9db7d798a5..fd96850890 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -5,7 +5,10 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
+#include "src/interpreter/interpreter.h"
+#include "src/objects/code.h"
#include "src/objects/map.h"
+#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/natives.h"
namespace v8 {
@@ -26,17 +29,17 @@ Serializer<AllocatorT>::Serializer(Isolate* isolate)
instance_type_size_[i] = 0;
}
} else {
- instance_type_count_ = NULL;
- instance_type_size_ = NULL;
+ instance_type_count_ = nullptr;
+ instance_type_size_ = nullptr;
}
#endif // OBJECT_PRINT
}
template <class AllocatorT>
Serializer<AllocatorT>::~Serializer() {
- if (code_address_map_ != NULL) delete code_address_map_;
+ if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
- if (instance_type_count_ != NULL) {
+ if (instance_type_count_ != nullptr) {
DeleteArray(instance_type_count_);
DeleteArray(instance_type_size_);
}
@@ -91,6 +94,10 @@ bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
Object** end) {
+ // Builtins and bytecode handlers are serialized in a separate pass by the
+ // BuiltinSerializer.
+ if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
+
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
PutSmi(Smi::cast(*current));
@@ -207,6 +214,14 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
}
template <class AllocatorT>
+bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
+ if (!obj->IsCode()) return false;
+ Code* code = Code::cast(obj);
+ if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
+ return (code->kind() == Code::BYTECODE_HANDLER);
+}
+
+template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
int root_index, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
@@ -379,19 +394,44 @@ int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
return static_cast<int32_t>(reference.off_heap_backing_store_index());
}
-// When a JSArrayBuffer is neutered, the FixedTypedArray that points to the
-// same backing store does not know anything about it. This fixup step finds
-// neutered TypedArrays and clears the values in the FixedTypedArray so that
-// we don't try to serialize the now invalid backing store.
template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::FixupIfNeutered() {
- JSTypedArray* array = JSTypedArray::cast(object_);
- if (!array->WasNeutered()) return;
+void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
+ JSTypedArray* typed_array = JSTypedArray::cast(object_);
+ FixedTypedArrayBase* elements =
+ FixedTypedArrayBase::cast(typed_array->elements());
+
+ if (!typed_array->WasNeutered()) {
+ bool off_heap = elements->base_pointer() == nullptr;
+
+ if (off_heap) {
+ // Explicitly serialize the backing store now.
+ JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
+ CHECK(buffer->byte_length()->IsSmi());
+ CHECK(typed_array->byte_offset()->IsSmi());
+ int32_t byte_length = NumberToInt32(buffer->byte_length());
+ int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
- FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(array->elements());
- DCHECK(fta->base_pointer() == nullptr);
- fta->set_external_pointer(Smi::kZero);
- fta->set_length(0);
+ // We need to calculate the backing store from the external pointer
+ // because the ArrayBuffer may already have been serialized.
+ void* backing_store = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(elements->external_pointer()) -
+ byte_offset);
+ int32_t ref = SerializeBackingStore(backing_store, byte_length);
+
+ // The external_pointer is the backing_store + typed_array->byte_offset.
+ // To properly share the buffer, we set the backing store ref here. On
+ // deserialization we re-add the byte_offset to external_pointer.
+ elements->set_external_pointer(Smi::FromInt(ref));
+ }
+ } else {
+ // When a JSArrayBuffer is neutered, the FixedTypedArray that points to the
+ // same backing store does not know anything about it. This fixup step finds
+ // neutered TypedArrays and clears the values in the FixedTypedArray so that
+ // we don't try to serialize the now invalid backing store.
+ elements->set_external_pointer(Smi::kZero);
+ elements->set_length(0);
+ }
+ SerializeObject();
}
template <class AllocatorT>
@@ -412,26 +452,6 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
}
template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeFixedTypedArray() {
- FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(object_);
- void* backing_store = fta->DataPtr();
- // We cannot store byte_length larger than Smi range in the snapshot.
- CHECK(fta->ByteLength() < Smi::kMaxValue);
- int32_t byte_length = static_cast<int32_t>(fta->ByteLength());
-
- // The heap contains empty FixedTypedArrays for each type, with a byte_length
- // of 0 (e.g. empty_fixed_uint8_array). These look like they are are 'on-heap'
- // but have no data to copy, so we skip the backing store here.
-
- // The embedder-allocated backing store only exists for the off-heap case.
- if (byte_length > 0 && fta->base_pointer() == nullptr) {
- int32_t ref = SerializeBackingStore(backing_store, byte_length);
- fta->set_external_pointer(Smi::FromInt(ref));
- }
- SerializeObject();
-}
-
-template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
if (object_->map() != heap->native_source_string_map()) {
@@ -559,16 +579,13 @@ void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
SeqTwoByteString::cast(object_)->clear_padding();
}
if (object_->IsJSTypedArray()) {
- FixupIfNeutered();
+ SerializeJSTypedArray();
+ return;
}
if (object_->IsJSArrayBuffer()) {
SerializeJSArrayBuffer();
return;
}
- if (object_->IsFixedTypedArrayBase()) {
- SerializeFixedTypedArray();
- return;
- }
// We don't expect fillers.
DCHECK(!object_->IsFiller());
@@ -795,7 +812,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
bytes_processed_so_far_ += to_skip;
- DCHECK(to_skip >= 0);
+ DCHECK_GE(to_skip, 0);
if (bytes_to_output != 0) {
DCHECK(to_skip == bytes_to_output);
if (IsAligned(bytes_to_output, kPointerAlignment) &&
@@ -810,7 +827,22 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
// Check that we do not serialize uninitialized memory.
__msan_check_mem_is_initialized(object_start + base, bytes_to_output);
#endif // MEMORY_SANITIZER
- sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ if (object_->IsBytecodeArray()) {
+ // The code age byte can be changed concurrently by GC.
+ const int bytes_to_age_byte = BytecodeArray::kBytecodeAgeOffset - base;
+ if (0 <= bytes_to_age_byte && bytes_to_age_byte < bytes_to_output) {
+ sink_->PutRaw(object_start + base, bytes_to_age_byte, "Bytes");
+ byte bytecode_age = BytecodeArray::kNoAgeBytecodeAge;
+ sink_->PutRaw(&bytecode_age, 1, "Bytes");
+ const int bytes_written = bytes_to_age_byte + 1;
+ sink_->PutRaw(object_start + base + bytes_written,
+ bytes_to_output - bytes_written, "Bytes");
+ } else {
+ sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ }
+ } else {
+ sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ }
}
}
@@ -822,7 +854,7 @@ int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
bytes_processed_so_far_ += to_skip;
// This assert will fail if the reloc info gives us the target_address_address
// locations in a non-ascending order. Luckily that doesn't happen.
- DCHECK(to_skip >= 0);
+ DCHECK_GE(to_skip, 0);
return to_skip;
}
@@ -863,6 +895,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
}
// Explicit instantiation.
+template class Serializer<BuiltinSerializerAllocator>;
template class Serializer<DefaultSerializerAllocator>;
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 1fe607b530..eda25fbd35 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -44,7 +44,7 @@ class CodeAddressMap : public CodeEventLogger {
NameMap() : impl_() {}
~NameMap() {
- for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = impl_.Start(); p != nullptr;
p = impl_.Next(p)) {
DeleteArray(static_cast<const char*>(p->value));
}
@@ -52,19 +52,20 @@ class CodeAddressMap : public CodeEventLogger {
void Insert(Address code_address, const char* name, int name_size) {
base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
entry->value = CopyName(name, name_size);
}
}
const char* Lookup(Address code_address) {
base::HashMap::Entry* entry = FindEntry(code_address);
- return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+ return (entry != nullptr) ? static_cast<const char*>(entry->value)
+ : nullptr;
}
void Remove(Address code_address) {
base::HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) {
+ if (entry != nullptr) {
DeleteArray(static_cast<char*>(entry->value));
RemoveEntry(entry);
}
@@ -73,11 +74,11 @@ class CodeAddressMap : public CodeEventLogger {
void Move(Address from, Address to) {
if (from == to) return;
base::HashMap::Entry* from_entry = FindEntry(from);
- DCHECK(from_entry != NULL);
+ DCHECK_NOT_NULL(from_entry);
void* value = from_entry->value;
RemoveEntry(from_entry);
base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
- DCHECK(to_entry->value == NULL);
+ DCHECK_NULL(to_entry->value);
to_entry->value = value;
}
@@ -193,6 +194,9 @@ class Serializer : public SerializerDeserializer {
HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
int skip, BuiltinReferenceSerializationMode mode = kDefault);
+ // Returns true if the given heap object is a bytecode handler code object.
+ bool ObjectIsBytecodeHandler(HeapObject* obj) const;
+
inline void FlushSkip(int skip) {
if (skip != 0) {
sink_.Put(kSkip, "SkipFromSerializeObject");
@@ -303,9 +307,8 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
void OutputCode(int size);
int SkipTo(Address to);
int32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
- void FixupIfNeutered();
+ void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
- void SerializeFixedTypedArray();
void SerializeExternalString();
void SerializeExternalStringAsSequentialString();
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index da528a50ba..e7efd87bd8 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -96,11 +96,8 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
- builtin_deserializer.ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
-
- DisallowHeapAllocation no_gc;
-
Code* code = builtin_deserializer.DeserializeBuiltin(builtin_id);
DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
@@ -111,6 +108,40 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
Builtins::name(builtin_id), bytes, ms);
}
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ isolate->logger()->LogCodeObject(code);
+ }
+
+ return code;
+}
+
+// static
+Code* Snapshot::DeserializeHandler(Isolate* isolate,
+ interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
+ const v8::StartupData* blob = isolate->snapshot_blob();
+ Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
+ BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
+ BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
+ Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int bytes = code->Size();
+ PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
+ interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
+ bytes, ms);
+ }
+
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ isolate->logger()->LogCodeObject(code);
+ }
+
return code;
}
@@ -349,7 +380,8 @@ Vector<const byte> BuiltinSnapshotData::Payload() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
- int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ const int builtin_offsets_size =
+ BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
@@ -360,13 +392,15 @@ Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
- int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ const int builtin_offsets_size =
+ BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
payload + payload_length - builtin_offsets_size);
- return Vector<const uint32_t>(data, Builtins::builtin_count);
+ return Vector<const uint32_t>(data,
+ BuiltinSnapshotUtils::kNumberOfCodeObjects);
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index 35cb6c38f5..a13f2e8870 100644
--- a/deps/v8/src/snapshot/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -21,7 +21,6 @@ void ReadNatives() {}
void DisposeNatives() {}
#endif // V8_USE_EXTERNAL_STARTUP_DATA
-
-const v8::StartupData* Snapshot::DefaultSnapshotBlob() { return NULL; }
+const v8::StartupData* Snapshot::DefaultSnapshotBlob() { return nullptr; }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index 67dcb60f0b..fc68a85c5f 100644
--- a/deps/v8/src/snapshot/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -20,14 +20,14 @@ namespace v8 {
namespace internal {
static base::LazyMutex external_startup_data_mutex = LAZY_MUTEX_INITIALIZER;
-static v8::StartupData external_startup_blob = {NULL, 0};
+static v8::StartupData external_startup_blob = {nullptr, 0};
void SetSnapshotFromFile(StartupData* snapshot_blob) {
base::LockGuard<base::Mutex> lock_guard(
external_startup_data_mutex.Pointer());
DCHECK(snapshot_blob);
DCHECK(snapshot_blob->data);
- DCHECK(snapshot_blob->raw_size > 0);
+ DCHECK_GT(snapshot_blob->raw_size, 0);
DCHECK(!external_startup_blob.data);
DCHECK(Snapshot::SnapshotIsValid(snapshot_blob));
external_startup_blob = *snapshot_blob;
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 66210be709..77b19d51a1 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
- DCHECK(integer < 1 << 30);
+ DCHECK_LT(integer, 1 << 30);
integer <<= 2;
int bytes = 1;
if (integer > 0xff) bytes = 2;
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 0c639d4c53..2ffe5b6086 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -98,6 +98,12 @@ class Snapshot : public AllStatic {
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
+ // Deserializes a single given handler code object. Intended to be called at
+ // runtime after the isolate has been fully initialized.
+ static Code* DeserializeHandler(Isolate* isolate,
+ interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale);
+
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index a6e9d6a203..91432e185a 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -18,7 +18,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);
- if (!Deserializer::ReserveSpace(this, &builtin_deserializer)) {
+ if (!DefaultDeserializerAllocator::ReserveSpace(this,
+ &builtin_deserializer)) {
V8::FatalProcessOutOfMemory("StartupDeserializer");
}
@@ -33,18 +34,17 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
-
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate->heap()->RepairFreeListsAfterDeserialization();
isolate->heap()->IterateWeakRoots(this, VISIT_ALL);
DeserializeDeferredObjects();
RestoreExternalReferenceRedirectors(accessor_infos());
+ RestoreExternalReferenceRedirectors(call_handler_infos());
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
- builtin_deserializer.DeserializeEagerBuiltins();
+ builtin_deserializer.DeserializeEagerBuiltinsAndHandlers();
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
@@ -71,7 +71,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// to display the builtin names.
PrintDisassembledCodeObjects();
- if (FLAG_rehash_snapshot && can_rehash()) Rehash();
+ if (FLAG_rehash_snapshot && can_rehash()) RehashHeap();
}
void StartupDeserializer::FlushICacheForNewIsolate() {
@@ -93,7 +93,7 @@ void StartupDeserializer::PrintDisassembledCodeObjects() {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
- for (HeapObject* obj = iterator.next(); obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
if (obj->IsCode()) {
Code::cast(obj)->Disassemble(nullptr, os);
@@ -103,12 +103,10 @@ void StartupDeserializer::PrintDisassembledCodeObjects() {
#endif
}
-void StartupDeserializer::Rehash() {
+void StartupDeserializer::RehashHeap() {
DCHECK(FLAG_rehash_snapshot && can_rehash());
isolate()->heap()->InitializeHashSeed();
- isolate()->heap()->string_table()->Rehash();
- isolate()->heap()->weak_object_to_code_table()->Rehash();
- SortMapDescriptors();
+ Rehash();
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index 269ac8b555..6e1b5db332 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
-class StartupDeserializer final : public Deserializer {
+class StartupDeserializer final : public Deserializer<> {
public:
StartupDeserializer(const SnapshotData* startup_data,
const BuiltinSnapshotData* builtin_data)
@@ -26,7 +26,7 @@ class StartupDeserializer final : public Deserializer {
void PrintDisassembledCodeObjects();
// Rehash after deserializing an isolate.
- void Rehash();
+ void RehashHeap();
const BuiltinSnapshotData* builtin_data_;
};
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 8fec389ee9..8b4a79b8b1 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -17,18 +17,19 @@ StartupSerializer::StartupSerializer(
: Serializer(isolate),
clear_function_code_(function_code_handling ==
v8::SnapshotCreator::FunctionCodeHandling::kClear),
- serializing_builtins_(false),
can_be_rehashed_(true) {
InitializeCodeAddressMap();
}
StartupSerializer::~StartupSerializer() {
RestoreExternalReferenceRedirectors(accessor_infos_);
+ RestoreExternalReferenceRedirectors(call_handler_infos_);
OutputStatistics("StartupSerializer");
}
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
+ DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
DCHECK(!obj->IsJSFunction());
if (clear_function_code() && obj->IsBytecodeArray()) {
@@ -36,9 +37,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
BuiltinReferenceSerializationMode mode =
- (clear_function_code() && !serializing_builtins_)
- ? kCanonicalizeCompileLazy
- : kDefault;
+ clear_function_code() ? kCanonicalizeCompileLazy : kDefault;
if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip, mode)) {
return;
}
@@ -64,6 +63,13 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
Address original_address = Foreign::cast(info->getter())->foreign_address();
Foreign::cast(info->js_getter())->set_foreign_address(original_address);
accessor_infos_.push_back(info);
+ } else if (isolate()->external_reference_redirector() &&
+ obj->IsCallHandlerInfo()) {
+ CallHandlerInfo* info = CallHandlerInfo::cast(obj);
+ Address original_address =
+ Foreign::cast(info->callback())->foreign_address();
+ Foreign::cast(info->js_callback())->set_foreign_address(original_address);
+ call_handler_infos_.push_back(info);
} else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
Script::cast(obj)->set_context_data(
isolate()->heap()->uninitialized_symbol());
@@ -75,7 +81,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
- if (obj->IsHashTable()) CheckRehashability(obj);
+ CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
@@ -107,9 +113,6 @@ int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
}
void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
- // We expect the builtins tag after builtins have been serialized.
- DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
- serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
sink_.Put(kSynchronize, "Synchronize");
}
@@ -121,19 +124,13 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK(isolate->handle_scope_implementer()->blocks()->empty());
CHECK_EQ(0, isolate->global_handles()->global_handles_count());
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
- // First visit immortal immovables to make sure they end up in the first page.
- serializing_immortal_immovables_roots_ = true;
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
- // Check that immortal immovable roots are allocated on the first page.
- DCHECK(allocator()->HasNotExceededFirstPageOfEachSpace());
- serializing_immortal_immovables_roots_ = false;
- // Visit the rest of the strong roots.
+ // Visit smi roots.
// Clear the stack limits to make the snapshot reproducible.
// Reset it again afterwards.
isolate->heap()->ClearStackLimits();
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->SetStackLimits();
-
+ // First visit immortal immovables to make sure they end up in the first page.
isolate->heap()->IterateStrongRoots(this,
VISIT_ONLY_STRONG_FOR_SERIALIZATION);
}
@@ -149,20 +146,15 @@ void StartupSerializer::VisitRootPointers(Root root, Object** start,
int skip = 0;
for (Object** current = start; current < end; current++) {
int root_index = static_cast<int>(current - start);
- if (RootShouldBeSkipped(root_index)) {
- skip += kPointerSize;
- continue;
+ if ((*current)->IsSmi()) {
+ FlushSkip(skip);
+ PutSmi(Smi::cast(*current));
} else {
- if ((*current)->IsSmi()) {
- FlushSkip(skip);
- PutSmi(Smi::cast(*current));
- } else {
- SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
- skip);
- }
- root_has_been_serialized_.set(root_index);
- skip = 0;
+ SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
+ skip);
}
+ root_has_been_serialized_.set(root_index);
+ skip = 0;
}
FlushSkip(skip);
} else {
@@ -170,26 +162,10 @@ void StartupSerializer::VisitRootPointers(Root root, Object** start,
}
}
-bool StartupSerializer::RootShouldBeSkipped(int root_index) {
- if (root_index == Heap::kStackLimitRootIndex ||
- root_index == Heap::kRealStackLimitRootIndex) {
- return true;
- }
- return Heap::RootIsImmortalImmovable(root_index) !=
- serializing_immortal_immovables_roots_;
-}
-
-void StartupSerializer::CheckRehashability(HeapObject* table) {
- DCHECK(table->IsHashTable());
+void StartupSerializer::CheckRehashability(HeapObject* obj) {
if (!can_be_rehashed_) return;
- // We can only correctly rehash if the four hash tables below are the only
- // ones that we deserialize.
- if (table->IsUnseededNumberDictionary()) return;
- if (table == isolate()->heap()->empty_ordered_hash_table()) return;
- if (table == isolate()->heap()->empty_slow_element_dictionary()) return;
- if (table == isolate()->heap()->empty_property_dictionary()) return;
- if (table == isolate()->heap()->weak_object_to_code_table()) return;
- if (table == isolate()->heap()->string_table()) return;
+ if (!obj->NeedsRehashing()) return;
+ if (obj->CanBeRehashed()) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 9c575adbe1..69985388e9 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -69,20 +69,13 @@ class StartupSerializer : public Serializer<> {
void Synchronize(VisitorSynchronization::SyncTag tag) override;
bool MustBeDeferred(HeapObject* object) override;
- // Some roots should not be serialized, because their actual value depends on
- // absolute addresses and they are reset after deserialization, anyway.
- // In the first pass over the root list, we only serialize immortal immovable
- // roots. In the second pass, we serialize the rest.
- bool RootShouldBeSkipped(int root_index);
-
- void CheckRehashability(HeapObject* hashtable);
+ void CheckRehashability(HeapObject* obj);
const bool clear_function_code_;
- bool serializing_builtins_;
- bool serializing_immortal_immovables_roots_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
std::vector<AccessorInfo*> accessor_infos_;
+ std::vector<CallHandlerInfo*> call_handler_infos_;
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index f7697e5694..f7306c82ce 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -4,7 +4,6 @@
#include "src/source-position-table.h"
-#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects.h"
@@ -48,7 +47,7 @@ void SubtractFromEntry(PositionTableEntry& value,
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(ZoneVector<byte>& bytes, T value) {
+void EncodeInt(std::vector<byte>& bytes, T value) {
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
value = ((value << 1) ^ (value >> kShift));
@@ -65,9 +64,9 @@ void EncodeInt(ZoneVector<byte>& bytes, T value) {
}
// Encode a PositionTableEntry.
-void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
- DCHECK(entry.code_offset >= 0);
+ DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
EncodeInt(bytes,
entry.is_statement ? entry.code_offset : -entry.code_offset - 1);
@@ -109,14 +108,8 @@ void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
} // namespace
SourcePositionTableBuilder::SourcePositionTableBuilder(
- Zone* zone, SourcePositionTableBuilder::RecordingMode mode)
- : mode_(mode),
- bytes_(zone),
-#ifdef ENABLE_SLOW_DCHECKS
- raw_entries_(zone),
-#endif
- previous_() {
-}
+ SourcePositionTableBuilder::RecordingMode mode)
+ : mode_(mode), previous_() {}
void SourcePositionTableBuilder::AddPosition(size_t code_offset,
SourcePosition source_position,
@@ -138,7 +131,7 @@ void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
}
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
- Isolate* isolate, Handle<AbstractCode> code) {
+ Isolate* isolate) {
if (bytes_.empty()) return isolate->factory()->empty_byte_array();
DCHECK(!Omit());
@@ -147,8 +140,6 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(*code, *table));
-
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index f185062694..9028e50f79 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -14,8 +14,6 @@
namespace v8 {
namespace internal {
-class AbstractCode;
-class BytecodeArray;
class ByteArray;
template <typename T>
class Handle;
@@ -37,14 +35,13 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
public:
enum RecordingMode { OMIT_SOURCE_POSITIONS, RECORD_SOURCE_POSITIONS };
- SourcePositionTableBuilder(Zone* zone,
- RecordingMode mode = RECORD_SOURCE_POSITIONS);
+ explicit SourcePositionTableBuilder(
+ RecordingMode mode = RECORD_SOURCE_POSITIONS);
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
- Handle<ByteArray> ToSourcePositionTable(Isolate* isolate,
- Handle<AbstractCode> code);
+ Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
private:
void AddEntry(const PositionTableEntry& entry);
@@ -52,9 +49,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
inline bool Omit() const { return mode_ == OMIT_SOURCE_POSITIONS; }
RecordingMode mode_;
- ZoneVector<byte> bytes_;
+ std::vector<byte> bytes_;
#ifdef ENABLE_SLOW_DCHECKS
- ZoneVector<PositionTableEntry> raw_entries_;
+ std::vector<PositionTableEntry> raw_entries_;
#endif
PositionTableEntry previous_; // Previously written entry, to compute delta.
};
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
index 02bb339357..b45567629a 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/source-position.cc
@@ -64,8 +64,8 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
Handle<Code> code) const {
- Handle<DeoptimizationInputData> deopt_data(
- DeoptimizationInputData::cast(code->deoptimization_data()));
+ Handle<DeoptimizationData> deopt_data(
+ DeoptimizationData::cast(code->deoptimization_data()));
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
@@ -103,8 +103,8 @@ void SourcePosition::Print(std::ostream& out,
}
void SourcePosition::Print(std::ostream& out, Code* code) const {
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
+ DeoptimizationData* deopt_data =
+ DeoptimizationData::cast(code->deoptimization_data());
if (!isInlined()) {
SharedFunctionInfo* function(
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
index beab996c04..41e32557ce 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/source-position.h
@@ -27,8 +27,7 @@ struct SourcePositionInfo;
//
// A defined inlining_id refers to positions in
// CompilationInfo::inlined_functions or
-// DeoptimizationInputData::InliningPositions, depending on the compilation
-// stage.
+// DeoptimizationData::InliningPositions, depending on the compilation stage.
class SourcePosition final {
public:
explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
@@ -54,12 +53,12 @@ class SourcePosition final {
void SetScriptOffset(int script_offset) {
DCHECK(script_offset <= ScriptOffsetField::kMax - 2);
- DCHECK(script_offset >= kNoSourcePosition);
+ DCHECK_GE(script_offset, kNoSourcePosition);
value_ = ScriptOffsetField::update(value_, script_offset + 1);
}
void SetInliningId(int inlining_id) {
DCHECK(inlining_id <= InliningIdField::kMax - 2);
- DCHECK(inlining_id >= kNotInlined);
+ DCHECK_GE(inlining_id, kNotInlined);
value_ = InliningIdField::update(value_, inlining_id + 1);
}
@@ -97,7 +96,7 @@ struct InliningPosition {
// position of the inlined call
SourcePosition position = SourcePosition::Unknown();
- // references position in DeoptimizationInputData::literals()
+ // references position in DeoptimizationData::literals()
int inlined_function_id;
};
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
index c18e2b0e5d..d83b15faa5 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/splay-tree-inl.h
@@ -50,11 +50,11 @@ void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
if (cmp > 0) {
node->left_ = root_;
node->right_ = root_->right_;
- root_->right_ = NULL;
+ root_->right_ = nullptr;
} else {
node->right_ = root_;
node->left_ = root_->left_;
- root_->left_ = NULL;
+ root_->left_ = nullptr;
}
root_ = node;
}
@@ -139,8 +139,7 @@ bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
if (is_empty())
return false;
Node* current = root_;
- while (current->right_ != NULL)
- current = current->right_;
+ while (current->right_ != nullptr) current = current->right_;
locator->bind(current);
return true;
}
@@ -151,8 +150,7 @@ bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
if (is_empty())
return false;
Node* current = root_;
- while (current->left_ != NULL)
- current = current->left_;
+ while (current->left_ != nullptr) current = current->left_;
locator->bind(current);
return true;
}
@@ -191,7 +189,7 @@ bool SplayTree<Config, Allocator>::Remove(const Key& key) {
template<typename Config, class Allocator>
void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
- if (root_->left_ == NULL) {
+ if (root_->left_ == nullptr) {
// No left child, so the new tree is just the right child.
root_ = root_->right_;
} else {
@@ -225,32 +223,28 @@ void SplayTree<Config, Allocator>::Splay(const Key& key) {
while (true) {
int cmp = Config::Compare(key, current->key_);
if (cmp < 0) {
- if (current->left_ == NULL)
- break;
+ if (current->left_ == nullptr) break;
if (Config::Compare(key, current->left_->key_) < 0) {
// Rotate right.
Node* temp = current->left_;
current->left_ = temp->right_;
temp->right_ = current;
current = temp;
- if (current->left_ == NULL)
- break;
+ if (current->left_ == nullptr) break;
}
// Link right.
right->left_ = current;
right = current;
current = current->left_;
} else if (cmp > 0) {
- if (current->right_ == NULL)
- break;
+ if (current->right_ == nullptr) break;
if (Config::Compare(key, current->right_->key_) > 0) {
// Rotate left.
Node* temp = current->right_;
current->right_ = temp->left_;
temp->left_ = current;
current = temp;
- if (current->right_ == NULL)
- break;
+ if (current->right_ == nullptr) break;
}
// Link left.
left->right_ = current;
@@ -278,15 +272,15 @@ void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
template <typename Config, class Allocator> template <class Callback>
void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
- if (root_ == NULL) return;
+ if (root_ == nullptr) return;
// Pre-allocate some space for tiny trees.
std::vector<Node*> nodes_to_visit;
nodes_to_visit.push_back(root_);
size_t pos = 0;
while (pos < nodes_to_visit.size()) {
Node* node = nodes_to_visit[pos++];
- if (node->left() != NULL) nodes_to_visit.push_back(node->left());
- if (node->right() != NULL) nodes_to_visit.push_back(node->right());
+ if (node->left() != nullptr) nodes_to_visit.push_back(node->left());
+ if (node->right() != nullptr) nodes_to_visit.push_back(node->right());
callback->Call(node);
}
}
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
index bee8429e39..e26d21331f 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/splay-tree.h
@@ -36,7 +36,7 @@ class SplayTree {
class Locator;
explicit SplayTree(AllocationPolicy allocator = AllocationPolicy())
- : root_(NULL), allocator_(allocator) {}
+ : root_(nullptr), allocator_(allocator) {}
~SplayTree();
INLINE(void* operator new(size_t size,
@@ -89,7 +89,7 @@ class SplayTree {
// Remove all keys from the tree.
void Clear() { ResetRoot(); }
- bool is_empty() { return root_ == NULL; }
+ bool is_empty() { return root_ == nullptr; }
// Perform the splay operation for the given key. Moves the node with
// the given key to the top of the tree. If no node has the given
@@ -100,10 +100,7 @@ class SplayTree {
class Node {
public:
Node(const Key& key, const Value& value)
- : key_(key),
- value_(value),
- left_(NULL),
- right_(NULL) { }
+ : key_(key), value_(value), left_(nullptr), right_(nullptr) {}
INLINE(void* operator new(size_t size, AllocationPolicy allocator)) {
return allocator.New(static_cast<int>(size));
@@ -136,7 +133,7 @@ class SplayTree {
class Locator BASE_EMBEDDED {
public:
explicit Locator(Node* node) : node_(node) { }
- Locator() : node_(NULL) { }
+ Locator() : node_(nullptr) {}
const Key& key() { return node_->key_; }
Value& value() { return node_->value_; }
void set_value(const Value& value) { node_->value_ = value; }
@@ -151,7 +148,7 @@ class SplayTree {
protected:
// Resets tree root. Existing nodes become unreachable.
- void ResetRoot() { root_ = NULL; }
+ void ResetRoot() { root_ = nullptr; }
private:
// Search for a node with a given key. If found, root_ points
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 33f0332202..53e0462c67 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -91,8 +91,8 @@ static inline int StringBuilderConcatLength(int special_length,
pos = Smi::ToInt(next_smi);
if (pos < 0) return -1;
}
- DCHECK(pos >= 0);
- DCHECK(len >= 0);
+ DCHECK_GE(pos, 0);
+ DCHECK_GE(len, 0);
if (pos > special_length || len > special_length - pos) return -1;
increment = len;
} else if (elt->IsString()) {
@@ -122,14 +122,14 @@ class FixedArrayBuilder {
has_non_smi_elements_(false) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
- DCHECK(initial_capacity > 0);
+ DCHECK_GT(initial_capacity, 0);
}
explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
: array_(backing_store), length_(0), has_non_smi_elements_(false) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
- DCHECK(backing_store->length() > 0);
+ DCHECK_GT(backing_store->length(), 0);
}
bool HasCapacity(int elements) {
@@ -198,14 +198,14 @@ class ReplacementStringBuilder {
is_one_byte_(subject->IsOneByteRepresentation()) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
- DCHECK(estimated_part_count > 0);
+ DCHECK_GT(estimated_part_count, 0);
}
static inline void AddSubjectSlice(FixedArrayBuilder* builder, int from,
int to) {
- DCHECK(from >= 0);
+ DCHECK_GE(from, 0);
int length = to - from;
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
if (StringBuilderSubstringLength::is_valid(length) &&
StringBuilderSubstringPosition::is_valid(from)) {
int encoded_slice = StringBuilderSubstringLength::encode(length) |
@@ -230,7 +230,7 @@ class ReplacementStringBuilder {
void AddString(Handle<String> string) {
int length = string->length();
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
AddElement(*string);
if (!string->IsOneByteRepresentation()) {
is_one_byte_ = false;
diff --git a/deps/v8/src/string-case.cc b/deps/v8/src/string-case.cc
index 52d9636083..f1a7f9e979 100644
--- a/deps/v8/src/string-case.cc
+++ b/deps/v8/src/string-case.cc
@@ -62,7 +62,7 @@ int FastAsciiConvert(char* dst, const char* src, int length,
DisallowHeapAllocation no_gc;
// We rely on the distance between upper and lower case letters
// being a known power of 2.
- DCHECK('a' - 'A' == (1 << 5));
+ DCHECK_EQ('a' - 'A', 1 << 5);
// Boundaries for the range of input characters than require conversion.
static const char lo = is_lower ? 'A' - 1 : 'a' - 1;
static const char hi = is_lower ? 'Z' + 1 : 'z' + 1;
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
index 7d1f106e02..b002f12d4a 100644
--- a/deps/v8/src/string-hasher-inl.h
+++ b/deps/v8/src/string-hasher-inl.h
@@ -45,7 +45,7 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
uint32_t StringHasher::ComputeRunningHash(uint32_t running_hash,
const uc16* chars, int length) {
DCHECK_NOT_NULL(chars);
- DCHECK(length >= 0);
+ DCHECK_GE(length, 0);
for (int i = 0; i < length; ++i) {
running_hash = AddCharacterCore(running_hash, *chars++);
}
@@ -56,7 +56,7 @@ uint32_t StringHasher::ComputeRunningHashOneByte(uint32_t running_hash,
const char* chars,
int length) {
DCHECK_NOT_NULL(chars);
- DCHECK(length >= 0);
+ DCHECK_GE(length, 0);
for (int i = 0; i < length; ++i) {
uint16_t c = static_cast<uint16_t>(*chars++);
running_hash = AddCharacterCore(running_hash, c);
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index 637d000c56..aa7b847ce6 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -87,7 +87,7 @@ class StringSearch : private StringSearchBase {
// Latin1 needle.
return kLatin1AlphabetSize;
} else {
- DCHECK(sizeof(PatternChar) == 2);
+ DCHECK_EQ(sizeof(PatternChar), 2);
// UC16 needle.
return kUC16AlphabetSize;
}
@@ -220,7 +220,7 @@ inline int FindFirstCharacter(Vector<const PatternChar> pattern,
const SubjectChar* char_pos = reinterpret_cast<const SubjectChar*>(
memchr(subject.start() + pos, search_byte,
(max_n - pos) * sizeof(SubjectChar)));
- if (char_pos == NULL) return -1;
+ if (char_pos == nullptr) return -1;
char_pos = AlignDown(char_pos, sizeof(SubjectChar));
pos = static_cast<int>(char_pos - subject.start());
if (subject[pos] == search_char) return pos;
@@ -258,7 +258,7 @@ template <typename PatternChar, typename SubjectChar>
inline bool CharCompare(const PatternChar* pattern,
const SubjectChar* subject,
int length) {
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
int pos = 0;
do {
if (pattern[pos] != subject[pos]) {
@@ -277,7 +277,7 @@ int StringSearch<PatternChar, SubjectChar>::LinearSearch(
Vector<const SubjectChar> subject,
int index) {
Vector<const PatternChar> pattern = search->pattern_;
- DCHECK(pattern.length() > 1);
+ DCHECK_GT(pattern.length(), 1);
int pattern_length = pattern.length();
int i = index;
int n = subject.length() - pattern_length;
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 6697191494..be508f4f45 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -48,7 +48,7 @@ bool StringStream::Put(char c) {
buffer_ = new_buffer;
} else {
// Reached the end of the available buffer.
- DCHECK(capacity_ >= 5);
+ DCHECK_GE(capacity_, 5);
length_ = capacity_ - 1; // Indicate fullness of the stream.
buffer_[length_ - 4] = '.';
buffer_[length_ - 3] = '.';
@@ -173,7 +173,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
}
// Verify that the buffer is 0-terminated
- DCHECK(buffer_[length_] == '\0');
+ DCHECK_EQ(buffer_[length_], '\0');
}
@@ -242,8 +242,8 @@ Handle<String> StringStream::ToString(Isolate* isolate) {
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
- isolate->set_string_stream_current_security_token(NULL);
- if (isolate->string_stream_debug_object_cache() == NULL) {
+ isolate->set_string_stream_current_security_token(nullptr);
+ if (isolate->string_stream_debug_object_cache() == nullptr) {
isolate->set_string_stream_debug_object_cache(new DebugObjectCache());
}
isolate->string_stream_debug_object_cache()->clear();
@@ -529,7 +529,7 @@ char* HeapStringAllocator::grow(unsigned* bytes) {
return space_;
}
char* new_space = NewArray<char>(new_bytes);
- if (new_space == NULL) {
+ if (new_space == nullptr) {
return space_;
}
MemCopy(new_space, space_, *bytes);
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index c98660b5bf..4bdd5378fa 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -98,7 +98,7 @@ static void TrimToMaxSignificantDigits(Vector<const char> buffer,
}
// The input buffer has been trimmed. Therefore the last digit must be
// different from '0'.
- DCHECK(buffer[buffer.length() - 1] != '0');
+ DCHECK_NE(buffer[buffer.length() - 1], '0');
// Set the last digit to be non-zero. This is sufficient to guarantee
// correct rounding.
significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
@@ -207,11 +207,11 @@ static bool DoubleStrtod(Vector<const char> trimmed,
// Returns 10^exponent as an exact DiyFp.
// The given exponent must be in the range [1; kDecimalExponentDistance[.
static DiyFp AdjustmentPowerOfTen(int exponent) {
- DCHECK(0 < exponent);
- DCHECK(exponent < PowersOfTenCache::kDecimalExponentDistance);
+ DCHECK_LT(0, exponent);
+ DCHECK_LT(exponent, PowersOfTenCache::kDecimalExponentDistance);
// Simply hardcode the remaining powers for the given decimal exponent
// distance.
- DCHECK(PowersOfTenCache::kDecimalExponentDistance == 8);
+ DCHECK_EQ(PowersOfTenCache::kDecimalExponentDistance, 8);
switch (exponent) {
case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60);
case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57);
@@ -250,7 +250,7 @@ static bool DiyFpStrtod(Vector<const char> buffer,
input.Normalize();
error <<= old_e - input.e();
- DCHECK(exponent <= PowersOfTenCache::kMaxDecimalExponent);
+ DCHECK_LE(exponent, PowersOfTenCache::kMaxDecimalExponent);
if (exponent < PowersOfTenCache::kMinDecimalExponent) {
*result = 0.0;
return true;
@@ -268,7 +268,7 @@ static bool DiyFpStrtod(Vector<const char> buffer,
if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
// The product of input with the adjustment power fits into a 64 bit
// integer.
- DCHECK(DiyFp::kSignificandSize == 64);
+ DCHECK_EQ(DiyFp::kSignificandSize, 64);
} else {
// The adjustment power is exact. There is hence only an error of 0.5.
error += kDenominator / 2;
@@ -310,8 +310,8 @@ static bool DiyFpStrtod(Vector<const char> buffer,
precision_digits_count -= shift_amount;
}
// We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
- DCHECK(DiyFp::kSignificandSize == 64);
- DCHECK(precision_digits_count < 64);
+ DCHECK_EQ(DiyFp::kSignificandSize, 64);
+ DCHECK_LT(precision_digits_count, 64);
uint64_t one64 = 1;
uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
uint64_t precision_bits = input.f() & precision_bits_mask;
@@ -356,13 +356,13 @@ static double BignumStrtod(Vector<const char> buffer,
DiyFp upper_boundary = Double(guess).UpperBoundary();
DCHECK(buffer.length() + exponent <= kMaxDecimalPower + 1);
- DCHECK(buffer.length() + exponent > kMinDecimalPower);
- DCHECK(buffer.length() <= kMaxSignificantDecimalDigits);
+ DCHECK_GT(buffer.length() + exponent, kMinDecimalPower);
+ DCHECK_LE(buffer.length(), kMaxSignificantDecimalDigits);
// Make sure that the Bignum will be able to hold all our numbers.
// Our Bignum implementation has a separate field for exponents. Shifts will
// consume at most one bigit (< 64 bits).
// ln(10) == 3.3219...
- DCHECK(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
+ DCHECK_LT((kMaxDecimalPower + 1) * 333 / 100, Bignum::kMaxSignificantBits);
Bignum input;
Bignum boundary;
input.AssignDecimalString(buffer);
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 0ff2b82503..d237aed720 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -96,23 +96,6 @@ enum CategoryGroupEnabledFlags {
// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT v8::internal::tracing::AddTraceEventImpl
-// Add a trace event to the platform tracing system.
-// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
-// char phase,
-// const uint8_t* category_group_enabled,
-// const char* name,
-// const char* scope,
-// uint64_t id,
-// uint64_t bind_id,
-// int num_args,
-// const char** arg_names,
-// const uint8_t* arg_types,
-// const uint64_t* arg_values,
-// unsigned int flags,
-// int64_t timestamp)
-#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP \
- v8::internal::tracing::AddTraceEventWithTimestampImpl
-
// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
// const uint8_t* category_group_enabled,
@@ -229,18 +212,10 @@ enum CategoryGroupEnabledFlags {
} \
} while (0)
-// Adds a trace event with a given timestamp.
+// Adds a trace event with a given timestamp. Not Implemented.
#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
timestamp, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- v8::internal::tracing::AddTraceEventWithTimestamp( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
- v8::internal::tracing::kNoId, flags, timestamp, ##__VA_ARGS__); \
- } \
- } while (0)
+ UNIMPLEMENTED()
// Adds a trace event with a given id and timestamp. Not Implemented.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
@@ -448,7 +423,7 @@ static V8_INLINE uint64_t AddTraceEventImpl(
arg_convertables[1].reset(reinterpret_cast<ConvertableToTraceFormat*>(
static_cast<intptr_t>(arg_values[1])));
}
- DCHECK(num_args <= 2);
+ DCHECK_LE(num_args, 2);
v8::TracingController* controller =
v8::internal::tracing::TraceEventHelper::GetTracingController();
return controller->AddTraceEvent(phase, category_group_enabled, name, scope,
@@ -456,28 +431,6 @@ static V8_INLINE uint64_t AddTraceEventImpl(
arg_values, arg_convertables, flags);
}
-static V8_INLINE uint64_t AddTraceEventWithTimestampImpl(
- char phase, const uint8_t* category_group_enabled, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags, int64_t timestamp) {
- std::unique_ptr<ConvertableToTraceFormat> arg_convertables[2];
- if (num_args > 0 && arg_types[0] == TRACE_VALUE_TYPE_CONVERTABLE) {
- arg_convertables[0].reset(reinterpret_cast<ConvertableToTraceFormat*>(
- static_cast<intptr_t>(arg_values[0])));
- }
- if (num_args > 1 && arg_types[1] == TRACE_VALUE_TYPE_CONVERTABLE) {
- arg_convertables[1].reset(reinterpret_cast<ConvertableToTraceFormat*>(
- static_cast<intptr_t>(arg_values[1])));
- }
- DCHECK_LE(num_args, 2);
- v8::TracingController* controller =
- v8::internal::tracing::TraceEventHelper::GetTracingController();
- return controller->AddTraceEventWithTimestamp(
- phase, category_group_enabled, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags, timestamp);
-}
-
// Define SetTraceValue for each allowed type. It stores the type and
// value in the return arguments. This allows this API to avoid declaring any
// structures so that it is portable to third_party libraries.
@@ -580,53 +533,11 @@ static V8_INLINE uint64_t AddTraceEvent(
arg_names, arg_types, arg_values, flags);
}
-static V8_INLINE uint64_t AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_group_enabled, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
- int64_t timestamp) {
- return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
- phase, category_group_enabled, name, scope, id, bind_id, kZeroNumArgs,
- nullptr, nullptr, nullptr, flags, timestamp);
-}
-
-template <class ARG1_TYPE>
-static V8_INLINE uint64_t AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_group_enabled, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
- int64_t timestamp, const char* arg1_name, ARG1_TYPE&& arg1_val) {
- const int num_args = 1;
- uint8_t arg_type;
- uint64_t arg_value;
- SetTraceValue(std::forward<ARG1_TYPE>(arg1_val), &arg_type, &arg_value);
- return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
- phase, category_group_enabled, name, scope, id, bind_id, num_args,
- &arg1_name, &arg_type, &arg_value, flags, timestamp);
-}
-
-template <class ARG1_TYPE, class ARG2_TYPE>
-static V8_INLINE uint64_t AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_group_enabled, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
- int64_t timestamp, const char* arg1_name, ARG1_TYPE&& arg1_val,
- const char* arg2_name, ARG2_TYPE&& arg2_val) {
- const int num_args = 2;
- const char* arg_names[2] = {arg1_name, arg2_name};
- unsigned char arg_types[2];
- uint64_t arg_values[2];
- SetTraceValue(std::forward<ARG1_TYPE>(arg1_val), &arg_types[0],
- &arg_values[0]);
- SetTraceValue(std::forward<ARG2_TYPE>(arg2_val), &arg_types[1],
- &arg_values[1]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
- phase, category_group_enabled, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, flags, timestamp);
-}
-
// Used by TRACE_EVENTx macros. Do not use directly.
class ScopedTracer {
public:
// Note: members of data_ intentionally left uninitialized. See Initialize.
- ScopedTracer() : p_data_(NULL) {}
+ ScopedTracer() : p_data_(nullptr) {}
~ScopedTracer() {
if (p_data_ && *data_.category_group_enabled)
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 05178ac5f6..ac564ad9c2 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -9,6 +9,9 @@
#include "src/ic/handler-configuration-inl.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -31,12 +34,7 @@ TransitionArray* TransitionsAccessor::transitions() {
return TransitionArray::cast(raw_transitions_);
}
-// static
-TransitionArray* TransitionArray::cast(Object* object) {
- DCHECK(object->IsTransitionArray());
- return reinterpret_cast<TransitionArray*>(object);
-}
-
+CAST_ACCESSOR(TransitionArray)
bool TransitionArray::HasPrototypeTransitions() {
return get(kPrototypeTransitionsIndex) != Smi::kZero;
@@ -215,4 +213,6 @@ void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_TRANSITIONS_INL_H_
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 28b14e1d05..8e087b2e67 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -137,7 +137,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
++new_nof;
- CHECK(new_nof <= kMaxNumberOfTransitions);
+ CHECK_LE(new_nof, kMaxNumberOfTransitions);
DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
// If there is enough capacity, insert new entry into the existing array.
@@ -289,7 +289,7 @@ Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
Map* TransitionsAccessor::SearchSpecial(Symbol* name) {
if (encoding() != kFullTransitionArray) return nullptr;
int transition = transitions()->SearchSpecial(name);
- if (transition == kNotFound) return NULL;
+ if (transition == kNotFound) return nullptr;
return transitions()->GetTarget(transition);
}
@@ -307,7 +307,7 @@ Handle<Map> TransitionsAccessor::FindTransitionToField(Handle<Name> name) {
DCHECK(name->IsUniqueName());
DisallowHeapAllocation no_gc;
Map* target = SearchTransition(*name, kData, NONE);
- if (target == NULL) return Handle<Map>::null();
+ if (target == nullptr) return Handle<Map>::null();
PropertyDetails details = target->GetLastDescriptorDetails();
DCHECK_EQ(NONE, details.attributes());
if (details.location() != kField) return Handle<Map>::null();
@@ -475,7 +475,7 @@ FixedArray* TransitionsAccessor::GetPrototypeTransitions() {
// static
void TransitionArray::SetNumberOfPrototypeTransitions(
FixedArray* proto_transitions, int value) {
- DCHECK(proto_transitions->length() != 0);
+ DCHECK_NE(proto_transitions->length(), 0);
proto_transitions->set(kProtoTransitionNumberOfEntriesOffset,
Smi::FromInt(value));
}
@@ -645,7 +645,7 @@ int TransitionArray::SearchDetails(int transition, PropertyKind kind,
break;
}
}
- if (out_insertion_index != NULL) *out_insertion_index = transition;
+ if (out_insertion_index != nullptr) *out_insertion_index = transition;
return kNotFound;
}
@@ -664,24 +664,27 @@ void TransitionArray::Sort() {
int length = number_of_transitions();
for (int i = 1; i < length; i++) {
Name* key = GetKey(i);
- Map* target = GetTarget(i);
+ Object* target = GetRawTarget(i);
PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(key)) {
+ Map* target_map = TransitionsAccessor::GetTargetFromRaw(target);
PropertyDetails details =
- TransitionsAccessor::GetTargetDetails(key, target);
+ TransitionsAccessor::GetTargetDetails(key, target_map);
kind = details.kind();
attributes = details.attributes();
}
int j;
for (j = i - 1; j >= 0; j--) {
Name* temp_key = GetKey(j);
- Map* temp_target = GetTarget(j);
+ Object* temp_target = GetRawTarget(j);
PropertyKind temp_kind = kData;
PropertyAttributes temp_attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(temp_key)) {
+ Map* temp_target_map =
+ TransitionsAccessor::GetTargetFromRaw(temp_target);
PropertyDetails details =
- TransitionsAccessor::GetTargetDetails(temp_key, temp_target);
+ TransitionsAccessor::GetTargetDetails(temp_key, temp_target_map);
temp_kind = details.kind();
temp_attributes = details.attributes();
}
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 4fa9800571..62a8bb50d4 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -12,6 +12,9 @@
#include "src/objects/map.h"
#include "src/objects/name.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -205,7 +208,7 @@ class TransitionsAccessor {
// [3 + number of transitions * kTransitionSize]: start of slack
class TransitionArray : public FixedArray {
public:
- inline static TransitionArray* cast(Object* object);
+ DECL_CAST(TransitionArray)
inline FixedArray* GetPrototypeTransitions();
inline Object** GetPrototypeTransitionsSlot();
@@ -227,7 +230,7 @@ class TransitionArray : public FixedArray {
return GetKey(transition_number);
}
int GetSortedKeyIndex(int transition_number) { return transition_number; }
- inline int number_of_entries() { return number_of_transitions(); }
+ inline int number_of_entries() const { return number_of_transitions(); }
#ifdef DEBUG
bool IsSortedNoDuplicates(int valid_entries = -1);
#endif
@@ -240,13 +243,8 @@ class TransitionArray : public FixedArray {
void Print(std::ostream& os);
#endif
-#ifdef OBJECT_PRINT
- void TransitionArrayPrint(std::ostream& os); // NOLINT
-#endif
-
-#ifdef VERIFY_HEAP
- void TransitionArrayVerify();
-#endif
+ DECL_PRINTER(TransitionArray)
+ DECL_VERIFIER(TransitionArray)
private:
friend class MarkCompactCollector;
@@ -314,19 +312,19 @@ class TransitionArray : public FixedArray {
// Search a transition for a given kind, property name and attributes.
int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
- int* out_insertion_index = NULL);
+ int* out_insertion_index = nullptr);
// Search a non-property transition (like elements kind, observe or frozen
// transitions).
- inline int SearchSpecial(Symbol* symbol, int* out_insertion_index = NULL) {
+ inline int SearchSpecial(Symbol* symbol, int* out_insertion_index = nullptr) {
return SearchName(symbol, out_insertion_index);
}
// Search a first transition for a given property name.
- inline int SearchName(Name* name, int* out_insertion_index = NULL);
+ inline int SearchName(Name* name, int* out_insertion_index = nullptr);
int SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes, int* out_insertion_index);
- int number_of_transitions() {
+ int number_of_transitions() const {
if (length() < kFirstIndex) return 0;
return Smi::ToInt(get(kTransitionLengthIndex));
}
@@ -362,8 +360,9 @@ class TransitionArray : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
};
-
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_TRANSITIONS_H_
diff --git a/deps/v8/src/trap-handler/handler-inside.cc b/deps/v8/src/trap-handler/handler-inside.cc
index 9336636b21..d3c543f4f4 100644
--- a/deps/v8/src/trap-handler/handler-inside.cc
+++ b/deps/v8/src/trap-handler/handler-inside.cc
@@ -102,13 +102,15 @@ bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context) {
if (TryFindLandingPad(fault_addr, &landing_pad)) {
// Tell the caller to return to the landing pad.
context->uc_mcontext.gregs[REG_RIP] = landing_pad;
+ // We will return to wasm code, so restore the g_thread_in_wasm_code flag.
+ g_thread_in_wasm_code = true;
return true;
}
} // end signal mask scope
// If we get here, it's not a recoverable wasm fault, so we go to the next
- // handler.
- g_thread_in_wasm_code = true;
+ // handler. Leave the g_thread_in_wasm_code flag unset since we do not return
+ // to wasm code.
return false;
}
@@ -160,18 +162,14 @@ void HandleSignal(int signum, siginfo_t* info, void* context) {
if (!TryHandleSignal(signum, info, uc)) {
// Since V8 didn't handle this signal, we want to re-raise the same signal.
- // For kernel-generated SEGV signals, we do this by restoring the default
+ // For kernel-generated SEGV signals, we do this by restoring the original
// SEGV handler and then returning. The fault will happen again and the
// usual SEGV handling will happen.
//
// We handle user-generated signals by calling raise() instead. This is for
// completeness. We should never actually see one of these, but just in
// case, we do the right thing.
- struct sigaction action;
- action.sa_handler = SIG_DFL;
- sigemptyset(&action.sa_mask);
- action.sa_flags = 0;
- sigaction(signum, &action, nullptr);
+ RestoreOriginalSignalHandler();
if (!IsKernelGeneratedSignal(info)) {
raise(signum);
}
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 5cb9661e7b..2c9225d485 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -115,7 +115,7 @@ void ValidateCodeObjects() {
CodeProtectionInfo* CreateHandlerData(
void* base, size_t size, size_t num_protected_instructions,
- ProtectedInstructionData* protected_instructions) {
+ const ProtectedInstructionData* protected_instructions) {
const size_t alloc_size = HandlerDataSize(num_protected_instructions);
CodeProtectionInfo* data =
reinterpret_cast<CodeProtectionInfo*>(malloc(alloc_size));
@@ -143,9 +143,9 @@ void UpdateHandlerDataCodePointer(int index, void* base) {
data->base = base;
}
-int RegisterHandlerData(void* base, size_t size,
- size_t num_protected_instructions,
- ProtectedInstructionData* protected_instructions) {
+int RegisterHandlerData(
+ void* base, size_t size, size_t num_protected_instructions,
+ const ProtectedInstructionData* protected_instructions) {
// TODO(eholk): in debug builds, make sure this data isn't already registered.
CodeProtectionInfo* data = CreateHandlerData(
@@ -248,6 +248,8 @@ void ReleaseHandlerData(int index) {
bool RegisterDefaultSignalHandler() {
#if V8_TRAP_HANDLER_SUPPORTED
+ CHECK(!g_is_default_signal_handler_registered);
+
struct sigaction action;
action.sa_sigaction = HandleSignal;
action.sa_flags = SA_SIGINFO;
@@ -255,10 +257,11 @@ bool RegisterDefaultSignalHandler() {
// {sigaction} installs a new custom segfault handler. On success, it returns
// 0. If we get a nonzero value, we report an error to the caller by returning
// false.
- if (sigaction(SIGSEGV, &action, nullptr) != 0) {
+ if (sigaction(SIGSEGV, &action, &g_old_handler) != 0) {
return false;
}
+ g_is_default_signal_handler_registered = true;
return true;
#else
return false;
diff --git a/deps/v8/src/trap-handler/handler-shared.cc b/deps/v8/src/trap-handler/handler-shared.cc
index 19f8b5bf68..d07f7ae131 100644
--- a/deps/v8/src/trap-handler/handler-shared.cc
+++ b/deps/v8/src/trap-handler/handler-shared.cc
@@ -26,7 +26,22 @@ namespace trap_handler {
// We declare this as int rather than bool as a workaround for a glibc bug, in
// which the dynamic loader cannot handle executables whose TLS area is only
// 1 byte in size; see https://sourceware.org/bugzilla/show_bug.cgi?id=14898.
-THREAD_LOCAL int g_thread_in_wasm_code = false;
+THREAD_LOCAL int g_thread_in_wasm_code;
+
+#if V8_TRAP_HANDLER_SUPPORTED
+// When using the default signal handler, we save the old one to restore in case
+// V8 chooses not to handle the signal.
+struct sigaction g_old_handler;
+bool g_is_default_signal_handler_registered;
+#endif
+
+V8_EXPORT_PRIVATE void RestoreOriginalSignalHandler() {
+#if V8_TRAP_HANDLER_SUPPORTED
+ if (sigaction(SIGSEGV, &g_old_handler, nullptr) == 0) {
+ g_is_default_signal_handler_registered = false;
+ }
+#endif
+}
static_assert(sizeof(g_thread_in_wasm_code) > 1,
"sizeof(thread_local_var) must be > 1, see "
diff --git a/deps/v8/src/trap-handler/trap-handler-internal.h b/deps/v8/src/trap-handler/trap-handler-internal.h
index 7897bd0ecc..1476eb844b 100644
--- a/deps/v8/src/trap-handler/trap-handler-internal.h
+++ b/deps/v8/src/trap-handler/trap-handler-internal.h
@@ -68,6 +68,13 @@ extern std::atomic_size_t gRecoveredTrapCount;
// unchanged.
bool TryFindLandingPad(uintptr_t fault_addr, uintptr_t* landing_pad);
+#if V8_TRAP_HANDLER_SUPPORTED
+// When using the default signal handler, we save the old one to restore in case
+// V8 chooses not to handle the signal.
+extern struct sigaction g_old_handler;
+extern bool g_is_default_signal_handler_registered;
+#endif
+
} // namespace trap_handler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 7189c27e29..612cf51b45 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -50,9 +50,11 @@ void UpdateHandlerDataCodePointer(int index, void* base);
/// UpdateHandlerDataCodePointer and ReleaseHandlerData, or -1 on failure.
int RegisterHandlerData(void* base, size_t size,
size_t num_protected_instructions,
- ProtectedInstructionData* protected_instructions);
+ const ProtectedInstructionData* protected_instructions);
/// Removes the data from the master list and frees any memory, if necessary.
+/// TODO(mtrofin): once FLAG_wasm_jit_to_native is not needed, we can switch
+/// to using size_t for index and not need kInvalidIndex.
void ReleaseHandlerData(int index);
#if V8_OS_WIN
@@ -87,6 +89,7 @@ inline void ClearThreadInWasm() {
}
bool RegisterDefaultSignalHandler();
+V8_EXPORT_PRIVATE void RestoreOriginalSignalHandler();
#if V8_OS_LINUX
bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context);
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 8e64f3f583..11ce1561f9 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -67,93 +67,6 @@ std::ostream& operator<<(std::ostream& os, ForInHint hint) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
- switch (hint) {
- case ToBooleanHint::kNone:
- return os << "None";
- case ToBooleanHint::kUndefined:
- return os << "Undefined";
- case ToBooleanHint::kBoolean:
- return os << "Boolean";
- case ToBooleanHint::kNull:
- return os << "Null";
- case ToBooleanHint::kSmallInteger:
- return os << "SmallInteger";
- case ToBooleanHint::kReceiver:
- return os << "Receiver";
- case ToBooleanHint::kString:
- return os << "String";
- case ToBooleanHint::kSymbol:
- return os << "Symbol";
- case ToBooleanHint::kHeapNumber:
- return os << "HeapNumber";
- case ToBooleanHint::kAny:
- return os << "Any";
- case ToBooleanHint::kNeedsMap:
- return os << "NeedsMap";
- }
- UNREACHABLE();
-}
-
-std::string ToString(ToBooleanHint hint) {
- switch (hint) {
- case ToBooleanHint::kNone:
- return "None";
- case ToBooleanHint::kUndefined:
- return "Undefined";
- case ToBooleanHint::kBoolean:
- return "Boolean";
- case ToBooleanHint::kNull:
- return "Null";
- case ToBooleanHint::kSmallInteger:
- return "SmallInteger";
- case ToBooleanHint::kReceiver:
- return "Receiver";
- case ToBooleanHint::kString:
- return "String";
- case ToBooleanHint::kSymbol:
- return "Symbol";
- case ToBooleanHint::kHeapNumber:
- return "HeapNumber";
- case ToBooleanHint::kAny:
- return "Any";
- case ToBooleanHint::kNeedsMap:
- return "NeedsMap";
- }
- UNREACHABLE();
-}
-
-std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
- if (hints == ToBooleanHint::kAny) return os << "Any";
- if (hints == ToBooleanHint::kNone) return os << "None";
- bool first = true;
- for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * 8; ++i) {
- ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
- if (hints & hint) {
- if (!first) os << "|";
- first = false;
- os << hint;
- }
- }
- return os;
-}
-
-std::string ToString(ToBooleanHints hints) {
- if (hints == ToBooleanHint::kAny) return "Any";
- if (hints == ToBooleanHint::kNone) return "None";
- std::string ret;
- bool first = true;
- for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * 8; ++i) {
- ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
- if (hints & hint) {
- if (!first) ret += "|";
- first = false;
- ret += ToString(hint);
- }
- }
- return ret;
-}
-
std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
switch (flags) {
case STRING_ADD_CHECK_NONE:
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index 6e50649646..66102eae9a 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -58,33 +58,6 @@ enum class ForInHint : uint8_t {
std::ostream& operator<<(std::ostream&, ForInHint);
-// Type hints for the ToBoolean type conversion.
-enum class ToBooleanHint : uint16_t {
- kNone = 0u,
- kUndefined = 1u << 0,
- kBoolean = 1u << 1,
- kNull = 1u << 2,
- kSmallInteger = 1u << 3,
- kReceiver = 1u << 4,
- kString = 1u << 5,
- kSymbol = 1u << 6,
- kHeapNumber = 1u << 7,
- kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
- kSymbol | kHeapNumber,
- kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber,
- kCanBeUndetectable = kReceiver,
-};
-
-std::ostream& operator<<(std::ostream&, ToBooleanHint);
-std::string ToString(ToBooleanHint);
-
-typedef base::Flags<ToBooleanHint, uint16_t> ToBooleanHints;
-
-std::ostream& operator<<(std::ostream&, ToBooleanHints);
-std::string ToString(ToBooleanHints);
-
-DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
-
enum StringAddFlags {
// Omit both parameter checks.
STRING_ADD_CHECK_NONE = 0,
diff --git a/deps/v8/src/unicode-decoder.cc b/deps/v8/src/unicode-decoder.cc
index 2289e08342..d2360b6c68 100644
--- a/deps/v8/src/unicode-decoder.cc
+++ b/deps/v8/src/unicode-decoder.cc
@@ -14,7 +14,7 @@ void Utf8DecoderBase::Reset(uint16_t* buffer, size_t buffer_length,
const uint8_t* stream, size_t stream_length) {
// Assume everything will fit in the buffer and stream won't be needed.
last_byte_of_buffer_unused_ = false;
- unbuffered_start_ = NULL;
+ unbuffered_start_ = nullptr;
unbuffered_length_ = 0;
bool writing_to_buffer = true;
// Loop until stream is read, writing to buffer as long as buffer has space.
@@ -72,7 +72,7 @@ void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
*data++ = Utf16::LeadSurrogate(character);
*data++ = Utf16::TrailSurrogate(character);
- DCHECK(data_length > 1);
+ DCHECK_GT(data_length, 1);
data_length -= 2;
} else {
*data++ = character;
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index 35d23a2ac7..38a1837af3 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -47,14 +47,12 @@ class Utf8Decoder : public Utf8DecoderBase {
uint16_t buffer_[kBufferSize];
};
-
Utf8DecoderBase::Utf8DecoderBase()
- : unbuffered_start_(NULL),
+ : unbuffered_start_(nullptr),
unbuffered_length_(0),
utf16_length_(0),
last_byte_of_buffer_unused_(false) {}
-
Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
const uint8_t* stream, size_t stream_length) {
Reset(buffer, buffer_length, stream, stream_length);
@@ -77,7 +75,7 @@ void Utf8Decoder<kBufferSize>::Reset(const char* stream, size_t length) {
template <size_t kBufferSize>
size_t Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
size_t length) const {
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
if (length > utf16_length_) length = utf16_length_;
// memcpy everything in buffer.
size_t buffer_length =
@@ -85,7 +83,7 @@ size_t Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
size_t memcpy_length = length <= buffer_length ? length : buffer_length;
v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
if (length <= buffer_length) return length;
- DCHECK(unbuffered_start_ != NULL);
+ DCHECK_NOT_NULL(unbuffered_start_);
// Copy the rest the slow way.
WriteUtf16Slow(unbuffered_start_, unbuffered_length_, data + buffer_length,
length - buffer_length);
@@ -103,7 +101,7 @@ class Latin1 {
uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
- DCHECK(c > Latin1::kMaxChar);
+ DCHECK_GT(c, Latin1::kMaxChar);
switch (c) {
// This are equivalent characters in unicode.
case 0x39c:
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index a6ad3ddb9d..3ebf58857b 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -360,7 +360,7 @@ MaybeHandle<String> UnescapeSlow(Isolate* isolate, Handle<String> string,
int dest_position = 0;
Handle<String> second_part;
- DCHECK(unescaped_length <= String::kMaxLength);
+ DCHECK_LE(unescaped_length, String::kMaxLength);
if (one_byte) {
Handle<SeqOneByteString> dest = isolate->factory()
->NewRawOneByteString(unescaped_length)
@@ -444,7 +444,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
}
// We don't allow strings that are longer than a maximal length.
- DCHECK(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
+ DCHECK_LT(String::kMaxLength, 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 9d166e06c6..8f20fba139 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -14,7 +14,6 @@
namespace v8 {
namespace internal {
-
SimpleStringBuilder::SimpleStringBuilder(int size) {
buffer_ = Vector<char>::New(size);
position_ = 0;
@@ -151,19 +150,19 @@ void Flush(FILE* out) {
char* ReadLine(const char* prompt) {
- char* result = NULL;
+ char* result = nullptr;
char line_buf[256];
int offset = 0;
bool keep_going = true;
fprintf(stdout, "%s", prompt);
fflush(stdout);
while (keep_going) {
- if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+ if (fgets(line_buf, sizeof(line_buf), stdin) == nullptr) {
// fgets got an error. Just give up.
- if (result != NULL) {
+ if (result != nullptr) {
DeleteArray(result);
}
- return NULL;
+ return nullptr;
}
int len = StrLength(line_buf);
if (len > 1 &&
@@ -179,7 +178,7 @@ char* ReadLine(const char* prompt) {
// will exit the loop after copying this buffer into the result.
keep_going = false;
}
- if (result == NULL) {
+ if (result == nullptr) {
// Allocate the initial result and make room for the terminating '\0'
result = NewArray<char>(len + 1);
} else {
@@ -196,7 +195,7 @@ char* ReadLine(const char* prompt) {
MemCopy(result + offset, line_buf, len * kCharSize);
offset += len;
}
- DCHECK(result != NULL);
+ DCHECK_NOT_NULL(result);
result[offset] = '\0';
return result;
}
@@ -207,11 +206,11 @@ char* ReadCharsFromFile(FILE* file,
int extra_space,
bool verbose,
const char* filename) {
- if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
+ if (file == nullptr || fseek(file, 0, SEEK_END) != 0) {
if (verbose) {
base::OS::PrintError("Cannot read from file %s.\n", filename);
}
- return NULL;
+ return nullptr;
}
// Get the size of the file and rewind it.
@@ -224,7 +223,7 @@ char* ReadCharsFromFile(FILE* file,
if (read != (*size - i) && ferror(file) != 0) {
fclose(file);
DeleteArray(result);
- return NULL;
+ return nullptr;
}
i += read;
}
@@ -238,7 +237,7 @@ char* ReadCharsFromFile(const char* filename,
bool verbose) {
FILE* file = base::OS::FOpen(filename, "rb");
char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
- if (file != NULL) fclose(file);
+ if (file != nullptr) fclose(file);
return result;
}
@@ -299,7 +298,7 @@ int AppendChars(const char* filename,
int size,
bool verbose) {
FILE* f = base::OS::FOpen(filename, "ab");
- if (f == NULL) {
+ if (f == nullptr) {
if (verbose) {
base::OS::PrintError("Cannot open file %s for writing.\n", filename);
}
@@ -316,7 +315,7 @@ int WriteChars(const char* filename,
int size,
bool verbose) {
FILE* f = base::OS::FOpen(filename, "wb");
- if (f == NULL) {
+ if (f == nullptr) {
if (verbose) {
base::OS::PrintError("Cannot open file %s for writing.\n", filename);
}
@@ -412,7 +411,7 @@ void init_memcopy_functions(Isolate* isolate) {
g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32
MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
- if (generated_memmove != NULL) {
+ if (generated_memmove != nullptr) {
memmove_function = generated_memmove;
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index db20fe0b99..e6e98fabba 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -22,6 +22,10 @@
#include "src/vector.h"
#include "src/zone/zone.h"
+#if defined(V8_OS_AIX)
+#include <fenv.h> // NOLINT(build/c++11)
+#endif
+
namespace v8 {
namespace internal {
@@ -46,10 +50,9 @@ inline char HexCharOfValue(int value) {
inline int BoolToInt(bool b) { return b ? 1 : 0; }
-
// Same as strcmp, but can handle NULL arguments.
inline bool CStringEquals(const char* s1, const char* s2) {
- return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
+ return (s1 == s2) || (s1 != nullptr && s2 != nullptr && strcmp(s1, s2) == 0);
}
// X must be a power of 2. Returns the number of trailing zeros.
@@ -211,6 +214,27 @@ inline double Floor(double x) {
return std::floor(x);
}
+inline double Modulo(double x, double y) {
+#if defined(V8_OS_WIN)
+ // Workaround MS fmod bugs. ECMA-262 says:
+ // dividend is finite and divisor is an infinity => result equals dividend
+ // dividend is a zero and divisor is nonzero finite => result equals dividend
+ if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
+ !(x == 0 && (y != 0 && std::isfinite(y)))) {
+ x = fmod(x, y);
+ }
+ return x;
+#elif defined(V8_OS_AIX)
+ // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE)
+ feclearexcept(FE_ALL_EXCEPT);
+ double result = std::fmod(x, y);
+ int exception = fetestexcept(FE_UNDERFLOW);
+ return (exception ? x : result);
+#else
+ return std::fmod(x, y);
+#endif
+}
+
inline double Pow(double x, double y) {
if (y == 0.0) return 1.0;
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
@@ -264,7 +288,7 @@ T SaturateAdd(T a, T b) {
template <typename T>
T SaturateSub(T a, T b) {
if (std::is_signed<T>::value) {
- if (a > 0 && b < 0) {
+ if (a >= 0 && b < 0) {
if (a > std::numeric_limits<T>::max() + b) {
return std::numeric_limits<T>::max();
}
@@ -299,9 +323,10 @@ class BitFieldBase {
static const U kShift = shift;
static const U kSize = size;
static const U kNext = kShift + kSize;
+ static const U kNumValues = kOne << size;
// Value for the field with all bits set.
- static const T kMax = static_cast<T>((kOne << size) - 1);
+ static const T kMax = static_cast<T>(kNumValues - 1);
// Tells whether the provided value fits into the bit field.
static constexpr bool is_valid(T value) {
@@ -430,7 +455,7 @@ class BitSetComputer {
//
// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
//
-#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + Size - 1,
+#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
enum { \
@@ -600,8 +625,8 @@ class Access {
~Access() {
resource_->is_reserved_ = false;
- resource_ = NULL;
- instance_ = NULL;
+ resource_ = nullptr;
+ instance_ = nullptr;
}
T* value() { return instance_; }
@@ -612,22 +637,21 @@ class Access {
T* instance_;
};
-
// A pointer that can only be set once and doesn't allow NULL values.
template<typename T>
class SetOncePointer {
public:
- SetOncePointer() : pointer_(NULL) { }
+ SetOncePointer() : pointer_(nullptr) {}
- bool is_set() const { return pointer_ != NULL; }
+ bool is_set() const { return pointer_ != nullptr; }
T* get() const {
- DCHECK(pointer_ != NULL);
+ DCHECK_NOT_NULL(pointer_);
return pointer_;
}
void set(T* value) {
- DCHECK(pointer_ == NULL && value != NULL);
+ DCHECK(pointer_ == nullptr && value != nullptr);
pointer_ = value;
}
@@ -687,8 +711,8 @@ inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
template <typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
- DCHECK(sizeof(lchar) <= 2);
- DCHECK(sizeof(rchar) <= 2);
+ DCHECK_LE(sizeof(lchar), 2);
+ DCHECK_LE(sizeof(rchar), 2);
if (sizeof(lchar) == 1) {
if (sizeof(rchar) == 1) {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
@@ -715,8 +739,8 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
// Calculate 10^exponent.
inline int TenToThe(int exponent) {
- DCHECK(exponent <= 9);
- DCHECK(exponent >= 1);
+ DCHECK_LE(exponent, 9);
+ DCHECK_GE(exponent, 1);
int answer = 10;
for (int i = 1; i < exponent; i++) answer *= 10;
return answer;
@@ -791,7 +815,7 @@ class SimpleStringBuilder {
// 0-characters; use the Finalize() method to terminate the string
// instead.
void AddCharacter(char c) {
- DCHECK(c != '\0');
+ DCHECK_NE(c, '\0');
DCHECK(!is_finalized() && position_ < buffer_.length());
buffer_[position_++] = c;
}
@@ -1085,7 +1109,7 @@ inline void CopyWords(T* dst, const T* src, size_t num_words) {
STATIC_ASSERT(sizeof(T) == kPointerSize);
DCHECK(Min(dst, const_cast<T*>(src)) + num_words <=
Max(dst, const_cast<T*>(src)));
- DCHECK(num_words > 0);
+ DCHECK_GT(num_words, 0);
// Use block copying MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
@@ -1106,7 +1130,7 @@ inline void CopyWords(T* dst, const T* src, size_t num_words) {
template <typename T>
inline void MoveWords(T* dst, const T* src, size_t num_words) {
STATIC_ASSERT(sizeof(T) == kPointerSize);
- DCHECK(num_words > 0);
+ DCHECK_GT(num_words, 0);
// Use block copying MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
@@ -1151,8 +1175,8 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
- T* a = NULL;
- U* b = NULL;
+ T* a = nullptr;
+ U* b = nullptr;
a = b; // Fake assignment to check assignability.
USE(a);
#endif // DEBUG
@@ -1224,8 +1248,8 @@ INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars));
template <typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
- DCHECK(sizeof(sourcechar) <= 2);
- DCHECK(sizeof(sinkchar) <= 2);
+ DCHECK_LE(sizeof(sourcechar), 2);
+ DCHECK_LE(sizeof(sinkchar), 2);
if (sizeof(sinkchar) == 1) {
if (sizeof(sourcechar) == 1) {
CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 4e1c96b187..16107fdefc 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -33,8 +33,7 @@ V8_DECLARE_ONCE(init_natives_once);
V8_DECLARE_ONCE(init_snapshot_once);
#endif
-v8::Platform* V8::platform_ = NULL;
-
+v8::Platform* V8::platform_ = nullptr;
bool V8::Initialize() {
InitializeOncePerProcess();
@@ -66,7 +65,7 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
- base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
+ base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
Isolate::InitializeOncePerProcess();
@@ -97,7 +96,7 @@ void V8::ShutdownPlatform() {
CHECK(platform_);
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
- platform_ = NULL;
+ platform_ = nullptr;
}
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index 89eb271f61..a6f72c9f1e 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -200,12 +200,14 @@
'builtins/builtins-iterator-gen.h',
'builtins/builtins-iterator-gen.cc',
'builtins/builtins-math-gen.cc',
+ 'builtins/builtins-math-gen.h',
'builtins/builtins-number-gen.cc',
'builtins/builtins-object-gen.cc',
'builtins/builtins-promise-gen.cc',
'builtins/builtins-promise-gen.h',
'builtins/builtins-proxy-gen.cc',
'builtins/builtins-proxy-gen.h',
+ 'builtins/builtins-reflect-gen.cc',
'builtins/builtins-regexp-gen.cc',
'builtins/builtins-regexp-gen.h',
'builtins/builtins-sharedarraybuffer-gen.cc',
@@ -580,8 +582,6 @@
'assembler-inl.h',
'assert-scope.h',
'assert-scope.cc',
- 'ast/ast-expression-rewriter.cc',
- 'ast/ast-expression-rewriter.h',
'ast/ast-function-literal-id-reindexer.cc',
'ast/ast-function-literal-id-reindexer.h',
'ast/ast-numbering.cc',
@@ -604,8 +604,6 @@
'ast/scopes.h',
'ast/variables.cc',
'ast/variables.h',
- 'background-parsing-task.cc',
- 'background-parsing-task.h',
'bailout-reason.cc',
'bailout-reason.h',
'basic-block-profiler.cc',
@@ -688,6 +686,7 @@
'compiler/access-info.h',
'compiler/all-nodes.cc',
'compiler/all-nodes.h',
+ 'compiler/allocation-builder.h',
'compiler/basic-block-instrumentor.cc',
'compiler/basic-block-instrumentor.h',
'compiler/branch-elimination.cc',
@@ -992,6 +991,8 @@
'handles.cc',
'handles.h',
'heap-symbols.h',
+ 'heap/array-buffer-collector.cc',
+ 'heap/array-buffer-collector.h',
'heap/array-buffer-tracker-inl.h',
'heap/array-buffer-tracker.cc',
'heap/array-buffer-tracker.h',
@@ -1043,18 +1044,15 @@
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
+ 'heap/sweeper.cc',
+ 'heap/sweeper.h',
'heap/worklist.h',
'intl.cc',
'intl.h',
'icu_util.cc',
'icu_util.h',
- 'ic/access-compiler-data.h',
- 'ic/access-compiler.cc',
- 'ic/access-compiler.h',
'ic/call-optimization.cc',
'ic/call-optimization.h',
- 'ic/handler-compiler.cc',
- 'ic/handler-compiler.h',
'ic/handler-configuration.cc',
'ic/handler-configuration-inl.h',
'ic/handler-configuration.h',
@@ -1156,9 +1154,10 @@
'objects.h',
'objects/arguments-inl.h',
'objects/arguments.h',
- 'objects/bigint-inl.h',
'objects/bigint.cc',
'objects/bigint.h',
+ 'objects/code-inl.h',
+ 'objects/code.h',
'objects/compilation-cache.h',
'objects/compilation-cache-inl.h',
'objects/debug-objects-inl.h',
@@ -1172,7 +1171,12 @@
'objects/hash-table.h',
'objects/intl-objects.cc',
'objects/intl-objects.h',
+ 'objects/js-array.h',
+ 'objects/js-array-inl.h',
+ 'objects/js-regexp.h',
+ 'objects/js-regexp-inl.h',
'objects/literal-objects.cc',
+ 'objects/literal-objects-inl.h',
'objects/literal-objects.h',
'objects/map-inl.h',
'objects/map.h',
@@ -1199,6 +1203,8 @@
'objects/template-objects.h',
'ostreams.cc',
'ostreams.h',
+ 'parsing/background-parsing-task.cc',
+ 'parsing/background-parsing-task.h',
'parsing/duplicate-finder.h',
'parsing/expression-classifier.h',
'parsing/expression-scope-reparenter.cc',
@@ -1331,12 +1337,20 @@
'setup-isolate.h',
'signature.h',
'simulator.h',
+ 'snapshot/builtin-deserializer-allocator.cc',
+ 'snapshot/builtin-deserializer-allocator.h',
'snapshot/builtin-deserializer.cc',
'snapshot/builtin-deserializer.h',
+ 'snapshot/builtin-serializer-allocator.cc',
+ 'snapshot/builtin-serializer-allocator.h',
'snapshot/builtin-serializer.cc',
'snapshot/builtin-serializer.h',
+ 'snapshot/builtin-snapshot-utils.cc',
+ 'snapshot/builtin-snapshot-utils.h',
'snapshot/code-serializer.cc',
'snapshot/code-serializer.h',
+ 'snapshot/default-deserializer-allocator.cc',
+ 'snapshot/default-deserializer-allocator.h',
'snapshot/default-serializer-allocator.cc',
'snapshot/default-serializer-allocator.h',
'snapshot/deserializer.cc',
@@ -1423,6 +1437,9 @@
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
+ 'wasm/baseline/liftoff-assembler.cc',
+ 'wasm/baseline/liftoff-assembler.h',
+ 'wasm/baseline/liftoff-compiler.cc',
'wasm/compilation-manager.cc',
'wasm/compilation-manager.h',
'wasm/decoder.h',
@@ -1444,8 +1461,10 @@
'wasm/streaming-decoder.h',
'wasm/wasm-api.cc',
'wasm/wasm-api.h',
- 'wasm/wasm-code-specialization.h',
'wasm/wasm-code-specialization.cc',
+ 'wasm/wasm-code-specialization.h',
+ 'wasm/wasm-code-wrapper.cc',
+ 'wasm/wasm-code-wrapper.h',
'wasm/wasm-debug.cc',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
@@ -1469,6 +1488,8 @@
'wasm/wasm-opcodes.h',
'wasm/wasm-result.cc',
'wasm/wasm-result.h',
+ 'wasm/wasm-serialization.cc',
+ 'wasm/wasm-serialization.h',
'wasm/wasm-text.cc',
'wasm/wasm-text.h',
'wasm/wasm-value.h',
@@ -1500,7 +1521,6 @@
'arm/code-stubs-arm.cc',
'arm/code-stubs-arm.h',
'arm/codegen-arm.cc',
- 'arm/codegen-arm.h',
'arm/constants-arm.h',
'arm/constants-arm.cc',
'arm/cpu-arm.cc',
@@ -1519,13 +1539,13 @@
'compiler/arm/instruction-codes-arm.h',
'compiler/arm/instruction-scheduler-arm.cc',
'compiler/arm/instruction-selector-arm.cc',
- 'compiler/arm/unwinding-info-writer-arm.h',
'compiler/arm/unwinding-info-writer-arm.cc',
+ 'compiler/arm/unwinding-info-writer-arm.h',
'debug/arm/debug-arm.cc',
- 'ic/arm/access-compiler-arm.cc',
- 'ic/arm/handler-compiler-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.h',
+ 'wasm/baseline/arm/liftoff-assembler-arm-defs.h',
+ 'wasm/baseline/arm/liftoff-assembler-arm.h',
],
}],
['v8_target_arch=="arm64"', {
@@ -1534,7 +1554,6 @@
'arm64/assembler-arm64.h',
'arm64/assembler-arm64-inl.h',
'arm64/codegen-arm64.cc',
- 'arm64/codegen-arm64.h',
'arm64/code-stubs-arm64.cc',
'arm64/code-stubs-arm64.h',
'arm64/constants-arm64.h',
@@ -1567,13 +1586,13 @@
'compiler/arm64/instruction-codes-arm64.h',
'compiler/arm64/instruction-scheduler-arm64.cc',
'compiler/arm64/instruction-selector-arm64.cc',
- 'compiler/arm64/unwinding-info-writer-arm64.h',
'compiler/arm64/unwinding-info-writer-arm64.cc',
+ 'compiler/arm64/unwinding-info-writer-arm64.h',
'debug/arm64/debug-arm64.cc',
- 'ic/arm64/access-compiler-arm64.cc',
- 'ic/arm64/handler-compiler-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.h',
+ 'wasm/baseline/arm64/liftoff-assembler-arm64-defs.h',
+ 'wasm/baseline/arm64/liftoff-assembler-arm64.h',
],
}],
['v8_target_arch=="ia32"', {
@@ -1582,9 +1601,7 @@
'ia32/assembler-ia32.cc',
'ia32/assembler-ia32.h',
'ia32/code-stubs-ia32.cc',
- 'ia32/code-stubs-ia32.h',
'ia32/codegen-ia32.cc',
- 'ia32/codegen-ia32.h',
'ia32/cpu-ia32.cc',
'ia32/deoptimizer-ia32.cc',
'ia32/disasm-ia32.cc',
@@ -1601,10 +1618,10 @@
'compiler/ia32/instruction-scheduler-ia32.cc',
'compiler/ia32/instruction-selector-ia32.cc',
'debug/ia32/debug-ia32.cc',
- 'ic/ia32/access-compiler-ia32.cc',
- 'ic/ia32/handler-compiler-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.h',
+ 'wasm/baseline/ia32/liftoff-assembler-ia32-defs.h',
+ 'wasm/baseline/ia32/liftoff-assembler-ia32.h',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
@@ -1613,7 +1630,6 @@
'mips/assembler-mips.h',
'mips/assembler-mips-inl.h',
'mips/codegen-mips.cc',
- 'mips/codegen-mips.h',
'mips/code-stubs-mips.cc',
'mips/code-stubs-mips.h',
'mips/constants-mips.cc',
@@ -1633,10 +1649,10 @@
'compiler/mips/instruction-scheduler-mips.cc',
'compiler/mips/instruction-selector-mips.cc',
'debug/mips/debug-mips.cc',
- 'ic/mips/access-compiler-mips.cc',
- 'ic/mips/handler-compiler-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.h',
+ 'wasm/baseline/mips/liftoff-assembler-mips-defs.h',
+ 'wasm/baseline/mips/liftoff-assembler-mips.h',
],
}],
['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
@@ -1645,7 +1661,6 @@
'mips64/assembler-mips64.h',
'mips64/assembler-mips64-inl.h',
'mips64/codegen-mips64.cc',
- 'mips64/codegen-mips64.h',
'mips64/code-stubs-mips64.cc',
'mips64/code-stubs-mips64.h',
'mips64/constants-mips64.cc',
@@ -1665,10 +1680,10 @@
'compiler/mips64/instruction-scheduler-mips64.cc',
'compiler/mips64/instruction-selector-mips64.cc',
'debug/mips64/debug-mips64.cc',
- 'ic/mips64/access-compiler-mips64.cc',
- 'ic/mips64/handler-compiler-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.h',
+ 'wasm/baseline/mips64/liftoff-assembler-mips64-defs.h',
+ 'wasm/baseline/mips64/liftoff-assembler-mips64.h',
],
}],
['v8_target_arch=="x64"', {
@@ -1677,15 +1692,13 @@
'compiler/x64/instruction-codes-x64.h',
'compiler/x64/instruction-scheduler-x64.cc',
'compiler/x64/instruction-selector-x64.cc',
- 'compiler/x64/unwinding-info-writer-x64.h',
'compiler/x64/unwinding-info-writer-x64.cc',
+ 'compiler/x64/unwinding-info-writer-x64.h',
'x64/assembler-x64-inl.h',
'x64/assembler-x64.cc',
'x64/assembler-x64.h',
'x64/code-stubs-x64.cc',
- 'x64/code-stubs-x64.h',
'x64/codegen-x64.cc',
- 'x64/codegen-x64.h',
'x64/cpu-x64.cc',
'x64/deoptimizer-x64.cc',
'x64/disasm-x64.cc',
@@ -1699,11 +1712,11 @@
'x64/simulator-x64.h',
'x64/sse-instr.h',
'debug/x64/debug-x64.cc',
- 'ic/x64/access-compiler-x64.cc',
- 'ic/x64/handler-compiler-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.h',
'third_party/valgrind/valgrind.h',
+ 'wasm/baseline/x64/liftoff-assembler-x64-defs.h',
+ 'wasm/baseline/x64/liftoff-assembler-x64.h',
],
}],
['v8_target_arch=="x64" and OS=="linux"', {
@@ -1716,15 +1729,12 @@
'compiler/ppc/instruction-scheduler-ppc.cc',
'compiler/ppc/instruction-selector-ppc.cc',
'debug/ppc/debug-ppc.cc',
- 'ic/ppc/access-compiler-ppc.cc',
- 'ic/ppc/handler-compiler-ppc.cc',
'ppc/assembler-ppc-inl.h',
'ppc/assembler-ppc.cc',
'ppc/assembler-ppc.h',
'ppc/code-stubs-ppc.cc',
'ppc/code-stubs-ppc.h',
'ppc/codegen-ppc.cc',
- 'ppc/codegen-ppc.h',
'ppc/constants-ppc.h',
'ppc/constants-ppc.cc',
'ppc/cpu-ppc.cc',
@@ -1739,6 +1749,8 @@
'ppc/simulator-ppc.h',
'regexp/ppc/regexp-macro-assembler-ppc.cc',
'regexp/ppc/regexp-macro-assembler-ppc.h',
+ 'wasm/baseline/ppc/liftoff-assembler-ppc-defs.h',
+ 'wasm/baseline/ppc/liftoff-assembler-ppc.h',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
@@ -1748,15 +1760,12 @@
'compiler/s390/instruction-scheduler-s390.cc',
'compiler/s390/instruction-selector-s390.cc',
'debug/s390/debug-s390.cc',
- 'ic/s390/access-compiler-s390.cc',
- 'ic/s390/handler-compiler-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.h',
's390/assembler-s390.cc',
's390/assembler-s390.h',
's390/assembler-s390-inl.h',
's390/codegen-s390.cc',
- 's390/codegen-s390.h',
's390/code-stubs-s390.cc',
's390/code-stubs-s390.h',
's390/constants-s390.cc',
@@ -1771,6 +1780,8 @@
's390/macro-assembler-s390.h',
's390/simulator-s390.cc',
's390/simulator-s390.h',
+ 'wasm/baseline/s390/liftoff-assembler-s390-defs.h',
+ 'wasm/baseline/s390/liftoff-assembler-s390.h',
],
}],
['OS=="win"', {
@@ -2186,6 +2197,10 @@
'../include/libplatform/libplatform.h',
'../include/libplatform/libplatform-export.h',
'../include/libplatform/v8-tracing.h',
+ 'libplatform/default-background-task-runner.cc',
+ 'libplatform/default-background-task-runner.h',
+ 'libplatform/default-foreground-task-runner.cc',
+ 'libplatform/default-foreground-task-runner.h',
'libplatform/default-platform.cc',
'libplatform/default-platform.h',
'libplatform/task-queue.cc',
@@ -2323,9 +2338,7 @@
'js/prologue.js',
'js/v8natives.js',
'js/array.js',
- 'js/string.js',
'js/typedarray.js',
- 'js/weak-collection.js',
'js/messages.js',
'js/spread.js',
'js/proxy.js',
@@ -2450,6 +2463,12 @@
'heapobject_files': [
'objects.h',
'objects-inl.h',
+ 'objects/code.h',
+ 'objects/code-inl.h',
+ 'objects/js-array.h',
+ 'objects/js-array-inl.h',
+ 'objects/js-regexp.h',
+ 'objects/js-regexp-inl.h',
'objects/map.h',
'objects/map-inl.h',
'objects/script.h',
@@ -2541,6 +2560,7 @@
'is_ubsan_vptr=0',
'target_cpu=<(target_arch)',
'v8_enable_i18n_support=<(v8_enable_i18n_support)',
+ 'v8_enable_verify_predictable=<(v8_enable_verify_predictable)',
'v8_target_cpu=<(v8_target_arch)',
'v8_use_snapshot=<(v8_use_snapshot)',
],
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 202323ec0d..db927010ef 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -27,7 +27,7 @@ base::Atomic32 g_locker_was_ever_used_ = 0;
// Once the Locker is initialized, the current thread will be guaranteed to have
// the lock for a given isolate.
void Locker::Initialize(v8::Isolate* isolate) {
- DCHECK(isolate != NULL);
+ DCHECK_NOT_NULL(isolate);
has_lock_ = false;
top_level_ = true;
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
@@ -53,7 +53,7 @@ void Locker::Initialize(v8::Isolate* isolate) {
bool Locker::IsLocked(v8::Isolate* isolate) {
- DCHECK(isolate != NULL);
+ DCHECK_NOT_NULL(isolate);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
@@ -78,7 +78,7 @@ Locker::~Locker() {
void Unlocker::Initialize(v8::Isolate* isolate) {
- DCHECK(isolate != NULL);
+ DCHECK_NOT_NULL(isolate);
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
isolate_->thread_manager()->ArchiveThread();
@@ -105,12 +105,12 @@ bool ThreadManager::RestoreThread() {
lazily_archived_thread_ = ThreadId::Invalid();
Isolate::PerIsolateThreadData* per_thread =
isolate_->FindPerThreadDataForThisThread();
- DCHECK(per_thread != NULL);
+ DCHECK_NOT_NULL(per_thread);
DCHECK(per_thread->thread_state() == lazily_archived_thread_state_);
lazily_archived_thread_state_->set_id(ThreadId::Invalid());
lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
- lazily_archived_thread_state_ = NULL;
- per_thread->set_thread_state(NULL);
+ lazily_archived_thread_state_ = nullptr;
+ per_thread->set_thread_state(nullptr);
return true;
}
@@ -125,7 +125,7 @@ bool ThreadManager::RestoreThread() {
}
Isolate::PerIsolateThreadData* per_thread =
isolate_->FindPerThreadDataForThisThread();
- if (per_thread == NULL || per_thread->thread_state() == NULL) {
+ if (per_thread == nullptr || per_thread->thread_state() == nullptr) {
// This is a new thread.
isolate_->stack_guard()->InitThread(access);
return false;
@@ -139,7 +139,7 @@ bool ThreadManager::RestoreThread() {
from = isolate_->stack_guard()->RestoreStackGuard(from);
from = isolate_->regexp_stack()->RestoreStack(from);
from = isolate_->bootstrapper()->RestoreState(from);
- per_thread->set_thread_state(NULL);
+ per_thread->set_thread_state(nullptr);
if (state->terminate_on_restore()) {
isolate_->stack_guard()->RequestTerminateExecution();
state->set_terminate_on_restore(false);
@@ -174,16 +174,13 @@ static int ArchiveSpacePerThread() {
Relocatable::ArchiveSpacePerThread();
}
-
ThreadState::ThreadState(ThreadManager* thread_manager)
: id_(ThreadId::Invalid()),
terminate_on_restore_(false),
- data_(NULL),
+ data_(nullptr),
next_(this),
previous_(this),
- thread_manager_(thread_manager) {
-}
-
+ thread_manager_(thread_manager) {}
ThreadState::~ThreadState() {
DeleteArray<char>(data_);
@@ -230,20 +227,19 @@ ThreadState* ThreadManager::FirstThreadStateInUse() {
ThreadState* ThreadState::Next() {
- if (next_ == thread_manager_->in_use_anchor_) return NULL;
+ if (next_ == thread_manager_->in_use_anchor_) return nullptr;
return next_;
}
-
// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
: mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
- lazily_archived_thread_state_(NULL),
- free_anchor_(NULL),
- in_use_anchor_(NULL) {
+ lazily_archived_thread_state_(nullptr),
+ free_anchor_(nullptr),
+ in_use_anchor_(nullptr) {
free_anchor_ = new ThreadState(this);
in_use_anchor_ = new ThreadState(this);
}
@@ -298,14 +294,14 @@ void ThreadManager::EagerlyArchiveThread() {
to = isolate_->regexp_stack()->ArchiveStack(to);
to = isolate_->bootstrapper()->ArchiveState(to);
lazily_archived_thread_ = ThreadId::Invalid();
- lazily_archived_thread_state_ = NULL;
+ lazily_archived_thread_state_ = nullptr;
}
void ThreadManager::FreeThreadResources() {
DCHECK(!isolate_->has_pending_exception());
DCHECK(!isolate_->external_caught_exception());
- DCHECK(isolate_->try_catch_handler() == NULL);
+ DCHECK_NULL(isolate_->try_catch_handler());
isolate_->handle_scope_implementer()->FreeThreadResources();
isolate_->FreeThreadResources();
isolate_->debug()->FreeThreadResources();
@@ -318,13 +314,12 @@ void ThreadManager::FreeThreadResources() {
bool ThreadManager::IsArchived() {
Isolate::PerIsolateThreadData* data =
isolate_->FindPerThreadDataForThisThread();
- return data != NULL && data->thread_state() != NULL;
+ return data != nullptr && data->thread_state() != nullptr;
}
void ThreadManager::Iterate(RootVisitor* v) {
// Expecting no threads during serialization/deserialization
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
+ for (ThreadState* state = FirstThreadStateInUse(); state != nullptr;
state = state->Next()) {
char* data = state->data();
data = HandleScopeImplementer::Iterate(v, data);
@@ -335,8 +330,7 @@ void ThreadManager::Iterate(RootVisitor* v) {
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
+ for (ThreadState* state = FirstThreadStateInUse(); state != nullptr;
state = state->Next()) {
char* data = state->data();
data += HandleScopeImplementer::ArchiveSpacePerThread();
@@ -351,8 +345,7 @@ ThreadId ThreadManager::CurrentId() {
void ThreadManager::TerminateExecution(ThreadId thread_id) {
- for (ThreadState* state = FirstThreadStateInUse();
- state != NULL;
+ for (ThreadState* state = FirstThreadStateInUse(); state != nullptr;
state = state->Next()) {
if (thread_id.Equals(state->id())) {
state->set_terminate_on_restore(true);
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 8fc6f0c62f..bb87afea7d 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -15,7 +15,7 @@ class ThreadLocalTop;
class ThreadState {
public:
- // Returns NULL after the last one.
+ // Returns nullptr after the last one.
ThreadState* Next();
enum List {FREE_LIST, IN_USE_LIST};
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 48efbc7c31..974ee2c76d 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -21,6 +21,7 @@
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-serialization.h"
namespace v8 {
namespace internal {
@@ -143,6 +144,9 @@ enum class SerializationTag : uint8_t {
// The delegate is responsible for processing all following data.
// This "escapes" to whatever wire format the delegate chooses.
kHostObject = '\\',
+ // A transferred WebAssembly.Memory object. maximumPages:int32_t, then by
+ // SharedArrayBuffer tag and its data.
+ kWasmMemoryTransfer = 'm',
};
namespace {
@@ -479,11 +483,18 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
if (!FLAG_wasm_disable_structured_cloning) {
// Only write WebAssembly modules if not disabled by a flag.
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
- } // fall through to error case
+ }
+ break;
+ case WASM_MEMORY_TYPE:
+ if (FLAG_experimental_wasm_threads) {
+ return WriteWasmMemory(Handle<WasmMemoryObject>::cast(receiver));
+ }
+ break;
default:
- ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
- return Nothing<bool>();
+ break;
}
+
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
return Nothing<bool>();
}
@@ -845,16 +856,34 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
}
- std::unique_ptr<ScriptData> script_data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
- compiled_part);
- int script_data_length = script_data->length();
- WriteVarint<uint32_t>(script_data_length);
- WriteRawBytes(script_data->data(), script_data_length);
-
+ if (FLAG_wasm_jit_to_native) {
+ std::pair<std::unique_ptr<byte[]>, size_t> serialized_module =
+ wasm::NativeModuleSerializer::SerializeWholeModule(isolate_,
+ compiled_part);
+ WriteVarint<uint32_t>(static_cast<uint32_t>(serialized_module.second));
+ WriteRawBytes(serialized_module.first.get(), serialized_module.second);
+ } else {
+ std::unique_ptr<ScriptData> script_data =
+ WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
+ compiled_part);
+ int script_data_length = script_data->length();
+ WriteVarint<uint32_t>(script_data_length);
+ WriteRawBytes(script_data->data(), script_data_length);
+ }
return ThrowIfOutOfMemory();
}
+Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
+ if (!object->array_buffer()->is_shared()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
+ return Nothing<bool>();
+ }
+
+ WriteTag(SerializationTag::kWasmMemoryTransfer);
+ WriteZigZag<int32_t>(object->maximum_pages());
+ return WriteJSReceiver(Handle<JSReceiver>(object->array_buffer(), isolate_));
+}
+
Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
WriteTag(SerializationTag::kHostObject);
if (!delegate_) {
@@ -1065,13 +1094,13 @@ bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
void ValueDeserializer::TransferArrayBuffer(
uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
if (array_buffer_transfer_map_.is_null()) {
- array_buffer_transfer_map_ = isolate_->global_handles()->Create(
- *UnseededNumberDictionary::New(isolate_, 0));
+ array_buffer_transfer_map_ =
+ isolate_->global_handles()->Create(*NumberDictionary::New(isolate_, 0));
}
- Handle<UnseededNumberDictionary> dictionary =
+ Handle<NumberDictionary> dictionary =
array_buffer_transfer_map_.ToHandleChecked();
- Handle<UnseededNumberDictionary> new_dictionary =
- UnseededNumberDictionary::Set(dictionary, transfer_id, array_buffer);
+ Handle<NumberDictionary> new_dictionary =
+ NumberDictionary::Set(dictionary, transfer_id, array_buffer);
if (!new_dictionary.is_identical_to(dictionary)) {
GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
array_buffer_transfer_map_ =
@@ -1177,6 +1206,8 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
return ReadWasmModule();
case SerializationTag::kWasmModuleTransfer:
return ReadWasmModuleTransfer();
+ case SerializationTag::kWasmMemoryTransfer:
+ return ReadWasmMemory();
case SerializationTag::kHostObject:
return ReadHostObject();
default:
@@ -1465,11 +1496,8 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
return MaybeHandle<JSRegExp>();
}
- // Ensure the deserialized flags are valid. The context behind this is that
- // the JSRegExp::Flags enum statically includes kDotAll, but it is only valid
- // to set kDotAll if FLAG_harmony_regexp_dotall is enabled. Fuzzers don't
- // know about this and happily set kDotAll anyways, leading to CHECK failures
- // later on.
+ // Ensure the deserialized flags are valid.
+ // TODO(adamk): Can we remove this check now that dotAll is always-on?
uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::FlagCount();
if ((raw_flags & flags_mask) ||
!JSRegExp::New(pattern, static_cast<JSRegExp::Flags>(raw_flags))
@@ -1557,7 +1585,6 @@ MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
uint32_t id = next_id_++;
uint32_t byte_length;
- Vector<const uint8_t> bytes;
if (!ReadVarint<uint32_t>().To(&byte_length) ||
byte_length > static_cast<size_t>(end_ - position_)) {
return MaybeHandle<JSArrayBuffer>();
@@ -1579,13 +1606,13 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer(
bool is_shared) {
uint32_t id = next_id_++;
uint32_t transfer_id;
- Handle<UnseededNumberDictionary> transfer_map;
+ Handle<NumberDictionary> transfer_map;
if (!ReadVarint<uint32_t>().To(&transfer_id) ||
!array_buffer_transfer_map_.ToHandle(&transfer_map)) {
return MaybeHandle<JSArrayBuffer>();
}
int index = transfer_map->FindEntry(isolate_, transfer_id);
- if (index == UnseededNumberDictionary::kNotFound) {
+ if (index == NumberDictionary::kNotFound) {
return MaybeHandle<JSArrayBuffer>();
}
Handle<JSArrayBuffer> array_buffer(
@@ -1689,15 +1716,25 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
}
// Try to deserialize the compiled module first.
- ScriptData script_data(compiled_bytes.start(), compiled_bytes.length());
Handle<FixedArray> compiled_part;
MaybeHandle<JSObject> result;
- if (WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate_, &script_data, wire_bytes)
- .ToHandle(&compiled_part)) {
- result = WasmModuleObject::New(
- isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
+ if (FLAG_wasm_jit_to_native) {
+ if (wasm::NativeModuleDeserializer::DeserializeFullBuffer(
+ isolate_, compiled_bytes, wire_bytes)
+ .ToHandle(&compiled_part)) {
+ result = WasmModuleObject::New(
+ isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
+ }
} else {
+ ScriptData script_data(compiled_bytes.start(), compiled_bytes.length());
+ if (WasmCompiledModuleSerializer::DeserializeWasmModule(
+ isolate_, &script_data, wire_bytes)
+ .ToHandle(&compiled_part)) {
+ result = WasmModuleObject::New(
+ isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
+ }
+ }
+ if (result.is_null()) {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
result = wasm::SyncCompile(isolate_, &thrower,
wasm::ModuleWireBytes(wire_bytes));
@@ -1710,6 +1747,36 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
return result;
}
+MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
+ uint32_t id = next_id_++;
+
+ if (!FLAG_experimental_wasm_threads) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ int32_t maximum_pages;
+ if (!ReadZigZag<int32_t>().To(&maximum_pages)) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ SerializationTag tag;
+ if (!ReadTag().To(&tag) || tag != SerializationTag::kSharedArrayBuffer) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ const bool is_shared = true;
+ Handle<JSArrayBuffer> buffer;
+ if (!ReadTransferredJSArrayBuffer(is_shared).ToHandle(&buffer)) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ Handle<WasmMemoryObject> result =
+ WasmMemoryObject::New(isolate_, buffer, maximum_pages);
+
+ AddObjectWithID(id, result);
+ return result;
+}
+
MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
if (!delegate_) return MaybeHandle<JSObject>();
STACK_CHECK(isolate_, MaybeHandle<JSObject>());
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index 43c73cbb56..a272fa0945 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -31,6 +31,7 @@ class JSValue;
class Object;
class Oddball;
class Smi;
+class WasmMemoryObject;
class WasmModuleObject;
enum class SerializationTag : uint8_t;
@@ -127,6 +128,8 @@ class ValueSerializer {
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
WARN_UNUSED_RESULT;
+ Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
+ WARN_UNUSED_RESULT;
Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
/*
@@ -270,6 +273,7 @@ class ValueDeserializer {
Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadWasmModule() WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadWasmModuleTransfer() WARN_UNUSED_RESULT;
+ MaybeHandle<WasmMemoryObject> ReadWasmMemory() WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadHostObject() WARN_UNUSED_RESULT;
/*
@@ -296,7 +300,7 @@ class ValueDeserializer {
// Always global handles.
Handle<FixedArray> id_map_;
- MaybeHandle<UnseededNumberDictionary> array_buffer_transfer_map_;
+ MaybeHandle<NumberDictionary> array_buffer_transfer_map_;
DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 7ae4f0eb04..97ce43e8aa 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -19,14 +19,14 @@ namespace internal {
template <typename T>
class Vector {
public:
- Vector() : start_(NULL), length_(0) {}
+ constexpr Vector() : start_(nullptr), length_(0) {}
Vector(T* data, size_t length) : start_(data), length_(length) {
- DCHECK(length == 0 || data != NULL);
+ DCHECK(length == 0 || data != nullptr);
}
template <int N>
- explicit Vector(T (&arr)[N]) : start_(arr), length_(N) {}
+ explicit constexpr Vector(T (&arr)[N]) : start_(arr), length_(N) {}
static Vector<T> New(int length) {
return Vector<T>(NewArray<T>(length), length);
@@ -47,13 +47,13 @@ class Vector {
}
// Returns the length of the vector as a size_t.
- size_t size() const { return length_; }
+ constexpr size_t size() const { return length_; }
// Returns whether or not the vector is empty.
- bool is_empty() const { return length_ == 0; }
+ constexpr bool is_empty() const { return length_ == 0; }
// Returns the pointer to the start of the data in the vector.
- T* start() const { return start_; }
+ constexpr T* start() const { return start_; }
// Access individual vector elements - checks bounds in debug mode.
T& operator[](size_t index) const {
@@ -65,11 +65,14 @@ class Vector {
T& first() { return start_[0]; }
- T& last() { return start_[length_ - 1]; }
+ T& last() {
+ DCHECK_LT(0, length_);
+ return start_[length_ - 1];
+ }
typedef T* iterator;
- inline iterator begin() const { return &start_[0]; }
- inline iterator end() const { return &start_[length_]; }
+ constexpr iterator begin() const { return start_; }
+ constexpr iterator end() const { return start_ + length_; }
// Returns a clone of this vector with a new backing store.
Vector<T> Clone() const {
@@ -115,12 +118,12 @@ class Vector {
// vector is empty.
void Dispose() {
DeleteArray(start_);
- start_ = NULL;
+ start_ = nullptr;
length_ = 0;
}
inline Vector<T> operator+(size_t offset) {
- DCHECK_LT(offset, length_);
+ DCHECK_LE(offset, length_);
return Vector<T>(start_ + offset, length_ - offset);
}
@@ -128,10 +131,10 @@ class Vector {
inline operator Vector<const T>() { return Vector<const T>::cast(*this); }
// Factory method for creating empty vectors.
- static Vector<T> empty() { return Vector<T>(NULL, 0); }
+ static Vector<T> empty() { return Vector<T>(nullptr, 0); }
- template<typename S>
- static Vector<T> cast(Vector<S> input) {
+ template <typename S>
+ static constexpr Vector<T> cast(Vector<S> input) {
return Vector<T>(reinterpret_cast<T*>(input.start()),
input.length() * sizeof(S) / sizeof(T));
}
@@ -214,7 +217,7 @@ inline Vector<char> MutableCStrVector(char* data, int max) {
}
template <typename T, int N>
-inline Vector<T> ArrayVector(T (&arr)[N]) {
+inline constexpr Vector<T> ArrayVector(T (&arr)[N]) {
return Vector<T>(arr);
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 12caf9835e..be8c85fdb9 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -40,7 +40,7 @@ void Version::GetString(Vector<char> str) {
// Calculate the SONAME for the V8 shared library.
void Version::GetSONAME(Vector<char> str) {
- if (soname_ == NULL || *soname_ == '\0') {
+ if (soname_ == nullptr || *soname_ == '\0') {
// Generate generic SONAME if no specific SONAME is defined.
const char* candidate = IsCandidate() ? "-candidate" : "";
if (GetPatch() > 0) {
diff --git a/deps/v8/src/visitors.cc b/deps/v8/src/visitors.cc
index 4f93c63f0d..98911f1c28 100644
--- a/deps/v8/src/visitors.cc
+++ b/deps/v8/src/visitors.cc
@@ -4,6 +4,8 @@
#include "src/visitors.h"
+#include "src/objects/code.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
index baf989182b..7696df8faf 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/visitors.h
@@ -10,6 +10,7 @@
namespace v8 {
namespace internal {
+class CodeDataContainer;
class Object;
#define ROOT_ID_LIST(V) \
@@ -75,6 +76,44 @@ class RootVisitor BASE_EMBEDDED {
virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
};
+// Abstract base class for visiting, and optionally modifying, the
+// pointers contained in Objects. Used in GC and serialization/deserialization.
+class ObjectVisitor BASE_EMBEDDED {
+ public:
+ virtual ~ObjectVisitor() {}
+
+ // Visits a contiguous arrays of pointers in the half-open range
+ // [start, end). Any or all of the values may be modified on return.
+ virtual void VisitPointers(HeapObject* host, Object** start,
+ Object** end) = 0;
+
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitPointer(HeapObject* host, Object** p) {
+ VisitPointers(host, p, p + 1);
+ }
+
+ // To allow lazy clearing of inline caches the visitor has
+ // a rich interface for iterating over Code objects ...
+
+ // Visits a code target in the instruction stream.
+ virtual void VisitCodeTarget(Code* host, RelocInfo* rinfo);
+
+ // Visits a runtime entry in the instruction stream.
+ virtual void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) {}
+
+ // Visit pointer embedded into a code object.
+ virtual void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo);
+
+ // Visits an external reference embedded into a code object.
+ virtual void VisitExternalReference(Code* host, RelocInfo* rinfo) {}
+
+ // Visits an external reference.
+ virtual void VisitExternalReference(Foreign* host, Address* p) {}
+
+ // Visits an (encoded) internal reference.
+ virtual void VisitInternalReference(Code* host, RelocInfo* rinfo) {}
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/DEPS b/deps/v8/src/wasm/baseline/DEPS
new file mode 100644
index 0000000000..c2651fbd17
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/DEPS
@@ -0,0 +1,5 @@
+# Liftoff (the baseline compiler for WebAssembly) depends on some compiler
+# internals, like the linkage location for parameters and returns.
+include_rules = [
+ "+src/compiler/linkage.h",
+]
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h
new file mode 100644
index 0000000000..d115b3f83d
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(clemensh): Implement the LiftoffAssembler on this platform.
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
new file mode 100644
index 0000000000..d632e39aff
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {}
+
+void LiftoffAssembler::SpillContext(Register context) {}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
+
+#define DEFAULT_I32_BINOP(name, internal_name) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) {}
+
+// clang-format off
+DEFAULT_I32_BINOP(add, add)
+DEFAULT_I32_BINOP(sub, sub)
+DEFAULT_I32_BINOP(mul, imul)
+DEFAULT_I32_BINOP(and, and)
+DEFAULT_I32_BINOP(or, or)
+DEFAULT_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h
new file mode 100644
index 0000000000..18f49fae68
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(clemensh): Implement the LiftoffAssembler on this platform.
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
new file mode 100644
index 0000000000..2578301ad5
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {}
+
+void LiftoffAssembler::SpillContext(Register context) {}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
+
+#define DEFAULT_I32_BINOP(name, internal_name) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) {}
+
+// clang-format off
+DEFAULT_I32_BINOP(add, add)
+DEFAULT_I32_BINOP(sub, sub)
+DEFAULT_I32_BINOP(mul, imul)
+DEFAULT_I32_BINOP(and, and)
+DEFAULT_I32_BINOP(or, or)
+DEFAULT_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h
new file mode 100644
index 0000000000..6fd95caf41
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
new file mode 100644
index 0000000000..696e2544c0
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -0,0 +1,177 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+#include "src/assembler.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline Operand GetStackSlot(uint32_t index) {
+ // ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
+ // is located at ebp-24.
+ constexpr int32_t kStackSlotSize = 8;
+ constexpr int32_t kFirstStackSlotOffset = -24;
+ return Operand(ebp, kFirstStackSlotOffset - index * kStackSlotSize);
+}
+
+// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
+inline Operand GetContextOperand() { return Operand(ebp, -16); }
+
+} // namespace liftoff
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
+ stack_space_ = space;
+ sub(esp, Immediate(space));
+}
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
+ switch (value.type()) {
+ case kWasmI32:
+ if (value.to_i32() == 0) {
+ xor_(reg, reg);
+ } else {
+ mov(reg, Immediate(value.to_i32()));
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {
+ DCHECK_LE(offset, kMaxInt);
+ mov(dst, liftoff::GetContextOperand());
+ DCHECK_EQ(4, size);
+ mov(dst, Operand(dst, offset));
+}
+
+void LiftoffAssembler::SpillContext(Register context) {
+ mov(liftoff::GetContextOperand(), context);
+}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {
+ Operand src_op = Operand(src_addr, offset_imm);
+ if (offset_imm > kMaxInt) {
+ // The immediate can not be encoded in the operand. Load it to a register
+ // first.
+ Register src = GetUnusedRegister(kGpReg, pinned);
+ mov(src, Immediate(offset_imm));
+ src_op = Operand(src_addr, src, times_1, 0);
+ }
+ DCHECK_EQ(4, size);
+ mov(dst, src_op);
+}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {
+ Operand dst_op = Operand(dst_addr, offset_imm);
+ if (offset_imm > kMaxInt) {
+ // The immediate can not be encoded in the operand. Load it to a register
+ // first.
+ Register dst = GetUnusedRegister(kGpReg, pinned);
+ mov(dst, Immediate(offset_imm));
+ dst_op = Operand(dst_addr, dst, times_1, 0);
+ }
+ DCHECK_EQ(4, size);
+ mov(dst_op, src);
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {
+ constexpr int32_t kCallerStackSlotSize = 4;
+ mov(dst, Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ DCHECK_NE(dst_index, src_index);
+ if (cache_state_.has_unused_register()) {
+ Register reg = GetUnusedRegister(kGpReg);
+ Fill(reg, src_index);
+ Spill(dst_index, reg);
+ } else {
+ push(liftoff::GetStackSlot(src_index));
+ pop(liftoff::GetStackSlot(dst_index));
+ }
+}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {
+ if (reg != eax) mov(eax, reg);
+}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {
+ // TODO(clemensh): Handle different types here.
+ mov(liftoff::GetStackSlot(index), reg);
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ // TODO(clemensh): Handle different types here.
+ mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {
+ // TODO(clemensh): Handle different types here.
+ mov(reg, liftoff::GetStackSlot(index));
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
+ if (lhs != dst) {
+ lea(dst, Operand(lhs, rhs, times_1, 0));
+ } else {
+ add(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
+ if (dst == rhs) {
+ neg(dst);
+ add(dst, lhs);
+ } else {
+ if (dst != lhs) mov(dst, lhs);
+ sub(dst, rhs);
+ }
+}
+
+#define COMMUTATIVE_I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ if (dst == rhs) { \
+ instruction(dst, lhs); \
+ } else { \
+ if (dst != lhs) mov(dst, lhs); \
+ instruction(dst, rhs); \
+ } \
+ }
+
+// clang-format off
+COMMUTATIVE_I32_BINOP(mul, imul)
+COMMUTATIVE_I32_BINOP(and, and_)
+COMMUTATIVE_I32_BINOP(or, or_)
+COMMUTATIVE_I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef COMMUTATIVE_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
+ test(reg, reg);
+ j(zero, label);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
new file mode 100644
index 0000000000..8a68fe4d91
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -0,0 +1,389 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+#include "src/assembler-inl.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
+#include "src/macro-assembler-inl.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using VarState = LiftoffAssembler::VarState;
+
+namespace {
+
+#define __ asm_->
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
+ } while (false)
+
+class StackTransferRecipe {
+ struct RegisterMove {
+ Register dst;
+ Register src;
+ constexpr RegisterMove(Register dst, Register src) : dst(dst), src(src) {}
+ };
+ struct RegisterLoad {
+ Register dst;
+ bool is_constant_load; // otherwise load it from the stack.
+ union {
+ uint32_t stack_slot;
+ WasmValue constant;
+ };
+ RegisterLoad(Register dst, WasmValue constant)
+ : dst(dst), is_constant_load(true), constant(constant) {}
+ RegisterLoad(Register dst, uint32_t stack_slot)
+ : dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
+ };
+
+ public:
+ explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ ~StackTransferRecipe() { Execute(); }
+
+ void Execute() {
+ // First, execute register moves. Then load constants and stack values into
+ // registers.
+
+ if ((move_dst_regs & move_src_regs) == 0) {
+ // No overlap in src and dst registers. Just execute the moves in any
+ // order.
+ for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
+ register_moves.clear();
+ } else {
+ // Keep use counters of src registers.
+ constexpr size_t kRegArrSize =
+ LiftoffAssembler::CacheState::kMaxRegisterCode + 1;
+ uint32_t src_reg_use_count[kRegArrSize] = {0};
+ for (RegisterMove& rm : register_moves) {
+ ++src_reg_use_count[rm.src.code()];
+ }
+ // Now repeatedly iterate the list of register moves, and execute those
+ // whose dst register does not appear as src any more. The remaining moves
+ // are compacted during this iteration.
+ // If no more moves can be executed (because of a cycle), spill one
+ // register to the stack, add a RegisterLoad to reload it later, and
+ // continue.
+ uint32_t next_spill_slot = asm_->cache_state()->stack_height();
+ while (!register_moves.empty()) {
+ int executed_moves = 0;
+ for (auto& rm : register_moves) {
+ if (src_reg_use_count[rm.dst.code()] == 0) {
+ asm_->Move(rm.dst, rm.src);
+ ++executed_moves;
+ DCHECK_LT(0, src_reg_use_count[rm.src.code()]);
+ --src_reg_use_count[rm.src.code()];
+ } else if (executed_moves) {
+ // Compaction: Move not-executed moves to the beginning of the list.
+ (&rm)[-executed_moves] = rm;
+ }
+ }
+ if (executed_moves == 0) {
+ // There is a cycle. Spill one register, then continue.
+ Register spill_reg = register_moves.back().src;
+ asm_->Spill(next_spill_slot, spill_reg);
+ // Remember to reload into the destination register later.
+ LoadStackSlot(register_moves.back().dst, next_spill_slot);
+ DCHECK_EQ(1, src_reg_use_count[spill_reg.code()]);
+ src_reg_use_count[spill_reg.code()] = 0;
+ ++next_spill_slot;
+ executed_moves = 1;
+ }
+ constexpr RegisterMove dummy(no_reg, no_reg);
+ register_moves.resize(register_moves.size() - executed_moves, dummy);
+ }
+ }
+
+ for (RegisterLoad& rl : register_loads) {
+ if (rl.is_constant_load) {
+ asm_->LoadConstant(rl.dst, rl.constant);
+ } else {
+ asm_->Fill(rl.dst, rl.stack_slot);
+ }
+ }
+ register_loads.clear();
+ }
+
+ void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
+ uint32_t dst_index, uint32_t src_index) {
+ const VarState& dst = dst_state.stack_state[dst_index];
+ const VarState& src = __ cache_state()->stack_state[src_index];
+ switch (dst.loc()) {
+ case VarState::kStack:
+ switch (src.loc()) {
+ case VarState::kStack:
+ if (src_index == dst_index) break;
+ asm_->MoveStackValue(dst_index, src_index);
+ break;
+ case VarState::kRegister:
+ asm_->Spill(dst_index, src.reg());
+ break;
+ case VarState::kConstant:
+ // TODO(clemensh): Handle other types than i32.
+ asm_->Spill(dst_index, WasmValue(src.i32_const()));
+ break;
+ }
+ break;
+ case VarState::kRegister:
+ switch (src.loc()) {
+ case VarState::kStack:
+ LoadStackSlot(dst.reg(), src_index);
+ break;
+ case VarState::kRegister:
+ if (dst.reg() != src.reg()) MoveRegister(dst.reg(), src.reg());
+ break;
+ case VarState::kConstant:
+ LoadConstant(dst.reg(), WasmValue(src.i32_const()));
+ break;
+ }
+ break;
+ case VarState::kConstant:
+ DCHECK_EQ(dst, src);
+ break;
+ }
+ }
+
+ void MoveRegister(Register dst, Register src) {
+ DCHECK_EQ(0, move_dst_regs & dst.bit());
+ move_dst_regs |= dst.bit();
+ move_src_regs |= src.bit();
+ register_moves.emplace_back(dst, src);
+ }
+
+ void LoadConstant(Register dst, WasmValue value) {
+ register_loads.emplace_back(dst, value);
+ }
+
+ void LoadStackSlot(Register dst, uint32_t stack_index) {
+ register_loads.emplace_back(dst, stack_index);
+ }
+
+ private:
+ // TODO(clemensh): Avoid unconditionally allocating on the heap.
+ std::vector<RegisterMove> register_moves;
+ std::vector<RegisterLoad> register_loads;
+ RegList move_dst_regs = 0;
+ RegList move_src_regs = 0;
+ LiftoffAssembler* const asm_;
+};
+
+} // namespace
+
+// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
+ uint32_t num_locals,
+ uint32_t arity) {
+ DCHECK(stack_state.empty());
+ DCHECK_GE(source.stack_height(), stack_base);
+ stack_state.resize(stack_base + arity, VarState(kWasmStmt));
+
+ // |------locals------|--(in between)--|--(discarded)--|----merge----|
+ // <-- num_locals --> ^stack_base <-- arity -->
+
+ // First, initialize merge slots and locals. Keep them in the registers which
+ // are being used in {source}, but avoid using a register multiple times. Use
+ // unused registers where necessary and possible.
+ for (int range = 0; range < 2; ++range) {
+ auto src_idx = range ? 0 : source.stack_state.size() - arity;
+ auto src_end = range ? num_locals : source.stack_state.size();
+ auto dst_idx = range ? 0 : stack_state.size() - arity;
+ for (; src_idx < src_end; ++src_idx, ++dst_idx) {
+ auto& dst = stack_state[dst_idx];
+ auto& src = source.stack_state[src_idx];
+ Register reg = no_reg;
+ if (src.is_reg() && is_free(src.reg())) {
+ reg = src.reg();
+ } else if (has_unused_register()) {
+ reg = unused_register();
+ } else {
+ // Make this a stack slot.
+ DCHECK(src.is_stack());
+ dst = VarState(src.type());
+ continue;
+ }
+ dst = VarState(src.type(), reg);
+ inc_used(reg);
+ }
+ }
+ // Last, initialize the section in between. Here, constants are allowed, but
+ // registers which are already used for the merge region or locals must be
+ // spilled.
+ for (uint32_t i = num_locals; i < stack_base; ++i) {
+ auto& dst = stack_state[i];
+ auto& src = source.stack_state[i];
+ if (src.is_reg()) {
+ if (is_used(src.reg())) {
+ // Make this a stack slot.
+ dst = VarState(src.type());
+ continue;
+ }
+ dst = VarState(src.type(), src.reg());
+ inc_used(src.reg());
+ } else if (src.is_const()) {
+ dst = src;
+ } else {
+ // Keep this a stack slot (which is the initial value).
+ DCHECK(src.is_stack());
+ DCHECK(dst.is_stack());
+ continue;
+ }
+ }
+ last_spilled_reg = source.last_spilled_reg;
+}
+
+void LiftoffAssembler::CacheState::Steal(CacheState& source) {
+ // Just use the move assignment operator.
+ *this = std::move(source);
+}
+
+void LiftoffAssembler::CacheState::Split(const CacheState& source) {
+ // Call the private copy assignment operator.
+ *this = source;
+}
+
+LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
+ : TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kYes) {}
+
+LiftoffAssembler::~LiftoffAssembler() {
+ if (num_locals_ > kInlineLocalTypes) {
+ free(more_local_types_);
+ }
+}
+
+Register LiftoffAssembler::GetBinaryOpTargetRegister(
+ RegClass rc, PinnedRegisterScope pinned) {
+ auto& slot_lhs = *(cache_state_.stack_state.end() - 2);
+ if (slot_lhs.is_reg() && GetNumUses(slot_lhs.reg()) == 1) {
+ return slot_lhs.reg();
+ }
+ auto& slot_rhs = *(cache_state_.stack_state.end() - 1);
+ if (slot_rhs.is_reg() && GetNumUses(slot_rhs.reg()) == 1) {
+ return slot_rhs.reg();
+ }
+ return GetUnusedRegister(rc, pinned);
+}
+
+Register LiftoffAssembler::PopToRegister(RegClass rc,
+ PinnedRegisterScope pinned) {
+ DCHECK(!cache_state_.stack_state.empty());
+ VarState slot = cache_state_.stack_state.back();
+ cache_state_.stack_state.pop_back();
+ switch (slot.loc()) {
+ case VarState::kStack: {
+ Register reg = GetUnusedRegister(rc, pinned);
+ Fill(reg, cache_state_.stack_height());
+ return reg;
+ }
+ case VarState::kRegister:
+ cache_state_.dec_used(slot.reg());
+ return slot.reg();
+ case VarState::kConstant: {
+ Register reg = GetUnusedRegister(rc, pinned);
+ LoadConstant(reg, WasmValue(slot.i32_const()));
+ return reg;
+ }
+ }
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
+ DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
+ // TODO(clemensh): Reuse the same StackTransferRecipe object to save some
+ // allocations.
+ StackTransferRecipe transfers(this);
+ for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
+ transfers.TransferStackSlot(target, i, i);
+ }
+}
+
+void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
+ // Before: ----------------|------ pop_count -----|--- arity ---|
+ // ^target_stack_height ^stack_base ^stack_height
+ // After: ----|-- arity --|
+ // ^ ^target_stack_height
+ // ^target_stack_base
+ uint32_t stack_height = cache_state_.stack_height();
+ uint32_t target_stack_height = target.stack_height();
+ uint32_t stack_base = stack_height - arity;
+ uint32_t target_stack_base = target_stack_height - arity;
+ StackTransferRecipe transfers(this);
+ for (uint32_t i = 0; i < target_stack_base; ++i) {
+ transfers.TransferStackSlot(target, i, i);
+ }
+ for (uint32_t i = 0; i < arity; ++i) {
+ transfers.TransferStackSlot(target, target_stack_base + i, stack_base + i);
+ }
+}
+
+void LiftoffAssembler::Spill(uint32_t index) {
+ auto& slot = cache_state_.stack_state[index];
+ switch (slot.loc()) {
+ case VarState::kStack:
+ return;
+ case VarState::kRegister:
+ Spill(index, slot.reg());
+ cache_state_.dec_used(slot.reg());
+ break;
+ case VarState::kConstant:
+ Spill(index, WasmValue(slot.i32_const()));
+ break;
+ }
+ slot.MakeStack();
+}
+
+void LiftoffAssembler::SpillLocals() {
+ for (uint32_t i = 0; i < num_locals_; ++i) {
+ Spill(i);
+ }
+}
+
+Register LiftoffAssembler::SpillOneRegister(RegClass rc,
+ PinnedRegisterScope pinned_regs) {
+ DCHECK_EQ(kGpReg, rc);
+
+ // Spill one cached value to free a register.
+ Register spill_reg = cache_state_.GetNextSpillReg(pinned_regs);
+ int remaining_uses = cache_state_.register_use_count[spill_reg.code()];
+ DCHECK_LT(0, remaining_uses);
+ for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
+ DCHECK_GT(cache_state_.stack_height(), idx);
+ auto& slot = cache_state_.stack_state[idx];
+ if (!slot.is_reg() || slot.reg() != spill_reg) continue;
+ Spill(idx, spill_reg);
+ slot.MakeStack();
+ if (--remaining_uses == 0) break;
+ }
+ cache_state_.register_use_count[spill_reg.code()] = 0;
+ cache_state_.used_registers &= ~spill_reg.bit();
+ return spill_reg;
+}
+
+void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
+ DCHECK_EQ(0, num_locals_); // only call this once.
+ num_locals_ = num_locals;
+ if (num_locals > kInlineLocalTypes) {
+ more_local_types_ =
+ reinterpret_cast<ValueType*>(malloc(num_locals * sizeof(ValueType)));
+ DCHECK_NOT_NULL(more_local_types_);
+ }
+}
+
+uint32_t LiftoffAssembler::GetTotalFrameSlotCount() const {
+ return kPointerSize * (num_locals() + kMaxValueStackHeight);
+}
+
+#undef __
+#undef TRACE
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
new file mode 100644
index 0000000000..55deb593f8
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -0,0 +1,378 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
+
+#include <memory>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/frames.h"
+#include "src/macro-assembler.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-value.h"
+
+// Include platform specific definitions.
+#if V8_TARGET_ARCH_IA32
+#include "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h"
+#else
+#error Unsupported architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Forward declarations.
+struct ModuleEnv;
+
+enum RegClass { kNoReg, kGpReg, kFpReg };
+
+// TODO(clemensh): Switch to a switch once we require C++14 support.
+static constexpr RegClass reg_class_for(ValueType type) {
+ return type == kWasmI32 || type == kWasmI64 // int types
+ ? kGpReg
+ : type == kWasmF32 || type == kWasmF64 // float types
+ ? kFpReg
+ : kNoReg; // other (unsupported) types
+}
+
+class LiftoffAssembler : public TurboAssembler {
+ public:
+ // TODO(clemensh): Remove this limitation by allocating more stack space if
+ // needed.
+ static constexpr int kMaxValueStackHeight = 8;
+
+ class PinnedRegisterScope {
+ public:
+ PinnedRegisterScope() : pinned_regs_(0) {}
+ explicit PinnedRegisterScope(RegList regs) : pinned_regs_(regs) {}
+
+ Register pin(Register reg) {
+ pinned_regs_ |= reg.bit();
+ return reg;
+ }
+
+ RegList pinned_regs() const { return pinned_regs_; }
+ bool has(Register reg) const { return (pinned_regs_ & reg.bit()) != 0; }
+
+ private:
+ RegList pinned_regs_ = 0;
+ };
+ static_assert(IS_TRIVIALLY_COPYABLE(PinnedRegisterScope),
+ "PinnedRegisterScope can be passed by value");
+
+ class VarState {
+ public:
+ enum Location : uint8_t { kStack, kRegister, kConstant };
+
+ explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
+ explicit VarState(ValueType type, Register r)
+ : loc_(kRegister), type_(type), reg_(r) {}
+ explicit VarState(ValueType type, uint32_t i32_const)
+ : loc_(kConstant), type_(type), i32_const_(i32_const) {
+ DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ }
+
+ bool operator==(const VarState& other) const {
+ if (loc_ != other.loc_) return false;
+ switch (loc_) {
+ case kStack:
+ return true;
+ case kRegister:
+ return reg_ == other.reg_;
+ case kConstant:
+ return i32_const_ == other.i32_const_;
+ }
+ UNREACHABLE();
+ }
+
+ bool is_stack() const { return loc_ == kStack; }
+ bool is_reg() const { return loc_ == kRegister; }
+ bool is_const() const { return loc_ == kConstant; }
+
+ ValueType type() const { return type_; }
+
+ Location loc() const { return loc_; }
+
+ uint32_t i32_const() const {
+ DCHECK_EQ(loc_, kConstant);
+ return i32_const_;
+ }
+
+ Register reg() const {
+ DCHECK_EQ(loc_, kRegister);
+ return reg_;
+ }
+
+ void MakeStack() { loc_ = kStack; }
+
+ private:
+ Location loc_;
+ // TODO(wasm): This is redundant, the decoder already knows the type of each
+ // stack value. Try to collapse.
+ ValueType type_;
+
+ union {
+ Register reg_; // used if loc_ == kRegister
+ uint32_t i32_const_; // used if loc_ == kConstant
+ };
+ };
+ static_assert(IS_TRIVIALLY_COPYABLE(VarState),
+ "VarState should be trivially copyable");
+
+ struct CacheState {
+ // Allow default construction, move construction, and move assignment.
+ CacheState() = default;
+ CacheState(CacheState&&) = default;
+ CacheState& operator=(CacheState&&) = default;
+
+ // TODO(clemensh): Improve memory management here; avoid std::vector.
+ std::vector<VarState> stack_state;
+ RegList used_registers = 0;
+ // TODO(clemensh): Replace this by CountLeadingZeros(kGpCacheRegs) once that
+ // method is constexpr.
+ static constexpr int kMaxRegisterCode = 7;
+ uint32_t register_use_count[kMaxRegisterCode + 1] = {0};
+ // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
+ uint32_t stack_base = 0;
+ Register last_spilled_reg = Register::from_code<0>();
+
+ // InitMerge: Initialize this CacheState from the {source} cache state, but
+ // make sure that other code paths can still jump here (i.e. avoid constants
+ // in the locals or the merge region as specified by {arity}).
+ // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+ void InitMerge(const CacheState& source, uint32_t num_locals,
+ uint32_t arity);
+
+ void Steal(CacheState& source);
+
+ void Split(const CacheState& source);
+
+ bool has_unused_register(PinnedRegisterScope pinned_scope = {}) const {
+ RegList available_regs =
+ kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
+ return available_regs != 0;
+ }
+
+ Register unused_register(PinnedRegisterScope pinned_scope = {}) const {
+ RegList available_regs =
+ kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
+ Register reg =
+ Register::from_code(base::bits::CountTrailingZeros(available_regs));
+ DCHECK_EQ(0, used_registers & reg.bit());
+ return reg;
+ }
+
+ void inc_used(Register reg) {
+ used_registers |= reg.bit();
+ DCHECK_GE(kMaxRegisterCode, reg.code());
+ ++register_use_count[reg.code()];
+ }
+
+ // Returns whether this was the last use.
+ bool dec_used(Register reg) {
+ DCHECK(is_used(reg));
+ DCHECK_GE(kMaxRegisterCode, reg.code());
+ if (--register_use_count[reg.code()] == 0) {
+ used_registers &= ~reg.bit();
+ return true;
+ }
+ return false;
+ }
+
+ bool is_used(Register reg) const {
+ DCHECK_GE(kMaxRegisterCode, reg.code());
+ bool used = used_registers & reg.bit();
+ DCHECK_EQ(used, register_use_count[reg.code()] != 0);
+ return used;
+ }
+
+ bool is_free(Register reg) const { return !is_used(reg); }
+
+ uint32_t stack_height() const {
+ return static_cast<uint32_t>(stack_state.size());
+ }
+
+ Register GetNextSpillReg(PinnedRegisterScope scope = {}) {
+ uint32_t mask = (1u << (last_spilled_reg.code() + 1)) - 1;
+ RegList unpinned_regs = kGpCacheRegs & ~scope.pinned_regs();
+ DCHECK_NE(0, unpinned_regs);
+ RegList remaining_regs = unpinned_regs & ~mask;
+ if (!remaining_regs) remaining_regs = unpinned_regs;
+ last_spilled_reg =
+ Register::from_code(base::bits::CountTrailingZeros(remaining_regs));
+ return last_spilled_reg;
+ }
+
+ private:
+ // Make the copy assignment operator private (to be used from {Split()}).
+ CacheState& operator=(const CacheState&) = default;
+ // Disallow copy construction.
+ CacheState(const CacheState&) = delete;
+ };
+
+ explicit LiftoffAssembler(Isolate* isolate);
+ ~LiftoffAssembler();
+
+ Register GetBinaryOpTargetRegister(RegClass, PinnedRegisterScope = {});
+
+ Register PopToRegister(RegClass, PinnedRegisterScope = {});
+
+ void PushRegister(ValueType type, Register reg) {
+ cache_state_.inc_used(reg);
+ cache_state_.stack_state.emplace_back(type, reg);
+ }
+
+ uint32_t GetNumUses(Register reg) const {
+ DCHECK_GE(CacheState::kMaxRegisterCode, reg.code());
+ return cache_state_.register_use_count[reg.code()];
+ }
+
+ Register GetUnusedRegister(RegClass rc,
+ PinnedRegisterScope pinned_regs = {}) {
+ DCHECK_EQ(kGpReg, rc);
+ if (cache_state_.has_unused_register(pinned_regs)) {
+ return cache_state_.unused_register(pinned_regs);
+ }
+ return SpillOneRegister(rc, pinned_regs);
+ }
+
+ void DropStackSlot(VarState* slot) {
+ // The only loc we care about is register. Other types don't occupy
+ // anything.
+ if (!slot->is_reg()) return;
+ // Free the register, then set the loc to "stack".
+ // No need to write back, the value should be dropped.
+ cache_state_.dec_used(slot->reg());
+ slot->MakeStack();
+ }
+
+ void MergeFullStackWith(CacheState&);
+ void MergeStackWith(CacheState&, uint32_t arity);
+
+ void Spill(uint32_t index);
+ void SpillLocals();
+
+ ////////////////////////////////////
+ // Platform-specific part. //
+ ////////////////////////////////////
+
+ inline void ReserveStackSpace(uint32_t);
+
+ inline void LoadConstant(Register, WasmValue);
+ inline void LoadFromContext(Register dst, uint32_t offset, int size);
+ inline void SpillContext(Register context);
+ inline void Load(Register dst, Register src_addr, uint32_t offset_imm,
+ int size, PinnedRegisterScope = {});
+ inline void Store(Register dst_addr, uint32_t offset_imm, Register src,
+ int size, PinnedRegisterScope = {});
+ inline void LoadCallerFrameSlot(Register, uint32_t caller_slot_idx);
+ inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
+
+ inline void MoveToReturnRegister(Register);
+
+ inline void Spill(uint32_t index, Register);
+ inline void Spill(uint32_t index, WasmValue);
+ inline void Fill(Register, uint32_t index);
+
+ inline void emit_i32_add(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_and(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_or(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
+
+ inline void JumpIfZero(Register, Label*);
+
+ // Platform-specific constant.
+ static constexpr RegList kGpCacheRegs = kLiftoffAssemblerGpCacheRegs;
+
+ ////////////////////////////////////
+ // End of platform-specific part. //
+ ////////////////////////////////////
+
+ uint32_t num_locals() const { return num_locals_; }
+ void set_num_locals(uint32_t num_locals);
+
+ uint32_t GetTotalFrameSlotCount() const;
+ size_t GetSafepointTableOffset() const { return 0; }
+
+ ValueType local_type(uint32_t index) {
+ DCHECK_GT(num_locals_, index);
+ ValueType* locals =
+ num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
+ return locals[index];
+ }
+
+ void set_local_type(uint32_t index, ValueType type) {
+ ValueType* locals =
+ num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
+ locals[index] = type;
+ }
+
+ CacheState* cache_state() { return &cache_state_; }
+
+ private:
+ static_assert(
+ base::bits::CountPopulation(kGpCacheRegs) >= 2,
+ "We need at least two cache registers to execute binary operations");
+
+ uint32_t num_locals_ = 0;
+ uint32_t stack_space_ = 0;
+ static constexpr uint32_t kInlineLocalTypes = 8;
+ union {
+ ValueType local_types_[kInlineLocalTypes];
+ ValueType* more_local_types_;
+ };
+ static_assert(sizeof(ValueType) == 1,
+ "Reconsider this inlining if ValueType gets bigger");
+ CacheState cache_state_;
+
+ Register SpillOneRegister(RegClass, PinnedRegisterScope = {});
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+// Include platform specific implementation.
+#if V8_TARGET_ARCH_IA32
+#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
+#else
+#error Unsupported architecture.
+#endif
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
new file mode 100644
index 0000000000..a0aea7503a
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -0,0 +1,550 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+#include "src/assembler-inl.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
+#include "src/macro-assembler-inl.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
+constexpr auto kConstant = LiftoffAssembler::VarState::kConstant;
+constexpr auto kStack = LiftoffAssembler::VarState::kStack;
+
+namespace {
+
+#define __ asm_->
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
+ } while (false)
+
+#if V8_TARGET_ARCH_ARM64
+// On ARM64, the Assembler keeps track of pointers to Labels to resolve
+// branches to distant targets. Moving labels would confuse the Assembler,
+// thus store the label on the heap and keep a unique_ptr.
+class MovableLabel {
+ public:
+ Label* get() { return label_.get(); }
+
+ private:
+ std::unique_ptr<Label> label_ = base::make_unique<Label>();
+};
+#else
+// On all other platforms, just store the Label directly.
+class MovableLabel {
+ public:
+ Label* get() { return &label_; }
+
+ private:
+ Label label_;
+};
+#endif
+
+class LiftoffCompiler {
+ public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
+
+ // TODO(clemensh): Make this a template parameter.
+ static constexpr wasm::Decoder::ValidateFlag validate =
+ wasm::Decoder::kValidate;
+
+ using Value = ValueBase;
+
+ struct Control : public ControlWithNamedConstructors<Control, Value> {
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
+
+ LiftoffAssembler::CacheState label_state;
+ MovableLabel label;
+ };
+
+ using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
+
+ LiftoffCompiler(LiftoffAssembler* liftoff_asm,
+ compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env)
+ : asm_(liftoff_asm), call_desc_(call_desc), env_(env) {}
+
+ bool ok() const { return ok_; }
+
+ void unsupported(Decoder* decoder, const char* reason) {
+ ok_ = false;
+ TRACE("unsupported: %s\n", reason);
+ decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
+ BindUnboundLabels(decoder);
+ }
+
+ void BindUnboundLabels(Decoder* decoder) {
+#ifndef DEBUG
+ return;
+#endif
+ // Bind all labels now, otherwise their destructor will fire a DCHECK error
+    // if they were referenced before.
+ for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
+ Label* label = decoder->control_at(i)->label.get();
+ if (!label->is_bound()) __ bind(label);
+ }
+ }
+
+ void CheckStackSizeLimit(Decoder* decoder) {
+ DCHECK_GE(__ cache_state()->stack_height(), __ num_locals());
+ int stack_height = __ cache_state()->stack_height() - __ num_locals();
+ if (stack_height > LiftoffAssembler::kMaxValueStackHeight) {
+ unsupported(decoder, "value stack grows too large");
+ }
+ }
+
+ void StartFunction(Decoder* decoder) {
+ int num_locals = decoder->NumLocals();
+ __ set_num_locals(num_locals);
+ for (int i = 0; i < num_locals; ++i) {
+ __ set_local_type(i, decoder->GetLocalType(i));
+ }
+ }
+
+ void StartFunctionBody(Decoder* decoder, Control* block) {
+ if (!kLiftoffAssemblerImplementedOnThisPlatform) {
+ unsupported(decoder, "platform");
+ return;
+ }
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ __ ReserveStackSpace(__ GetTotalFrameSlotCount());
+ // Parameter 0 is the wasm context.
+ uint32_t num_params =
+ static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
+ for (uint32_t i = 0; i < __ num_locals(); ++i) {
+ // We can currently only handle i32 parameters and locals.
+ if (__ local_type(i) != kWasmI32) {
+ unsupported(decoder, "non-i32 param/local");
+ return;
+ }
+ }
+ // Input 0 is the call target, the context is at 1.
+ constexpr int kContextParameterIndex = 1;
+ // Store the context parameter to a special stack slot.
+ compiler::LinkageLocation context_loc =
+ call_desc_->GetInputLocation(kContextParameterIndex);
+ DCHECK(context_loc.IsRegister());
+ DCHECK(!context_loc.IsAnyRegister());
+ Register context_reg = Register::from_code(context_loc.AsRegister());
+ __ SpillContext(context_reg);
+ uint32_t param_idx = 0;
+ for (; param_idx < num_params; ++param_idx) {
+ constexpr uint32_t kFirstActualParamIndex = kContextParameterIndex + 1;
+ ValueType type = __ local_type(param_idx);
+ compiler::LinkageLocation param_loc =
+ call_desc_->GetInputLocation(param_idx + kFirstActualParamIndex);
+ if (param_loc.IsRegister()) {
+ DCHECK(!param_loc.IsAnyRegister());
+ Register param_reg = Register::from_code(param_loc.AsRegister());
+ if (param_reg.bit() & __ kGpCacheRegs) {
+ // This is a cache register, just use it.
+ __ PushRegister(type, param_reg);
+ } else {
+ // No cache register. Push to the stack.
+ __ Spill(param_idx, param_reg);
+ __ cache_state()->stack_state.emplace_back(type);
+ }
+ } else if (param_loc.IsCallerFrameSlot()) {
+ Register tmp_reg = __ GetUnusedRegister(reg_class_for(type));
+ __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
+ __ PushRegister(type, tmp_reg);
+ } else {
+ UNIMPLEMENTED();
+ }
+ }
+ for (; param_idx < __ num_locals(); ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ switch (type) {
+ case kWasmI32:
+ __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+ block->label_state.stack_base = __ num_locals();
+ DCHECK_EQ(__ num_locals(), param_idx);
+ DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
+ CheckStackSizeLimit(decoder);
+ }
+
+ void FinishFunction(Decoder* decoder) {}
+
+ void OnFirstError(Decoder* decoder) {
+ ok_ = false;
+ BindUnboundLabels(decoder);
+ }
+
+ void Block(Decoder* decoder, Control* new_block) {
+ // Note: This is called for blocks and loops.
+ DCHECK_EQ(new_block, decoder->control_at(0));
+
+ new_block->label_state.stack_base = __ cache_state()->stack_height();
+
+ if (new_block->is_loop()) {
+ // Before entering a loop, spill all locals to the stack, in order to free
+ // the cache registers, and to avoid unnecessarily reloading stack values
+ // into registers at branches.
+ // TODO(clemensh): Come up with a better strategy here, involving
+ // pre-analysis of the function.
+ __ SpillLocals();
+
+ // Loop labels bind at the beginning of the block, block labels at the
+ // end.
+ __ bind(new_block->label.get());
+
+ new_block->label_state.Split(*__ cache_state());
+ }
+ }
+
+ void Loop(Decoder* decoder, Control* block) { Block(decoder, block); }
+
+ void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
+ void If(Decoder* decoder, const Value& cond, Control* if_block) {
+ unsupported(decoder, "if");
+ }
+
+ void FallThruTo(Decoder* decoder, Control* c) {
+ if (c->end_merge.reached) {
+ __ MergeFullStackWith(c->label_state);
+ } else {
+ c->label_state.Split(*__ cache_state());
+ }
+ }
+
+ void PopControl(Decoder* decoder, Control* c) {
+ if (!c->is_loop() && c->end_merge.reached) {
+ __ cache_state()->Steal(c->label_state);
+ }
+ if (!c->label.get()->is_bound()) {
+ __ bind(c->label.get());
+ }
+ }
+
+ void EndControl(Decoder* decoder, Control* c) {}
+
+ void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+ const Value& value, Value* result) {
+ unsupported(decoder, "unary operation");
+ }
+
+ void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+ const Value& lhs, const Value& rhs, Value* result) {
+ void (LiftoffAssembler::*emit_fn)(Register, Register, Register);
+#define CASE_EMIT_FN(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ emit_fn = &LiftoffAssembler::emit_##fn; \
+ break;
+ switch (opcode) {
+ CASE_EMIT_FN(I32Add, i32_add)
+ CASE_EMIT_FN(I32Sub, i32_sub)
+ CASE_EMIT_FN(I32Mul, i32_mul)
+ CASE_EMIT_FN(I32And, i32_and)
+ CASE_EMIT_FN(I32Ior, i32_or)
+ CASE_EMIT_FN(I32Xor, i32_xor)
+ default:
+ return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ }
+#undef CASE_EMIT_FN
+
+ LiftoffAssembler::PinnedRegisterScope pinned_regs;
+ Register target_reg = pinned_regs.pin(__ GetBinaryOpTargetRegister(kGpReg));
+ Register rhs_reg = pinned_regs.pin(__ PopToRegister(kGpReg, pinned_regs));
+ Register lhs_reg = __ PopToRegister(kGpReg, pinned_regs);
+ (asm_->*emit_fn)(target_reg, lhs_reg, rhs_reg);
+ __ PushRegister(kWasmI32, target_reg);
+ }
+
+ void I32Const(Decoder* decoder, Value* result, int32_t value) {
+ __ cache_state()->stack_state.emplace_back(kWasmI32, value);
+ CheckStackSizeLimit(decoder);
+ }
+
+ void I64Const(Decoder* decoder, Value* result, int64_t value) {
+ unsupported(decoder, "i64.const");
+ }
+ void F32Const(Decoder* decoder, Value* result, float value) {
+ unsupported(decoder, "f32.const");
+ }
+ void F64Const(Decoder* decoder, Value* result, double value) {
+ unsupported(decoder, "f64.const");
+ }
+
+ void Drop(Decoder* decoder, const Value& value) {
+ __ DropStackSlot(&__ cache_state()->stack_state.back());
+ __ cache_state()->stack_state.pop_back();
+ }
+
+ void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
+ if (implicit) {
+ DCHECK_EQ(1, decoder->control_depth());
+ Control* func_block = decoder->control_at(0);
+ __ bind(func_block->label.get());
+ __ cache_state()->Steal(func_block->label_state);
+ }
+ if (!values.is_empty()) {
+ if (values.size() > 1) return unsupported(decoder, "multi-return");
+ // TODO(clemensh): Handle other types.
+ if (values[0].type != kWasmI32)
+ return unsupported(decoder, "non-i32 return");
+ Register reg = __ PopToRegister(kGpReg);
+ __ MoveToReturnRegister(reg);
+ }
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
+ }
+
+ void GetLocal(Decoder* decoder, Value* result,
+ const LocalIndexOperand<validate>& operand) {
+ auto& slot = __ cache_state()->stack_state[operand.index];
+ switch (slot.loc()) {
+ case kRegister:
+ __ PushRegister(operand.type, slot.reg());
+ break;
+ case kConstant:
+ __ cache_state()->stack_state.emplace_back(operand.type,
+ slot.i32_const());
+ break;
+ case kStack: {
+ auto rc = reg_class_for(operand.type);
+ Register reg = __ GetUnusedRegister(rc);
+ __ Fill(reg, operand.index);
+ __ PushRegister(operand.type, reg);
+ } break;
+ }
+ CheckStackSizeLimit(decoder);
+ }
+
+ void SetLocal(uint32_t local_index, bool is_tee) {
+ auto& state = *__ cache_state();
+ auto& source_slot = state.stack_state.back();
+ auto& target_slot = state.stack_state[local_index];
+ switch (source_slot.loc()) {
+ case kRegister:
+ __ DropStackSlot(&target_slot);
+ target_slot = source_slot;
+ if (is_tee) state.inc_used(target_slot.reg());
+ break;
+ case kConstant:
+ __ DropStackSlot(&target_slot);
+ target_slot = source_slot;
+ break;
+ case kStack: {
+ switch (target_slot.loc()) {
+ case kRegister:
+ if (state.register_use_count[target_slot.reg().code()] == 1) {
+ __ Fill(target_slot.reg(), state.stack_height() - 1);
+ break;
+ } else {
+ state.dec_used(target_slot.reg());
+ // and fall through to use a new register.
+ }
+ case kConstant:
+ case kStack: {
+ ValueType type = __ local_type(local_index);
+ Register target_reg = __ GetUnusedRegister(reg_class_for(type));
+ __ Fill(target_reg, state.stack_height() - 1);
+ target_slot = LiftoffAssembler::VarState(type, target_reg);
+ state.inc_used(target_reg);
+ } break;
+ }
+ break;
+ }
+ }
+ if (!is_tee) __ cache_state()->stack_state.pop_back();
+ }
+
+ void SetLocal(Decoder* decoder, const Value& value,
+ const LocalIndexOperand<validate>& operand) {
+ SetLocal(operand.index, false);
+ }
+
+ void TeeLocal(Decoder* decoder, const Value& value, Value* result,
+ const LocalIndexOperand<validate>& operand) {
+ SetLocal(operand.index, true);
+ }
+
+ void GetGlobal(Decoder* decoder, Value* result,
+ const GlobalIndexOperand<validate>& operand) {
+ const auto* global = &env_->module->globals[operand.index];
+ if (global->type != kWasmI32 && global->type != kWasmI64)
+ return unsupported(decoder, "non-int global");
+ LiftoffAssembler::PinnedRegisterScope pinned;
+ Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
+ __ LoadFromContext(addr, offsetof(WasmContext, globals_start),
+ kPointerSize);
+ Register value =
+ pinned.pin(__ GetUnusedRegister(reg_class_for(global->type), pinned));
+ int size = 1 << ElementSizeLog2Of(global->type);
+ if (size > kPointerSize)
+ return unsupported(decoder, "global > kPointerSize");
+ __ Load(value, addr, global->offset, size, pinned);
+ __ PushRegister(global->type, value);
+ }
+
+ void SetGlobal(Decoder* decoder, const Value& value,
+ const GlobalIndexOperand<validate>& operand) {
+ auto* global = &env_->module->globals[operand.index];
+ if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
+ LiftoffAssembler::PinnedRegisterScope pinned;
+ Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
+ __ LoadFromContext(addr, offsetof(WasmContext, globals_start),
+ kPointerSize);
+ Register reg =
+ pinned.pin(__ PopToRegister(reg_class_for(global->type), pinned));
+ int size = 1 << ElementSizeLog2Of(global->type);
+ __ Store(addr, global->offset, reg, size, pinned);
+ }
+
+ void Unreachable(Decoder* decoder) { unsupported(decoder, "unreachable"); }
+
+ void Select(Decoder* decoder, const Value& cond, const Value& fval,
+ const Value& tval, Value* result) {
+ unsupported(decoder, "select");
+ }
+
+ void Br(Decoder* decoder, Control* target) {
+ if (!target->br_merge()->reached) {
+ target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
+ target->br_merge()->arity);
+ }
+ __ MergeStackWith(target->label_state, target->br_merge()->arity);
+ __ jmp(target->label.get());
+ }
+
+ void BrIf(Decoder* decoder, const Value& cond, Control* target) {
+ Label cont_false;
+ Register value = __ PopToRegister(kGpReg);
+ __ JumpIfZero(value, &cont_false);
+
+ Br(decoder, target);
+ __ bind(&cont_false);
+ }
+
+ void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
+ const Value& key) {
+ unsupported(decoder, "br_table");
+ }
+ void Else(Decoder* decoder, Control* if_block) {
+ unsupported(decoder, "else");
+ }
+ void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
+ const MemoryAccessOperand<validate>& operand, const Value& index,
+ Value* result) {
+ unsupported(decoder, "memory load");
+ }
+ void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
+ const MemoryAccessOperand<validate>& operand,
+ const Value& index, const Value& value) {
+ unsupported(decoder, "memory store");
+ }
+ void CurrentMemoryPages(Decoder* decoder, Value* result) {
+ unsupported(decoder, "current_memory");
+ }
+ void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
+ unsupported(decoder, "grow_memory");
+ }
+ void CallDirect(Decoder* decoder,
+ const CallFunctionOperand<validate>& operand,
+ const Value args[], Value returns[]) {
+ unsupported(decoder, "call");
+ }
+ void CallIndirect(Decoder* decoder, const Value& index,
+ const CallIndirectOperand<validate>& operand,
+ const Value args[], Value returns[]) {
+ unsupported(decoder, "call_indirect");
+ }
+ void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ Value* result) {
+ unsupported(decoder, "simd");
+ }
+ void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
+ const SimdLaneOperand<validate>& operand,
+ const Vector<Value> inputs, Value* result) {
+ unsupported(decoder, "simd");
+ }
+ void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
+ const SimdShiftOperand<validate>& operand,
+ const Value& input, Value* result) {
+ unsupported(decoder, "simd");
+ }
+ void Simd8x16ShuffleOp(Decoder* decoder,
+ const Simd8x16ShuffleOperand<validate>& operand,
+ const Value& input0, const Value& input1,
+ Value* result) {
+ unsupported(decoder, "simd");
+ }
+ void Throw(Decoder* decoder, const ExceptionIndexOperand<validate>&,
+ Control* block, const Vector<Value>& args) {
+ unsupported(decoder, "throw");
+ }
+ void CatchException(Decoder* decoder,
+ const ExceptionIndexOperand<validate>& operand,
+ Control* block, Vector<Value> caught_values) {
+ unsupported(decoder, "catch");
+ }
+ void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ const MemoryAccessOperand<validate>& operand, Value* result) {
+ unsupported(decoder, "atomicop");
+ }
+
+ private:
+ LiftoffAssembler* asm_;
+ compiler::CallDescriptor* call_desc_;
+ compiler::ModuleEnv* env_;
+ bool ok_ = true;
+};
+
+} // namespace
+} // namespace wasm
+
+bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
+ base::ElapsedTimer compile_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ compile_timer.Start();
+ }
+
+ Zone zone(isolate_->allocator(), "LiftoffCompilationZone");
+ const wasm::WasmModule* module = env_ ? env_->module : nullptr;
+ auto* call_desc = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
+ wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
+ decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_);
+ decoder.Decode();
+ if (!decoder.interface().ok()) {
+ // Liftoff compilation failed.
+ isolate_->counters()->liftoff_unsupported_functions()->Increment();
+ return false;
+ }
+ if (decoder.failed()) return false; // Validation error
+
+ if (FLAG_trace_wasm_decode_time) {
+ double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation liftoff phase 1 ok: %u bytes, %0.3f ms decode and "
+ "compile\n",
+ static_cast<unsigned>(func_body_.end - func_body_.start), compile_ms);
+ }
+
+ // Record the memory cost this unit places on the system until
+ // it is finalized.
+ memory_cost_ = liftoff_.asm_.pc_offset();
+ isolate_->counters()->liftoff_compiled_functions()->Increment();
+ return true;
+}
+
+#undef __
+#undef TRACE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h
new file mode 100644
index 0000000000..edc52d74b6
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(clemensh): Implement the LiftoffAssembler on this platform.
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
new file mode 100644
index 0000000000..bc3ec1667e
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {}
+
+void LiftoffAssembler::SpillContext(Register context) {}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
+
+#define DEFAULT_I32_BINOP(name, internal_name) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) {}
+
+// clang-format off
+DEFAULT_I32_BINOP(add, add)
+DEFAULT_I32_BINOP(sub, sub)
+DEFAULT_I32_BINOP(mul, imul)
+DEFAULT_I32_BINOP(and, and)
+DEFAULT_I32_BINOP(or, or)
+DEFAULT_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h
new file mode 100644
index 0000000000..1652562515
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(clemensh): Implement the LiftoffAssembler on this platform.
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
new file mode 100644
index 0000000000..2a10d0712e
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {}
+
+void LiftoffAssembler::SpillContext(Register context) {}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
+
+#define DEFAULT_I32_BINOP(name, internal_name) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) {}
+
+// clang-format off
+DEFAULT_I32_BINOP(add, add)
+DEFAULT_I32_BINOP(sub, sub)
+DEFAULT_I32_BINOP(mul, imul)
+DEFAULT_I32_BINOP(and, and)
+DEFAULT_I32_BINOP(or, or)
+DEFAULT_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h
new file mode 100644
index 0000000000..b0d1317166
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(clemensh): Implement the LiftoffAssembler on this platform.
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
new file mode 100644
index 0000000000..55a1475efe
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {}
+
+void LiftoffAssembler::SpillContext(Register context) {}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
+
+#define DEFAULT_I32_BINOP(name, internal_name) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) {}
+
+// clang-format off
+DEFAULT_I32_BINOP(add, add)
+DEFAULT_I32_BINOP(sub, sub)
+DEFAULT_I32_BINOP(mul, imul)
+DEFAULT_I32_BINOP(and, and)
+DEFAULT_I32_BINOP(or, or)
+DEFAULT_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h
new file mode 100644
index 0000000000..e60dfb923b
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(clemensh): Implement the LiftoffAssembler on this platform.
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
new file mode 100644
index 0000000000..1c56971a20
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {}
+
+void LiftoffAssembler::SpillContext(Register context) {}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
+
+#define DEFAULT_I32_BINOP(name, internal_name) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) {}
+
+// clang-format off
+DEFAULT_I32_BINOP(add, add)
+DEFAULT_I32_BINOP(sub, sub)
+DEFAULT_I32_BINOP(mul, imul)
+DEFAULT_I32_BINOP(and, and)
+DEFAULT_I32_BINOP(or, or)
+DEFAULT_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h
new file mode 100644
index 0000000000..ce568eab97
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+
+static constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
new file mode 100644
index 0000000000..559965ab96
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -0,0 +1,190 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+#include "src/assembler.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline Operand GetStackSlot(uint32_t index) {
+ // rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
+ // is located at rbp-24.
+ constexpr int32_t kStackSlotSize = 8;
+ constexpr int32_t kFirstStackSlotOffset = -24;
+ return Operand(rbp, kFirstStackSlotOffset - index * kStackSlotSize);
+}
+
+// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
+inline Operand GetContextOperand() { return Operand(rbp, -16); }
+
+} // namespace liftoff
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
+ stack_space_ = space;
+ subl(rsp, Immediate(space));
+}
+
+void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
+ switch (value.type()) {
+ case kWasmI32:
+ if (value.to_i32() == 0) {
+ xorl(reg, reg);
+ } else {
+ movl(reg, Immediate(value.to_i32()));
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
+ int size) {
+ DCHECK_LE(offset, kMaxInt);
+ movp(dst, liftoff::GetContextOperand());
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ movl(dst, Operand(dst, offset));
+ } else {
+ movq(dst, Operand(dst, offset));
+ }
+}
+
+void LiftoffAssembler::SpillContext(Register context) {
+ movp(liftoff::GetContextOperand(), context);
+}
+
+void LiftoffAssembler::Load(Register dst, Register src_addr,
+ uint32_t offset_imm, int size,
+ PinnedRegisterScope pinned) {
+ Operand src_op = Operand(src_addr, offset_imm);
+ if (offset_imm > kMaxInt) {
+ // The immediate can not be encoded in the operand. Load it to a register
+ // first.
+ Register src = GetUnusedRegister(kGpReg, pinned);
+ movl(src, Immediate(offset_imm));
+ src_op = Operand(src_addr, src, times_1, 0);
+ }
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ movl(dst, src_op);
+ } else {
+ movq(dst, src_op);
+ }
+}
+
+void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
+ Register src, int size,
+ PinnedRegisterScope pinned) {
+ Operand dst_op = Operand(dst_addr, offset_imm);
+ if (offset_imm > kMaxInt) {
+ // The immediate can not be encoded in the operand. Load it to a register
+ // first.
+ Register dst = GetUnusedRegister(kGpReg, pinned);
+ movl(dst, Immediate(offset_imm));
+ dst_op = Operand(dst_addr, dst, times_1, 0);
+ }
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ movl(dst_op, src);
+ } else {
+ movp(dst_op, src);
+ }
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+ uint32_t caller_slot_idx) {
+ constexpr int32_t kStackSlotSize = 8;
+ movl(dst, Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ DCHECK_NE(dst_index, src_index);
+ if (cache_state_.has_unused_register()) {
+ Register reg = GetUnusedRegister(kGpReg);
+ Fill(reg, src_index);
+ Spill(dst_index, reg);
+ } else {
+ pushq(liftoff::GetStackSlot(src_index));
+ popq(liftoff::GetStackSlot(dst_index));
+ }
+}
+
+void LiftoffAssembler::MoveToReturnRegister(Register reg) {
+ // TODO(clemensh): Handle different types here.
+ if (reg != rax) movl(rax, reg);
+}
+
+void LiftoffAssembler::Spill(uint32_t index, Register reg) {
+ // TODO(clemensh): Handle different types here.
+ movl(liftoff::GetStackSlot(index), reg);
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ // TODO(clemensh): Handle different types here.
+ movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+}
+
+void LiftoffAssembler::Fill(Register reg, uint32_t index) {
+ // TODO(clemensh): Handle different types here.
+ movl(reg, liftoff::GetStackSlot(index));
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
+ if (lhs != dst) {
+ leal(dst, Operand(lhs, rhs, times_1, 0));
+ } else {
+ addl(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
+ if (dst == rhs) {
+ negl(dst);
+ addl(dst, lhs);
+ } else {
+ if (dst != lhs) movl(dst, lhs);
+ subl(dst, rhs);
+ }
+}
+
+#define COMMUTATIVE_I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ if (dst == rhs) { \
+ instruction##l(dst, lhs); \
+ } else { \
+ if (dst != lhs) movl(dst, lhs); \
+ instruction##l(dst, rhs); \
+ } \
+ }
+
+// clang-format off
+COMMUTATIVE_I32_BINOP(mul, imul)
+COMMUTATIVE_I32_BINOP(and, and)
+COMMUTATIVE_I32_BINOP(or, or)
+COMMUTATIVE_I32_BINOP(xor, xor)
+// clang-format on
+
+#undef DEFAULT_I32_BINOP
+
+void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
+ testl(reg, reg);
+ j(zero, label);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 87373100f5..9c0fa268f3 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -43,6 +43,12 @@ using DecodeResult = Result<std::nullptr_t>;
// a buffer of bytes.
class Decoder {
public:
+ enum ValidateFlag : bool { kValidate = true, kNoValidate = false };
+
+ enum AdvancePCFlag : bool { kAdvancePc = true, kNoAdvancePc = false };
+
+ enum TraceFlag : bool { kTrace = true, kNoTrace = false };
+
Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
: start_(start), pc_(start), end_(end), buffer_offset_(buffer_offset) {}
Decoder(const byte* start, const byte* pc, const byte* end,
@@ -51,7 +57,7 @@ class Decoder {
virtual ~Decoder() {}
- inline bool check(const byte* pc, uint32_t length, const char* msg) {
+ inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
if (V8_UNLIKELY(pc + length > end_)) {
error(pc, msg);
@@ -61,58 +67,62 @@ class Decoder {
}
// Reads an 8-bit unsigned integer.
- template <bool checked>
+ template <ValidateFlag validate>
inline uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
- return read_little_endian<uint8_t, checked>(pc, msg);
+ return read_little_endian<uint8_t, validate>(pc, msg);
}
// Reads a 16-bit unsigned integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
inline uint16_t read_u16(const byte* pc,
const char* msg = "expected 2 bytes") {
- return read_little_endian<uint16_t, checked>(pc, msg);
+ return read_little_endian<uint16_t, validate>(pc, msg);
}
// Reads a 32-bit unsigned integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
inline uint32_t read_u32(const byte* pc,
const char* msg = "expected 4 bytes") {
- return read_little_endian<uint32_t, checked>(pc, msg);
+ return read_little_endian<uint32_t, validate>(pc, msg);
}
// Reads a 64-bit unsigned integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
inline uint64_t read_u64(const byte* pc,
const char* msg = "expected 8 bytes") {
- return read_little_endian<uint64_t, checked>(pc, msg);
+ return read_little_endian<uint64_t, validate>(pc, msg);
}
// Reads a variable-length unsigned integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
uint32_t read_u32v(const byte* pc, uint32_t* length,
const char* name = "LEB32") {
- return read_leb<uint32_t, checked, false, false>(pc, length, name);
+ return read_leb<uint32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
+ name);
}
// Reads a variable-length signed integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
int32_t read_i32v(const byte* pc, uint32_t* length,
const char* name = "signed LEB32") {
- return read_leb<int32_t, checked, false, false>(pc, length, name);
+ return read_leb<int32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
+ name);
}
// Reads a variable-length unsigned integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
uint64_t read_u64v(const byte* pc, uint32_t* length,
const char* name = "LEB64") {
- return read_leb<uint64_t, checked, false, false>(pc, length, name);
+ return read_leb<uint64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
+ name);
}
// Reads a variable-length signed integer (little endian).
- template <bool checked>
+ template <ValidateFlag validate>
int64_t read_i64v(const byte* pc, uint32_t* length,
const char* name = "signed LEB64") {
- return read_leb<int64_t, checked, false, false>(pc, length, name);
+ return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
+ name);
}
// Reads a 8-bit unsigned integer (byte) and advances {pc_}.
@@ -133,13 +143,14 @@ class Decoder {
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint32_t, true, true, true>(pc_, &length, name);
+ return read_leb<uint32_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
+ name);
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<int32_t, true, true, true>(pc_, &length, name);
+ return read_leb<int32_t, kValidate, kAdvancePc, kTrace>(pc_, &length, name);
}
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
@@ -261,11 +272,11 @@ class Decoder {
std::string error_msg_;
private:
- template <typename IntType, bool checked>
+ template <typename IntType, bool validate>
inline IntType read_little_endian(const byte* pc, const char* msg) {
- if (!checked) {
- DCHECK(check(pc, sizeof(IntType), msg));
- } else if (!check(pc, sizeof(IntType), msg)) {
+ if (!validate) {
+ DCHECK(validate_size(pc, sizeof(IntType), msg));
+ } else if (!validate_size(pc, sizeof(IntType), msg)) {
return IntType{0};
}
return ReadLittleEndianValue<IntType>(pc);
@@ -286,17 +297,18 @@ class Decoder {
return val;
}
- template <typename IntType, bool checked, bool advance_pc, bool trace>
+ template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
+ TraceFlag trace>
inline IntType read_leb(const byte* pc, uint32_t* length,
const char* name = "varint") {
DCHECK_IMPLIES(advance_pc, pc == pc_);
TRACE_IF(trace, " +%u %-20s: ", pc_offset(), name);
- return read_leb_tail<IntType, checked, advance_pc, trace, 0>(pc, length,
- name, 0);
+ return read_leb_tail<IntType, validate, advance_pc, trace, 0>(pc, length,
+ name, 0);
}
- template <typename IntType, bool checked, bool advance_pc, bool trace,
- int byte_index>
+ template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
+ TraceFlag trace, int byte_index>
IntType read_leb_tail(const byte* pc, uint32_t* length, const char* name,
IntType result) {
constexpr bool is_signed = std::is_signed<IntType>::value;
@@ -304,7 +316,7 @@ class Decoder {
static_assert(byte_index < kMaxLength, "invalid template instantiation");
constexpr int shift = byte_index * 7;
constexpr bool is_last_byte = byte_index == kMaxLength - 1;
- const bool at_end = checked && pc >= end_;
+ const bool at_end = validate && pc >= end_;
byte b = 0;
if (!at_end) {
DCHECK_LT(pc, end_);
@@ -317,12 +329,12 @@ class Decoder {
// Compilers are not smart enough to figure out statically that the
// following call is unreachable if is_last_byte is false.
constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
- return read_leb_tail<IntType, checked, advance_pc, trace,
+ return read_leb_tail<IntType, validate, advance_pc, trace,
next_byte_index>(pc + 1, length, name, result);
}
if (advance_pc) pc_ = pc + (at_end ? 0 : 1);
*length = byte_index + (at_end ? 0 : 1);
- if (checked && (at_end || (b & 0x80))) {
+ if (validate && (at_end || (b & 0x80))) {
TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
errorf(pc, "expected %s", name);
result = 0;
@@ -341,7 +353,7 @@ class Decoder {
bool valid_extra_bits =
checked_bits == 0 ||
(is_signed && checked_bits == kSignExtendedExtraBits);
- if (!checked) {
+ if (!validate) {
DCHECK(valid_extra_bits);
} else if (!valid_extra_bits) {
error(pc, "extra bits in varint");
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index de17401752..ffbf85cde8 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -81,13 +81,14 @@ struct WasmException;
V(I32AtomicStore8U, Uint8) \
V(I32AtomicStore16U, Uint16)
-template <typename T>
-Vector<T> vec2vec(std::vector<T>& vec) {
+template <typename T, typename Allocator>
+Vector<T> vec2vec(std::vector<T, Allocator>& vec) {
return Vector<T>(vec.data(), vec.size());
}
+
// Helpers for decoding different kinds of operands which follow bytecodes.
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct LocalIndexOperand {
uint32_t index;
ValueType type = kWasmStmt;
@@ -98,7 +99,7 @@ struct LocalIndexOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct ExceptionIndexOperand {
uint32_t index;
const WasmException* exception = nullptr;
@@ -109,7 +110,7 @@ struct ExceptionIndexOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct ImmI32Operand {
int32_t value;
unsigned length;
@@ -118,7 +119,7 @@ struct ImmI32Operand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct ImmI64Operand {
int64_t value;
unsigned length;
@@ -127,7 +128,7 @@ struct ImmI64Operand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct ImmF32Operand {
float value;
unsigned length = 4;
@@ -138,7 +139,7 @@ struct ImmF32Operand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct ImmF64Operand {
double value;
unsigned length = 8;
@@ -149,7 +150,7 @@ struct ImmF64Operand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct GlobalIndexOperand {
uint32_t index;
ValueType type = kWasmStmt;
@@ -161,53 +162,33 @@ struct GlobalIndexOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct BlockTypeOperand {
- uint32_t arity = 0;
- const byte* types = nullptr; // pointer to encoded types for the block.
unsigned length = 1;
+ ValueType type = kWasmStmt;
+ uint32_t sig_index = 0;
+ FunctionSig* sig = nullptr;
inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
uint8_t val = decoder->read_u8<validate>(pc + 1, "block type");
- ValueType type = kWasmStmt;
- if (decode_local_type(val, &type)) {
- arity = type == kWasmStmt ? 0 : 1;
- types = pc + 1;
- } else {
+ if (!decode_local_type(val, &type)) {
// Handle multi-value blocks.
if (!VALIDATE(FLAG_experimental_wasm_mv)) {
- decoder->error(pc + 1, "invalid block arity > 1");
- return;
- }
- if (!VALIDATE(val == kMultivalBlock)) {
decoder->error(pc + 1, "invalid block type");
return;
}
- // Decode and check the types vector of the block.
- unsigned len = 0;
- uint32_t count =
- decoder->read_u32v<validate>(pc + 2, &len, "block arity");
- // {count} is encoded as {arity-2}, so that a {0} count here corresponds
- // to a block with 2 values. This makes invalid/redundant encodings
- // impossible.
- arity = count + 2;
- length = 1 + len + arity;
- types = pc + 1 + 1 + len;
-
- for (uint32_t i = 0; i < arity; i++) {
- uint32_t offset = 1 + 1 + len + i;
- val = decoder->read_u8<validate>(pc + offset, "block type");
- decode_local_type(val, &type);
- if (!VALIDATE(type != kWasmStmt)) {
- decoder->error(pc + offset, "invalid block type");
- return;
- }
+ int32_t index =
+ decoder->read_i32v<validate>(pc + 1, &length, "block arity");
+ if (!VALIDATE(length > 0 && index >= 0)) {
+ decoder->error(pc + 1, "invalid block type index");
+ return;
}
+ sig_index = static_cast<uint32_t>(index);
}
}
// Decode a byte representing a local type. Return {false} if the encoded
- // byte was invalid or {kMultivalBlock}.
+ // byte was invalid or the start of a type index.
inline bool decode_local_type(uint8_t val, ValueType* result) {
switch (static_cast<ValueTypeCode>(val)) {
case kLocalVoid:
@@ -229,22 +210,33 @@ struct BlockTypeOperand {
*result = kWasmS128;
return true;
default:
- *result = kWasmStmt;
+ *result = kWasmVar;
return false;
}
}
- ValueType read_entry(unsigned index) {
- DCHECK_LT(index, arity);
- ValueType result;
- bool success = decode_local_type(types[index], &result);
- DCHECK(success);
- USE(success);
- return result;
+ uint32_t in_arity() const {
+ if (type != kWasmVar) return 0;
+ return static_cast<uint32_t>(sig->parameter_count());
+ }
+ uint32_t out_arity() const {
+ if (type == kWasmStmt) return 0;
+ if (type != kWasmVar) return 1;
+ return static_cast<uint32_t>(sig->return_count());
+ }
+ ValueType in_type(uint32_t index) {
+ DCHECK_EQ(kWasmVar, type);
+ return sig->GetParam(index);
+ }
+ ValueType out_type(uint32_t index) {
+ if (type == kWasmVar) return sig->GetReturn(index);
+ DCHECK_NE(kWasmStmt, type);
+ DCHECK_EQ(0, index);
+ return type;
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct BreakDepthOperand {
uint32_t depth;
unsigned length;
@@ -253,7 +245,7 @@ struct BreakDepthOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct CallIndirectOperand {
uint32_t table_index;
uint32_t index;
@@ -271,7 +263,7 @@ struct CallIndirectOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct CallFunctionOperand {
uint32_t index;
FunctionSig* sig = nullptr;
@@ -281,7 +273,7 @@ struct CallFunctionOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct MemoryIndexOperand {
uint32_t index;
unsigned length = 1;
@@ -293,7 +285,7 @@ struct MemoryIndexOperand {
}
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct BranchTableOperand {
uint32_t table_count;
const byte* start;
@@ -308,7 +300,7 @@ struct BranchTableOperand {
};
// A helper to iterate over a branch table.
-template <bool validate>
+template <Decoder::ValidateFlag validate>
class BranchTableIterator {
public:
unsigned cur_index() { return index_; }
@@ -346,7 +338,7 @@ class BranchTableIterator {
uint32_t table_count_; // the count of entries, not including default.
};
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
@@ -370,7 +362,7 @@ struct MemoryAccessOperand {
};
// Operand for SIMD lane operations.
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct SimdLaneOperand {
uint8_t lane;
unsigned length = 1;
@@ -381,7 +373,7 @@ struct SimdLaneOperand {
};
// Operand for SIMD shift operations.
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct SimdShiftOperand {
uint8_t shift;
unsigned length = 1;
@@ -392,7 +384,7 @@ struct SimdShiftOperand {
};
// Operand for SIMD S8x16 shuffle operations.
-template <bool validate>
+template <Decoder::ValidateFlag validate>
struct Simd8x16ShuffleOperand {
uint8_t shuffle[kSimd128Size];
@@ -422,13 +414,19 @@ struct Merge {
Value first;
} vals; // Either multiple values or a single value.
+ // Tracks whether this merge was ever reached. Uses precise reachability, like
+ // Reachability::kReachable.
+ bool reached;
+
+ Merge(bool reached = false) : reached(reached) {}
+
Value& operator[](uint32_t i) {
DCHECK_GT(arity, i);
return arity == 1 ? vals.first : vals.array[i];
}
};
-enum ControlKind {
+enum ControlKind : uint8_t {
kControlIf,
kControlIfElse,
kControlBlock,
@@ -437,41 +435,73 @@ enum ControlKind {
kControlTryCatch
};
+enum Reachability : uint8_t {
+ // reachable code.
+ kReachable,
+ // reachable code in unreachable block (implies normal validation).
+ kSpecOnlyReachable,
+ // code unreachable in its own block (implies polymorphic validation).
+ kUnreachable
+};
+
// An entry on the control stack (i.e. if, block, loop, or try).
template <typename Value>
struct ControlBase {
- const byte* pc;
ControlKind kind;
uint32_t stack_depth; // stack height at the beginning of the construct.
- bool unreachable; // The current block has been ended.
+ const byte* pc;
+ Reachability reachability = kReachable;
+
+ // Values merged into the start or end of this control construct.
+ Merge<Value> start_merge;
+ Merge<Value> end_merge;
- // Values merged into the end of this control construct.
- Merge<Value> merge;
+ ControlBase() = default;
+ ControlBase(ControlKind kind, uint32_t stack_depth, const byte* pc)
+ : kind(kind), stack_depth(stack_depth), pc(pc) {}
- inline bool is_if() const { return is_onearmed_if() || is_if_else(); }
- inline bool is_onearmed_if() const { return kind == kControlIf; }
- inline bool is_if_else() const { return kind == kControlIfElse; }
- inline bool is_block() const { return kind == kControlBlock; }
- inline bool is_loop() const { return kind == kControlLoop; }
- inline bool is_try() const { return is_incomplete_try() || is_try_catch(); }
- inline bool is_incomplete_try() const { return kind == kControlTry; }
- inline bool is_try_catch() const { return kind == kControlTryCatch; }
+ // Check whether the current block is reachable.
+ bool reachable() const { return reachability == kReachable; }
+
+ // Check whether the rest of the block is unreachable.
+ // Note that this is different from {!reachable()}, as there is also the
+ // "indirect unreachable state", for which both {reachable()} and
+ // {unreachable()} return false.
+ bool unreachable() const { return reachability == kUnreachable; }
+
+ // Return the reachability of new control structs started in this block.
+ Reachability innerReachability() const {
+ return reachability == kReachable ? kReachable : kSpecOnlyReachable;
+ }
+
+ bool is_if() const { return is_onearmed_if() || is_if_else(); }
+ bool is_onearmed_if() const { return kind == kControlIf; }
+ bool is_if_else() const { return kind == kControlIfElse; }
+ bool is_block() const { return kind == kControlBlock; }
+ bool is_loop() const { return kind == kControlLoop; }
+ bool is_try() const { return is_incomplete_try() || is_try_catch(); }
+ bool is_incomplete_try() const { return kind == kControlTry; }
+ bool is_try_catch() const { return kind == kControlTryCatch; }
+
+ inline Merge<Value>* br_merge() {
+ return is_loop() ? &this->start_merge : &this->end_merge;
+ }
// Named constructors.
- static ControlBase Block(const byte* pc, size_t stack_depth) {
- return {pc, kControlBlock, static_cast<uint32_t>(stack_depth), false, {}};
+ static ControlBase Block(const byte* pc, uint32_t stack_depth) {
+ return {kControlBlock, stack_depth, pc};
}
- static ControlBase If(const byte* pc, size_t stack_depth) {
- return {pc, kControlIf, static_cast<uint32_t>(stack_depth), false, {}};
+ static ControlBase If(const byte* pc, uint32_t stack_depth) {
+ return {kControlIf, stack_depth, pc};
}
- static ControlBase Loop(const byte* pc, size_t stack_depth) {
- return {pc, kControlLoop, static_cast<uint32_t>(stack_depth), false, {}};
+ static ControlBase Loop(const byte* pc, uint32_t stack_depth) {
+ return {kControlLoop, stack_depth, pc};
}
- static ControlBase Try(const byte* pc, size_t stack_depth) {
- return {pc, kControlTry, static_cast<uint32_t>(stack_depth), false, {}};
+ static ControlBase Try(const byte* pc, uint32_t stack_depth) {
+ return {kControlTry, stack_depth, pc};
}
};
@@ -519,13 +549,14 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(StartFunction) \
F(StartFunctionBody, Control* block) \
F(FinishFunction) \
+ F(OnFirstError) \
/* Control: */ \
F(Block, Control* block) \
F(Loop, Control* block) \
F(Try, Control* block) \
F(If, const Value& cond, Control* if_block) \
F(FallThruTo, Control* c) \
- F(PopControl, const Control& block) \
+ F(PopControl, Control* block) \
F(EndControl, Control* block) \
/* Instructions: */ \
F(UnOp, WasmOpcode opcode, FunctionSig*, const Value& value, Value* result) \
@@ -535,7 +566,8 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(I64Const, Value* result, int64_t value) \
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
- F(DoReturn, Vector<Value> values) \
+ F(Drop, const Value& value) \
+ F(DoReturn, Vector<Value> values, bool implicit) \
F(GetLocal, Value* result, const LocalIndexOperand<validate>& operand) \
F(SetLocal, const Value& value, const LocalIndexOperand<validate>& operand) \
F(TeeLocal, const Value& value, Value* result, \
@@ -546,8 +578,8 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
- F(BreakTo, uint32_t depth) \
- F(BrIf, const Value& cond, uint32_t depth) \
+ F(Br, Control* target) \
+ F(BrIf, const Value& cond, Control* target) \
F(BrTable, const BranchTableOperand<validate>& operand, const Value& key) \
F(Else, Control* if_block) \
F(LoadMem, ValueType type, MachineType mem_type, \
@@ -579,7 +611,7 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
-template <bool validate>
+template <Decoder::ValidateFlag validate>
class WasmDecoder : public Decoder {
public:
WasmDecoder(const WasmModule* module, FunctionSig* sig, const byte* start,
@@ -676,7 +708,7 @@ class WasmDecoder : public Decoder {
break;
case kExprSetLocal: // fallthru
case kExprTeeLocal: {
- LocalIndexOperand<validate> operand(decoder, pc);
+ LocalIndexOperand<Decoder::kValidate> operand(decoder, pc);
if (assigned->length() > 0 &&
operand.index < static_cast<uint32_t>(assigned->length())) {
// Unverified code might have an out-of-bounds index.
@@ -688,9 +720,9 @@ class WasmDecoder : public Decoder {
case kExprGrowMemory:
case kExprCallFunction:
case kExprCallIndirect:
- // Add mem_size and mem_start to the assigned set.
- assigned->Add(locals_count - 2); // mem_size
- assigned->Add(locals_count - 1); // mem_start
+ // Add context cache nodes to the assigned set.
+ // TODO(titzer): make this more clear.
+ assigned->Add(locals_count - 1);
length = OpcodeLength(decoder, pc);
break;
case kExprEnd:
@@ -706,7 +738,8 @@ class WasmDecoder : public Decoder {
return decoder->ok() ? assigned : nullptr;
}
- inline bool Validate(const byte* pc, LocalIndexOperand<validate>& operand) {
+ inline bool Validate(const byte* pc,
+ LocalIndexOperand<Decoder::kValidate>& operand) {
if (!VALIDATE(operand.index < total_locals())) {
errorf(pc + 1, "invalid local index: %u", operand.index);
return false;
@@ -916,7 +949,7 @@ class WasmDecoder : public Decoder {
case kExprSetLocal:
case kExprTeeLocal:
case kExprGetLocal: {
- LocalIndexOperand<validate> operand(decoder, pc);
+ LocalIndexOperand<Decoder::kValidate> operand(decoder, pc);
return 1 + operand.length;
}
case kExprBrTable: {
@@ -993,13 +1026,13 @@ class WasmDecoder : public Decoder {
std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
+ }
// Handle "simple" opcodes with a fixed signature first.
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) return {sig->parameter_count(), sig->return_count()};
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
- }
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
// clang-format off
@@ -1060,18 +1093,32 @@ class WasmDecoder : public Decoder {
}
};
-template <bool validate, typename Interface>
+#define CALL_INTERFACE(name, ...) interface_.name(this, ##__VA_ARGS__)
+#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
+ do { \
+ DCHECK(!control_.empty()); \
+ if (this->ok() && control_.back().reachable()) { \
+ interface_.name(this, ##__VA_ARGS__); \
+ } \
+ } while (false)
+#define CALL_INTERFACE_IF_PARENT_REACHABLE(name, ...) \
+ do { \
+ DCHECK(!control_.empty()); \
+ if (this->ok() && (control_.size() == 1 || control_at(1)->reachable())) { \
+ interface_.name(this, ##__VA_ARGS__); \
+ } \
+ } while (false)
+
+template <Decoder::ValidateFlag validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
using MergeValues = Merge<Value>;
- // All Value and Control types should be trivially copyable for
- // performance. We push and pop them, and store them in local variables.
+ // All Value types should be trivially copyable for performance. We push, pop,
+ // and store them in local variables.
static_assert(IS_TRIVIALLY_COPYABLE(Value),
"all Value<...> types should be trivially copyable");
- static_assert(IS_TRIVIALLY_COPYABLE(Control),
- "all Control<...> types should be trivially copyable");
public:
template <typename... InterfaceArgs>
@@ -1084,6 +1131,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
local_type_vec_(zone),
stack_(zone),
control_(zone),
+ args_(zone),
last_end_found_(false) {
this->local_types_ = &local_type_vec_;
}
@@ -1109,9 +1157,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_EQ(0, this->local_types_->size());
WasmDecoder<validate>::DecodeLocals(this, this->sig_, this->local_types_);
- interface_.StartFunction(this);
+ CALL_INTERFACE(StartFunction);
DecodeFunctionBody();
- if (!this->failed()) interface_.FinishFunction(this);
+ if (!this->failed()) CALL_INTERFACE(FinishFunction);
if (this->failed()) return this->TraceFailed();
@@ -1175,10 +1223,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
inline Control* control_at(uint32_t depth) {
DCHECK_GT(control_.size(), depth);
- return &control_[control_.size() - depth - 1];
+ return &control_.back() - depth;
}
inline uint32_t stack_size() const {
+ DCHECK_GE(kMaxUInt32, stack_.size());
return static_cast<uint32_t>(stack_.size());
}
@@ -1187,10 +1236,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &stack_[stack_.size() - depth - 1];
}
- inline Value& GetMergeValueFromStack(Control* c, uint32_t i) {
- DCHECK_GT(c->merge.arity, i);
- DCHECK_GE(stack_.size(), c->stack_depth + c->merge.arity);
- return stack_[stack_.size() - c->merge.arity + i];
+ inline Value& GetMergeValueFromStack(
+ Control* c, Merge<Value>* merge, uint32_t i) {
+ DCHECK(merge == &c->start_merge || merge == &c->end_merge);
+ DCHECK_GT(merge->arity, i);
+ DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
+ return stack_[stack_.size() - merge->arity + i];
}
private:
@@ -1203,6 +1254,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<ValueType> local_type_vec_; // types of local variables.
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
+ ZoneVector<Value> args_; // parameters of current block or call
bool last_end_found_;
bool CheckHasMemory() {
@@ -1231,18 +1283,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Set up initial function block.
{
auto* c = PushBlock();
- c->merge.arity = static_cast<uint32_t>(this->sig_->return_count());
-
- if (c->merge.arity == 1) {
- c->merge.vals.first = Value::New(this->pc_, this->sig_->GetReturn(0));
- } else if (c->merge.arity > 1) {
- c->merge.vals.array = zone_->NewArray<Value>(c->merge.arity);
- for (unsigned i = 0; i < c->merge.arity; i++) {
- c->merge.vals.array[i] =
- Value::New(this->pc_, this->sig_->GetReturn(i));
- }
- }
- interface_.StartFunctionBody(this, c);
+ InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
+ InitMerge(&c->end_merge,
+ static_cast<uint32_t>(this->sig_->return_count()),
+ [&] (uint32_t i) {
+ return Value::New(this->pc_, this->sig_->GetReturn(i)); });
+ CALL_INTERFACE(StartFunctionBody, c);
}
while (this->pc_ < this->end_) { // decoding loop.
@@ -1265,10 +1311,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
case kExprBlock: {
BlockTypeOperand<validate> operand(this, this->pc_);
+ if (!LookupBlockType(&operand)) break;
+ PopArgs(operand.sig);
auto* block = PushBlock();
- SetBlockType(block, operand);
+ SetBlockType(block, operand, args_);
+ CALL_INTERFACE_IF_REACHABLE(Block, block);
+ PushMergeValues(block, &block->start_merge);
len = 1 + operand.length;
- interface_.Block(this, block);
break;
}
case kExprRethrow: {
@@ -1279,27 +1328,31 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprThrow: {
CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexOperand<true> operand(this, this->pc_);
+ ExceptionIndexOperand<Decoder::kValidate> operand(this, this->pc_);
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
- std::vector<Value> args;
- PopArgs(operand.exception->ToFunctionSig(), &args);
- interface_.Throw(this, operand, &control_.back(), vec2vec(args));
+ PopArgs(operand.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_REACHABLE(Throw, operand, &control_.back(),
+ vec2vec(args_));
+ EndControl();
break;
}
case kExprTry: {
CHECK_PROTOTYPE_OPCODE(eh);
BlockTypeOperand<validate> operand(this, this->pc_);
+ if (!LookupBlockType(&operand)) break;
+ PopArgs(operand.sig);
auto* try_block = PushTry();
- SetBlockType(try_block, operand);
+ SetBlockType(try_block, operand, args_);
len = 1 + operand.length;
- interface_.Try(this, try_block);
+ CALL_INTERFACE_IF_REACHABLE(Try, try_block);
+ PushMergeValues(try_block, &try_block->start_merge);
break;
}
case kExprCatch: {
// TODO(kschimpf): Fix to use type signature of exception.
CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexOperand<true> operand(this, this->pc_);
+ ExceptionIndexOperand<Decoder::kValidate> operand(this, this->pc_);
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
@@ -1328,7 +1381,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
Vector<Value> values(stack_.data() + c->stack_depth,
sig->parameter_count());
- interface_.CatchException(this, operand, c, values);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(CatchException, operand, c,
+ values);
+ c->reachability = control_at(1)->innerReachability();
break;
}
case kExprCatchAll: {
@@ -1339,21 +1394,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprLoop: {
BlockTypeOperand<validate> operand(this, this->pc_);
- // The continue environment is the inner environment.
+ if (!LookupBlockType(&operand)) break;
+ PopArgs(operand.sig);
auto* block = PushLoop();
- SetBlockType(&control_.back(), operand);
+ SetBlockType(&control_.back(), operand, args_);
len = 1 + operand.length;
- interface_.Loop(this, block);
+ CALL_INTERFACE_IF_REACHABLE(Loop, block);
+ PushMergeValues(block, &block->start_merge);
break;
}
case kExprIf: {
- // Condition on top of stack. Split environments for branches.
BlockTypeOperand<validate> operand(this, this->pc_);
+ if (!LookupBlockType(&operand)) break;
auto cond = Pop(0, kWasmI32);
+ PopArgs(operand.sig);
+ if (!this->ok()) break;
auto* if_block = PushIf();
- SetBlockType(if_block, operand);
- interface_.If(this, cond, if_block);
+ SetBlockType(if_block, operand, args_);
+ CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
len = 1 + operand.length;
+ PushMergeValues(if_block, &if_block->start_merge);
break;
}
case kExprElse: {
@@ -1372,8 +1432,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
c->kind = kControlIfElse;
FallThruTo(c);
- stack_.resize(c->stack_depth);
- interface_.Else(this, c);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ PushMergeValues(c, &c->start_merge);
+ c->reachability = control_at(1)->innerReachability();
break;
}
case kExprEnd: {
@@ -1382,28 +1443,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return;
}
Control* c = &control_.back();
- if (c->is_loop()) {
- // A loop just leaves the values on the stack.
- TypeCheckFallThru(c);
- PopControl(c);
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->error(this->pc_, "missing catch in try");
break;
}
if (c->is_onearmed_if()) {
- // End the true branch of a one-armed if.
- if (!VALIDATE(c->unreachable ||
- stack_.size() == c->stack_depth)) {
- this->error("end of if expected empty stack");
- stack_.resize(c->stack_depth);
- }
- if (!VALIDATE(c->merge.arity == 0)) {
- this->error("non-void one-armed if");
- }
- } else if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch in try");
- break;
+ // Emulate empty else arm.
+ FallThruTo(c);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ PushMergeValues(c, &c->start_merge);
+ c->reachability = control_at(1)->innerReachability();
}
+
FallThruTo(c);
- PushEndValues(c);
+ // A loop just leaves the values on the stack.
+ if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
@@ -1415,7 +1469,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// The result of the block is the return value.
TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
"(implicit) return");
- DoReturn();
+ DoReturn(c, true);
TRACE("\n");
}
@@ -1427,15 +1481,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto fval = Pop();
auto tval = Pop(0, fval.type);
auto* result = Push(tval.type == kWasmVar ? fval.type : tval.type);
- interface_.Select(this, cond, fval, tval, result);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
break;
}
case kExprBr: {
BreakDepthOperand<validate> operand(this, this->pc_);
- if (this->Validate(this->pc_, operand, control_.size()) &&
- TypeCheckBreak(operand.depth)) {
- interface_.BreakTo(this, operand.depth);
- }
+ if (!this->Validate(this->pc_, operand, control_.size())) break;
+ Control* c = control_at(operand.depth);
+ if (!TypeCheckBreak(c)) break;
+ CALL_INTERFACE_IF_REACHABLE(Br, c);
+ BreakTo(c);
len = 1 + operand.length;
EndControl();
break;
@@ -1443,10 +1498,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrIf: {
BreakDepthOperand<validate> operand(this, this->pc_);
auto cond = Pop(0, kWasmI32);
- if (this->Validate(this->pc_, operand, control_.size()) &&
- TypeCheckBreak(operand.depth)) {
- interface_.BrIf(this, cond, operand.depth);
- }
+ if (!this->Validate(this->pc_, operand, control_.size())) break;
+ Control* c = control_at(operand.depth);
+ if (!TypeCheckBreak(c)) break;
+ CALL_INTERFACE_IF_REACHABLE(BrIf, cond, c);
+ BreakTo(c);
len = 1 + operand.length;
break;
}
@@ -1466,7 +1522,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
// Check that label types match up.
Control* c = control_at(target);
- uint32_t arity = c->is_loop() ? 0 : c->merge.arity;
+ uint32_t arity = c->br_merge()->arity;
if (i == 0) {
br_arity = arity;
} else if (!VALIDATE(br_arity == arity)) {
@@ -1475,91 +1531,81 @@ class WasmFullDecoder : public WasmDecoder<validate> {
" (previous was %u, this one %u)",
i, br_arity, arity);
}
- if (!VALIDATE(TypeCheckBreak(target))) break;
+ if (!TypeCheckBreak(c)) break;
+ BreakTo(c);
}
- if (!VALIDATE(this->ok())) break;
- if (operand.table_count > 0) {
- interface_.BrTable(this, operand, key);
- } else {
- // Only a default target. Do the equivalent of br.
- BranchTableIterator<validate> iterator(this, operand);
- const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(target < control_.size())) {
- this->error(pos, "improper branch in br_table");
- break;
- }
- interface_.BreakTo(this, target);
- }
+ CALL_INTERFACE_IF_REACHABLE(BrTable, operand, key);
+
len = 1 + iterator.length();
EndControl();
break;
}
case kExprReturn: {
- DoReturn();
+ DoReturn(&control_.back(), false);
break;
}
case kExprUnreachable: {
- interface_.Unreachable(this);
+ CALL_INTERFACE_IF_REACHABLE(Unreachable);
EndControl();
break;
}
case kExprI32Const: {
ImmI32Operand<validate> operand(this, this->pc_);
auto* value = Push(kWasmI32);
- interface_.I32Const(this, value, operand.value);
+ CALL_INTERFACE_IF_REACHABLE(I32Const, value, operand.value);
len = 1 + operand.length;
break;
}
case kExprI64Const: {
ImmI64Operand<validate> operand(this, this->pc_);
auto* value = Push(kWasmI64);
- interface_.I64Const(this, value, operand.value);
+ CALL_INTERFACE_IF_REACHABLE(I64Const, value, operand.value);
len = 1 + operand.length;
break;
}
case kExprF32Const: {
ImmF32Operand<validate> operand(this, this->pc_);
auto* value = Push(kWasmF32);
- interface_.F32Const(this, value, operand.value);
+ CALL_INTERFACE_IF_REACHABLE(F32Const, value, operand.value);
len = 1 + operand.length;
break;
}
case kExprF64Const: {
ImmF64Operand<validate> operand(this, this->pc_);
auto* value = Push(kWasmF64);
- interface_.F64Const(this, value, operand.value);
+ CALL_INTERFACE_IF_REACHABLE(F64Const, value, operand.value);
len = 1 + operand.length;
break;
}
case kExprGetLocal: {
- LocalIndexOperand<validate> operand(this, this->pc_);
+ LocalIndexOperand<Decoder::kValidate> operand(this, this->pc_);
if (!this->Validate(this->pc_, operand)) break;
auto* value = Push(operand.type);
- interface_.GetLocal(this, value, operand);
+ CALL_INTERFACE_IF_REACHABLE(GetLocal, value, operand);
len = 1 + operand.length;
break;
}
case kExprSetLocal: {
- LocalIndexOperand<validate> operand(this, this->pc_);
+ LocalIndexOperand<Decoder::kValidate> operand(this, this->pc_);
if (!this->Validate(this->pc_, operand)) break;
auto value = Pop(0, local_type_vec_[operand.index]);
- interface_.SetLocal(this, value, operand);
+ CALL_INTERFACE_IF_REACHABLE(SetLocal, value, operand);
len = 1 + operand.length;
break;
}
case kExprTeeLocal: {
- LocalIndexOperand<validate> operand(this, this->pc_);
+ LocalIndexOperand<Decoder::kValidate> operand(this, this->pc_);
if (!this->Validate(this->pc_, operand)) break;
auto value = Pop(0, local_type_vec_[operand.index]);
auto* result = Push(value.type);
- interface_.TeeLocal(this, value, result, operand);
+ CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, operand);
len = 1 + operand.length;
break;
}
case kExprDrop: {
- Pop();
+ auto value = Pop();
+ CALL_INTERFACE_IF_REACHABLE(Drop, value);
break;
}
case kExprGetGlobal: {
@@ -1567,7 +1613,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
auto* result = Push(operand.type);
- interface_.GetGlobal(this, result, operand);
+ CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, operand);
break;
}
case kExprSetGlobal: {
@@ -1580,7 +1626,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
auto value = Pop(0, operand.type);
- interface_.SetGlobal(this, value, operand);
+ CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, operand);
break;
}
case kExprI32LoadMem8S:
@@ -1663,7 +1709,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
auto value = Pop(0, kWasmI32);
auto* result = Push(kWasmI32);
- interface_.GrowMemory(this, value, result);
+ CALL_INTERFACE_IF_REACHABLE(GrowMemory, value, result);
break;
}
case kExprMemorySize: {
@@ -1671,7 +1717,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexOperand<validate> operand(this, this->pc_);
auto* result = Push(kWasmI32);
len = 1 + operand.length;
- interface_.CurrentMemoryPages(this, result);
+ CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
break;
}
case kExprCallFunction: {
@@ -1679,10 +1725,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
// TODO(clemensh): Better memory management.
- std::vector<Value> args;
- PopArgs(operand.sig, &args);
+ PopArgs(operand.sig);
auto* returns = PushReturns(operand.sig);
- interface_.CallDirect(this, operand, args.data(), returns);
+ CALL_INTERFACE_IF_REACHABLE(CallDirect, operand, args_.data(),
+ returns);
break;
}
case kExprCallIndirect: {
@@ -1690,11 +1736,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
auto index = Pop(0, kWasmI32);
- // TODO(clemensh): Better memory management.
- std::vector<Value> args;
- PopArgs(operand.sig, &args);
+ PopArgs(operand.sig);
auto* returns = PushReturns(operand.sig);
- interface_.CallIndirect(this, index, operand, args.data(), returns);
+ CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, operand,
+ args_.data(), returns);
break;
}
case kSimdPrefix: {
@@ -1738,9 +1783,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#if DEBUG
if (FLAG_trace_wasm_decoder) {
PrintF(" ");
- for (size_t i = 0; i < control_.size(); ++i) {
- Control* c = &control_[i];
- switch (c->kind) {
+ for (Control& c : control_) {
+ switch (c.kind) {
case kControlIf:
PrintF("I");
break;
@@ -1756,8 +1800,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
default:
break;
}
- PrintF("%u", c->merge.arity);
- if (c->unreachable) PrintF("*");
+ if (c.start_merge.arity) PrintF("%u-", c.start_merge.arity);
+ PrintF("%u", c.end_merge.arity);
+ if (!c.reachable()) PrintF("%c", c.unreachable() ? '*' : '#');
}
PrintF(" | ");
for (size_t i = 0; i < stack_.size(); ++i) {
@@ -1778,7 +1823,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprGetLocal:
case kExprSetLocal:
case kExprTeeLocal: {
- LocalIndexOperand<validate> operand(this, val.pc);
+ LocalIndexOperand<Decoder::kValidate> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
@@ -1804,29 +1849,54 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK(!control_.empty());
auto* current = &control_.back();
stack_.resize(current->stack_depth);
- current->unreachable = true;
- interface_.EndControl(this, current);
- }
-
- void SetBlockType(Control* c, BlockTypeOperand<validate>& operand) {
- c->merge.arity = operand.arity;
- if (c->merge.arity == 1) {
- c->merge.vals.first = Value::New(this->pc_, operand.read_entry(0));
- } else if (c->merge.arity > 1) {
- c->merge.vals.array = zone_->NewArray<Value>(c->merge.arity);
- for (unsigned i = 0; i < c->merge.arity; i++) {
- c->merge.vals.array[i] = Value::New(this->pc_, operand.read_entry(i));
+ CALL_INTERFACE_IF_REACHABLE(EndControl, current);
+ current->reachability = kUnreachable;
+ }
+
+ bool LookupBlockType(BlockTypeOperand<validate>* operand) {
+ if (operand->type == kWasmVar) {
+ if (!VALIDATE(this->module_ &&
+ operand->sig_index < this->module_->signatures.size())) {
+ this->errorf(
+ this->pc_, "block type index %u out of bounds (%d signatures)",
+ operand->sig_index,
+ static_cast<int>(this->module_
+ ? this->module_->signatures.size() : 0));
+ return false;
+ }
+ operand->sig = this->module_->signatures[operand->sig_index];
+ }
+ return true;
+ }
+
+ template<typename func>
+ void InitMerge(Merge<Value>* merge, uint32_t arity, func get_val) {
+ merge->arity = arity;
+ if (arity == 1) {
+ merge->vals.first = get_val(0);
+ } else if (arity > 1) {
+ merge->vals.array = zone_->NewArray<Value>(arity);
+ for (unsigned i = 0; i < arity; i++) {
+ merge->vals.array[i] = get_val(i);
}
}
}
- // TODO(clemensh): Better memory management.
- void PopArgs(FunctionSig* sig, std::vector<Value>* result) {
- DCHECK(result->empty());
- int count = static_cast<int>(sig->parameter_count());
- result->resize(count);
+ void SetBlockType(Control* c, BlockTypeOperand<validate>& operand,
+ ZoneVector<Value>& params) {
+ InitMerge(&c->end_merge, operand.out_arity(),
+ [&] (uint32_t i) {
+ return Value::New(this->pc_, operand.out_type(i)); });
+ InitMerge(&c->start_merge, operand.in_arity(),
+ [&] (uint32_t i) { return params[i]; });
+ }
+
+ // Pops arguments as required by signature into {args_}.
+ V8_INLINE void PopArgs(FunctionSig* sig) {
+ int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
+ args_.resize(count);
for (int i = count - 1; i >= 0; --i) {
- (*result)[i] = Pop(i, sig->GetParam(i));
+ args_[i] = Pop(i, sig->GetParam(i));
}
}
@@ -1835,31 +1905,40 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
- Control* PushBlock() {
- control_.emplace_back(Control::Block(this->pc_, stack_.size()));
- return &control_.back();
+ Control* PushControl(Control&& new_control) {
+ Reachability reachability =
+ control_.empty() ? kReachable : control_.back().innerReachability();
+ control_.emplace_back(std::move(new_control));
+ Control* c = &control_.back();
+ c->reachability = reachability;
+ c->start_merge.reached = c->reachable();
+ return c;
}
+ Control* PushBlock() {
+ return PushControl(Control::Block(this->pc_, stack_size()));
+ }
Control* PushLoop() {
- control_.emplace_back(Control::Loop(this->pc_, stack_.size()));
- return &control_.back();
+ return PushControl(Control::Loop(this->pc_, stack_size()));
}
-
Control* PushIf() {
- control_.emplace_back(Control::If(this->pc_, stack_.size()));
- return &control_.back();
+ return PushControl(Control::If(this->pc_, stack_size()));
}
-
Control* PushTry() {
- control_.emplace_back(Control::Try(this->pc_, stack_.size()));
// current_catch_ = static_cast<int32_t>(control_.size() - 1);
- return &control_.back();
+ return PushControl(Control::Try(this->pc_, stack_size()));
}
void PopControl(Control* c) {
DCHECK_EQ(c, &control_.back());
- interface_.PopControl(this, *c);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(PopControl, c);
+ bool reached = c->end_merge.reached;
control_.pop_back();
+ // If the parent block was reachable before, but the popped control does not
+ // return to here, this block becomes indirectly unreachable.
+ if (!control_.empty() && !reached && control_.back().reachable()) {
+ control_.back().reachability = kSpecOnlyReachable;
+ }
}
int DecodeLoadMem(ValueType type, MachineType mem_type) {
@@ -1869,7 +1948,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto index = Pop(0, kWasmI32);
auto* result = Push(type);
- interface_.LoadMem(this, type, mem_type, operand, index, result);
+ CALL_INTERFACE_IF_REACHABLE(LoadMem, type, mem_type, operand, index,
+ result);
return 1 + operand.length;
}
@@ -1879,7 +1959,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this, this->pc_, ElementSizeLog2Of(mem_type.representation()));
auto value = Pop(1, type);
auto index = Pop(0, kWasmI32);
- interface_.StoreMem(this, type, mem_type, operand, index, value);
+ CALL_INTERFACE_IF_REACHABLE(StoreMem, type, mem_type, operand, index,
+ value);
return 1 + operand.length;
}
@@ -1890,7 +1971,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto index = Pop(0, kWasmI32);
auto* result = Push(type);
- interface_.LoadMem(this, type, mem_type, operand, index, result);
+ CALL_INTERFACE_IF_REACHABLE(LoadMem, type, mem_type, operand, index,
+ result);
return operand.length;
}
@@ -1900,7 +1982,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this, this->pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
auto value = Pop(1, type);
auto index = Pop(0, kWasmI32);
- interface_.StoreMem(this, type, mem_type, operand, index, value);
+ CALL_INTERFACE_IF_REACHABLE(StoreMem, type, mem_type, operand, index,
+ value);
return operand.length;
}
@@ -1909,7 +1992,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->Validate(this->pc_, opcode, operand)) {
Value inputs[] = {Pop(0, ValueType::kSimd128)};
auto* result = Push(type);
- interface_.SimdLaneOp(this, opcode, operand, ArrayVector(inputs), result);
+ CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, operand,
+ ArrayVector(inputs), result);
}
return operand.length;
}
@@ -1921,7 +2005,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
inputs[1] = Pop(1, type);
inputs[0] = Pop(0, ValueType::kSimd128);
auto* result = Push(ValueType::kSimd128);
- interface_.SimdLaneOp(this, opcode, operand, ArrayVector(inputs), result);
+ CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, operand,
+ ArrayVector(inputs), result);
}
return operand.length;
}
@@ -1931,7 +2016,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->Validate(this->pc_, opcode, operand)) {
auto input = Pop(0, ValueType::kSimd128);
auto* result = Push(ValueType::kSimd128);
- interface_.SimdShiftOp(this, opcode, operand, input, result);
+ CALL_INTERFACE_IF_REACHABLE(SimdShiftOp, opcode, operand, input, result);
}
return operand.length;
}
@@ -1942,7 +2027,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto input1 = Pop(1, ValueType::kSimd128);
auto input0 = Pop(0, ValueType::kSimd128);
auto* result = Push(ValueType::kSimd128);
- interface_.Simd8x16ShuffleOp(this, operand, input0, input1, result);
+ CALL_INTERFACE_IF_REACHABLE(Simd8x16ShuffleOp, operand, input0, input1,
+ result);
}
return 16;
}
@@ -1998,11 +2084,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("invalid simd opcode");
break;
}
- std::vector<Value> args;
- PopArgs(sig, &args);
- auto* result =
+ PopArgs(sig);
+ auto* results =
sig->return_count() == 0 ? nullptr : Push(GetReturnType(sig));
- interface_.SimdOp(this, opcode, vec2vec(args), result);
+ CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, vec2vec(args_), results);
}
}
return len;
@@ -2011,7 +2096,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
unsigned DecodeAtomicOpcode(WasmOpcode opcode) {
unsigned len = 0;
ValueType ret_type;
- FunctionSig* sig = WasmOpcodes::AtomicSignature(opcode);
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig != nullptr) {
MachineType memtype;
switch (opcode) {
@@ -2033,38 +2118,36 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#undef CASE_ATOMIC_OP
default:
this->error("invalid atomic opcode");
- break;
+ return 0;
}
- // TODO(clemensh): Better memory management here.
- std::vector<Value> args(sig->parameter_count());
MemoryAccessOperand<validate> operand(
this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
len += operand.length;
- for (int i = static_cast<int>(sig->parameter_count() - 1); i >= 0; --i) {
- args[i] = Pop(i, sig->GetParam(i));
- }
+ PopArgs(sig);
auto result = ret_type == MachineRepresentation::kNone
? nullptr
: Push(GetReturnType(sig));
- interface_.AtomicOp(this, opcode, vec2vec(args), operand, result);
+ CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, vec2vec(args_), operand,
+ result);
} else {
this->error("invalid atomic opcode");
}
return len;
}
- void DoReturn() {
- // TODO(clemensh): Optimize memory usage here (it will be mostly 0 or 1
- // returned values).
+ void DoReturn(Control* c, bool implicit) {
int return_count = static_cast<int>(this->sig_->return_count());
- std::vector<Value> values(return_count);
+ args_.resize(return_count);
// Pop return values off the stack in reverse order.
for (int i = return_count - 1; i >= 0; --i) {
- values[i] = Pop(i, this->sig_->GetReturn(i));
+ args_[i] = Pop(i, this->sig_->GetReturn(i));
}
- interface_.DoReturn(this, vec2vec(values));
+ // Simulate that an implicit return morally comes after the current block.
+ if (implicit && c->end_merge.reached) c->reachability = kReachable;
+ CALL_INTERFACE_IF_REACHABLE(DoReturn, vec2vec(args_), implicit);
+
EndControl();
}
@@ -2074,17 +2157,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &stack_.back();
}
- void PushEndValues(Control* c) {
+ void PushMergeValues(Control* c, Merge<Value>* merge) {
DCHECK_EQ(c, &control_.back());
+ DCHECK(merge == &c->start_merge || merge == &c->end_merge);
stack_.resize(c->stack_depth);
- if (c->merge.arity == 1) {
- stack_.push_back(c->merge.vals.first);
+ if (merge->arity == 1) {
+ stack_.push_back(merge->vals.first);
} else {
- for (unsigned i = 0; i < c->merge.arity; i++) {
- stack_.push_back(c->merge.vals.array[i]);
+ for (unsigned i = 0; i < merge->arity; i++) {
+ stack_.push_back(merge->vals.array[i]);
}
}
- DCHECK_EQ(c->stack_depth + c->merge.arity, stack_.size());
+ DCHECK_EQ(c->stack_depth + merge->arity, stack_.size());
}
Value* PushReturns(FunctionSig* sig) {
@@ -2114,7 +2198,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t limit = control_.back().stack_depth;
if (stack_.size() <= limit) {
// Popping past the current control start in reachable code.
- if (!VALIDATE(control_.back().unreachable)) {
+ if (!VALIDATE(control_.back().unreachable())) {
this->errorf(this->pc_, "%s found empty stack",
SafeOpcodeNameAt(this->pc_));
}
@@ -2127,20 +2211,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
+ inline void BreakTo(Control* c) {
+ if (control_.back().reachable()) c->br_merge()->reached = true;
+ }
+
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!TypeCheckFallThru(c)) return;
- c->unreachable = false;
+ if (!c->reachable()) return;
- interface_.FallThruTo(this, c);
+ if (!c->is_loop()) CALL_INTERFACE(FallThruTo, c);
+ c->end_merge.reached = true;
}
- bool TypeCheckMergeValues(Control* c) {
- DCHECK_GE(stack_.size(), c->stack_depth + c->merge.arity);
- // Typecheck the topmost {c->merge.arity} values on the stack.
- for (uint32_t i = 0; i < c->merge.arity; ++i) {
- auto& val = GetMergeValueFromStack(c, i);
- auto& old = c->merge[i];
+ bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
+ DCHECK(merge == &c->start_merge || merge == &c->end_merge);
+ DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
+ // Typecheck the topmost {merge->arity} values on the stack.
+ for (uint32_t i = 0; i < merge->arity; ++i) {
+ auto& val = GetMergeValueFromStack(c, merge, i);
+ auto& old = (*merge)[i];
if (val.type != old.type) {
// If {val.type} is polymorphic, which results from unreachable, make
// it more specific by using the merge value's expected type.
@@ -2161,7 +2251,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool TypeCheckFallThru(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!validate) return true;
- uint32_t expected = c->merge.arity;
+ uint32_t expected = c->end_merge.arity;
DCHECK_GE(stack_.size(), c->stack_depth);
uint32_t actual = static_cast<uint32_t>(stack_.size()) - c->stack_depth;
// Fallthrus must match the arity of the control exactly.
@@ -2173,17 +2263,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return false;
}
- return TypeCheckMergeValues(c);
+ return TypeCheckMergeValues(c, &c->end_merge);
}
- bool TypeCheckBreak(unsigned depth) {
- Control* c = control_at(depth);
- if (c->is_loop()) {
- // This is the inner loop block, which does not have a value.
- return true;
- }
+ bool TypeCheckBreak(Control* c) {
// Breaks must have at least the number of values expected; can have more.
- uint32_t expected = c->merge.arity;
+ uint32_t expected = c->br_merge()->arity;
DCHECK_GE(stack_.size(), control_.back().stack_depth);
uint32_t actual =
static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
@@ -2193,7 +2278,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
expected, startrel(c->pc), actual);
return false;
}
- return TypeCheckMergeValues(c);
+ return TypeCheckMergeValues(c, c->br_merge());
}
inline bool InsertUnreachablesIfNecessary(uint32_t expected,
@@ -2201,7 +2286,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (V8_LIKELY(actual >= expected)) {
return true; // enough actual values are there.
}
- if (!VALIDATE(control_.back().unreachable)) {
+ if (!VALIDATE(control_.back().unreachable())) {
// There aren't enough values on the stack.
return false;
}
@@ -2217,6 +2302,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
virtual void onFirstError() {
this->end_ = this->pc_; // Terminate decoding loop.
TRACE(" !%s\n", this->error_msg_.c_str());
+ CALL_INTERFACE(OnFirstError);
}
inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
@@ -2225,7 +2311,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto val = Pop(0, sig->GetParam(0));
auto* ret =
sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- interface_.UnOp(this, opcode, sig, val, ret);
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, sig, val, ret);
break;
}
case 2: {
@@ -2233,7 +2319,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto lval = Pop(0, sig->GetParam(0));
auto* ret =
sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- interface_.BinOp(this, opcode, sig, lval, rval, ret);
+ CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, sig, lval, rval, ret);
break;
}
default:
@@ -2242,9 +2328,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
};
+#undef CALL_INTERFACE
+#undef CALL_INTERFACE_IF_REACHABLE
+#undef CALL_INTERFACE_IF_PARENT_REACHABLE
+
class EmptyInterface {
public:
- constexpr static bool validate = true;
+ static constexpr wasm::Decoder::ValidateFlag validate =
+ wasm::Decoder::kValidate;
using Value = ValueBase;
using Control = ControlBase<Value>;
using Decoder = WasmFullDecoder<validate, EmptyInterface>;
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index bcd57fe616..e9130f001d 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -27,11 +27,6 @@ namespace wasm {
namespace {
-template <typename T>
-Vector<T> vec2vec(ZoneVector<T>& vec) {
- return Vector<T>(vec.data(), vec.size());
-}
-
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
@@ -42,8 +37,7 @@ struct SsaEnv {
State state;
TFNode* control;
TFNode* effect;
- TFNode* mem_size;
- TFNode* mem_start;
+ compiler::WasmContextCacheNodes context_cache;
TFNode** locals;
bool go() { return state >= kReached; }
@@ -52,26 +46,27 @@ struct SsaEnv {
locals = nullptr;
control = nullptr;
effect = nullptr;
- mem_size = nullptr;
- mem_start = nullptr;
+ context_cache = {0};
}
void SetNotMerged() {
if (state == kMerged) state = kReached;
}
};
-// Macros that build nodes only if there is a graph and the current SSA
-// environment is reachable from start. This avoids problems with malformed
-// TF graphs when decoding inputs that have unreachable code.
-#define BUILD(func, ...) \
- (build(decoder) ? CheckForException(decoder, builder_->func(__VA_ARGS__)) \
- : nullptr)
+#define BUILD(func, ...) \
+ ([&] { \
+ DCHECK(ssa_env_->go()); \
+ DCHECK(decoder->ok()); \
+ return CheckForException(decoder, builder_->func(__VA_ARGS__)); \
+ })()
constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
public:
- using Decoder = WasmFullDecoder<true, WasmGraphBuildingInterface>;
+ static constexpr wasm::Decoder::ValidateFlag validate =
+ wasm::Decoder::kValidate;
+ using Decoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
struct Value : public ValueWithNamedConstructors<Value> {
TFNode* node;
@@ -97,8 +92,7 @@ class WasmGraphBuildingInterface {
SsaEnv* ssa_env =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
uint32_t num_locals = decoder->NumLocals();
- // The '+ 2' here is to accommodate for mem_size and mem_start nodes.
- uint32_t env_count = num_locals + 2;
+ uint32_t env_count = num_locals;
size_t size = sizeof(TFNode*) * env_count;
ssa_env->state = SsaEnv::kReached;
ssa_env->locals =
@@ -131,20 +125,15 @@ class WasmGraphBuildingInterface {
// Initialize effect and control before loading the context.
builder_->set_effect_ptr(&ssa_env->effect);
builder_->set_control_ptr(&ssa_env->control);
- // Always load mem_size and mem_start from the WasmContext into the ssa.
LoadContextIntoSsa(ssa_env);
SetEnv(ssa_env);
}
// Reload the wasm context variables from the WasmContext structure attached
- // to the memory object into the Ssa Environment. This does not automatically
- // set the mem_size_ and mem_start_ pointers in WasmGraphBuilder.
+ // to the memory object into the Ssa Environment.
void LoadContextIntoSsa(SsaEnv* ssa_env) {
if (!ssa_env || !ssa_env->go()) return;
- DCHECK_NOT_NULL(builder_->Effect());
- DCHECK_NOT_NULL(builder_->Control());
- ssa_env->mem_size = builder_->LoadMemSize();
- ssa_env->mem_start = builder_->LoadMemStart();
+ builder_->InitContextCache(&ssa_env->context_cache);
}
void StartFunctionBody(Decoder* decoder, Control* block) {
@@ -157,6 +146,8 @@ class WasmGraphBuildingInterface {
builder_->PatchInStackCheckIfNeeded();
}
+ void OnFirstError(Decoder* decoder) {}
+
void Block(Decoder* decoder, Control* block) {
// The break environment is the outer environment.
block->end_env = ssa_env_;
@@ -169,6 +160,12 @@ class WasmGraphBuildingInterface {
// The continue environment is the inner environment.
SetEnv(PrepareForLoop(decoder, finish_try_env));
ssa_env_->SetNotMerged();
+ if (!decoder->ok()) return;
+ // Wrap input merge into phis.
+ for (unsigned i = 0; i < block->start_merge.arity; ++i) {
+ Value& val = block->start_merge[i];
+ val.node = builder_->Phi(val.type, 1, &val.node, block->end_env->control);
+ }
}
void Try(Decoder* decoder, Control* block) {
@@ -189,7 +186,7 @@ class WasmGraphBuildingInterface {
void If(Decoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- BUILD(BranchNoHint, cond.node, &if_true, &if_false);
+ if (ssa_env_->go()) BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(decoder, ssa_env_);
false_env->control = if_false;
@@ -201,14 +198,12 @@ class WasmGraphBuildingInterface {
}
void FallThruTo(Decoder* decoder, Control* c) {
- MergeValuesInto(decoder, c);
- SetEnv(c->end_env);
+ DCHECK(!c->is_loop());
+ MergeValuesInto(decoder, c, &c->end_merge);
}
- void PopControl(Decoder* decoder, Control& block) {
- if (block.is_onearmed_if()) {
- Goto(decoder, block.false_env, block.end_env);
- }
+ void PopControl(Decoder* decoder, Control* block) {
+ if (!block->is_loop()) SetEnv(block->end_env);
}
void EndControl(Decoder* decoder, Control* block) { ssa_env_->Kill(); }
@@ -240,7 +235,13 @@ class WasmGraphBuildingInterface {
result->node = builder_->Float64Constant(value);
}
- void DoReturn(Decoder* decoder, Vector<Value> values) {
+ void Drop(Decoder* decoder, const Value& value) {}
+
+ void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
+ if (implicit) {
+ DCHECK_EQ(1, decoder->control_depth());
+ SetEnv(decoder->control_at(0)->end_env);
+ }
size_t num_values = values.size();
TFNode** buffer = GetNodes(values);
for (size_t i = 0; i < num_values; ++i) {
@@ -250,31 +251,31 @@ class WasmGraphBuildingInterface {
}
void GetLocal(Decoder* decoder, Value* result,
- const LocalIndexOperand<true>& operand) {
+ const LocalIndexOperand<validate>& operand) {
if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[operand.index];
}
void SetLocal(Decoder* decoder, const Value& value,
- const LocalIndexOperand<true>& operand) {
+ const LocalIndexOperand<validate>& operand) {
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[operand.index] = value.node;
}
void TeeLocal(Decoder* decoder, const Value& value, Value* result,
- const LocalIndexOperand<true>& operand) {
+ const LocalIndexOperand<validate>& operand) {
result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[operand.index] = value.node;
}
void GetGlobal(Decoder* decoder, Value* result,
- const GlobalIndexOperand<true>& operand) {
+ const GlobalIndexOperand<validate>& operand) {
result->node = BUILD(GetGlobal, operand.index);
}
void SetGlobal(Decoder* decoder, const Value& value,
- const GlobalIndexOperand<true>& operand) {
+ const GlobalIndexOperand<validate>& operand) {
BUILD(SetGlobal, operand.index, value.node);
}
@@ -293,41 +294,43 @@ class WasmGraphBuildingInterface {
ssa_env_->control = merge;
}
- void BreakTo(Decoder* decoder, uint32_t depth) {
- Control* target = decoder->control_at(depth);
- if (target->is_loop()) {
- Goto(decoder, ssa_env_, target->end_env);
- } else {
- MergeValuesInto(decoder, target);
- }
+ void Br(Decoder* decoder, Control* target) {
+ MergeValuesInto(decoder, target, target->br_merge());
}
- void BrIf(Decoder* decoder, const Value& cond, uint32_t depth) {
+ void BrIf(Decoder* decoder, const Value& cond, Control* target) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder, fenv);
fenv->SetNotMerged();
BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
ssa_env_ = tenv;
- BreakTo(decoder, depth);
+ Br(decoder, target);
ssa_env_ = fenv;
}
- void BrTable(Decoder* decoder, const BranchTableOperand<true>& operand,
+ void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
const Value& key) {
+ if (operand.table_count == 0) {
+ // Only a default target. Do the equivalent of br.
+ uint32_t target = BranchTableIterator<validate>(decoder, operand).next();
+ Br(decoder, decoder->control_at(target));
+ return;
+ }
+
SsaEnv* break_env = ssa_env_;
// Build branches to the various blocks based on the table.
TFNode* sw = BUILD(Switch, operand.table_count + 1, key.node);
SsaEnv* copy = Steal(decoder->zone(), break_env);
ssa_env_ = copy;
- BranchTableIterator<true> iterator(decoder, operand);
+ BranchTableIterator<validate> iterator(decoder, operand);
while (iterator.has_next()) {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
ssa_env_ = Split(decoder, copy);
ssa_env_->control = (i == operand.table_count) ? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw);
- BreakTo(decoder, target);
+ Br(decoder, decoder->control_at(target));
}
DCHECK(decoder->ok());
ssa_env_ = break_env;
@@ -338,15 +341,15 @@ class WasmGraphBuildingInterface {
}
void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
- const MemoryAccessOperand<true>& operand, const Value& index,
+ const MemoryAccessOperand<validate>& operand, const Value& index,
Value* result) {
result->node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
operand.alignment, decoder->position());
}
void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
- const MemoryAccessOperand<true>& operand, const Value& index,
- const Value& value) {
+ const MemoryAccessOperand<validate>& operand,
+ const Value& index, const Value& value) {
BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
value.node, decoder->position(), type);
}
@@ -357,17 +360,18 @@ class WasmGraphBuildingInterface {
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
result->node = BUILD(GrowMemory, value.node);
- // Reload mem_size and mem_start after growing memory.
+ // Always reload the context cache after growing memory.
LoadContextIntoSsa(ssa_env_);
}
- void CallDirect(Decoder* decoder, const CallFunctionOperand<true>& operand,
+ void CallDirect(Decoder* decoder,
+ const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
DoCall(decoder, nullptr, operand, args, returns, false);
}
void CallIndirect(Decoder* decoder, const Value& index,
- const CallIndirectOperand<true>& operand,
+ const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
DoCall(decoder, index.node, operand, args, returns, true);
}
@@ -380,21 +384,21 @@ class WasmGraphBuildingInterface {
}
void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
- const SimdLaneOperand<true> operand, Vector<Value> inputs,
+ const SimdLaneOperand<validate> operand, Vector<Value> inputs,
Value* result) {
TFNode** nodes = GetNodes(inputs);
result->node = BUILD(SimdLaneOp, opcode, operand.lane, nodes);
}
void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
- const SimdShiftOperand<true> operand, const Value& input,
+ const SimdShiftOperand<validate> operand, const Value& input,
Value* result) {
TFNode* inputs[] = {input.node};
result->node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
}
void Simd8x16ShuffleOp(Decoder* decoder,
- const Simd8x16ShuffleOperand<true>& operand,
+ const Simd8x16ShuffleOperand<validate>& operand,
const Value& input0, const Value& input1,
Value* result) {
TFNode* input_nodes[] = {input0.node, input1.node};
@@ -402,13 +406,13 @@ class WasmGraphBuildingInterface {
}
TFNode* GetExceptionTag(Decoder* decoder,
- const ExceptionIndexOperand<true>& operand) {
+ const ExceptionIndexOperand<validate>& operand) {
// TODO(kschimpf): Need to get runtime exception tag values. This
// code only handles non-imported/exported exceptions.
return BUILD(Int32Constant, operand.index);
}
- void Throw(Decoder* decoder, const ExceptionIndexOperand<true>& operand,
+ void Throw(Decoder* decoder, const ExceptionIndexOperand<validate>& operand,
Control* block, const Vector<Value>& value_args) {
int count = value_args.length();
ZoneVector<TFNode*> args(count, decoder->zone());
@@ -421,7 +425,7 @@ class WasmGraphBuildingInterface {
}
void CatchException(Decoder* decoder,
- const ExceptionIndexOperand<true>& operand,
+ const ExceptionIndexOperand<validate>& operand,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
current_catch_ = block->previous_catch;
@@ -481,7 +485,7 @@ class WasmGraphBuildingInterface {
}
void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
- const MemoryAccessOperand<true>& operand, Value* result) {
+ const MemoryAccessOperand<validate>& operand, Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(AtomicOp, opcode, inputs, operand.alignment,
operand.offset, decoder->position());
@@ -493,8 +497,6 @@ class WasmGraphBuildingInterface {
TFBuilder* builder_;
uint32_t current_catch_ = kNullCatch;
- bool build(Decoder* decoder) { return ssa_env_->go() && decoder->ok(); }
-
TryInfo* current_try_info(Decoder* decoder) {
return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
->try_info;
@@ -541,13 +543,10 @@ class WasmGraphBuildingInterface {
}
#endif
ssa_env_ = env;
- // TODO(wasm): Create a WasmEnv class with control, effect, mem_size and
- // mem_start. SsaEnv can inherit from it. This way WasmEnv can be passed
- // directly to WasmGraphBuilder instead of always copying four pointers.
+ // TODO(wasm): combine the control and effect pointers with context cache.
builder_->set_control_ptr(&env->control);
builder_->set_effect_ptr(&env->effect);
- builder_->set_mem_size(&env->mem_size);
- builder_->set_mem_start(&env->mem_start);
+ builder_->set_context_cache(&env->context_cache);
}
TFNode* CheckForException(Decoder* decoder, TFNode* node) {
@@ -577,8 +576,8 @@ class WasmGraphBuildingInterface {
} else {
DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
try_info->exception =
- CreateOrMergeIntoPhi(kWasmI32, try_info->catch_env->control,
- try_info->exception, if_exception);
+ builder_->CreateOrMergeIntoPhi(kWasmI32, try_info->catch_env->control,
+ try_info->exception, if_exception);
}
SetEnv(success_env);
@@ -602,7 +601,8 @@ class WasmGraphBuildingInterface {
}
}
- void MergeValuesInto(Decoder* decoder, Control* c) {
+ void MergeValuesInto(Decoder* decoder, Control* c, Merge<Value>* merge) {
+ DCHECK(merge == &c->start_merge || merge == &c->end_merge);
if (!ssa_env_->go()) return;
SsaEnv* target = c->end_env;
@@ -611,15 +611,15 @@ class WasmGraphBuildingInterface {
uint32_t avail =
decoder->stack_size() - decoder->control_at(0)->stack_depth;
- uint32_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
- for (uint32_t i = start; i < c->merge.arity; ++i) {
- auto& val = decoder->GetMergeValueFromStack(c, i);
- auto& old = c->merge[i];
+ uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail;
+ for (uint32_t i = start; i < merge->arity; ++i) {
+ auto& val = decoder->GetMergeValueFromStack(c, merge, i);
+ auto& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
DCHECK(val.type == old.type || val.type == kWasmVar);
old.node = first ? val.node
- : CreateOrMergeIntoPhi(old.type, target->control,
- old.node, val.node);
+ : builder_->CreateOrMergeIntoPhi(
+ old.type, target->control, old.node, val.node);
}
}
@@ -632,8 +632,7 @@ class WasmGraphBuildingInterface {
to->locals = from->locals;
to->control = from->control;
to->effect = from->effect;
- to->mem_size = from->mem_size;
- to->mem_start = from->mem_start;
+ to->context_cache = from->context_cache;
break;
}
case SsaEnv::kReached: { // Create a new merge.
@@ -657,47 +656,26 @@ class WasmGraphBuildingInterface {
builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
}
}
- // Merge mem_size and mem_start.
- if (to->mem_size != from->mem_size) {
- TFNode* vals[] = {to->mem_size, from->mem_size};
- to->mem_size =
- builder_->Phi(MachineRepresentation::kWord32, 2, vals, merge);
- }
- if (to->mem_start != from->mem_start) {
- TFNode* vals[] = {to->mem_start, from->mem_start};
- to->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 2,
- vals, merge);
- }
+ // Start a new merge from the context cache.
+ builder_->NewContextCacheMerge(&to->context_cache, &from->context_cache,
+ merge);
break;
}
case SsaEnv::kMerged: {
TFNode* merge = to->control;
- // Extend the existing merge.
+ // Extend the existing merge control node.
builder_->AppendToMerge(merge, from->control);
// Merge effects.
- if (builder_->IsPhiWithMerge(to->effect, merge)) {
- builder_->AppendToPhi(to->effect, from->effect);
- } else if (to->effect != from->effect) {
- uint32_t count = builder_->InputCount(merge);
- TFNode** effects = builder_->Buffer(count);
- for (uint32_t j = 0; j < count - 1; j++) {
- effects[j] = to->effect;
- }
- effects[count - 1] = from->effect;
- to->effect = builder_->EffectPhi(count, effects, merge);
- }
+ to->effect = builder_->CreateOrMergeIntoEffectPhi(merge, to->effect,
+ from->effect);
// Merge locals.
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- to->locals[i] = CreateOrMergeIntoPhi(decoder->GetLocalType(i), merge,
- to->locals[i], from->locals[i]);
+ to->locals[i] = builder_->CreateOrMergeIntoPhi(
+ decoder->GetLocalType(i), merge, to->locals[i], from->locals[i]);
}
- // Merge mem_size and mem_start.
- to->mem_size =
- CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
- to->mem_size, from->mem_size);
- to->mem_start =
- CreateOrMergeIntoPhi(MachineType::PointerRepresentation(), merge,
- to->mem_start, from->mem_start);
+ // Merge the context caches.
+ builder_->MergeContextCacheInto(&to->context_cache,
+ &from->context_cache, merge);
break;
}
default:
@@ -706,20 +684,6 @@ class WasmGraphBuildingInterface {
return from->Kill();
}
- TFNode* CreateOrMergeIntoPhi(ValueType type, TFNode* merge, TFNode* tnode,
- TFNode* fnode) {
- if (builder_->IsPhiWithMerge(tnode, merge)) {
- builder_->AppendToPhi(tnode, fnode);
- } else if (tnode != fnode) {
- uint32_t count = builder_->InputCount(merge);
- TFNode** vals = builder_->Buffer(count);
- for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
- vals[count - 1] = fnode;
- return builder_->Phi(type, count, vals, merge);
- }
- return tnode;
- }
-
SsaEnv* PrepareForLoop(Decoder* decoder, SsaEnv* env) {
if (!env->go()) return Split(decoder, env);
env->state = SsaEnv::kMerged;
@@ -727,27 +691,21 @@ class WasmGraphBuildingInterface {
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
- // The '+ 2' here is to be able to set mem_size and mem_start as assigned.
- BitVector* assigned = WasmDecoder<true>::AnalyzeLoopAssignment(
- decoder, decoder->pc(), decoder->total_locals() + 2, decoder->zone());
+ // The '+ 1' here is to be able to set the context cache as assigned.
+ BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
+ decoder, decoder->pc(), decoder->total_locals() + 1, decoder->zone());
if (decoder->failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
- int mem_size_index = decoder->total_locals();
- int mem_start_index = decoder->total_locals() + 1;
+ int context_cache_index = decoder->total_locals();
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
}
- // Introduce phis for mem_size and mem_start if necessary.
- if (assigned->Contains(mem_size_index)) {
- env->mem_size = builder_->Phi(MachineRepresentation::kWord32, 1,
- &env->mem_size, env->control);
- }
- if (assigned->Contains(mem_start_index)) {
- env->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 1,
- &env->mem_start, env->control);
+ // Introduce phis for context cache pointers if necessary.
+ if (assigned->Contains(context_cache_index)) {
+ builder_->PrepareContextCacheForLoop(&env->context_cache, env->control);
}
SsaEnv* loop_body_env = Split(decoder, env);
@@ -762,11 +720,8 @@ class WasmGraphBuildingInterface {
&env->locals[i], env->control);
}
- // Conservatively introduce phis for mem_size and mem_start.
- env->mem_size = builder_->Phi(MachineRepresentation::kWord32, 1,
- &env->mem_size, env->control);
- env->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 1,
- &env->mem_start, env->control);
+ // Conservatively introduce phis for context cache.
+ builder_->PrepareContextCacheForLoop(&env->context_cache, env->control);
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &loop_body_env->effect,
@@ -780,7 +735,7 @@ class WasmGraphBuildingInterface {
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
// The '+ 2' here is to accommodate for mem_size and mem_start nodes.
- size_t size = sizeof(TFNode*) * (decoder->NumLocals() + 2);
+ size_t size = sizeof(TFNode*) * (decoder->NumLocals());
result->control = from->control;
result->effect = from->effect;
@@ -790,13 +745,11 @@ class WasmGraphBuildingInterface {
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
memcpy(result->locals, from->locals, size);
- result->mem_size = from->mem_size;
- result->mem_start = from->mem_start;
+ result->context_cache = from->context_cache;
} else {
result->state = SsaEnv::kUnreachable;
result->locals = nullptr;
- result->mem_size = nullptr;
- result->mem_start = nullptr;
+ result->context_cache = {0};
}
return result;
@@ -812,8 +765,7 @@ class WasmGraphBuildingInterface {
result->locals = from->locals;
result->control = from->control;
result->effect = from->effect;
- result->mem_size = from->mem_size;
- result->mem_start = from->mem_start;
+ result->context_cache = from->context_cache;
from->Kill(SsaEnv::kUnreachable);
return result;
}
@@ -825,16 +777,14 @@ class WasmGraphBuildingInterface {
result->control = nullptr;
result->effect = nullptr;
result->locals = nullptr;
- result->mem_size = nullptr;
- result->mem_start = nullptr;
+ result->context_cache = {0};
return result;
}
template <typename Operand>
- void DoCall(WasmFullDecoder<true, WasmGraphBuildingInterface>* decoder,
+ void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder,
TFNode* index_node, const Operand& operand, const Value args[],
Value returns[], bool is_indirect) {
- if (!build(decoder)) return;
int param_count = static_cast<int>(operand.sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
@@ -864,7 +814,8 @@ class WasmGraphBuildingInterface {
bool DecodeLocalDecls(BodyLocalDecls* decls, const byte* start,
const byte* end) {
Decoder decoder(start, end);
- if (WasmDecoder<true>::DecodeLocals(&decoder, nullptr, &decls->type_list)) {
+ if (WasmDecoder<Decoder::kValidate>::DecodeLocals(&decoder, nullptr,
+ &decls->type_list)) {
DCHECK(decoder.ok());
decls->encoded_size = decoder.pc_offset();
return true;
@@ -887,7 +838,8 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<true, EmptyInterface> decoder(&zone, module, body);
+ WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(&zone, module,
+ body);
decoder.Decode();
return decoder.toResult(nullptr);
}
@@ -906,7 +858,7 @@ DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<true, WasmGraphBuildingInterface> decoder(
+ WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
&zone, builder->module(), body, builder);
decoder.Decode();
return decoder.toResult(nullptr);
@@ -914,13 +866,13 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
unsigned OpcodeLength(const byte* pc, const byte* end) {
Decoder decoder(pc, end);
- return WasmDecoder<false>::OpcodeLength(&decoder, pc);
+ return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
}
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
FunctionSig* sig, const byte* pc,
const byte* end) {
- WasmDecoder<false> decoder(module, sig, pc, end);
+ WasmDecoder<Decoder::kNoValidate> decoder(module, sig, pc, end);
return decoder.StackEffect(pc);
}
@@ -948,7 +900,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
const wasm::WasmModule* module) {
OFStream os(stdout);
Zone zone(allocator, ZONE_NAME);
- WasmDecoder<false> decoder(module, body.sig, body.start, body.end);
+ WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start,
+ body.end);
int line_nr = 0;
// Print the function signature.
@@ -989,7 +942,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
++line_nr;
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
- unsigned length = WasmDecoder<false>::OpcodeLength(&decoder, i.pc());
+ unsigned length =
+ WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
WasmOpcode opcode = i.current();
if (opcode == kExprElse) control_depth--;
@@ -1016,10 +970,10 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeOperand<false> operand(&i, i.pc());
+ BlockTypeOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << " // @" << i.pc_offset();
- for (unsigned i = 0; i < operand.arity; i++) {
- os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
+ for (unsigned i = 0; i < operand.out_arity(); i++) {
+ os << " " << WasmOpcodes::TypeName(operand.out_type(i));
}
control_depth++;
break;
@@ -1029,22 +983,22 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << " // depth=" << operand.depth;
break;
}
case kExprBrIf: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << " // depth=" << operand.depth;
break;
}
case kExprBrTable: {
- BranchTableOperand<false> operand(&i, i.pc());
+ BranchTableOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << " // entries=" << operand.table_count;
break;
}
case kExprCallIndirect: {
- CallIndirectOperand<false> operand(&i, i.pc());
+ CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << " // sig #" << operand.index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
@@ -1052,7 +1006,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallFunction: {
- CallFunctionOperand<false> operand(&i, i.pc());
+ CallFunctionOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << " // function #" << operand.index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
@@ -1072,7 +1026,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
Decoder decoder(start, end);
- return WasmDecoder<true>::AnalyzeLoopAssignment(
+ return WasmDecoder<Decoder::kValidate>::AnalyzeLoopAssignment(
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index a244e294c8..8df1c8a09e 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -178,7 +178,8 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
}
WasmOpcode current() {
- return static_cast<WasmOpcode>(read_u8<false>(pc_, "expected bytecode"));
+ return static_cast<WasmOpcode>(
+ read_u8<Decoder::kNoValidate>(pc_, "expected bytecode"));
}
void next() {
@@ -191,8 +192,8 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- byte prefix = read_u8<false>(pc_, "expected prefix");
- byte index = read_u8<false>(pc_ + 1, "expected index");
+ byte prefix = read_u8<Decoder::kNoValidate>(pc_, "expected prefix");
+ byte index = read_u8<Decoder::kNoValidate>(pc_ + 1, "expected index");
return static_cast<WasmOpcode>(prefix << 8 | index);
}
};
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index e42c139ce1..4bd52a2a8f 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -9,15 +9,18 @@
#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/property-descriptor.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -42,6 +45,12 @@
do { \
if (FLAG_trace_wasm_streaming) PrintF(__VA_ARGS__); \
} while (false)
+
+#define TRACE_LAZY(...) \
+ do { \
+ if (FLAG_trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \
+ } while (false)
+
static const int kInvalidSigIndex = -1;
namespace v8 {
@@ -51,8 +60,8 @@ namespace wasm {
// A class compiling an entire module.
class ModuleCompiler {
public:
- ModuleCompiler(Isolate* isolate, WasmModule* module,
- Handle<Code> centry_stub);
+ ModuleCompiler(Isolate* isolate, WasmModule* module, Handle<Code> centry_stub,
+ wasm::NativeModule* native_module);
// The actual runnable task that performs compilations in the background.
class CompilationTask : public CancelableTask {
@@ -81,14 +90,16 @@ class ModuleCompiler {
~CompilationUnitBuilder() { DCHECK(units_.empty()); }
- void AddUnit(compiler::ModuleEnv* module_env, const WasmFunction* function,
- uint32_t buffer_offset, Vector<const uint8_t> bytes,
- WasmName name) {
+ void AddUnit(compiler::ModuleEnv* module_env,
+ wasm::NativeModule* native_module,
+ const WasmFunction* function, uint32_t buffer_offset,
+ Vector<const uint8_t> bytes, WasmName name) {
units_.emplace_back(new compiler::WasmCompilationUnit(
- compiler_->isolate_, module_env,
+ compiler_->isolate_, module_env, native_module,
wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
bytes.end()},
name, function->func_index, compiler_->centry_stub_,
+ compiler::WasmCompilationUnit::GetDefaultCompilationMode(),
compiler_->counters()));
}
@@ -171,8 +182,7 @@ class ModuleCompiler {
void SetFinisherIsRunning(bool value);
- MaybeHandle<Code> FinishCompilationUnit(ErrorThrower* thrower,
- int* func_index);
+ WasmCodeWrapper FinishCompilationUnit(ErrorThrower* thrower, int* func_index);
void CompileInParallel(const ModuleWireBytes& wire_bytes,
compiler::ModuleEnv* module_env,
@@ -215,8 +225,11 @@ class ModuleCompiler {
size_t stopped_compilation_tasks_ = 0;
base::Mutex tasks_mutex_;
Handle<Code> centry_stub_;
+ wasm::NativeModule* native_module_;
};
+namespace {
+
class JSToWasmWrapperCache {
public:
void SetContextAddress(Address context_address) {
@@ -227,23 +240,37 @@ class JSToWasmWrapperCache {
Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
wasm::WasmModule* module,
- Handle<Code> wasm_code,
+ WasmCodeWrapper wasm_code,
uint32_t index) {
const wasm::WasmFunction* func = &module->functions[index];
int cached_idx = sig_map_.Find(func->sig);
if (cached_idx >= 0) {
Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
// Now patch the call to wasm code.
- for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
- DCHECK(!it.done());
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION ||
- target->kind() == Code::WASM_TO_JS_FUNCTION ||
- target->builtin_index() == Builtins::kIllegal ||
- target->builtin_index() == Builtins::kWasmCompileLazy) {
- it.rinfo()->set_target_address(isolate,
- wasm_code->instruction_start());
+ if (wasm_code.IsCodeObject()) {
+ for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
+ DCHECK(!it.done());
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION ||
+ target->kind() == Code::WASM_TO_JS_FUNCTION ||
+ target->kind() == Code::WASM_TO_WASM_FUNCTION ||
+ target->builtin_index() == Builtins::kIllegal ||
+ target->builtin_index() == Builtins::kWasmCompileLazy) {
+ it.rinfo()->set_target_address(
+ isolate, wasm_code.GetCode()->instruction_start());
+ break;
+ }
+ }
+ } else {
+ for (RelocIterator it(*code,
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ ; it.next()) {
+ DCHECK(!it.done());
+ it.rinfo()->set_js_to_wasm_address(
+ isolate, wasm_code.is_null()
+ ? nullptr
+ : wasm_code.GetWasmCode()->instructions().start());
break;
}
}
@@ -335,6 +362,8 @@ class InstanceBuilder {
ERROR_THROWER_WITH_MESSAGE(LinkError)
ERROR_THROWER_WITH_MESSAGE(TypeError)
+#undef ERROR_THROWER_WITH_MESSAGE
+
// Look up an import value in the {ffi_} object.
MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
Handle<String> import_name);
@@ -349,7 +378,7 @@ class InstanceBuilder {
uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
// Load data segments into the memory.
- void LoadDataSegments(Address mem_addr, size_t mem_size);
+ void LoadDataSegments(WasmContext* wasm_context);
void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
@@ -388,7 +417,7 @@ class InstanceBuilder {
};
// TODO(titzer): move to wasm-objects.cc
-static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
DisallowHeapAllocation no_gc;
JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
@@ -396,21 +425,32 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
// If a link to shared memory instances exists, update the list of memory
// instances before the instance is destroyed.
WasmCompiledModule* compiled_module = owner->compiled_module();
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
- DCHECK(compiled_module->has_weak_wasm_module());
- WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ if (FLAG_wasm_jit_to_native) {
+ if (native_module) {
+ TRACE("Finalizing %zu {\n", native_module->instance_id);
+ } else {
+ TRACE("Finalized already cleaned up compiled module\n");
+ }
+ } else {
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
- if (trap_handler::UseTrapHandler()) {
- Handle<FixedArray> code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+ if (trap_handler::UseTrapHandler()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ Handle<FixedArray> code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(
+ Smi::FromInt(trap_handler::kInvalidIndex));
+ }
}
}
}
+ WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
// Since the order of finalizers is not guaranteed, it can be the case
// that {instance->compiled_module()->module()}, which is a
@@ -425,56 +465,32 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
// weak_wasm_module may have been cleared, meaning the module object
- // was GC-ed. In that case, there won't be any new instances created,
- // and we don't need to maintain the links between instances.
+ // was GC-ed. We still want to maintain the links between instances, to
+ // release the WasmCompiledModule corresponding to the WasmModuleInstance
+ // being finalized here.
+ WasmModuleObject* wasm_module = nullptr;
if (!weak_wasm_module->cleared()) {
- WasmModuleObject* wasm_module =
- WasmModuleObject::cast(weak_wasm_module->value());
+ wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
WasmCompiledModule* current_template = wasm_module->compiled_module();
TRACE("chain before {\n");
TRACE_CHAIN(current_template);
TRACE("}\n");
- DCHECK(!current_template->has_weak_prev_instance());
- WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
- WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
-
+ DCHECK(!current_template->has_prev_instance());
if (current_template == compiled_module) {
- if (next == nullptr) {
+ if (!compiled_module->has_next_instance()) {
WasmCompiledModule::Reset(isolate, compiled_module);
} else {
- WasmCompiledModule* next_compiled_module =
- WasmCompiledModule::cast(next->value());
WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(next_compiled_module);
- DCHECK_NULL(prev);
- next_compiled_module->reset_weak_prev_instance();
- }
- } else {
- DCHECK(!(prev == nullptr && next == nullptr));
- // the only reason prev or next would be cleared is if the
- // respective objects got collected, but if that happened,
- // we would have relinked the list.
- if (prev != nullptr) {
- DCHECK(!prev->cleared());
- if (next == nullptr) {
- WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
- } else {
- WasmCompiledModule::cast(prev->value())
- ->set_ptr_to_weak_next_instance(next);
- }
- }
- if (next != nullptr) {
- DCHECK(!next->cleared());
- if (prev == nullptr) {
- WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
- } else {
- WasmCompiledModule::cast(next->value())
- ->set_ptr_to_weak_prev_instance(prev);
- }
+ ->set_compiled_module(compiled_module->ptr_to_next_instance());
}
}
+ }
+
+ compiled_module->RemoveFromChain();
+
+ if (wasm_module != nullptr) {
TRACE("chain after {\n");
TRACE_CHAIN(wasm_module->compiled_module());
TRACE("}\n");
@@ -484,6 +500,8 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
TRACE("}\n");
}
+} // namespace
+
bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
if (bytes.start() == nullptr || bytes.length() == 0) return false;
ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
@@ -547,9 +565,8 @@ MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
DCHECK_EQ(thrower->error(), module.is_null());
if (module.is_null()) return {};
- return SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
- Handle<JSReceiver>::null(),
- Handle<JSArrayBuffer>::null());
+ return SyncInstantiate(isolate, thrower, module.ToHandleChecked(), imports,
+ memory);
}
void RejectPromise(Isolate* isolate, Handle<Context> context,
@@ -617,7 +634,7 @@ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
promise);
}
-Handle<Code> CompileLazy(Isolate* isolate) {
+Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
HistogramTimerScope lazy_time_scope(
isolate->counters()->wasm_lazy_compilation_time());
@@ -635,13 +652,14 @@ Handle<Code> CompileLazy(Isolate* isolate) {
Handle<WasmInstanceObject> instance;
Handle<FixedArray> exp_deopt_data;
int func_index = -1;
+ // If the lazy compile stub has deopt data, use that to determine the
+ // instance and function index. Otherwise this must be a wasm->wasm call
+ // within one instance, so extract the information from the caller.
if (lazy_compile_code->deoptimization_data()->length() > 0) {
- // Then it's an indirect call or via JS->wasm wrapper.
- DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
- auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
- instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
- func_index = Smi::ToInt(exp_deopt_data->get(1));
+ auto func_info = GetWasmFunctionInfo(isolate, lazy_compile_code);
+ instance = func_info.instance.ToHandleChecked();
+ func_index = func_info.func_index;
}
it.Advance();
// Third frame: The calling wasm code or js-to-wasm wrapper.
@@ -655,7 +673,7 @@ Handle<Code> CompileLazy(Isolate* isolate) {
// via deopt data to the lazy compile stub). Just use the instance of the
// caller.
instance =
- handle(WasmInstanceObject::GetOwningInstance(*caller_code), isolate);
+ handle(WasmInstanceObject::GetOwningInstanceGC(*caller_code), isolate);
}
int offset =
static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
@@ -668,9 +686,17 @@ Handle<Code> CompileLazy(Isolate* isolate) {
bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
- Handle<Code> compiled_code = WasmCompiledModule::CompileLazy(
+ wasm::LazyCompilationOrchestrator* orchestrator =
+ Managed<wasm::LazyCompilationOrchestrator>::cast(
+ instance->compiled_module()
+ ->shared()
+ ->lazy_compilation_orchestrator())
+ ->get();
+ Handle<Code> compiled_code = orchestrator->CompileLazyOnGCHeap(
isolate, instance, caller_code, offset, func_index, patch_caller);
if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
+ TRACE_LAZY("Patching %d position(s) in function tables.\n",
+ (exp_deopt_data->length() - 2) / 2);
// See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
// <export_table, index> followed by undefined values.
// Use this information here to patch all export tables.
@@ -682,6 +708,8 @@ Handle<Code> CompileLazy(Isolate* isolate) {
DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
exp_table->set(exp_index, *compiled_code);
}
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
// After processing, remove the list of exported entries, such that we don't
// do the patching redundantly.
Handle<FixedArray> new_deopt_data =
@@ -692,48 +720,178 @@ Handle<Code> CompileLazy(Isolate* isolate) {
return compiled_code;
}
+Address CompileLazy(Isolate* isolate) {
+ HistogramTimerScope lazy_time_scope(
+ isolate->counters()->wasm_lazy_compilation_time());
+
+ // Find the wasm frame which triggered the lazy compile, to get the wasm
+ // instance.
+ StackFrameIterator it(isolate);
+ // First frame: C entry stub.
+ DCHECK(!it.done());
+ DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
+ it.Advance();
+ // Second frame: WasmCompileLazy builtin.
+ DCHECK(!it.done());
+ Handle<WasmInstanceObject> instance;
+ Maybe<uint32_t> func_index_to_compile = Nothing<uint32_t>();
+ Handle<Object> exp_deopt_data_entry;
+ const wasm::WasmCode* lazy_stub_or_copy =
+ isolate->wasm_code_manager()->LookupCode(it.frame()->pc());
+ DCHECK_EQ(wasm::WasmCode::LazyStub, lazy_stub_or_copy->kind());
+ if (!lazy_stub_or_copy->IsAnonymous()) {
+ // Then it's an indirect call or via JS->wasm wrapper.
+ instance = lazy_stub_or_copy->owner()->compiled_module()->owning_instance();
+ func_index_to_compile = Just(lazy_stub_or_copy->index());
+ exp_deopt_data_entry =
+ handle(instance->compiled_module()->lazy_compile_data()->get(
+ static_cast<int>(lazy_stub_or_copy->index())),
+ isolate);
+ }
+ it.Advance();
+ // Third frame: The calling wasm code (direct or indirect), or js-to-wasm
+ // wrapper.
+ DCHECK(!it.done());
+ DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
+ Handle<Code> js_to_wasm_caller_code;
+ const WasmCode* wasm_caller_code = nullptr;
+ Maybe<uint32_t> offset = Nothing<uint32_t>();
+ if (it.frame()->is_js_to_wasm()) {
+ DCHECK(!instance.is_null());
+ js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
+ } else {
+ wasm_caller_code =
+ isolate->wasm_code_manager()->LookupCode(it.frame()->pc());
+ offset = Just(static_cast<uint32_t>(
+ it.frame()->pc() - wasm_caller_code->instructions().start()));
+ if (instance.is_null()) {
+ // Then this is a direct call (otherwise we would have attached the
+ // instance via deopt data to the lazy compile stub). Just use the
+ // instance of the caller.
+ instance =
+ wasm_caller_code->owner()->compiled_module()->owning_instance();
+ }
+ }
+
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module());
+
+ wasm::LazyCompilationOrchestrator* orchestrator =
+ Managed<wasm::LazyCompilationOrchestrator>::cast(
+ compiled_module->shared()->lazy_compilation_orchestrator())
+ ->get();
+ const wasm::WasmCode* result = nullptr;
+ // The caller may be js to wasm calling a function
+ // also available for indirect calls.
+ if (!js_to_wasm_caller_code.is_null()) {
+ result = orchestrator->CompileFromJsToWasm(
+ isolate, instance, js_to_wasm_caller_code,
+ func_index_to_compile.ToChecked());
+ } else {
+ DCHECK_NOT_NULL(wasm_caller_code);
+ if (func_index_to_compile.IsNothing() ||
+ (!exp_deopt_data_entry.is_null() &&
+ !exp_deopt_data_entry->IsFixedArray())) {
+ result = orchestrator->CompileDirectCall(
+ isolate, instance, func_index_to_compile, wasm_caller_code,
+ offset.ToChecked());
+ } else {
+ result = orchestrator->CompileIndirectCall(
+ isolate, instance, func_index_to_compile.ToChecked());
+ }
+ }
+ DCHECK_NOT_NULL(result);
+
+ int func_index = static_cast<int>(result->index());
+ if (!exp_deopt_data_entry.is_null() && exp_deopt_data_entry->IsFixedArray()) {
+ Handle<FixedArray> exp_deopt_data =
+ Handle<FixedArray>::cast(exp_deopt_data_entry);
+
+ TRACE_LAZY("Patching %d position(s) in function tables.\n",
+ exp_deopt_data->length() / 2);
+
+ // See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
+ // of <export_table, index> followed by undefined values. Use this
+ // information here to patch all export tables.
+ for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
+ if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
+ FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
+ Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
+ result->instructions().start(), TENURED);
+ exp_table->set(exp_index, *foreign_holder);
+ }
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // After processing, remove the list of exported entries, such that we don't
+ // do the patching redundantly.
+ compiled_module->lazy_compile_data()->set(
+ func_index, isolate->heap()->undefined_value());
+ }
+
+ return result->instructions().start();
+}
+
compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
DisallowHeapAllocation no_gc;
WasmModule* module = compiled_module->module();
+ std::vector<Handle<Code>> empty_code;
+ if (FLAG_wasm_jit_to_native) {
+ NativeModule* native_module = compiled_module->GetNativeModule();
+ std::vector<GlobalHandleAddress> function_tables =
+ native_module->function_tables();
+ std::vector<GlobalHandleAddress> signature_tables =
+ native_module->signature_tables();
+
+ compiler::ModuleEnv result = {module, // --
+ function_tables, // --
+ signature_tables, // --
+ empty_code,
+ BUILTIN_CODE(isolate, WasmCompileLazy)};
+ return result;
+ } else {
+ std::vector<GlobalHandleAddress> function_tables;
+ std::vector<GlobalHandleAddress> signature_tables;
- std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
- std::vector<SignatureMap*> signature_maps;
+ int num_function_tables = static_cast<int>(module->function_tables.size());
+ for (int i = 0; i < num_function_tables; ++i) {
+ FixedArray* ft = compiled_module->ptr_to_function_tables();
+ FixedArray* st = compiled_module->ptr_to_signature_tables();
- int num_function_tables = static_cast<int>(module->function_tables.size());
- for (int i = 0; i < num_function_tables; ++i) {
- FixedArray* ft = compiled_module->ptr_to_function_tables();
- FixedArray* st = compiled_module->ptr_to_signature_tables();
+ // TODO(clemensh): defer these handles for concurrent compilation.
+ function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
+ signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
+ }
- // TODO(clemensh): defer these handles for concurrent compilation.
- function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
- signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
- signature_maps.push_back(&module->function_tables[i].map);
+ compiler::ModuleEnv result = {module, // --
+ function_tables, // --
+ signature_tables, // --
+ empty_code, // --
+ BUILTIN_CODE(isolate, WasmCompileLazy)};
+ return result;
}
-
- std::vector<Handle<Code>> empty_code;
-
- compiler::ModuleEnv result = {
- module, // --
- function_tables, // --
- signature_tables, // --
- signature_maps, // --
- empty_code, // --
- BUILTIN_CODE(isolate, WasmCompileLazy), // --
- reinterpret_cast<uintptr_t>( // --
- compiled_module->GetGlobalsStartOrNull()) // --
- };
- return result;
}
-void LazyCompilationOrchestrator::CompileFunction(
+const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
+ base::ElapsedTimer compilation_timer;
+ compilation_timer.Start();
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
- if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
- Code::WASM_FUNCTION) {
- return;
+ if (FLAG_wasm_jit_to_native) {
+ wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
+ static_cast<uint32_t>(func_index));
+ if (existing_code != nullptr &&
+ existing_code->kind() == wasm::WasmCode::Function) {
+ TRACE_LAZY("Function %d already compiled.\n", func_index);
+ return existing_code;
+ }
+ } else {
+ if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
+ Code::WASM_FUNCTION) {
+ TRACE_LAZY("Function %d already compiled.\n", func_index);
+ return nullptr;
+ }
}
compiler::ModuleEnv module_env =
@@ -756,11 +914,14 @@ void LazyCompilationOrchestrator::CompileFunction(
func_name.assign(name.start(), static_cast<size_t>(name.length()));
}
ErrorThrower thrower(isolate, "WasmLazyCompile");
- compiler::WasmCompilationUnit unit(isolate, &module_env, body,
+ compiler::WasmCompilationUnit unit(isolate, &module_env,
+ compiled_module->GetNativeModule(), body,
CStrVector(func_name.c_str()), func_index,
CEntryStub(isolate, 1).GetCode());
unit.ExecuteCompilation();
- MaybeHandle<Code> maybe_code = unit.FinishCompilation(&thrower);
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ WasmCodeWrapper code_wrapper = unit.FinishCompilation(&thrower);
// If there is a pending error, something really went wrong. The module was
// verified before starting execution with lazy compilation.
@@ -768,33 +929,53 @@ void LazyCompilationOrchestrator::CompileFunction(
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
CHECK(!thrower.error());
- Handle<Code> code = maybe_code.ToHandleChecked();
-
- Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
- Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
- // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
- deopt_data->set(0, *weak_instance);
- deopt_data->set(1, Smi::FromInt(func_index));
- code->set_deoptimization_data(*deopt_data);
-
- DCHECK_EQ(Builtins::kWasmCompileLazy,
- Code::cast(compiled_module->code_table()->get(func_index))
- ->builtin_index());
- compiled_module->code_table()->set(func_index, *code);
-
// Now specialize the generated code for this instance.
+
+ // {code} is used only when !FLAG_wasm_jit_to_native, so it may be removed
+ // when that flag is removed.
+ Handle<Code> code;
+ if (code_wrapper.IsCodeObject()) {
+ code = code_wrapper.GetCode();
+ AttachWasmFunctionInfo(isolate, code, instance, func_index);
+ DCHECK_EQ(Builtins::kWasmCompileLazy,
+ Code::cast(compiled_module->code_table()->get(func_index))
+ ->builtin_index());
+ compiled_module->code_table()->set(func_index, *code);
+ }
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
CodeSpecialization code_specialization(isolate, &specialization_zone);
code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ code_specialization.ApplyToWasmCode(code_wrapper, SKIP_ICACHE_FLUSH);
+ int64_t func_size =
+ static_cast<int64_t>(func->code.end_offset() - func->code.offset());
+ int64_t compilation_time = compilation_timer.Elapsed().InMicroseconds();
+
auto counters = isolate->counters();
counters->wasm_lazily_compiled_functions()->Increment();
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+
+ if (!code_wrapper.IsCodeObject()) {
+ const wasm::WasmCode* wasm_code = code_wrapper.GetWasmCode();
+ Assembler::FlushICache(isolate, wasm_code->instructions().start(),
+ wasm_code->instructions().size());
+ counters->wasm_generated_code_size()->Increment(
+ static_cast<int>(wasm_code->instructions().size()));
+ counters->wasm_reloc_size()->Increment(
+ static_cast<int>(wasm_code->reloc_info().size()));
+
+ } else {
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+ }
+ counters->wasm_lazy_compilation_throughput()->AddSample(
+ compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
+ : 0);
+ return !code_wrapper.IsCodeObject() ? code_wrapper.GetWasmCode() : nullptr;
}
+namespace {
+
int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
int offset) {
DCHECK(!iterator.done());
@@ -806,7 +987,38 @@ int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
return byte_pos;
}
-Handle<Code> LazyCompilationOrchestrator::CompileLazy(
+Code* ExtractWasmToWasmCallee(Code* wasm_to_wasm) {
+ DCHECK_EQ(Code::WASM_TO_WASM_FUNCTION, wasm_to_wasm->kind());
+ // Find the one code target in this wrapper.
+ RelocIterator it(wasm_to_wasm, RelocInfo::kCodeTargetMask);
+ DCHECK(!it.done());
+ Code* callee = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+#ifdef DEBUG
+ it.next();
+ DCHECK(it.done());
+#endif
+ return callee;
+}
+
+void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
+ Code* new_target) {
+ DCHECK_EQ(Code::WASM_TO_WASM_FUNCTION, wasm_to_wasm->kind());
+ // Find the one code target in this wrapper.
+ RelocIterator it(wasm_to_wasm, RelocInfo::kCodeTargetMask);
+ DCHECK(!it.done());
+ DCHECK_EQ(Builtins::kWasmCompileLazy,
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address())
+ ->builtin_index());
+ it.rinfo()->set_target_address(isolate, new_target->instruction_start());
+#ifdef DEBUG
+ it.next();
+ DCHECK(it.done());
+#endif
+}
+
+} // namespace
+
+Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
int call_offset, int exported_func_index, bool patch_caller) {
struct NonCompiledFunction {
@@ -820,65 +1032,122 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazy(
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
- if (is_js_to_wasm) {
- non_compiled_functions.push_back({0, exported_func_index});
- } else if (patch_caller) {
+ TRACE_LAZY(
+ "Starting lazy compilation (func %d @%d, js-to-wasm: %d, "
+ "patch caller: %d).\n",
+ exported_func_index, call_offset, is_js_to_wasm, patch_caller);
+
+ // If this lazy compile stub is being called through a wasm-to-wasm wrapper,
+ // remember that code object.
+ Handle<Code> wasm_to_wasm_callee;
+
+ // For js-to-wasm wrappers, don't iterate the reloc info. There is just one
+ // call site in there anyway.
+ if (patch_caller && !is_js_to_wasm) {
DisallowHeapAllocation no_gc;
- SeqOneByteString* module_bytes = compiled_module->module_bytes();
SourcePositionTableIterator source_pos_iterator(
caller->SourcePositionTable());
- DCHECK_EQ(2, caller->deoptimization_data()->length());
- int caller_func_index = Smi::ToInt(caller->deoptimization_data()->get(1));
+ auto caller_func_info = GetWasmFunctionInfo(isolate, caller);
+ Handle<WasmCompiledModule> caller_module(
+ caller_func_info.instance.ToHandleChecked()->compiled_module(),
+ isolate);
+ SeqOneByteString* module_bytes = caller_module->module_bytes();
const byte* func_bytes =
- module_bytes->GetChars() +
- compiled_module->module()->functions[caller_func_index].code.offset();
+ module_bytes->GetChars() + caller_module->module()
+ ->functions[caller_func_info.func_index]
+ .code.offset();
+ Code* lazy_callee = nullptr;
for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
it.next()) {
Code* callee =
Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
// TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
// (depending on the bool) against limits of T and then static_casts.
size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
DCHECK_GE(kMaxInt, offset_l);
int offset = static_cast<int>(offset_l);
+ // Call offset points to the instruction after the call. Remember the last
+ // called code object before that offset.
+ if (offset < call_offset) lazy_callee = callee;
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
int byte_pos =
AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
int called_func_index =
ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
non_compiled_functions.push_back({offset, called_func_index});
- // Call offset one instruction after the call. Remember the last called
- // function before that offset.
if (offset < call_offset) func_to_return_idx = called_func_index;
}
+ TRACE_LAZY("Found %zu non-compiled functions in caller.\n",
+ non_compiled_functions.size());
+ DCHECK_NOT_NULL(lazy_callee);
+ if (lazy_callee->kind() == Code::WASM_TO_WASM_FUNCTION) {
+ TRACE_LAZY("Callee is a wasm-to-wasm.\n");
+ wasm_to_wasm_callee = handle(lazy_callee, isolate);
+ // If we call a wasm-to-wasm wrapper, then this wrapper actually
+ // tail-called the lazy compile stub. Find it in the wrapper.
+ lazy_callee = ExtractWasmToWasmCallee(lazy_callee);
+ // This lazy compile stub belongs to the instance that was passed.
+ DCHECK_EQ(*instance,
+ *GetWasmFunctionInfo(isolate, handle(lazy_callee, isolate))
+ .instance.ToHandleChecked());
+ DCHECK_LE(2, lazy_callee->deoptimization_data()->length());
+ func_to_return_idx =
+ Smi::ToInt(lazy_callee->deoptimization_data()->get(1));
+ }
+ DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_callee->builtin_index());
+ // There must be at least one call to patch (the one that lead to calling
+ // the lazy compile stub).
+ DCHECK(!non_compiled_functions.empty() || !wasm_to_wasm_callee.is_null());
}
+ TRACE_LAZY("Compiling function %d.\n", func_to_return_idx);
+
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
CompileFunction(isolate, instance, func_to_return_idx);
- if (is_js_to_wasm || patch_caller) {
+ Handle<Code> compiled_function(
+ Code::cast(compiled_module->code_table()->get(func_to_return_idx)),
+ isolate);
+ DCHECK_EQ(Code::WASM_FUNCTION, compiled_function->kind());
+
+ if (patch_caller || is_js_to_wasm) {
DisallowHeapAllocation no_gc;
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
// Now patch the code object with all functions which are now compiled.
int idx = 0;
+ int patched = 0;
for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
it.next()) {
Code* callee =
Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
- DCHECK_GT(non_compiled_functions.size(), idx);
- int called_func_index = non_compiled_functions[idx].func_index;
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) {
+ // If the callee is the wasm-to-wasm wrapper triggering this lazy
+ // compilation, patch it. If is_js_to_wasm is set, we did not set the
+ // wasm_to_wasm_callee, so just check the code kind (this is the only
+ // call in that wrapper anyway).
+ if ((is_js_to_wasm && callee->kind() == Code::WASM_TO_WASM_FUNCTION) ||
+ (!wasm_to_wasm_callee.is_null() &&
+ callee == *wasm_to_wasm_callee)) {
+ TRACE_LAZY("Patching wasm-to-wasm wrapper.\n");
+ PatchWasmToWasmWrapper(isolate, callee, *compiled_function);
+ ++patched;
+ }
+ continue;
+ }
+ int called_func_index = func_to_return_idx;
+ if (!is_js_to_wasm) {
+ DCHECK_GT(non_compiled_functions.size(), idx);
+ called_func_index = non_compiled_functions[idx].func_index;
+ DCHECK_EQ(non_compiled_functions[idx].offset,
+ it.rinfo()->pc() - caller->instruction_start());
+ ++idx;
+ }
// Check that the callee agrees with our assumed called_func_index.
DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
Smi::ToInt(callee->deoptimization_data()->get(1)) ==
called_func_index);
- if (is_js_to_wasm) {
- DCHECK_EQ(func_to_return_idx, called_func_index);
- } else {
- DCHECK_EQ(non_compiled_functions[idx].offset,
- it.rinfo()->pc() - caller->instruction_start());
- }
- ++idx;
Handle<Code> callee_compiled(
Code::cast(compiled_module->code_table()->get(called_func_index)));
if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
@@ -888,14 +1157,160 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazy(
DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
it.rinfo()->set_target_address(isolate,
callee_compiled->instruction_start());
+ ++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ DCHECK_LT(0, patched);
+ USE(patched);
+ }
+
+ return compiled_function;
+}
+
+const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ Handle<Code> js_to_wasm_caller, uint32_t exported_func_index) {
+ Decoder decoder(nullptr, nullptr);
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+
+ TRACE_LAZY(
+ "Starting lazy compilation (func %u, js_to_wasm: true, patch caller: "
+ "true). \n",
+ exported_func_index);
+ CompileFunction(isolate, instance, exported_func_index);
+ {
+ DisallowHeapAllocation no_gc;
+ int idx = 0;
+ for (RelocIterator it(*js_to_wasm_caller,
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ !it.done(); it.next()) {
+ ++idx;
+ const wasm::WasmCode* callee_compiled =
+ compiled_module->GetNativeModule()->GetCode(exported_func_index);
+ DCHECK_NOT_NULL(callee_compiled);
+ it.rinfo()->set_js_to_wasm_address(
+ isolate, callee_compiled->instructions().start());
+ }
+ DCHECK_EQ(1, idx);
+ }
+
+ wasm::WasmCode* ret =
+ compiled_module->GetNativeModule()->GetCode(exported_func_index);
+ DCHECK_NOT_NULL(ret);
+ DCHECK_EQ(wasm::WasmCode::Function, ret->kind());
+ return ret;
+}
+
+const wasm::WasmCode* LazyCompilationOrchestrator::CompileIndirectCall(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t func_index) {
+ TRACE_LAZY(
+ "Starting lazy compilation (func %u, js_to_wasm: false, patch caller: "
+ "false). \n",
+ func_index);
+ return CompileFunction(isolate, instance, func_index);
+}
+
+const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ Maybe<uint32_t> maybe_func_to_return_idx, const wasm::WasmCode* wasm_caller,
+ int call_offset) {
+ struct WasmDirectCallData {
+ uint32_t offset = 0;
+ uint32_t func_index = 0;
+ };
+ std::vector<Maybe<WasmDirectCallData>> non_compiled_functions;
+ Decoder decoder(nullptr, nullptr);
+ {
+ DisallowHeapAllocation no_gc;
+ Handle<WasmCompiledModule> caller_module(
+ wasm_caller->owner()->compiled_module(), isolate);
+ SeqOneByteString* module_bytes = caller_module->module_bytes();
+ uint32_t caller_func_index = wasm_caller->index();
+ SourcePositionTableIterator source_pos_iterator(
+ Handle<ByteArray>(ByteArray::cast(
+ caller_module->source_positions()->get(caller_func_index))));
+
+ const byte* func_bytes =
+ module_bytes->GetChars() +
+ caller_module->module()->functions[caller_func_index].code.offset();
+ for (RelocIterator it(wasm_caller->instructions(),
+ wasm_caller->reloc_info(),
+ wasm_caller->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL));
+ !it.done(); it.next()) {
+ const WasmCode* callee = isolate->wasm_code_manager()->LookupCode(
+ it.rinfo()->target_address());
+ if (callee->kind() != WasmCode::LazyStub) {
+ non_compiled_functions.push_back(Nothing<WasmDirectCallData>());
+ continue;
+ }
+ // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
+ // (depending on the bool) against limits of T and then static_casts.
+ size_t offset_l = it.rinfo()->pc() - wasm_caller->instructions().start();
+ DCHECK_GE(kMaxInt, offset_l);
+ int offset = static_cast<int>(offset_l);
+ int byte_pos =
+ AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+ uint32_t called_func_index =
+ ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
+ DCHECK_LT(called_func_index,
+ caller_module->GetNativeModule()->FunctionCount());
+ WasmDirectCallData data;
+ data.offset = offset;
+ data.func_index = called_func_index;
+ non_compiled_functions.push_back(Just<WasmDirectCallData>(data));
+ // Call offset one instruction after the call. Remember the last called
+ // function before that offset.
+ if (offset < call_offset) {
+ maybe_func_to_return_idx = Just(called_func_index);
+ }
+ }
}
+ uint32_t func_to_return_idx = maybe_func_to_return_idx.ToChecked();
- Code* ret =
- Code::cast(compiled_module->code_table()->get(func_to_return_idx));
- DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
- return handle(ret, isolate);
+ TRACE_LAZY(
+ "Starting lazy compilation (func %u @%d, js_to_wasm: false, patch "
+ "caller: true). \n",
+ func_to_return_idx, call_offset);
+
+ // TODO(clemensh): compile all functions in non_compiled_functions in
+ // background, wait for func_to_return_idx.
+ CompileFunction(isolate, instance, func_to_return_idx);
+
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ WasmCode* ret =
+ compiled_module->GetNativeModule()->GetCode(func_to_return_idx);
+
+ DCHECK_NOT_NULL(ret);
+ {
+ DisallowHeapAllocation no_gc;
+ // Now patch the code object with all functions which are now compiled. This
+ // will pick up any other compiled functions, not only {ret}.
+ size_t idx = 0;
+ size_t patched = 0;
+ for (RelocIterator
+ it(wasm_caller->instructions(), wasm_caller->reloc_info(),
+ wasm_caller->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL));
+ !it.done(); it.next(), ++idx) {
+ auto& info = non_compiled_functions[idx];
+ if (info.IsNothing()) continue;
+ uint32_t lookup = info.ToChecked().func_index;
+ const WasmCode* callee_compiled =
+ compiled_module->GetNativeModule()->GetCode(lookup);
+ if (callee_compiled->kind() != WasmCode::Function) continue;
+ it.rinfo()->set_wasm_call_address(
+ isolate, callee_compiled->instructions().start());
+ ++patched;
+ }
+ DCHECK_EQ(non_compiled_functions.size(), idx);
+ TRACE_LAZY("Patched %zu location(s) in the caller.\n", patched);
+ }
+ return ret;
}
ModuleCompiler::CodeGenerationSchedule::CodeGenerationSchedule(
@@ -942,7 +1357,8 @@ size_t ModuleCompiler::CodeGenerationSchedule::GetRandomIndexInSchedule() {
}
ModuleCompiler::ModuleCompiler(Isolate* isolate, WasmModule* module,
- Handle<Code> centry_stub)
+ Handle<Code> centry_stub,
+ wasm::NativeModule* native_module)
: isolate_(isolate),
module_(module),
async_counters_(isolate->async_counters()),
@@ -956,7 +1372,8 @@ ModuleCompiler::ModuleCompiler(Isolate* isolate, WasmModule* module,
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())),
stopped_compilation_tasks_(num_background_tasks_),
- centry_stub_(centry_stub) {}
+ centry_stub_(centry_stub),
+ native_module_(native_module) {}
// The actual runnable task that performs compilations in the background.
void ModuleCompiler::OnBackgroundTaskStopped() {
@@ -965,7 +1382,7 @@ void ModuleCompiler::OnBackgroundTaskStopped() {
DCHECK_LE(stopped_compilation_tasks_, num_background_tasks_);
}
-// Run by each compilation task The no_finisher_callback is called
+// Run by each compilation task. The no_finisher_callback is called
// within the result_mutex_ lock when no finishing task is running,
// i.e. when the finisher_is_running_ flag is not set.
bool ModuleCompiler::FetchAndExecuteCompilationUnit(
@@ -1009,18 +1426,22 @@ size_t ModuleCompiler::InitializeCompilationUnits(
Vector<const uint8_t> bytes(wire_bytes.start() + func->code.offset(),
func->code.end_offset() - func->code.offset());
WasmName name = wire_bytes.GetName(func);
- builder.AddUnit(module_env, func, buffer_offset, bytes, name);
+ DCHECK_IMPLIES(FLAG_wasm_jit_to_native, native_module_ != nullptr);
+ builder.AddUnit(module_env, native_module_, func, buffer_offset, bytes,
+ name);
}
builder.Commit();
return funcs_to_compile;
}
void ModuleCompiler::RestartCompilationTasks() {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ std::shared_ptr<v8::TaskRunner> task_runner =
+ V8::GetCurrentPlatform()->GetBackgroundTaskRunner(v8_isolate);
+
base::LockGuard<base::Mutex> guard(&tasks_mutex_);
for (; stopped_compilation_tasks_ > 0; --stopped_compilation_tasks_) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompilationTask(this),
- v8::Platform::ExpectedRuntime::kShortRunningTask);
+ task_runner->PostTask(base::make_unique<CompilationTask>(this));
}
}
@@ -1029,12 +1450,14 @@ size_t ModuleCompiler::FinishCompilationUnits(
size_t finished = 0;
while (true) {
int func_index = -1;
- MaybeHandle<Code> result = FinishCompilationUnit(thrower, &func_index);
+ WasmCodeWrapper result = FinishCompilationUnit(thrower, &func_index);
if (func_index < 0) break;
++finished;
DCHECK_IMPLIES(result.is_null(), thrower->error());
if (result.is_null()) break;
- results[func_index] = result.ToHandleChecked();
+ if (result.IsCodeObject()) {
+ results[func_index] = result.GetCode();
+ }
}
bool do_restart;
{
@@ -1050,8 +1473,8 @@ void ModuleCompiler::SetFinisherIsRunning(bool value) {
finisher_is_running_ = value;
}
-MaybeHandle<Code> ModuleCompiler::FinishCompilationUnit(ErrorThrower* thrower,
- int* func_index) {
+WasmCodeWrapper ModuleCompiler::FinishCompilationUnit(ErrorThrower* thrower,
+ int* func_index) {
std::unique_ptr<compiler::WasmCompilationUnit> unit;
{
base::LockGuard<base::Mutex> guard(&result_mutex_);
@@ -1132,15 +1555,17 @@ void ModuleCompiler::CompileSequentially(const ModuleWireBytes& wire_bytes,
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
- MaybeHandle<Code> code = compiler::WasmCompilationUnit::CompileWasmFunction(
- thrower, isolate_, wire_bytes, module_env, &func);
+ WasmCodeWrapper code = compiler::WasmCompilationUnit::CompileWasmFunction(
+ native_module_, thrower, isolate_, wire_bytes, module_env, &func);
if (code.is_null()) {
TruncatedUserString<> name(wire_bytes.GetName(&func));
thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(),
name.start());
break;
}
- results[i] = code.ToHandleChecked();
+ if (code.IsCodeObject()) {
+ results[i] = code.GetCode();
+ }
}
}
@@ -1175,7 +1600,13 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
- ModuleCompiler compiler(isolate, module.get(), centry_stub);
+ // TODO(mtrofin): the wasm::NativeModule parameter to the ModuleCompiler
+ // constructor is null here, and initialized in CompileToModuleObjectInternal.
+ // This is a point-in-time, until we remove the FLAG_wasm_jit_to_native flag,
+ // and stop needing a FixedArray for code for the non-native case. Otherwise,
+ // we end up moving quite a bit of initialization logic here that is also
+ // needed in CompileToModuleObjectInternal, complicating the change.
+ ModuleCompiler compiler(isolate, module.get(), centry_stub, nullptr);
return compiler.CompileToModuleObjectInternal(thrower, std::move(module),
wire_bytes, asm_js_script,
asm_js_offset_table_bytes);
@@ -1187,9 +1618,19 @@ bool compile_lazy(const WasmModule* module) {
(FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
}
-void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+void FlushICache(Isolate* isolate, const wasm::NativeModule* native_module) {
+ for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
+ const wasm::WasmCode* code = native_module->GetCode(i);
+ if (code == nullptr) continue;
+ Assembler::FlushICache(isolate, code->instructions().start(),
+ code->instructions().size());
+ }
+}
+
+void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
+ for (int i = 0, e = functions->length(); i < e; ++i) {
+ if (!functions->get(i)->IsCode()) continue;
+ Code* code = Code::cast(functions->get(i));
Assembler::FlushICache(isolate, code->instruction_start(),
code->instruction_size());
}
@@ -1199,11 +1640,26 @@ byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
-void RecordStats(Code* code, Counters* counters) {
+void RecordStats(const Code* code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
+void RecordStats(const wasm::WasmCode* code, Counters* counters) {
+ counters->wasm_generated_code_size()->Increment(
+ static_cast<int>(code->instructions().size()));
+ counters->wasm_reloc_size()->Increment(
+ static_cast<int>(code->reloc_info().size()));
+}
+
+void RecordStats(WasmCodeWrapper wrapper, Counters* counters) {
+ if (wrapper.IsCodeObject()) {
+ RecordStats(*wrapper.GetCode(), counters);
+ } else {
+ RecordStats(wrapper.GetWasmCode(), counters);
+ }
+}
+
void RecordStats(Handle<FixedArray> functions, Counters* counters) {
DisallowHeapAllocation no_gc;
for (int i = 0; i < functions->length(); ++i) {
@@ -1211,123 +1667,163 @@ void RecordStats(Handle<FixedArray> functions, Counters* counters) {
if (val->IsCode()) RecordStats(Code::cast(val), counters);
}
}
-Handle<Script> CreateWasmScript(Isolate* isolate,
- const ModuleWireBytes& wire_bytes) {
- Handle<Script> script =
- isolate->factory()->NewScript(isolate->factory()->empty_string());
- script->set_context_data(isolate->native_context()->debug_context_id());
- script->set_type(Script::TYPE_WASM);
-
- int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(wire_bytes.start()),
- static_cast<int>(wire_bytes.length()), kZeroHashSeed);
- const int kBufferSize = 32;
- char buffer[kBufferSize];
- int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
- DCHECK(url_chars >= 0 && url_chars < kBufferSize);
- MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
- TENURED);
- script->set_source_url(*url_str.ToHandleChecked());
-
- int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
- DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
- TENURED);
- script->set_name(*name_str.ToHandleChecked());
-
- return script;
+void RecordStats(const wasm::NativeModule* native_module, Counters* counters) {
+ for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
+ const wasm::WasmCode* code = native_module->GetCode(i);
+ if (code != nullptr) RecordStats(code, counters);
+ }
}
// Ensure that the code object in <code_table> at offset <func_index> has
// deoptimization data attached. This is needed for lazy compile stubs which are
// called from JS_TO_WASM functions or via exported function tables. The deopt
// data is used to determine which function this lazy compile stub belongs to.
-Handle<Code> EnsureExportedLazyDeoptData(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Handle<FixedArray> code_table,
- int func_index) {
- Handle<Code> code(Code::cast(code_table->get(func_index)), isolate);
- if (code->builtin_index() != Builtins::kWasmCompileLazy) {
- // No special deopt data needed for compiled functions, and imported
- // functions, which map to Illegal at this point (they get compiled at
- // instantiation time).
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_JS_FUNCTION ||
- code->builtin_index() == Builtins::kIllegal);
- return code;
+// TODO(mtrofin): remove the instance and code_table members once we remove the
+// FLAG_wasm_jit_to_native flag.
+WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> code_table,
+ wasm::NativeModule* native_module,
+ uint32_t func_index) {
+ if (!FLAG_wasm_jit_to_native) {
+ Handle<Code> code(Code::cast(code_table->get(func_index)), isolate);
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) {
+ // No special deopt data needed for compiled functions, and imported
+ // functions, which map to Illegal at this point (they get compiled at
+ // instantiation time).
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ code->kind() == Code::WASM_TO_WASM_FUNCTION ||
+ code->builtin_index() == Builtins::kIllegal);
+ return WasmCodeWrapper(code);
+ }
+
+ // deopt_data:
+ // #0: weak instance
+ // #1: func_index
+ // might be extended later for table exports (see
+ // EnsureTableExportLazyDeoptData).
+ Handle<FixedArray> deopt_data(code->deoptimization_data());
+ DCHECK_EQ(0, deopt_data->length() % 2);
+ if (deopt_data->length() == 0) {
+ code = isolate->factory()->CopyCode(code);
+ code_table->set(func_index, *code);
+ AttachWasmFunctionInfo(isolate, code, instance, func_index);
+ }
+#ifdef DEBUG
+ auto func_info = GetWasmFunctionInfo(isolate, code);
+ DCHECK_IMPLIES(!instance.is_null(),
+ *func_info.instance.ToHandleChecked() == *instance);
+ DCHECK_EQ(func_index, func_info.func_index);
+#endif
+ return WasmCodeWrapper(code);
+ } else {
+ wasm::WasmCode* code = native_module->GetCode(func_index);
+ // {code} will be nullptr when exporting imports.
+ if (code == nullptr || code->kind() != wasm::WasmCode::LazyStub ||
+ !code->IsAnonymous()) {
+ return WasmCodeWrapper(code);
+ }
+ // Clone the lazy builtin into the native module.
+ return WasmCodeWrapper(native_module->CloneLazyBuiltinInto(func_index));
}
- // deopt_data:
- // #0: weak instance
- // #1: func_index
- // might be extended later for table exports (see
- // EnsureTableExportLazyDeoptData).
- Handle<FixedArray> deopt_data(code->deoptimization_data());
- DCHECK_EQ(0, deopt_data->length() % 2);
- if (deopt_data->length() == 0) {
- code = isolate->factory()->CopyCode(code);
- code_table->set(func_index, *code);
- deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
- code->set_deoptimization_data(*deopt_data);
- if (!instance.is_null()) {
- Handle<WeakCell> weak_instance =
- isolate->factory()->NewWeakCell(instance);
- deopt_data->set(0, *weak_instance);
- }
- deopt_data->set(1, Smi::FromInt(func_index));
- }
- DCHECK_IMPLIES(!instance.is_null(),
- WeakCell::cast(code->deoptimization_data()->get(0))->value() ==
- *instance);
- DCHECK_EQ(func_index, Smi::ToInt(code->deoptimization_data()->get(1)));
- return code;
}
// Ensure that the code object in <code_table> at offset <func_index> has
// deoptimization data attached. This is needed for lazy compile stubs which are
// called from JS_TO_WASM functions or via exported function tables. The deopt
// data is used to determine which function this lazy compile stub belongs to.
-Handle<Code> EnsureTableExportLazyDeoptData(
+// TODO(mtrofin): remove the instance and code_table members once we remove the
+// FLAG_wasm_jit_to_native flag.
+WasmCodeWrapper EnsureTableExportLazyDeoptData(
Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<FixedArray> code_table, int func_index,
- Handle<FixedArray> export_table, int export_index,
- std::unordered_map<uint32_t, uint32_t>& table_export_count) {
- Handle<Code> code =
- EnsureExportedLazyDeoptData(isolate, instance, code_table, func_index);
- if (code->builtin_index() != Builtins::kWasmCompileLazy) return code;
-
- // deopt_data:
- // #0: weak instance
- // #1: func_index
- // [#2: export table
- // #3: export table index]
- // [#4: export table
- // #5: export table index]
- // ...
- // table_export_count counts down and determines the index for the new export
- // table entry.
- auto table_export_entry = table_export_count.find(func_index);
- DCHECK(table_export_entry != table_export_count.end());
- DCHECK_LT(0, table_export_entry->second);
- uint32_t this_idx = 2 * table_export_entry->second;
- --table_export_entry->second;
- Handle<FixedArray> deopt_data(code->deoptimization_data());
- DCHECK_EQ(0, deopt_data->length() % 2);
- if (deopt_data->length() == 2) {
- // Then only the "header" (#0 and #1) exists. Extend for the export table
- // entries (make space for this_idx + 2 elements).
- deopt_data = isolate->factory()->CopyFixedArrayAndGrow(deopt_data, this_idx,
- TENURED);
- code->set_deoptimization_data(*deopt_data);
- }
- DCHECK_LE(this_idx + 2, deopt_data->length());
- DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
- DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
- deopt_data->set(this_idx, *export_table);
- deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
- return code;
+ Handle<FixedArray> code_table, wasm::NativeModule* native_module,
+ uint32_t func_index, Handle<FixedArray> export_table, int export_index,
+ std::unordered_map<uint32_t, uint32_t>* table_export_count) {
+ if (!FLAG_wasm_jit_to_native) {
+ Handle<Code> code =
+ EnsureExportedLazyDeoptData(isolate, instance, code_table,
+ native_module, func_index)
+ .GetCode();
+ if (code->builtin_index() != Builtins::kWasmCompileLazy)
+ return WasmCodeWrapper(code);
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+
+ // deopt_data:
+ // #0: weak instance
+ // #1: func_index
+ // [#2: export table
+ // #3: export table index]
+ // [#4: export table
+ // #5: export table index]
+ // ...
+ // table_export_count counts down and determines the index for the new
+ // export table entry.
+ auto table_export_entry = table_export_count->find(func_index);
+ DCHECK(table_export_entry != table_export_count->end());
+ DCHECK_LT(0, table_export_entry->second);
+ uint32_t this_idx = 2 * table_export_entry->second;
+ --table_export_entry->second;
+ Handle<FixedArray> deopt_data(code->deoptimization_data());
+ DCHECK_EQ(0, deopt_data->length() % 2);
+ if (deopt_data->length() == 2) {
+ // Then only the "header" (#0 and #1) exists. Extend for the export table
+ // entries (make space for this_idx + 2 elements).
+ deopt_data = isolate->factory()->CopyFixedArrayAndGrow(deopt_data,
+ this_idx, TENURED);
+ code->set_deoptimization_data(*deopt_data);
+ }
+ DCHECK_LE(this_idx + 2, deopt_data->length());
+ DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
+ DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
+ deopt_data->set(this_idx, *export_table);
+ deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
+ return WasmCodeWrapper(code);
+ } else {
+ const wasm::WasmCode* code =
+ EnsureExportedLazyDeoptData(isolate, instance, code_table,
+ native_module, func_index)
+ .GetWasmCode();
+ if (code == nullptr || code->kind() != wasm::WasmCode::LazyStub)
+ return WasmCodeWrapper(code);
+
+ // deopt_data:
+ // [#0: export table
+ // #1: export table index]
+ // [#2: export table
+ // #3: export table index]
+ // ...
+ // table_export_count counts down and determines the index for the new
+ // export table entry.
+ auto table_export_entry = table_export_count->find(func_index);
+ DCHECK(table_export_entry != table_export_count->end());
+ DCHECK_LT(0, table_export_entry->second);
+ --table_export_entry->second;
+ uint32_t this_idx = 2 * table_export_entry->second;
+ int int_func_index = static_cast<int>(func_index);
+ Object* deopt_entry =
+ native_module->compiled_module()->lazy_compile_data()->get(
+ int_func_index);
+ FixedArray* deopt_data = nullptr;
+ if (!deopt_entry->IsFixedArray()) {
+ // We count indices down, so we enter here first for the
+ // largest index.
+ deopt_data = *isolate->factory()->NewFixedArray(this_idx + 2, TENURED);
+ native_module->compiled_module()->lazy_compile_data()->set(int_func_index,
+ deopt_data);
+ } else {
+ deopt_data = FixedArray::cast(deopt_entry);
+ DCHECK_LE(this_idx + 2, deopt_data->length());
+ }
+ DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
+ DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
+ deopt_data->set(this_idx, *export_table);
+ deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
+ return WasmCodeWrapper(code);
+ }
}
bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
@@ -1337,47 +1833,66 @@ bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
using WasmInstanceMap =
IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-Handle<Code> UnwrapExportOrCompileImportWrapper(
- Isolate* isolate, int index, FunctionSig* sig, Handle<JSReceiver> target,
- ModuleOrigin origin, WasmInstanceMap* imported_instances,
- Handle<FixedArray> js_imports_table, Handle<WasmInstanceObject> instance) {
- WasmFunction* other_func = GetWasmFunctionForExport(isolate, target);
- if (other_func) {
- if (!sig->Equals(other_func->sig)) return Handle<Code>::null();
- // Signature matched. Unwrap the import wrapper and return the raw wasm
- // function code.
- // Remember the wasm instance of the import. We have to keep it alive.
- Handle<WasmInstanceObject> imported_instance(
- Handle<WasmExportedFunction>::cast(target)->instance(), isolate);
- imported_instances->Set(imported_instance, imported_instance);
- Handle<Code> wasm_code =
- UnwrapExportWrapper(Handle<JSFunction>::cast(target));
- // Create a WasmToWasm wrapper to replace the current wasm context with
- // the imported_instance one, in order to access the right memory.
- // If the imported instance does not have memory, avoid the wrapper.
- // TODO(wasm): Avoid the wrapper also if instance memory and imported
- // instance share the same memory object.
- bool needs_wasm_to_wasm_wrapper = imported_instance->has_memory_object();
- if (!needs_wasm_to_wasm_wrapper) return wasm_code;
- Address new_wasm_context =
- reinterpret_cast<Address>(imported_instance->wasm_context());
+WasmCodeWrapper MakeWasmToWasmWrapper(
+ Isolate* isolate, Handle<WasmExportedFunction> imported_function,
+ FunctionSig* expected_sig, FunctionSig** sig,
+ WasmInstanceMap* imported_instances, Handle<WasmInstanceObject> instance,
+ uint32_t index) {
+ // TODO(wasm): cache WASM-to-WASM wrappers by signature and clone+patch.
+ Handle<WasmInstanceObject> imported_instance(imported_function->instance(),
+ isolate);
+ imported_instances->Set(imported_instance, imported_instance);
+ WasmContext* new_wasm_context = imported_instance->wasm_context()->get();
+ Address new_wasm_context_address =
+ reinterpret_cast<Address>(new_wasm_context);
+ *sig = imported_instance->module()
+ ->functions[imported_function->function_index()]
+ .sig;
+ if (expected_sig && !expected_sig->Equals(*sig)) return {};
+
+ if (!FLAG_wasm_jit_to_native) {
Handle<Code> wrapper_code = compiler::CompileWasmToWasmWrapper(
- isolate, wasm_code, sig, index, new_wasm_context);
- // Set the deoptimization data for the WasmToWasm wrapper.
- // TODO(wasm): Remove the deoptimization data when we will use tail calls
- // for WasmToWasm wrappers.
- Factory* factory = isolate->factory();
- Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
- Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
- deopt_data->set(0, *weak_link);
- deopt_data->set(1, Smi::FromInt(index));
- wrapper_code->set_deoptimization_data(*deopt_data);
- return wrapper_code;
+ isolate, imported_function->GetWasmCode(), *sig,
+ new_wasm_context_address);
+ // Set the deoptimization data for the WasmToWasm wrapper. This is
+ // needed by the interpreter to find the imported instance for
+ // a cross-instance call.
+ AttachWasmFunctionInfo(isolate, wrapper_code, imported_instance,
+ imported_function->function_index());
+ return WasmCodeWrapper(wrapper_code);
+ } else {
+ Handle<Code> code = compiler::CompileWasmToWasmWrapper(
+ isolate, imported_function->GetWasmCode(), *sig,
+ new_wasm_context_address);
+ return WasmCodeWrapper(
+ instance->compiled_module()->GetNativeModule()->AddCodeCopy(
+ code, wasm::WasmCode::WasmToWasmWrapper, index));
+ }
+}
+
+WasmCodeWrapper UnwrapExportOrCompileImportWrapper(
+ Isolate* isolate, FunctionSig* sig, Handle<JSReceiver> target,
+ uint32_t import_index, ModuleOrigin origin,
+ WasmInstanceMap* imported_instances, Handle<FixedArray> js_imports_table,
+ Handle<WasmInstanceObject> instance) {
+ if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
+ FunctionSig* unused = nullptr;
+ return MakeWasmToWasmWrapper(
+ isolate, Handle<WasmExportedFunction>::cast(target), sig, &unused,
+ imported_instances, instance, import_index);
}
// No wasm function or being debugged. Compile a new wrapper for the new
// signature.
- return compiler::CompileWasmToJSWrapper(isolate, target, sig, index, origin,
- js_imports_table);
+ if (FLAG_wasm_jit_to_native) {
+ Handle<Code> temp_code = compiler::CompileWasmToJSWrapper(
+ isolate, target, sig, import_index, origin, js_imports_table);
+ return WasmCodeWrapper(
+ instance->compiled_module()->GetNativeModule()->AddCodeCopy(
+ temp_code, wasm::WasmCode::WasmToJsWrapper, import_index));
+ } else {
+ return WasmCodeWrapper(compiler::CompileWasmToJSWrapper(
+ isolate, target, sig, import_index, origin, js_imports_table));
+ }
}
double MonotonicallyIncreasingTimeInMs() {
@@ -1394,7 +1909,6 @@ std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
Isolate* isolate, WasmModule* module, Handle<Code> illegal_builtin) {
std::vector<GlobalHandleAddress> function_tables;
std::vector<GlobalHandleAddress> signature_tables;
- std::vector<SignatureMap*> signature_maps;
for (size_t i = 0; i < module->function_tables.size(); i++) {
Handle<Object> func_table =
@@ -1409,7 +1923,6 @@ std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
v8::WeakCallbackType::kFinalizer);
function_tables.push_back(func_table.address());
signature_tables.push_back(sig_table.address());
- signature_maps.push_back(&module->function_tables[i].map);
}
std::vector<Handle<Code>> empty_code;
@@ -1418,20 +1931,20 @@ std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
module, // --
function_tables, // --
signature_tables, // --
- signature_maps, // --
empty_code, // --
- illegal_builtin, // --
- 0 // --
+ illegal_builtin // --
};
return std::unique_ptr<compiler::ModuleEnv>(new compiler::ModuleEnv(result));
}
-Handle<WasmCompiledModule> NewCompiledModule(
- Isolate* isolate, Handle<WasmSharedModuleData> shared,
- Handle<FixedArray> code_table, Handle<FixedArray> export_wrappers,
- compiler::ModuleEnv* env) {
+// TODO(mtrofin): remove code_table when we don't need FLAG_wasm_jit_to_native
+Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
+ WasmModule* module,
+ Handle<FixedArray> code_table,
+ Handle<FixedArray> export_wrappers,
+ compiler::ModuleEnv* env) {
Handle<WasmCompiledModule> compiled_module =
- WasmCompiledModule::New(isolate, shared, code_table, export_wrappers,
+ WasmCompiledModule::New(isolate, module, code_table, export_wrappers,
env->function_tables, env->signature_tables);
return compiled_module;
}
@@ -1453,7 +1966,11 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
TimedHistogramScope wasm_compile_module_time_scope(
module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
: counters()->wasm_compile_asm_module_time());
- // The {module> parameter is passed in to transfer ownership of the WasmModule
+ // TODO(6792): No longer needed once WebAssembly code is off heap. Use
+ // base::Optional to be able to close the scope before notifying the debugger.
+ base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
+ base::in_place_t(), isolate_->heap());
+ // The {module} parameter is passed in to transfer ownership of the WasmModule
// to this function. The WasmModule itself existed already as an instance
// variable of the ModuleCompiler. We check here that the parameter and the
// instance variable actually point to the same object.
@@ -1462,69 +1979,6 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
bool lazy_compile = compile_lazy(module_);
Factory* factory = isolate_->factory();
-
- // If lazy compile: Initialize the code table with the lazy compile builtin.
- // Otherwise: Initialize with the illegal builtin. All call sites will be
- // patched at instantiation.
- Handle<Code> init_builtin = lazy_compile
- ? BUILTIN_CODE(isolate_, WasmCompileLazy)
- : BUILTIN_CODE(isolate_, Illegal);
-
- auto env = CreateDefaultModuleEnv(isolate_, module_, init_builtin);
-
- // The {code_table} array contains import wrappers and functions (which
- // are both included in {functions.size()}, and export wrappers).
- int code_table_size = static_cast<int>(module_->functions.size());
- int export_wrappers_size = static_cast<int>(module_->num_exported_functions);
- Handle<FixedArray> code_table =
- factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
- Handle<FixedArray> export_wrappers =
- factory->NewFixedArray(static_cast<int>(export_wrappers_size), TENURED);
- // Initialize the code table.
- for (int i = 0, e = code_table->length(); i < e; ++i) {
- code_table->set(i, *init_builtin);
- }
-
- for (int i = 0, e = export_wrappers->length(); i < e; ++i) {
- export_wrappers->set(i, *init_builtin);
- }
-
- if (!lazy_compile) {
- size_t funcs_to_compile =
- module_->functions.size() - module_->num_imported_functions;
- bool compile_parallel =
- !FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
- funcs_to_compile > 1 &&
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() > 0;
- // Avoid a race condition by collecting results into a second vector.
- std::vector<Handle<Code>> results(env->module->functions.size());
-
- if (compile_parallel) {
- CompileInParallel(wire_bytes, env.get(), results, thrower);
- } else {
- CompileSequentially(wire_bytes, env.get(), results, thrower);
- }
- if (thrower->error()) return {};
-
- // At this point, compilation has completed. Update the code table.
- for (size_t i =
- module_->num_imported_functions + FLAG_skip_compiling_wasm_funcs;
- i < results.size(); ++i) {
- Code* code = *results[i];
- code_table->set(static_cast<int>(i), code);
- RecordStats(code, counters());
- }
- } else if (module_->is_wasm()) {
- // Validate wasm modules for lazy compilation. Don't validate asm.js
- // modules, they are valid by construction (otherwise a CHECK will fail
- // during lazy compilation).
- // TODO(clemensh): According to the spec, we can actually skip validation
- // at module creation time, and return a function that always traps at
- // (lazy) compilation time.
- ValidateSequentially(wire_bytes, env.get(), thrower);
- }
- if (thrower->error()) return {};
-
// Create heap objects for script, module bytes and asm.js offset table to
// be stored in the shared module data.
Handle<Script> script;
@@ -1562,34 +2016,104 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
script, asm_js_offset_table);
if (lazy_compile) WasmSharedModuleData::PrepareForLazyCompilation(shared);
+ Handle<Code> init_builtin = lazy_compile
+ ? BUILTIN_CODE(isolate_, WasmCompileLazy)
+ : BUILTIN_CODE(isolate_, Illegal);
+
+ // TODO(mtrofin): remove code_table and code_table_size when we don't
+ // need FLAG_wasm_jit_to_native anymore. Keep export_wrappers.
+ int code_table_size = static_cast<int>(module_->functions.size());
+ int export_wrappers_size = static_cast<int>(module_->num_exported_functions);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
+ Handle<FixedArray> export_wrappers =
+ factory->NewFixedArray(static_cast<int>(export_wrappers_size), TENURED);
+ // Initialize the code table.
+ for (int i = 0, e = code_table->length(); i < e; ++i) {
+ code_table->set(i, *init_builtin);
+ }
+
+ for (int i = 0, e = export_wrappers->length(); i < e; ++i) {
+ export_wrappers->set(i, *init_builtin);
+ }
+ auto env = CreateDefaultModuleEnv(isolate_, module_, init_builtin);
+
// Create the compiled module object and populate with compiled functions
// and information needed at instantiation time. This object needs to be
// serializable. Instantiation may occur off a deserialized version of this
// object.
Handle<WasmCompiledModule> compiled_module = NewCompiledModule(
- isolate_, shared, code_table, export_wrappers, env.get());
+ isolate_, shared->module(), code_table, export_wrappers, env.get());
+ native_module_ = compiled_module->GetNativeModule();
+ compiled_module->OnWasmModuleDecodingComplete(shared);
+ if (lazy_compile && FLAG_wasm_jit_to_native) {
+ compiled_module->set_lazy_compile_data(isolate_->factory()->NewFixedArray(
+ static_cast<int>(module_->functions.size()), TENURED));
+ }
+
+ if (!lazy_compile) {
+ size_t funcs_to_compile =
+ module_->functions.size() - module_->num_imported_functions;
+ bool compile_parallel =
+ !FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
+ funcs_to_compile > 1 &&
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() > 0;
+ // Avoid a race condition by collecting results into a second vector.
+ std::vector<Handle<Code>> results(
+ FLAG_wasm_jit_to_native ? 0 : env->module->functions.size());
+
+ if (compile_parallel) {
+ CompileInParallel(wire_bytes, env.get(), results, thrower);
+ } else {
+ CompileSequentially(wire_bytes, env.get(), results, thrower);
+ }
+ if (thrower->error()) return {};
+
+ if (!FLAG_wasm_jit_to_native) {
+ // At this point, compilation has completed. Update the code table.
+ for (size_t i =
+ module_->num_imported_functions + FLAG_skip_compiling_wasm_funcs;
+ i < results.size(); ++i) {
+ Code* code = *results[i];
+ code_table->set(static_cast<int>(i), code);
+ RecordStats(code, counters());
+ }
+ } else {
+ RecordStats(native_module_, counters());
+ }
+ } else {
+ if (module_->is_wasm()) {
+ // Validate wasm modules for lazy compilation. Don't validate asm.js
+ // modules, they are valid by construction (otherwise a CHECK will fail
+ // during lazy compilation).
+ // TODO(clemensh): According to the spec, we can actually skip validation
+ // at module creation time, and return a function that always traps at
+ // (lazy) compilation time.
+ ValidateSequentially(wire_bytes, env.get(), thrower);
+ }
+ if (FLAG_wasm_jit_to_native) {
+ native_module_->SetLazyBuiltin(BUILTIN_CODE(isolate_, WasmCompileLazy));
+ }
+ }
+ if (thrower->error()) return {};
+
+ // Compile JS->wasm wrappers for exported functions.
+ CompileJsToWasmWrappers(isolate_, compiled_module, counters());
+
+ Handle<WasmModuleObject> result =
+ WasmModuleObject::New(isolate_, compiled_module);
// If we created a wasm script, finish it now and make it public to the
// debugger.
if (asm_js_script.is_null()) {
+ // Close the CodeSpaceMemoryModificationScope before calling into the
+ // debugger.
+ modification_scope.reset();
script->set_wasm_compiled_module(*compiled_module);
isolate_->debug()->OnAfterCompile(script);
}
- // Compile JS->wasm wrappers for exported functions.
- JSToWasmWrapperCache js_to_wasm_cache;
- int wrapper_index = 0;
- for (auto exp : module_->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
- isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
- Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, wasm_code, exp.index);
- export_wrappers->set(wrapper_index, *wrapper_code);
- RecordStats(*wrapper_code, counters());
- ++wrapper_index;
- }
- return WasmModuleObject::New(isolate_, compiled_module);
+ return result;
}
InstanceBuilder::InstanceBuilder(
@@ -1621,6 +2145,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
SanitizeImports();
if (thrower_->error()) return {};
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ // Use base::Optional to be able to close the scope before executing the start
+ // function.
+ base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
+ base::in_place_t(), isolate_->heap());
// From here on, we expect the build pipeline to run without exiting to JS.
// Exception is when we run the startup function.
DisallowJavascriptExecution no_js(isolate_);
@@ -1633,6 +2162,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Reuse the compiled module (if no owner), otherwise clone.
//--------------------------------------------------------------------------
+ // TODO(mtrofin): remove code_table and old_code_table
+ // when FLAG_wasm_jit_to_native is not needed
Handle<FixedArray> code_table;
Handle<FixedArray> wrapper_table;
// We keep around a copy of the old code table, because we'll be replacing
@@ -1640,6 +2171,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// able to relocate.
Handle<FixedArray> old_code_table;
MaybeHandle<WasmInstanceObject> owner;
+ // native_module is the one we're building now, old_module
+ // is the one we clone from. They point to the same place if
+ // we don't need to clone.
+ wasm::NativeModule* native_module = nullptr;
+ wasm::NativeModule* old_module = nullptr;
TRACE("Starting new module instantiation\n");
{
@@ -1665,41 +2201,49 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// the owner + original state used for cloning and patching
// won't be mutated by possible finalizer runs.
DCHECK(!owner.is_null());
- TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = original->code_table();
- compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- code_table = compiled_module_->code_table();
- wrapper_table = compiled_module_->export_wrappers();
- // Avoid creating too many handles in the outer scope.
- HandleScope scope(isolate_);
-
- // Clone the code for wasm functions and exports.
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> orig_code(Code::cast(code_table->get(i)), isolate_);
- switch (orig_code->kind()) {
- case Code::WASM_TO_JS_FUNCTION:
- // Imports will be overwritten with newly compiled wrappers.
- break;
- case Code::BUILTIN:
- DCHECK_EQ(Builtins::kWasmCompileLazy, orig_code->builtin_index());
- // If this code object has deoptimization data, then we need a
- // unique copy to attach updated deoptimization data.
- if (orig_code->deoptimization_data()->length() > 0) {
+ if (FLAG_wasm_jit_to_native) {
+ TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
+ compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+ native_module = compiled_module_->GetNativeModule();
+ wrapper_table = compiled_module_->export_wrappers();
+ } else {
+ TRACE("Cloning from %d\n", original->instance_id());
+ old_code_table = original->code_table();
+ compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+ code_table = compiled_module_->code_table();
+ wrapper_table = compiled_module_->export_wrappers();
+ // Avoid creating too many handles in the outer scope.
+ HandleScope scope(isolate_);
+
+ // Clone the code for wasm functions and exports.
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> orig_code(Code::cast(code_table->get(i)), isolate_);
+ switch (orig_code->kind()) {
+ case Code::WASM_TO_JS_FUNCTION:
+ case Code::WASM_TO_WASM_FUNCTION:
+ // Imports will be overwritten with newly compiled wrappers.
+ break;
+ case Code::BUILTIN:
+ DCHECK_EQ(Builtins::kWasmCompileLazy, orig_code->builtin_index());
+ // If this code object has deoptimization data, then we need a
+ // unique copy to attach updated deoptimization data.
+ if (orig_code->deoptimization_data()->length() > 0) {
+ Handle<Code> code = factory->CopyCode(orig_code);
+ AttachWasmFunctionInfo(isolate_, code,
+ Handle<WasmInstanceObject>(), i);
+ code_table->set(i, *code);
+ }
+ break;
+ case Code::WASM_FUNCTION: {
Handle<Code> code = factory->CopyCode(orig_code);
- Handle<FixedArray> deopt_data =
- factory->NewFixedArray(2, TENURED);
- deopt_data->set(1, Smi::FromInt(i));
- code->set_deoptimization_data(*deopt_data);
+ AttachWasmFunctionInfo(isolate_, code,
+ Handle<WasmInstanceObject>(), i);
code_table->set(i, *code);
+ break;
}
- break;
- case Code::WASM_FUNCTION: {
- Handle<Code> code = factory->CopyCode(orig_code);
- code_table->set(i, *code);
- break;
+ default:
+ UNREACHABLE();
}
- default:
- UNREACHABLE();
}
}
for (int i = 0; i < wrapper_table->length(); ++i) {
@@ -1708,22 +2252,34 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<Code> code = factory->CopyCode(orig_code);
wrapper_table->set(i, *code);
}
-
- RecordStats(code_table, counters());
+ if (FLAG_wasm_jit_to_native) {
+ RecordStats(native_module, counters());
+ } else {
+ RecordStats(code_table, counters());
+ }
RecordStats(wrapper_table, counters());
} else {
// There was no owner, so we can reuse the original.
compiled_module_ = original;
- old_code_table = factory->CopyFixedArray(compiled_module_->code_table());
- code_table = compiled_module_->code_table();
wrapper_table = compiled_module_->export_wrappers();
- TRACE("Reusing existing instance %d\n", compiled_module_->instance_id());
+ if (FLAG_wasm_jit_to_native) {
+ old_module = compiled_module_->GetNativeModule();
+ native_module = old_module;
+ TRACE("Reusing existing instance %zu\n",
+ compiled_module_->GetNativeModule()->instance_id);
+ } else {
+ old_code_table =
+ factory->CopyFixedArray(compiled_module_->code_table());
+ code_table = compiled_module_->code_table();
+ TRACE("Reusing existing instance %d\n",
+ compiled_module_->instance_id());
+ }
}
compiled_module_->set_native_context(isolate_->native_context());
}
//--------------------------------------------------------------------------
- // Allocate the instance object.
+ // Create the WebAssembly.Instance object.
//--------------------------------------------------------------------------
Zone instantiation_zone(isolate_->allocator(), ZONE_NAME);
CodeSpecialization code_specialization(isolate_, &instantiation_zone);
@@ -1733,6 +2289,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
+ WasmContext* wasm_context = instance->wasm_context()->get();
MaybeHandle<JSArrayBuffer> old_globals;
uint32_t globals_size = module_->globals_size;
if (globals_size > 0) {
@@ -1744,22 +2301,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
- Address old_globals_start = compiled_module_->GetGlobalsStartOrNull();
- Address new_globals_start =
- static_cast<Address>(global_buffer->backing_store());
- code_specialization.RelocateGlobals(old_globals_start, new_globals_start);
- // The address of the backing buffer for the golbals is in native memory
- // and, thus, not moving. We need it saved for
- // serialization/deserialization purposes - so that the other end
- // understands how to relocate the references. We still need to save the
- // JSArrayBuffer on the instance, to keep it all alive.
- WasmCompiledModule::SetGlobalsStartAddressFrom(factory, compiled_module_,
- global_buffer);
+ wasm_context->globals_start =
+ reinterpret_cast<byte*>(global_buffer->backing_store());
instance->set_globals_buffer(*global_buffer);
}
//--------------------------------------------------------------------------
- // Prepare for initialization of function tables.
+ // Reserve the metadata for indirect function tables.
//--------------------------------------------------------------------------
int function_table_count = static_cast<int>(module_->function_tables.size());
table_instances_.reserve(module_->function_tables.size());
@@ -1781,13 +2329,14 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
InitGlobals();
//--------------------------------------------------------------------------
- // Set up the indirect function tables for the new instance.
+ // Initialize the indirect tables.
//--------------------------------------------------------------------------
- if (function_table_count > 0)
+ if (function_table_count > 0) {
InitializeTables(instance, &code_specialization);
+ }
//--------------------------------------------------------------------------
- // Set up the memory for the new instance.
+ // Allocate the memory array buffer.
//--------------------------------------------------------------------------
uint32_t initial_pages = module_->initial_pages;
(module_->is_wasm() ? counters()->wasm_wasm_min_mem_pages_count()
@@ -1795,18 +2344,45 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
->AddSample(initial_pages);
if (!memory_.is_null()) {
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
// Set externally passed ArrayBuffer non neuterable.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_neuterable(false);
DCHECK_IMPLIES(trap_handler::UseTrapHandler(),
module_->is_asm_js() || memory->has_guard_region());
} else if (initial_pages > 0) {
+ // Allocate memory if the initial size is more than 0 pages.
memory_ = AllocateMemory(initial_pages);
if (memory_.is_null()) return {}; // failed to allocate memory
}
//--------------------------------------------------------------------------
+ // Create the WebAssembly.Memory object.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ if (!instance->has_memory_object()) {
+ // No memory object exists. Create one.
+ Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
+ isolate_, memory_,
+ module_->maximum_pages != 0 ? module_->maximum_pages : -1);
+ instance->set_memory_object(*memory_object);
+ }
+
+ // Add the instance object to the list of instances for this memory.
+ Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
+ WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
+
+ if (!memory_.is_null()) {
+ // Double-check the {memory} array buffer matches the context.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ uint32_t mem_size = 0;
+ CHECK(memory->byte_length()->ToUint32(&mem_size));
+ CHECK_EQ(wasm_context->mem_size, mem_size);
+ CHECK_EQ(wasm_context->mem_start, memory->backing_store());
+ }
+ }
+
+ //--------------------------------------------------------------------------
// Check that indirect function table segments are within bounds.
//--------------------------------------------------------------------------
for (WasmTableInit& table_init : module_->table_inits) {
@@ -1826,78 +2402,44 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
for (WasmDataSegment& seg : module_->data_segments) {
uint32_t base = EvalUint32InitExpr(seg.dest_addr);
- uint32_t mem_size = 0;
- if (!memory_.is_null()) {
- CHECK(memory_.ToHandleChecked()->byte_length()->ToUint32(&mem_size));
- }
- if (!in_bounds(base, seg.source.length(), mem_size)) {
+ if (!in_bounds(base, seg.source.length(), wasm_context->mem_size)) {
thrower_->LinkError("data segment is out of bounds");
return {};
}
}
- //--------------------------------------------------------------------------
- // Initialize memory.
- //--------------------------------------------------------------------------
- Address mem_start = nullptr;
- uint32_t mem_size = 0;
- if (!memory_.is_null()) {
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- mem_start = static_cast<Address>(memory->backing_store());
- CHECK(memory->byte_length()->ToUint32(&mem_size));
- LoadDataSegments(mem_start, mem_size);
- // Just like with globals, we need to keep both the JSArrayBuffer
- // and save the start pointer.
- instance->set_memory_buffer(*memory);
- }
-
- //--------------------------------------------------------------------------
- // Create a memory object to have a WasmContext.
- //--------------------------------------------------------------------------
- if (module_->has_memory) {
- if (!instance->has_memory_object()) {
- Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
- isolate_,
- instance->has_memory_buffer() ? handle(instance->memory_buffer())
- : Handle<JSArrayBuffer>::null(),
- module_->maximum_pages != 0 ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- }
-
- code_specialization.RelocateWasmContextReferences(
- reinterpret_cast<Address>(instance->wasm_context()));
- // Store the wasm_context address in the JSToWasmWrapperCache so that it can
- // be used to compile JSToWasmWrappers.
- js_to_wasm_cache_.SetContextAddress(
- reinterpret_cast<Address>(instance->wasm_context()));
- }
+ // Set the WasmContext address in wrappers.
+ // TODO(wasm): the wasm context should only appear as a constant in wrappers;
+ // this code specialization is applied to the whole instance.
+ Address wasm_context_address = reinterpret_cast<Address>(wasm_context);
+ code_specialization.RelocateWasmContextReferences(wasm_context_address);
+ js_to_wasm_cache_.SetContextAddress(wasm_context_address);
+
+ if (!FLAG_wasm_jit_to_native) {
+ //--------------------------------------------------------------------------
+ // Set up the runtime support for the new instance.
+ //--------------------------------------------------------------------------
+ Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
- //--------------------------------------------------------------------------
- // Set up the runtime support for the new instance.
- //--------------------------------------------------------------------------
- Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
-
- for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs,
- num_functions = static_cast<int>(module_->functions.size());
- i < num_functions; ++i) {
- Handle<Code> code = handle(Code::cast(code_table->get(i)), isolate_);
- if (code->kind() == Code::WASM_FUNCTION) {
- Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
- deopt_data->set(0, *weak_link);
- deopt_data->set(1, Smi::FromInt(i));
- code->set_deoptimization_data(*deopt_data);
- continue;
- }
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
- int deopt_len = code->deoptimization_data()->length();
- if (deopt_len == 0) continue;
- DCHECK_LE(2, deopt_len);
- DCHECK_EQ(i, Smi::ToInt(code->deoptimization_data()->get(1)));
- code->deoptimization_data()->set(0, *weak_link);
- // Entries [2, deopt_len) encode information about table exports of this
- // function. This is rebuilt in {LoadTableSegments}, so reset it here.
- for (int i = 2; i < deopt_len; ++i) {
- code->deoptimization_data()->set_undefined(isolate_, i);
+ for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs,
+ num_functions = static_cast<int>(module_->functions.size());
+ i < num_functions; ++i) {
+ Handle<Code> code = handle(Code::cast(code_table->get(i)), isolate_);
+ if (code->kind() == Code::WASM_FUNCTION) {
+ AttachWasmFunctionInfo(isolate_, code, weak_link, i);
+ continue;
+ }
+ DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ int deopt_len = code->deoptimization_data()->length();
+ if (deopt_len == 0) continue;
+ DCHECK_LE(2, deopt_len);
+ DCHECK_EQ(i, Smi::ToInt(code->deoptimization_data()->get(1)));
+ code->deoptimization_data()->set(0, *weak_link);
+ // Entries [2, deopt_len) encode information about table exports of this
+ // function. This is rebuilt in {LoadTableSegments}, so reset it here.
+ for (int i = 2; i < deopt_len; ++i) {
+ code->deoptimization_data()->set_undefined(isolate_, i);
+ }
}
}
@@ -1908,67 +2450,58 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (thrower_->error()) return {};
//--------------------------------------------------------------------------
- // Add instance to Memory object
+ // Initialize the indirect function tables.
//--------------------------------------------------------------------------
- if (instance->has_memory_object()) {
- Handle<WasmMemoryObject> memory(instance->memory_object(), isolate_);
- WasmMemoryObject::AddInstance(isolate_, memory, instance);
+ if (function_table_count > 0) {
+ LoadTableSegments(code_table, instance);
}
//--------------------------------------------------------------------------
- // Initialize the indirect function tables.
+ // Initialize the memory by loading data segments.
//--------------------------------------------------------------------------
- if (function_table_count > 0) LoadTableSegments(code_table, instance);
+ if (module_->data_segments.size() > 0) {
+ LoadDataSegments(wasm_context);
+ }
// Patch all code with the relocations registered in code_specialization.
code_specialization.RelocateDirectCalls(instance);
code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
- FlushICache(isolate_, code_table);
+ if (FLAG_wasm_jit_to_native) {
+ FlushICache(isolate_, native_module);
+ } else {
+ FlushICache(isolate_, code_table);
+ }
FlushICache(isolate_, wrapper_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
//--------------------------------------------------------------------------
if (trap_handler::UseTrapHandler()) {
- UnpackAndRegisterProtectedInstructions(isolate_, code_table);
+ if (FLAG_wasm_jit_to_native) {
+ UnpackAndRegisterProtectedInstructions(isolate_, native_module);
+ } else {
+ UnpackAndRegisterProtectedInstructionsGC(isolate_, code_table);
+ }
}
//--------------------------------------------------------------------------
- // Set up and link the new instance.
+ // Insert the compiled module into the weak list of compiled modules.
//--------------------------------------------------------------------------
{
Handle<Object> global_handle =
isolate_->global_handles()->Create(*instance);
- Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module_);
Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
- MaybeHandle<WeakCell> link_to_original;
- MaybeHandle<WasmCompiledModule> original;
if (!owner.is_null()) {
- // prepare the data needed for publishing in a chain, but don't link
- // just yet, because
- // we want all the publishing to happen free from GC interruptions, and
- // so we do it in
- // one GC-free scope afterwards.
- original = handle(owner.ToHandleChecked()->compiled_module());
- link_to_original = factory->NewWeakCell(original.ToHandleChecked());
- }
- // Publish the new instance to the instances chain.
- {
+ // Publish the new instance to the instances chain.
DisallowHeapAllocation no_gc;
- if (!link_to_original.is_null()) {
- compiled_module_->set_weak_next_instance(
- link_to_original.ToHandleChecked());
- original.ToHandleChecked()->set_weak_prev_instance(link_to_clone);
- compiled_module_->set_weak_wasm_module(
- original.ToHandleChecked()->weak_wasm_module());
- }
- module_object_->set_compiled_module(*compiled_module_);
- compiled_module_->set_weak_owning_instance(link_to_owning_instance);
- GlobalHandles::MakeWeak(
- global_handle.location(), global_handle.location(),
- instance_finalizer_callback_, v8::WeakCallbackType::kFinalizer);
+ compiled_module_->InsertInChain(*module_object_);
}
+ module_object_->set_compiled_module(*compiled_module_);
+ compiled_module_->set_weak_owning_instance(link_to_owning_instance);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ instance_finalizer_callback_,
+ v8::WeakCallbackType::kFinalizer);
}
//--------------------------------------------------------------------------
@@ -1993,22 +2526,24 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Run the start function if one was specified.
+ // Execute the start function if one was specified.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
HandleScope scope(isolate_);
int start_index = module_->start_function_index;
- Handle<Code> startup_code = EnsureExportedLazyDeoptData(
- isolate_, instance, code_table, start_index);
+ WasmCodeWrapper startup_code = EnsureExportedLazyDeoptData(
+ isolate_, instance, code_table, native_module, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
isolate_, module_, startup_code, start_index);
Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
- RecordStats(*startup_code, counters());
+ RecordStats(startup_code, counters());
// Call the JS function.
Handle<Object> undefined = factory->undefined_value();
+ // Close the CodeSpaceMemoryModificationScope to execute the start function.
+ modification_scope.reset();
{
// We're OK with JS execution here. The instance is fully setup.
AllowJavascriptExecution allow_js(isolate_);
@@ -2027,7 +2562,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
DCHECK(!isolate_->has_pending_exception());
- TRACE("Finishing instance %d\n", compiled_module_->instance_id());
+ if (FLAG_wasm_jit_to_native) {
+ TRACE("Successfully built instance %zu\n",
+ compiled_module_->GetNativeModule()->instance_id);
+ } else {
+ TRACE("Finishing instance %d\n", compiled_module_->instance_id());
+ }
TRACE_CHAIN(module_object_->compiled_module());
return instance;
}
@@ -2117,7 +2657,7 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
}
// Load data segments into the memory.
-void InstanceBuilder::LoadDataSegments(Address mem_addr, size_t mem_size) {
+void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
isolate_);
for (const WasmDataSegment& segment : module_->data_segments) {
@@ -2125,9 +2665,8 @@ void InstanceBuilder::LoadDataSegments(Address mem_addr, size_t mem_size) {
// Segments of size == 0 are just nops.
if (source_size == 0) continue;
uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
- DCHECK(
- in_bounds(dest_offset, source_size, static_cast<uint32_t>(mem_size)));
- byte* dest = mem_addr + dest_offset;
+ DCHECK(in_bounds(dest_offset, source_size, wasm_context->mem_size));
+ byte* dest = wasm_context->mem_start + dest_offset;
const byte* src = reinterpret_cast<const byte*>(
module_bytes->GetCharsAddress() + segment.source.offset());
memcpy(dest, src, source_size);
@@ -2137,8 +2676,9 @@ void InstanceBuilder::LoadDataSegments(Address mem_addr, size_t mem_size) {
void InstanceBuilder::WriteGlobalValue(WasmGlobal& global,
Handle<Object> value) {
double num = value->Number();
- TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
- WasmOpcodes::TypeName(global.type));
+ TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
+ reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset,
+ num, WasmOpcodes::TypeName(global.type));
switch (global.type) {
case kWasmI32:
*GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
@@ -2245,18 +2785,20 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
module_name, import_name);
return -1;
}
-
- Handle<Code> import_code = UnwrapExportOrCompileImportWrapper(
- isolate_, index, module_->functions[import.index].sig,
- Handle<JSReceiver>::cast(value), module_->origin(),
- &imported_wasm_instances, js_imports_table, instance);
+ WasmCodeWrapper import_code = UnwrapExportOrCompileImportWrapper(
+ isolate_, module_->functions[import.index].sig,
+ Handle<JSReceiver>::cast(value), num_imported_functions,
+ module_->origin(), &imported_wasm_instances, js_imports_table,
+ instance);
if (import_code.is_null()) {
ReportLinkError("imported function does not match the expected type",
index, module_name, import_name);
return -1;
}
- code_table->set(num_imported_functions, *import_code);
- RecordStats(*import_code, counters());
+ if (!FLAG_wasm_jit_to_native) {
+ code_table->set(num_imported_functions, *import_code.GetCode());
+ }
+ RecordStats(import_code, counters());
num_imported_functions++;
break;
}
@@ -2270,6 +2812,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
module_->function_tables[num_imported_tables];
TableInstance& table_instance = table_instances_[num_imported_tables];
table_instance.table_object = Handle<WasmTableObject>::cast(value);
+ instance->set_table_object(*table_instance.table_object);
table_instance.js_wrappers = Handle<FixedArray>(
table_instance.table_object->functions(), isolate_);
@@ -2313,17 +2856,55 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
// that are already in the table.
for (int i = 0; i < table_size; ++i) {
Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+ // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
+ // insert in the local table a wrapper from the other module, and add
+ // a reference to the owning instance of the other module.
if (!val->IsJSFunction()) continue;
- WasmFunction* function = GetWasmFunctionForExport(isolate_, val);
- if (function == nullptr) {
+ if (!WasmExportedFunction::IsWasmExportedFunction(*val)) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
index, i);
return -1;
}
- int sig_index = table.map.FindOrInsert(function->sig);
- table_instance.signature_table->set(i, Smi::FromInt(sig_index));
- table_instance.function_table->set(
- i, *UnwrapExportWrapper(Handle<JSFunction>::cast(val)));
+ // Look up the signature's canonical id. If there is no canonical
+ // id, then the signature does not appear at all in this module,
+ // so putting {-1} in the table will cause checks to always fail.
+ auto target = Handle<WasmExportedFunction>::cast(val);
+ if (!FLAG_wasm_jit_to_native) {
+ FunctionSig* sig = nullptr;
+ Handle<Code> code =
+ MakeWasmToWasmWrapper(isolate_, target, nullptr, &sig,
+ &imported_wasm_instances, instance, 0)
+ .GetCode();
+ int sig_index = module_->signature_map.Find(sig);
+ table_instance.signature_table->set(i, Smi::FromInt(sig_index));
+ table_instance.function_table->set(i, *code);
+ } else {
+ const wasm::WasmCode* exported_code =
+ target->GetWasmCode().GetWasmCode();
+ wasm::NativeModule* exporting_module = exported_code->owner();
+ Handle<WasmInstanceObject> imported_instance =
+ handle(target->instance());
+ imported_wasm_instances.Set(imported_instance, imported_instance);
+ FunctionSig* sig = imported_instance->module()
+ ->functions[exported_code->index()]
+ .sig;
+ wasm::WasmCode* wrapper_code =
+ exporting_module->GetExportedWrapper(exported_code->index());
+ if (wrapper_code == nullptr) {
+ WasmContext* other_context =
+ imported_instance->wasm_context()->get();
+ Handle<Code> wrapper = compiler::CompileWasmToWasmWrapper(
+ isolate_, target->GetWasmCode(), sig,
+ reinterpret_cast<Address>(other_context));
+ wrapper_code = exporting_module->AddExportedWrapper(
+ wrapper, exported_code->index());
+ }
+ int sig_index = module_->signature_map.Find(sig);
+ table_instance.signature_table->set(i, Smi::FromInt(sig_index));
+ Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
+ wrapper_code->instructions().start(), TENURED);
+ table_instance.function_table->set(i, *foreign_holder);
+ }
}
num_imported_tables++;
@@ -2642,7 +3223,7 @@ void InstanceBuilder::ProcessExports(
}
v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate_, export_to, name, &desc, Object::THROW_ON_ERROR);
+ isolate_, export_to, name, &desc, kThrowOnError);
if (!status.IsJust()) {
TruncatedUserString<> trunc_name(name->GetCharVector<uint8_t>());
thrower_->LinkError("export of %.*s failed.", trunc_name.length(),
@@ -2653,8 +3234,8 @@ void InstanceBuilder::ProcessExports(
DCHECK_EQ(export_index, weak_exported_functions->length());
if (module_->is_wasm()) {
- v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
- exports_object, FROZEN, Object::DONT_THROW);
+ v8::Maybe<bool> success =
+ JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
DCHECK(success.FromMaybe(false));
USE(success);
}
@@ -2663,28 +3244,56 @@ void InstanceBuilder::ProcessExports(
void InstanceBuilder::InitializeTables(
Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization) {
- int function_table_count = static_cast<int>(module_->function_tables.size());
- Handle<FixedArray> new_function_tables =
- isolate_->factory()->NewFixedArray(function_table_count, TENURED);
- Handle<FixedArray> new_signature_tables =
- isolate_->factory()->NewFixedArray(function_table_count, TENURED);
- Handle<FixedArray> old_function_tables = compiled_module_->function_tables();
- Handle<FixedArray> old_signature_tables =
- compiled_module_->signature_tables();
+ size_t function_table_count = module_->function_tables.size();
+ std::vector<GlobalHandleAddress> new_function_tables(function_table_count);
+ std::vector<GlobalHandleAddress> new_signature_tables(function_table_count);
+
+ wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
+ std::vector<GlobalHandleAddress> empty;
+ std::vector<GlobalHandleAddress>& old_function_tables =
+ FLAG_wasm_jit_to_native ? native_module->function_tables() : empty;
+ std::vector<GlobalHandleAddress>& old_signature_tables =
+ FLAG_wasm_jit_to_native ? native_module->signature_tables() : empty;
+
+ Handle<FixedArray> old_function_tables_gc =
+ FLAG_wasm_jit_to_native ? Handle<FixedArray>::null()
+ : compiled_module_->function_tables();
+ Handle<FixedArray> old_signature_tables_gc =
+ FLAG_wasm_jit_to_native ? Handle<FixedArray>::null()
+ : compiled_module_->signature_tables();
+
+ // function_table_count is 0 or 1, so we just create these objects even if not
+ // needed for native wasm.
+ // TODO(mtrofin): remove the {..}_gc variables when we don't need
+ // FLAG_wasm_jit_to_native
+ Handle<FixedArray> new_function_tables_gc =
+ isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
+ TENURED);
+ Handle<FixedArray> new_signature_tables_gc =
+ isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
+ TENURED);
// These go on the instance.
Handle<FixedArray> rooted_function_tables =
- isolate_->factory()->NewFixedArray(function_table_count, TENURED);
+ isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
+ TENURED);
Handle<FixedArray> rooted_signature_tables =
- isolate_->factory()->NewFixedArray(function_table_count, TENURED);
+ isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
+ TENURED);
instance->set_function_tables(*rooted_function_tables);
instance->set_signature_tables(*rooted_signature_tables);
- DCHECK_EQ(old_function_tables->length(), new_function_tables->length());
- DCHECK_EQ(old_signature_tables->length(), new_signature_tables->length());
-
- for (int index = 0; index < function_table_count; ++index) {
+ if (FLAG_wasm_jit_to_native) {
+ DCHECK_EQ(old_function_tables.size(), new_function_tables.size());
+ DCHECK_EQ(old_signature_tables.size(), new_signature_tables.size());
+ } else {
+ DCHECK_EQ(old_function_tables_gc->length(),
+ new_function_tables_gc->length());
+ DCHECK_EQ(old_signature_tables_gc->length(),
+ new_signature_tables_gc->length());
+ }
+ for (size_t index = 0; index < function_table_count; ++index) {
WasmIndirectFunctionTable& table = module_->function_tables[index];
TableInstance& table_instance = table_instances_[index];
int table_size = static_cast<int>(table.initial_size);
@@ -2734,31 +3343,45 @@ void InstanceBuilder::InitializeTables(
GlobalHandleAddress new_func_table_addr = global_func_table.address();
GlobalHandleAddress new_sig_table_addr = global_sig_table.address();
- WasmCompiledModule::SetTableValue(isolate_, new_function_tables, int_index,
- new_func_table_addr);
- WasmCompiledModule::SetTableValue(isolate_, new_signature_tables, int_index,
- new_sig_table_addr);
-
- GlobalHandleAddress old_func_table_addr =
- WasmCompiledModule::GetTableValue(*old_function_tables, int_index);
- GlobalHandleAddress old_sig_table_addr =
- WasmCompiledModule::GetTableValue(*old_signature_tables, int_index);
+ GlobalHandleAddress old_func_table_addr;
+ GlobalHandleAddress old_sig_table_addr;
+ if (!FLAG_wasm_jit_to_native) {
+ WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
+ int_index, new_func_table_addr);
+ WasmCompiledModule::SetTableValue(isolate_, new_signature_tables_gc,
+ int_index, new_sig_table_addr);
+
+ old_func_table_addr =
+ WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
+ old_sig_table_addr = WasmCompiledModule::GetTableValue(
+ *old_signature_tables_gc, int_index);
+ } else {
+ new_function_tables[int_index] = new_func_table_addr;
+ new_signature_tables[int_index] = new_sig_table_addr;
+ old_func_table_addr = old_function_tables[int_index];
+ old_sig_table_addr = old_signature_tables[int_index];
+ }
code_specialization->RelocatePointer(old_func_table_addr,
new_func_table_addr);
code_specialization->RelocatePointer(old_sig_table_addr,
new_sig_table_addr);
}
- compiled_module_->set_function_tables(new_function_tables);
- compiled_module_->set_signature_tables(new_signature_tables);
+ if (FLAG_wasm_jit_to_native) {
+ native_module->function_tables() = new_function_tables;
+ native_module->signature_tables() = new_signature_tables;
+ } else {
+ compiled_module_->set_function_tables(new_function_tables_gc);
+ compiled_module_->set_signature_tables(new_signature_tables_gc);
+ }
}
void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Handle<WasmInstanceObject> instance) {
+ wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
int function_table_count = static_cast<int>(module_->function_tables.size());
for (int index = 0; index < function_table_count; ++index) {
- WasmIndirectFunctionTable& table = module_->function_tables[index];
TableInstance& table_instance = table_instances_[index];
Handle<FixedArray> all_dispatch_tables;
@@ -2774,12 +3397,20 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
if (compile_lazy(module_)) {
for (auto& table_init : module_->table_inits) {
for (uint32_t func_index : table_init.entries) {
- Code* code =
- Code::cast(code_table->get(static_cast<int>(func_index)));
- // Only increase the counter for lazy compile builtins (it's not
- // needed otherwise).
- if (code->is_wasm_code()) continue;
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ if (!FLAG_wasm_jit_to_native) {
+ Code* code =
+ Code::cast(code_table->get(static_cast<int>(func_index)));
+ // Only increase the counter for lazy compile builtins (it's not
+ // needed otherwise).
+ if (code->is_wasm_code()) continue;
+ DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ } else {
+ const wasm::WasmCode* code = native_module->GetCode(func_index);
+ // Only increase the counter for lazy compile builtins (it's not
+ // needed otherwise).
+ if (code->kind() == wasm::WasmCode::Function) continue;
+ DCHECK_EQ(wasm::WasmCode::LazyStub, code->kind());
+ }
++num_table_exports[func_index];
}
}
@@ -2796,15 +3427,22 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
- int32_t sig_index = table.map.Find(function->sig);
- DCHECK_GE(sig_index, 0);
+ uint32_t sig_index = module_->signature_ids[function->sig_index];
table_instance.signature_table->set(table_index,
Smi::FromInt(sig_index));
- Handle<Code> wasm_code = EnsureTableExportLazyDeoptData(
- isolate_, instance, code_table, func_index,
- table_instance.function_table, table_index, num_table_exports);
- table_instance.function_table->set(table_index, *wasm_code);
-
+ WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
+ isolate_, instance, code_table, native_module, func_index,
+ table_instance.function_table, table_index, &num_table_exports);
+ Handle<Object> value_to_update_with;
+ if (!wasm_code.IsCodeObject()) {
+ Handle<Foreign> as_foreign = isolate_->factory()->NewForeign(
+ wasm_code.GetWasmCode()->instructions().start(), TENURED);
+ table_instance.function_table->set(table_index, *as_foreign);
+ value_to_update_with = as_foreign;
+ } else {
+ table_instance.function_table->set(table_index, *wasm_code.GetCode());
+ value_to_update_with = wasm_code.GetCode();
+ }
if (!all_dispatch_tables.is_null()) {
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
@@ -2831,9 +3469,30 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
}
table_instance.js_wrappers->set(table_index,
*js_wrappers_[func_index]);
-
+ // When updating dispatch tables, we need to provide a wasm-to-wasm
+ // wrapper for wasm_code - unless wasm_code is already a wrapper. If
+ // it's a wasm-to-js wrapper, we don't need to construct a
+ // wasm-to-wasm wrapper because there's no context switching required.
+ // The remaining case is that it's a wasm-to-wasm wrapper, in which
+ // case it's already doing "the right thing", and wrapping it again
+ // would be redundant.
+ if (func_index >= module_->num_imported_functions) {
+ value_to_update_with = GetOrCreateIndirectCallWrapper(
+ isolate_, instance, wasm_code, func_index, function->sig);
+ } else {
+ if (wasm_code.IsCodeObject()) {
+ DCHECK(wasm_code.GetCode()->kind() == Code::WASM_TO_JS_FUNCTION ||
+ wasm_code.GetCode()->kind() ==
+ Code::WASM_TO_WASM_FUNCTION);
+ } else {
+ DCHECK(wasm_code.GetWasmCode()->kind() ==
+ WasmCode::WasmToJsWrapper ||
+ wasm_code.GetWasmCode()->kind() ==
+ WasmCode::WasmToWasmWrapper);
+ }
+ }
UpdateDispatchTables(isolate_, all_dispatch_tables, table_index,
- function, wasm_code);
+ function, value_to_update_with);
}
}
}
@@ -2866,6 +3525,10 @@ AsyncCompileJob::AsyncCompileJob(Isolate* isolate,
async_counters_(isolate->async_counters()),
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Platform* platform = V8::GetCurrentPlatform();
+ foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
+ background_task_runner_ = platform->GetBackgroundTaskRunner(v8_isolate);
// The handles for the context and promise must be deferred.
DeferredHandleScope deferred(isolate);
context_ = Handle<Context>(*context);
@@ -3007,11 +3670,7 @@ void AsyncCompileJob::StartForegroundTask() {
++num_pending_foreground_tasks_;
DCHECK_EQ(1, num_pending_foreground_tasks_);
- v8::Platform* platform = V8::GetCurrentPlatform();
- // TODO(ahaas): This is a CHECK to debug issue 764313.
- CHECK(platform);
- platform->CallOnForegroundThread(reinterpret_cast<v8::Isolate*>(isolate_),
- new CompileTask(this, true));
+ foreground_task_runner_->PostTask(base::make_unique<CompileTask>(this, true));
}
template <typename Step, typename... Args>
@@ -3021,8 +3680,8 @@ void AsyncCompileJob::DoSync(Args&&... args) {
}
void AsyncCompileJob::StartBackgroundTask() {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompileTask(this, false), v8::Platform::kShortRunningTask);
+ background_task_runner_->PostTask(
+ base::make_unique<CompileTask>(this, false));
}
void AsyncCompileJob::RestartBackgroundTasks() {
@@ -3111,26 +3770,30 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
Isolate* isolate = job_->isolate_;
-
Factory* factory = isolate->factory();
+
Handle<Code> illegal_builtin = BUILTIN_CODE(isolate, Illegal);
+ if (!FLAG_wasm_jit_to_native) {
+ // The {code_table} array contains import wrappers and functions (which
+ // are both included in {functions.size()}.
+ // The results of compilation will be written into it.
+ // Initialize {code_table_} with the illegal builtin. All call sites
+ // will be patched at instantiation.
+ int code_table_size = static_cast<int>(module_->functions.size());
+ job_->code_table_ = factory->NewFixedArray(code_table_size, TENURED);
+
+ for (int i = 0, e = module_->num_imported_functions; i < e; ++i) {
+ job_->code_table_->set(i, *illegal_builtin);
+ }
+ } else {
+ // Just makes it easier to deal with code that wants code_table, while
+ // we have FLAG_wasm_jit_to_native around.
+ job_->code_table_ = factory->NewFixedArray(0, TENURED);
+ }
+
job_->module_env_ =
CreateDefaultModuleEnv(isolate, module_, illegal_builtin);
- // The {code_table} array contains import wrappers and functions (which
- // are both included in {functions.size()}.
- // The results of compilation will be written into it.
- // Initialize {code_table_} with the illegal builtin. All call sites
- // will be patched at instantiation.
- int code_table_size = static_cast<int>(module_->functions.size());
- int export_wrapper_size = static_cast<int>(module_->num_exported_functions);
- job_->code_table_ = factory->NewFixedArray(code_table_size, TENURED);
- job_->export_wrappers_ =
- factory->NewFixedArray(export_wrapper_size, TENURED);
-
- for (int i = 0, e = module_->num_imported_functions; i < e; ++i) {
- job_->code_table_->set(i, *illegal_builtin);
- }
// Transfer ownership of the {WasmModule} to the {ModuleCompiler}, but
// keep a pointer.
Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
@@ -3141,8 +3804,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
centry_stub = Handle<Code>(*centry_stub, isolate);
job_->code_table_ = Handle<FixedArray>(*job_->code_table_, isolate);
- job_->export_wrappers_ =
- Handle<FixedArray>(*job_->export_wrappers_, isolate);
compiler::ModuleEnv* env = job_->module_env_.get();
ReopenHandles(isolate, env->function_code);
Handle<Code>* mut =
@@ -3152,12 +3813,32 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
job_->deferred_handles_.push_back(deferred.Detach());
}
- job_->compiler_.reset(new ModuleCompiler(isolate, module_, centry_stub));
+ DCHECK_LE(module_->num_imported_functions, module_->functions.size());
+ // Create the compiled module object and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of
+ // this object.
+ int export_wrapper_size = static_cast<int>(module_->num_exported_functions);
+ Handle<FixedArray> export_wrappers =
+ job_->isolate_->factory()->NewFixedArray(export_wrapper_size, TENURED);
+
+ job_->compiled_module_ =
+ NewCompiledModule(job_->isolate_, module_, job_->code_table_,
+ export_wrappers, job_->module_env_.get());
+
+ job_->compiler_.reset(
+ new ModuleCompiler(isolate, module_, centry_stub,
+ job_->compiled_module_->GetNativeModule()));
job_->compiler_->EnableThrottling();
- DCHECK_LE(module_->num_imported_functions, module_->functions.size());
+ {
+ DeferredHandleScope deferred(job_->isolate_);
+ job_->compiled_module_ = handle(*job_->compiled_module_, job_->isolate_);
+ job_->deferred_handles_.push_back(deferred.Detach());
+ }
size_t num_functions =
module_->functions.size() - module_->num_imported_functions;
+
if (num_functions == 0) {
// Degenerate case of an empty module.
job_->DoSync<FinishCompile>();
@@ -3171,7 +3852,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()
->NumberOfAvailableBackgroundThreads())));
-
if (start_compilation_) {
// TODO(ahaas): Try to remove the {start_compilation_} check when
// streaming decoding is done in the background. If
@@ -3216,6 +3896,11 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
}
void RunInForeground() override {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ // Use base::Optional to be able to close the scope before we resolve or
+ // reject the promise.
+ base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
+ base::in_place_t(), job_->isolate_->heap());
TRACE_COMPILE("(4a) Finishing compilation units...\n");
if (failed_) {
// The job failed already, no need to do more work.
@@ -3234,7 +3919,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
int func_index = -1;
- MaybeHandle<Code> result =
+ WasmCodeWrapper result =
job_->compiler_->FinishCompilationUnit(&thrower, &func_index);
if (thrower.error()) {
@@ -3249,7 +3934,9 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
break;
} else {
DCHECK_LE(0, func_index);
- job_->code_table_->set(func_index, *result.ToHandleChecked());
+ if (result.IsCodeObject()) {
+ job_->code_table_->set(func_index, *result.GetCode());
+ }
--job_->outstanding_units_;
}
@@ -3267,6 +3954,10 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
if (thrower.error()) {
// Make sure all compilation tasks stopped running.
job_->background_task_manager_.CancelAndWait();
+
+ // Close the CodeSpaceMemoryModificationScope before we reject the promise
+ // in AsyncCompileFailed. Promise::Reject calls directly into JavaScript.
+ modification_scope.reset();
return job_->AsyncCompileFailed(thrower);
}
if (job_->outstanding_units_ == 0) {
@@ -3287,12 +3978,16 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
class AsyncCompileJob::FinishCompile : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(5b) Finish compile...\n");
- // At this point, compilation has completed. Update the code table.
- for (int i = FLAG_skip_compiling_wasm_funcs,
- e = job_->code_table_->length();
- i < e; ++i) {
- Object* val = job_->code_table_->get(i);
- if (val->IsCode()) RecordStats(Code::cast(val), job_->counters());
+ if (FLAG_wasm_jit_to_native) {
+ RecordStats(job_->compiled_module_->GetNativeModule(), job_->counters());
+ } else {
+ // At this point, compilation has completed. Update the code table.
+ for (int i = FLAG_skip_compiling_wasm_funcs,
+ e = job_->code_table_->length();
+ i < e; ++i) {
+ Object* val = job_->code_table_->get(i);
+ if (val->IsCode()) RecordStats(Code::cast(val), job_->counters());
+ }
}
// Create heap objects for script and module bytes to be stored in the
@@ -3326,21 +4021,13 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
WasmSharedModuleData::New(job_->isolate_, module_wrapper,
Handle<SeqOneByteString>::cast(module_bytes),
script, asm_js_offset_table);
+ job_->compiled_module_->OnWasmModuleDecodingComplete(shared);
+ script->set_wasm_compiled_module(*job_->compiled_module_);
- // Create the compiled module object and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of
- // this object.
- job_->compiled_module_ =
- NewCompiledModule(job_->isolate_, shared, job_->code_table_,
- job_->export_wrappers_, job_->module_env_.get());
// Finish the wasm script now and make it public to the debugger.
- script->set_wasm_compiled_module(*job_->compiled_module_);
- job_->isolate_->debug()->OnAfterCompile(script);
+ job_->isolate_->debug()->OnAfterCompile(
+ handle(job_->compiled_module_->script()));
- DeferredHandleScope deferred(job_->isolate_);
- job_->compiled_module_ = handle(*job_->compiled_module_, job_->isolate_);
- job_->deferred_handles_.push_back(deferred.Detach());
// TODO(wasm): compiling wrappers should be made async as well.
job_->DoSync<CompileWrappers>();
}
@@ -3354,22 +4041,11 @@ class AsyncCompileJob::CompileWrappers : public CompileStep {
// and the wrappers for the function table elements.
void RunInForeground() override {
TRACE_COMPILE("(6) Compile wrappers...\n");
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(job_->isolate_->heap());
// Compile JS->wasm wrappers for exported functions.
- JSToWasmWrapperCache js_to_wasm_cache;
- int wrapper_index = 0;
- WasmModule* module = job_->compiled_module_->module();
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code(Code::cast(job_->code_table_->get(exp.index)),
- job_->isolate_);
- Handle<Code> wrapper_code =
- js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(job_->isolate_, module,
- wasm_code, exp.index);
- job_->export_wrappers_->set(wrapper_index, *wrapper_code);
- RecordStats(*wrapper_code, job_->counters());
- ++wrapper_index;
- }
-
+ CompileJsToWasmWrappers(job_->isolate_, job_->compiled_module_,
+ job_->counters());
job_->DoSync<FinishModule>();
}
};
@@ -3483,8 +4159,6 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.SetValue(2);
- next_function_ = decoder_.module()->num_imported_functions +
- FLAG_skip_compiling_wasm_funcs;
compilation_unit_builder_.reset(
new ModuleCompiler::CompilationUnitBuilder(job_->compiler_.get()));
return true;
@@ -3495,16 +4169,20 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process function body %d ...\n", next_function_);
- decoder_.DecodeFunctionBody(
- next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
- if (next_function_ >= decoder_.module()->num_imported_functions +
- FLAG_skip_compiling_wasm_funcs) {
- const WasmFunction* func = &decoder_.module()->functions[next_function_];
+ if (next_function_ >= FLAG_skip_compiling_wasm_funcs) {
+ decoder_.DecodeFunctionBody(
+ next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
+
+ uint32_t index = next_function_ + decoder_.module()->num_imported_functions;
+ const WasmFunction* func = &decoder_.module()->functions[index];
WasmName name = {nullptr, 0};
- compilation_unit_builder_->AddUnit(job_->module_env_.get(), func, offset,
- bytes, name);
+ compilation_unit_builder_->AddUnit(
+ job_->module_env_.get(), job_->compiled_module_->GetNativeModule(),
+ func, offset, bytes, name);
}
++next_function_;
+ // This method always succeeds. The return value is necessary to comply with
+ // the StreamingProcessor interface.
return true;
}
@@ -3526,8 +4204,18 @@ void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
ModuleResult result = decoder_.FinishDecoding(false);
DCHECK(result.ok());
job_->module_ = std::move(result.val);
- if (job_->DecrementAndCheckFinisherCount())
- job_->DoSync<AsyncCompileJob::FinishCompile>();
+ if (job_->DecrementAndCheckFinisherCount()) {
+ if (!job_->compiler_) {
+ // We are processing a WebAssembly module without code section. We need to
+ // prepare compilation first before we can finish it.
+ // {PrepareAndStartCompile} will call {FinishCompile} by itself if there
+ // is no code section.
+ job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(job_->module_.get(),
+ true);
+ } else {
+ job_->DoSync<AsyncCompileJob::FinishCompile>();
+ }
+ }
}
// Report an error detected in the StreamingDecoder.
@@ -3541,12 +4229,62 @@ void AsyncStreamingProcessor::OnAbort() {
job_->Abort();
}
+void CompileJsToWasmWrappers(Isolate* isolate,
+ Handle<WasmCompiledModule> compiled_module,
+ Counters* counters) {
+ JSToWasmWrapperCache js_to_wasm_cache;
+ int wrapper_index = 0;
+ Handle<FixedArray> export_wrappers = compiled_module->export_wrappers();
+ NativeModule* native_module = compiled_module->GetNativeModule();
+ for (auto exp : compiled_module->module()->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ WasmCodeWrapper wasm_code = EnsureExportedLazyDeoptData(
+ isolate, Handle<WasmInstanceObject>::null(),
+ compiled_module->code_table(), native_module, exp.index);
+ Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
+ isolate, compiled_module->module(), wasm_code, exp.index);
+ export_wrappers->set(wrapper_index, *wrapper_code);
+ RecordStats(*wrapper_code, counters);
+ ++wrapper_index;
+ }
+}
+
+Handle<Script> CreateWasmScript(Isolate* isolate,
+ const ModuleWireBytes& wire_bytes) {
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ script->set_context_data(isolate->native_context()->debug_context_id());
+ script->set_type(Script::TYPE_WASM);
+
+ int hash = StringHasher::HashSequentialString(
+ reinterpret_cast<const char*>(wire_bytes.start()),
+ static_cast<int>(wire_bytes.length()), kZeroHashSeed);
+
+ const int kBufferSize = 32;
+ char buffer[kBufferSize];
+ int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+ DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+ MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+ TENURED);
+ script->set_source_url(*url_str.ToHandleChecked());
+
+ int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ TENURED);
+ script->set_name(*name_str.ToHandleChecked());
+
+ return script;
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
#undef TRACE
+#undef TRACE_CHAIN
#undef TRACE_COMPILE
#undef TRACE_STREAMING
-#undef TRACE_CHAIN
-#undef ERROR_THROWER_WITH_MESSAGE
+#undef TRACE_LAZY
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 42ea037d03..864af287cf 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -21,6 +21,7 @@ namespace internal {
namespace wasm {
class ModuleCompiler;
+class WasmCode;
V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
const ModuleWireBytes& bytes);
@@ -49,6 +50,13 @@ V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> imports);
+V8_EXPORT_PRIVATE void CompileJsToWasmWrappers(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ Counters* counters);
+
+V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
+ Isolate* isolate, const ModuleWireBytes& wire_bytes);
+
// Triggered by the WasmCompileLazy builtin.
// Walks the stack (top three frames) to determine the wasm instance involved
// and which function to compile.
@@ -58,7 +66,8 @@ V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
// an error occurred. In the latter case, a pending exception has been set,
// which will be triggered when returning from the runtime function, i.e. the
// Illegal builtin will never be called.
-Handle<Code> CompileLazy(Isolate* isolate);
+Address CompileLazy(Isolate* isolate);
+Handle<Code> CompileLazyOnGCHeap(Isolate* isolate);
// This class orchestrates the lazy compilation of wasm functions. It is
// triggered by the WasmCompileLazy builtin.
@@ -68,12 +77,24 @@ Handle<Code> CompileLazy(Isolate* isolate);
// logic to actually orchestrate parallel execution of wasm compilation jobs.
// TODO(clemensh): Implement concurrent lazy compilation.
class LazyCompilationOrchestrator {
- void CompileFunction(Isolate*, Handle<WasmInstanceObject>, int func_index);
+ const WasmCode* CompileFunction(Isolate*, Handle<WasmInstanceObject>,
+ int func_index);
public:
- Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int call_offset,
- int exported_func_index, bool patch_caller);
+ Handle<Code> CompileLazyOnGCHeap(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int call_offset,
+ int exported_func_index, bool patch_caller);
+ const wasm::WasmCode* CompileFromJsToWasm(Isolate*,
+ Handle<WasmInstanceObject>,
+ Handle<Code> caller,
+ uint32_t exported_func_index);
+ const wasm::WasmCode* CompileDirectCall(Isolate*, Handle<WasmInstanceObject>,
+ Maybe<uint32_t>,
+ const WasmCode* caller,
+ int call_offset);
+ const wasm::WasmCode* CompileIndirectCall(Isolate*,
+ Handle<WasmInstanceObject>,
+ uint32_t func_index);
};
// Encapsulates all the state and steps of an asynchronous compilation.
@@ -161,10 +182,12 @@ class AsyncCompileJob {
Handle<WasmModuleObject> module_object_;
Handle<WasmCompiledModule> compiled_module_;
Handle<FixedArray> code_table_;
- Handle<FixedArray> export_wrappers_;
size_t outstanding_units_ = 0;
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
+
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+ std::shared_ptr<v8::TaskRunner> background_task_runner_;
// The number of background tasks which stopped executing within a step.
base::AtomicNumber<size_t> stopped_tasks_{0};
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index d7a0156a7b..1176c56935 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -228,7 +228,6 @@ class WasmSectionIterator {
section_code_ = decoder_.failed() ? kUnknownSectionCode
: static_cast<SectionCode>(section_code);
- TRACE("Section: %s\n", SectionName(section_code_));
if (section_code_ == kUnknownSectionCode && section_end_ > decoder_.pc()) {
// skip to the end of the unknown section.
uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_.pc());
@@ -423,7 +422,10 @@ class ModuleDecoderImpl : public Decoder {
static_cast<int>(pc_ - start_));
FunctionSig* s = consume_sig(module_->signature_zone.get());
module_->signatures.push_back(s);
+ uint32_t id = s ? module_->signature_map.FindOrInsert(s) : 0;
+ module_->signature_ids.push_back(id);
}
+ module_->signature_map.Freeze();
}
void DecodeImportSection() {
@@ -684,12 +686,10 @@ class ModuleDecoderImpl : public Decoder {
if (table_index != 0) {
errorf(pos, "illegal table index %u != 0", table_index);
}
- WasmIndirectFunctionTable* table = nullptr;
if (table_index >= module_->function_tables.size()) {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- table = &module_->function_tables[table_index];
WasmInitExpr offset = consume_init_expr(module_.get(), kWasmI32);
uint32_t num_elem =
consume_count("number of elements", kV8MaxWasmTableEntries);
@@ -702,8 +702,6 @@ class ModuleDecoderImpl : public Decoder {
if (!ok()) break;
DCHECK_EQ(index, func->func_index);
init->entries.push_back(index);
- // Canonicalize signature indices during decoding.
- table->map.FindOrInsert(func->sig);
}
}
}
@@ -713,7 +711,13 @@ class ModuleDecoderImpl : public Decoder {
uint32_t functions_count = consume_u32v("functions count");
CheckFunctionsCount(functions_count, pos);
for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ const byte* pos = pc();
uint32_t size = consume_u32v("body size");
+ if (size > kV8MaxWasmFunctionSize) {
+ errorf(pos, "size %u > maximum function size %zu", size,
+ kV8MaxWasmFunctionSize);
+ return;
+ }
uint32_t offset = pc_offset();
consume_bytes(size, "function body");
if (failed()) break;
@@ -733,10 +737,6 @@ class ModuleDecoderImpl : public Decoder {
void DecodeFunctionBody(uint32_t index, uint32_t length, uint32_t offset,
bool verify_functions) {
- auto size_histogram = module_->is_wasm()
- ? GetCounters()->wasm_wasm_function_size_bytes()
- : GetCounters()->wasm_asm_function_size_bytes();
- size_histogram->AddSample(length);
WasmFunction* function =
&module_->functions[index + module_->num_imported_functions];
function->code = {offset, length};
@@ -1179,7 +1179,7 @@ class ModuleDecoderImpl : public Decoder {
unsigned len = 0;
switch (opcode) {
case kExprGetGlobal: {
- GlobalIndexOperand<true> operand(this, pc() - 1);
+ GlobalIndexOperand<Decoder::kValidate> operand(this, pc() - 1);
if (module->globals.size() <= operand.index) {
error("global index is out of bounds");
expr.kind = WasmInitExpr::kNone;
@@ -1201,28 +1201,28 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprI32Const: {
- ImmI32Operand<true> operand(this, pc() - 1);
+ ImmI32Operand<Decoder::kValidate> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kI32Const;
expr.val.i32_const = operand.value;
len = operand.length;
break;
}
case kExprF32Const: {
- ImmF32Operand<true> operand(this, pc() - 1);
+ ImmF32Operand<Decoder::kValidate> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kF32Const;
expr.val.f32_const = operand.value;
len = operand.length;
break;
}
case kExprI64Const: {
- ImmI64Operand<true> operand(this, pc() - 1);
+ ImmI64Operand<Decoder::kValidate> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kI64Const;
expr.val.i64_const = operand.value;
len = operand.length;
break;
}
case kExprF64Const: {
- ImmF64Operand<true> operand(this, pc() - 1);
+ ImmF64Operand<Decoder::kValidate> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kF64Const;
expr.val.f64_const = operand.value;
len = operand.length;
@@ -1544,8 +1544,13 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
uint32_t name_offset = decoder.pc_offset();
decoder.consume_bytes(name_length, "section name");
uint32_t payload_offset = decoder.pc_offset();
+ if (section_length < (payload_offset - section_start)) {
+ decoder.error("invalid section length");
+ break;
+ }
uint32_t payload_length = section_length - (payload_offset - section_start);
decoder.consume_bytes(payload_length);
+ if (decoder.failed()) break;
result.push_back({{section_start, section_length},
{name_offset, name_length},
{payload_offset, payload_length}});
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index b6cd869ae7..8b36205ed3 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -21,7 +21,7 @@ namespace wasm {
const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
-const uint8_t kResizableMaximumFlag = 1;
+const uint8_t kHasMaximumFlag = 1;
const uint8_t kNoMaximumFlag = 0;
enum MemoryFlags : uint8_t {
diff --git a/deps/v8/src/wasm/signature-map.cc b/deps/v8/src/wasm/signature-map.cc
index e7ee4eba4e..15b8ec0aa3 100644
--- a/deps/v8/src/wasm/signature-map.cc
+++ b/deps/v8/src/wasm/signature-map.cc
@@ -8,10 +8,8 @@ namespace v8 {
namespace internal {
namespace wasm {
-SignatureMap::SignatureMap() : mutex_(new base::Mutex()) {}
-
uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
- base::LockGuard<base::Mutex> guard(mutex_.get());
+ CHECK(!frozen_);
auto pos = map_.find(sig);
if (pos != map_.end()) {
return pos->second;
@@ -23,7 +21,6 @@ uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
}
int32_t SignatureMap::Find(FunctionSig* sig) const {
- base::LockGuard<base::Mutex> guard(mutex_.get());
auto pos = map_.find(sig);
if (pos != map_.end()) {
return static_cast<int32_t>(pos->second);
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
index 7434ed43b7..b7802bd2b8 100644
--- a/deps/v8/src/wasm/signature-map.h
+++ b/deps/v8/src/wasm/signature-map.h
@@ -22,8 +22,7 @@ class V8_EXPORT_PRIVATE SignatureMap {
// Allow default construction and move construction (because we have vectors
// of objects containing SignatureMaps), but disallow copy or assign. It's
// too easy to get security bugs by accidentally updating a copy of the map.
- SignatureMap();
- SignatureMap(SignatureMap&&) = default;
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(SignatureMap);
// Gets the index for a signature, assigning a new index if necessary.
uint32_t FindOrInsert(FunctionSig* sig);
@@ -31,17 +30,17 @@ class V8_EXPORT_PRIVATE SignatureMap {
// Gets the index for a signature, returning {-1} if not found.
int32_t Find(FunctionSig* sig) const;
+ // Disallows further insertions to this signature map.
+ void Freeze() { frozen_ = true; }
+
private:
// TODO(wasm): use a hashmap instead of an ordered map?
struct CompareFunctionSigs {
bool operator()(FunctionSig* a, FunctionSig* b) const;
};
uint32_t next_ = 0;
- // TODO(wasm): performance-critical, replace with a reader-writer lock
- std::unique_ptr<base::Mutex> mutex_;
+ bool frozen_ = false;
std::map<FunctionSig*, uint32_t, CompareFunctionSigs> map_;
-
- DISALLOW_COPY_AND_ASSIGN(SignatureMap);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index b48d11c902..2387edba34 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -15,11 +15,17 @@
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
+#define TRACE_STREAMING(...) \
+ do { \
+ if (FLAG_trace_wasm_streaming) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
namespace wasm {
void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
+ TRACE_STREAMING("OnBytesReceived(%zu bytes)\n", bytes.size());
size_t current = 0;
while (ok() && current < bytes.size()) {
size_t num_bytes =
@@ -39,12 +45,14 @@ void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
Vector<const uint8_t> bytes) {
size_t num_bytes = std::min(bytes.size(), remaining());
+ TRACE_STREAMING("ReadBytes(%zu bytes)\n", num_bytes);
memcpy(buffer() + offset(), &bytes.first(), num_bytes);
set_offset(offset() + num_bytes);
return num_bytes;
}
void StreamingDecoder::Finish() {
+ TRACE_STREAMING("Finish\n");
if (!ok()) {
return;
}
@@ -73,6 +81,7 @@ void StreamingDecoder::Finish() {
}
void StreamingDecoder::Abort() {
+ TRACE_STREAMING("Abort\n");
if (ok()) processor_->OnAbort();
}
@@ -95,7 +104,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
StreamingDecoder* streaming) = 0;
size_t value() const { return value_; }
- size_t bytes_needed() const { return bytes_needed_; }
+ size_t bytes_consumed() const { return bytes_consumed_; }
private:
uint8_t byte_buffer_[kMaxVarInt32Size];
@@ -104,7 +113,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
size_t max_value_;
const char* field_name_;
size_t value_ = 0;
- size_t bytes_needed_ = 0;
+ size_t bytes_consumed_ = 0;
};
class StreamingDecoder::DecodeModuleHeader : public DecodingState {
@@ -257,13 +266,15 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
size_t bytes_read = std::min(bytes.size(), remaining());
+ TRACE_STREAMING("ReadBytes of a VarInt\n");
memcpy(buffer() + offset(), &bytes.first(), bytes_read);
Decoder decoder(buffer(), buffer() + offset() + bytes_read,
streaming->module_offset());
value_ = decoder.consume_u32v(field_name_);
// The number of bytes we actually needed to read.
DCHECK_GT(decoder.pc(), buffer());
- bytes_needed_ = static_cast<size_t>(decoder.pc() - buffer());
+ bytes_consumed_ = static_cast<size_t>(decoder.pc() - buffer());
+ TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
if (decoder.failed()) {
if (offset() + bytes_read == size()) {
@@ -273,8 +284,8 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
set_offset(offset() + bytes_read);
return bytes_read;
} else {
- DCHECK_GT(bytes_needed_, offset());
- size_t result = bytes_needed_ - offset();
+ DCHECK_GT(bytes_consumed_, offset());
+ size_t result = bytes_consumed_ - offset();
// We read all the bytes we needed.
set_offset(size());
return result;
@@ -298,6 +309,7 @@ StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
+ TRACE_STREAMING("DecodeModuleHeader\n");
streaming->ProcessModuleHeader();
if (streaming->ok()) {
return base::make_unique<DecodeSectionID>(streaming->module_offset());
@@ -307,15 +319,18 @@ StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
+ TRACE_STREAMING("DecodeSectionID: %s section\n",
+ SectionName(static_cast<SectionCode>(id())));
return base::make_unique<DecodeSectionLength>(id(), module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionLength::NextWithValue(
StreamingDecoder* streaming) {
+ TRACE_STREAMING("DecodeSectionLength(%zu)\n", value());
SectionBuffer* buf = streaming->CreateNewBuffer(
module_offset(), section_id(), value(),
- Vector<const uint8_t>(buffer(), static_cast<int>(bytes_needed())));
+ Vector<const uint8_t>(buffer(), static_cast<int>(bytes_consumed())));
if (!buf) return nullptr;
if (value() == 0) {
if (section_id() == SectionCode::kCodeSectionCode) {
@@ -342,6 +357,7 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
+ TRACE_STREAMING("DecodeSectionPayload\n");
streaming->ProcessSection(section_buffer());
if (streaming->ok()) {
return base::make_unique<DecodeSectionID>(streaming->module_offset());
@@ -352,10 +368,11 @@ StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
StreamingDecoder* streaming) {
+ TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value());
// Copy the bytes we read into the section buffer.
- if (section_buffer_->payload_length() >= bytes_needed()) {
- memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
- buffer(), bytes_needed());
+ if (section_buffer()->payload_length() >= bytes_consumed()) {
+ memcpy(section_buffer()->bytes() + section_buffer()->payload_offset(),
+ buffer(), bytes_consumed());
} else {
return streaming->Error("Invalid code section length");
}
@@ -365,9 +382,12 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
streaming->StartCodeSection(value());
if (!streaming->ok()) return nullptr;
return base::make_unique<DecodeFunctionLength>(
- section_buffer(), section_buffer()->payload_offset() + bytes_needed(),
+ section_buffer(), section_buffer()->payload_offset() + bytes_consumed(),
value());
} else {
+ if (section_buffer()->payload_length() != bytes_consumed()) {
+ return streaming->Error("not all code section bytes were consumed");
+ }
return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
}
@@ -375,9 +395,11 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionLength::NextWithValue(
StreamingDecoder* streaming) {
- // Copy the bytes we read into the section buffer.
- if (section_buffer_->length() >= buffer_offset_ + bytes_needed()) {
- memcpy(section_buffer_->bytes() + buffer_offset_, buffer(), bytes_needed());
+ TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value());
+ // Copy the bytes we consumed into the section buffer.
+ if (section_buffer_->length() >= buffer_offset_ + bytes_consumed()) {
+ memcpy(section_buffer_->bytes() + buffer_offset_, buffer(),
+ bytes_consumed());
} else {
return streaming->Error("Invalid code section length");
}
@@ -385,19 +407,20 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
// {value} is the length of the function.
if (value() == 0) {
return streaming->Error("Invalid function length (0)");
- } else if (buffer_offset() + bytes_needed() + value() >
+ } else if (buffer_offset() + bytes_consumed() + value() >
section_buffer()->length()) {
streaming->Error("not enough code section bytes");
return nullptr;
}
return base::make_unique<DecodeFunctionBody>(
- section_buffer(), buffer_offset() + bytes_needed(), value(),
+ section_buffer(), buffer_offset() + bytes_consumed(), value(),
num_remaining_functions(), streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
+ TRACE_STREAMING("DecodeFunctionBody\n");
streaming->ProcessFunctionBody(
Vector<const uint8_t>(buffer(), static_cast<int>(size())),
module_offset());
@@ -423,3 +446,5 @@ StreamingDecoder::StreamingDecoder(
} // namespace wasm
} // namespace internal
} // namespace v8
+
+#undef TRACE_STREAMING
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 2bf5f625d5..571179c64d 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -211,32 +211,38 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
void ProcessModuleHeader() {
if (!ok_) return;
- ok_ &= processor_->ProcessModuleHeader(
- Vector<const uint8_t>(state_->buffer(),
- static_cast<int>(state_->size())),
- 0);
+ if (!processor_->ProcessModuleHeader(
+ Vector<const uint8_t>(state_->buffer(),
+ static_cast<int>(state_->size())),
+ 0)) {
+ ok_ = false;
+ }
}
void ProcessSection(SectionBuffer* buffer) {
if (!ok_) return;
- ok_ &= processor_->ProcessSection(
- buffer->section_code(), buffer->payload(),
- buffer->module_offset() +
- static_cast<uint32_t>(buffer->payload_offset()));
+ if (!processor_->ProcessSection(
+ buffer->section_code(), buffer->payload(),
+ buffer->module_offset() +
+ static_cast<uint32_t>(buffer->payload_offset()))) {
+ ok_ = false;
+ }
}
void StartCodeSection(size_t num_functions) {
if (!ok_) return;
// The offset passed to {ProcessCodeSectionHeader} is an error offset and
// not the start offset of a buffer. Therefore we need the -1 here.
- ok_ &= processor_->ProcessCodeSectionHeader(num_functions,
- module_offset() - 1);
+ if (!processor_->ProcessCodeSectionHeader(num_functions,
+ module_offset() - 1)) {
+ ok_ = false;
+ }
}
void ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t module_offset) {
if (!ok_) return;
- ok_ &= processor_->ProcessFunctionBody(bytes, module_offset);
+ if (!processor_->ProcessFunctionBody(bytes, module_offset)) ok_ = false;
}
bool ok() const { return ok_; }
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 33db8bb7d2..40a9dac9a3 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -5,6 +5,7 @@
#include "src/wasm/wasm-code-specialization.h"
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/objects-inl.h"
#include "src/source-position-table.h"
#include "src/wasm/decoder.h"
@@ -16,13 +17,13 @@ namespace v8 {
namespace internal {
namespace wasm {
-int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
+uint32_t ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
decoder.Reset(pc + 1, pc + 6);
uint32_t call_idx = decoder.consume_u32v("call index");
DCHECK(decoder.ok());
DCHECK_GE(kMaxInt, call_idx);
- return static_cast<int>(call_idx);
+ return call_idx;
}
namespace {
@@ -42,6 +43,17 @@ int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
class PatchDirectCallsHelper {
public:
+ PatchDirectCallsHelper(WasmInstanceObject* instance, const WasmCode* code)
+ : source_pos_it(ByteArray::cast(
+ instance->compiled_module()->source_positions()->get(
+ static_cast<int>(code->index())))),
+ decoder(nullptr, nullptr) {
+ uint32_t func_index = code->index();
+ WasmCompiledModule* comp_mod = instance->compiled_module();
+ func_bytes = comp_mod->module_bytes()->GetChars() +
+ comp_mod->module()->functions[func_index].code.offset();
+ }
+
PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
: source_pos_it(code->SourcePositionTable()), decoder(nullptr, nullptr) {
FixedArray* deopt_data = code->deoptimization_data();
@@ -62,6 +74,7 @@ bool IsAtWasmDirectCallTarget(RelocIterator& it) {
Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
return code->kind() == Code::WASM_FUNCTION ||
code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ code->kind() == Code::WASM_TO_WASM_FUNCTION ||
code->kind() == Code::WASM_INTERPRETER_ENTRY ||
code->builtin_index() == Builtins::kIllegal ||
code->builtin_index() == Builtins::kWasmCompileLazy;
@@ -69,7 +82,8 @@ bool IsAtWasmDirectCallTarget(RelocIterator& it) {
} // namespace
-CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
+CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
+ : isolate_(isolate) {}
CodeSpecialization::~CodeSpecialization() {}
@@ -79,12 +93,6 @@ void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
new_wasm_context_address = new_context;
}
-void CodeSpecialization::RelocateGlobals(Address old_start, Address new_start) {
- DCHECK(old_globals_start == 0 && new_globals_start == 0);
- old_globals_start = old_start;
- new_globals_start = new_start;
-}
-
void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
DCHECK(old_function_table_size == 0 && new_function_table_size == 0);
old_function_table_size = old_size;
@@ -106,11 +114,11 @@ bool CodeSpecialization::ApplyToWholeInstance(
WasmInstanceObject* instance, ICacheFlushMode icache_flush_mode) {
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = instance->compiled_module();
+ NativeModule* native_module = compiled_module->GetNativeModule();
FixedArray* code_table = compiled_module->ptr_to_code_table();
WasmModule* module = compiled_module->module();
std::vector<WasmFunction>* wasm_functions =
&compiled_module->module()->functions;
- DCHECK_EQ(wasm_functions->size(), code_table->length());
DCHECK_EQ(compiled_module->export_wrappers()->length(),
compiled_module->module()->num_exported_functions);
@@ -120,9 +128,19 @@ bool CodeSpecialization::ApplyToWholeInstance(
// Patch all wasm functions.
for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
func_index < num_wasm_functions; ++func_index) {
- Code* wasm_function = Code::cast(code_table->get(func_index));
- if (wasm_function->kind() != Code::WASM_FUNCTION) continue;
- changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
+ WasmCodeWrapper wrapper;
+ if (FLAG_wasm_jit_to_native) {
+ const WasmCode* wasm_function = native_module->GetCode(func_index);
+ if (wasm_function->kind() != WasmCode::Function) {
+ continue;
+ }
+ wrapper = WasmCodeWrapper(wasm_function);
+ } else {
+ Code* wasm_function = Code::cast(code_table->get(func_index));
+ if (wasm_function->kind() != Code::WASM_FUNCTION) continue;
+ wrapper = WasmCodeWrapper(handle(wasm_function));
+ }
+ changed |= ApplyToWasmCode(wrapper, icache_flush_mode);
}
// Patch all exported functions (JS_TO_WASM_FUNCTION).
@@ -136,7 +154,9 @@ bool CodeSpecialization::ApplyToWholeInstance(
// should match the instance we currently patch (instance).
if (!relocate_direct_calls_instance.is_null()) {
DCHECK_EQ(instance, *relocate_direct_calls_instance);
- reloc_mode |= RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ reloc_mode |=
+ RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
+ : RelocInfo::CODE_TARGET);
}
if (!reloc_mode) return changed;
int wrapper_index = 0;
@@ -153,7 +173,14 @@ bool CodeSpecialization::ApplyToWholeInstance(
new_wasm_context_address,
icache_flush_mode);
break;
+ case RelocInfo::JS_TO_WASM_CALL: {
+ DCHECK(FLAG_wasm_jit_to_native);
+ const WasmCode* new_code = native_module->GetCode(exp.index);
+ it.rinfo()->set_js_to_wasm_address(
+ nullptr, new_code->instructions().start(), SKIP_ICACHE_FLUSH);
+ } break;
case RelocInfo::CODE_TARGET: {
+ DCHECK(!FLAG_wasm_jit_to_native);
// Ignore calls to other builtins like ToNumber.
if (!IsAtWasmDirectCallTarget(it)) continue;
Code* new_code = Code::cast(code_table->get(exp.index));
@@ -168,17 +195,20 @@ bool CodeSpecialization::ApplyToWholeInstance(
changed = true;
++wrapper_index;
}
- DCHECK_EQ(code_table->length(), func_index);
+ DCHECK_EQ(module->functions.size(), func_index);
DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
return changed;
}
-bool CodeSpecialization::ApplyToWasmCode(Code* code,
+bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
ICacheFlushMode icache_flush_mode) {
DisallowHeapAllocation no_gc;
- DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
+ if (code.IsCodeObject()) {
+ DCHECK_EQ(Code::WASM_FUNCTION, code.GetCode()->kind());
+ } else {
+ DCHECK_EQ(wasm::WasmCode::Function, code.GetWasmCode()->kind());
+ }
- bool reloc_globals = old_globals_start || new_globals_start;
bool patch_table_size = old_function_table_size || new_function_table_size;
bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
bool reloc_pointers = pointers_to_relocate.size() > 0;
@@ -187,25 +217,31 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
if (cond) reloc_mode |= RelocInfo::ModeMask(mode);
};
- add_mode(reloc_globals, RelocInfo::WASM_GLOBAL_REFERENCE);
add_mode(patch_table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
- add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
+ if (code.IsCodeObject()) {
+ add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
+ } else {
+ add_mode(reloc_direct_calls, RelocInfo::WASM_CALL);
+ }
add_mode(reloc_pointers, RelocInfo::WASM_GLOBAL_HANDLE);
- std::unique_ptr<PatchDirectCallsHelper> patch_direct_calls_helper;
+ base::Optional<PatchDirectCallsHelper> patch_direct_calls_helper;
bool changed = false;
- for (RelocIterator it(code, reloc_mode); !it.done(); it.next()) {
+ NativeModule* native_module =
+ code.IsCodeObject() ? nullptr : code.GetWasmCode()->owner();
+
+ RelocIterator it =
+ code.IsCodeObject()
+ ? RelocIterator(*code.GetCode(), reloc_mode)
+ : RelocIterator(code.GetWasmCode()->instructions(),
+ code.GetWasmCode()->reloc_info(),
+ code.GetWasmCode()->constant_pool(), reloc_mode);
+ for (; !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
- case RelocInfo::WASM_GLOBAL_REFERENCE:
- DCHECK(reloc_globals);
- it.rinfo()->update_wasm_global_reference(
- code->GetIsolate(), old_globals_start, new_globals_start,
- icache_flush_mode);
- changed = true;
- break;
case RelocInfo::CODE_TARGET: {
+ DCHECK(!FLAG_wasm_jit_to_native);
DCHECK(reloc_direct_calls);
// Skip everything which is not a wasm call (stack checks, traps, ...).
if (!IsAtWasmDirectCallTarget(it)) continue;
@@ -214,10 +250,10 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
// position iterator forward to that position to find the byte offset of
// the respective call. Then extract the call index from the module wire
// bytes to find the new compiled function.
- size_t offset = it.rinfo()->pc() - code->instruction_start();
+ size_t offset = it.rinfo()->pc() - code.GetCode()->instruction_start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.reset(new PatchDirectCallsHelper(
- *relocate_direct_calls_instance, code));
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ *code.GetCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
patch_direct_calls_helper->source_pos_it, offset);
@@ -233,21 +269,44 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
UPDATE_WRITE_BARRIER, icache_flush_mode);
changed = true;
} break;
+ case RelocInfo::WASM_CALL: {
+ DCHECK(FLAG_wasm_jit_to_native);
+ DCHECK(reloc_direct_calls);
+ // Iterate simultaneously over the relocation information and the source
+ // position table. For each call in the reloc info, move the source
+ // position iterator forward to that position to find the byte offset of
+ // the respective call. Then extract the call index from the module wire
+ // bytes to find the new compiled function.
+ size_t offset =
+ it.rinfo()->pc() - code.GetWasmCode()->instructions().start();
+ if (!patch_direct_calls_helper) {
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ code.GetWasmCode());
+ }
+ int byte_pos = AdvanceSourcePositionTableIterator(
+ patch_direct_calls_helper->source_pos_it, offset);
+ uint32_t called_func_index = ExtractDirectCallIndex(
+ patch_direct_calls_helper->decoder,
+ patch_direct_calls_helper->func_bytes + byte_pos);
+ const WasmCode* new_code = native_module->GetCode(called_func_index);
+ it.rinfo()->set_wasm_call_address(
+ isolate_, new_code->instructions().start(), icache_flush_mode);
+ changed = true;
+ } break;
case RelocInfo::WASM_GLOBAL_HANDLE: {
DCHECK(reloc_pointers);
Address old_ptr = it.rinfo()->global_handle();
if (pointers_to_relocate.count(old_ptr) == 1) {
Address new_ptr = pointers_to_relocate[old_ptr];
- it.rinfo()->set_global_handle(code->GetIsolate(), new_ptr,
- icache_flush_mode);
+ it.rinfo()->set_global_handle(isolate_, new_ptr, icache_flush_mode);
changed = true;
}
} break;
case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
DCHECK(patch_table_size);
it.rinfo()->update_wasm_function_table_size_reference(
- code->GetIsolate(), old_function_table_size,
- new_function_table_size, icache_flush_mode);
+ isolate_, old_function_table_size, new_function_table_size,
+ icache_flush_mode);
changed = true;
break;
default:
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
index 4cf422b64f..8f68677fbf 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace wasm {
-int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc);
+uint32_t ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc);
// Helper class to specialize wasm code for a specific instance, or to update
// code when memory / globals / tables change.
@@ -30,8 +30,6 @@ class CodeSpecialization {
// Update WasmContext references.
void RelocateWasmContextReferences(Address new_context);
- // Update references to global variables.
- void RelocateGlobals(Address old_start, Address new_start);
// Update function table size.
// TODO(wasm): Prepare this for more than one indirect function table.
void PatchTableSize(uint32_t old_size, uint32_t new_size);
@@ -45,14 +43,13 @@ class CodeSpecialization {
bool ApplyToWholeInstance(WasmInstanceObject*,
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
// Apply all relocations and patching to one wasm code object.
- bool ApplyToWasmCode(Code*, ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
+ bool ApplyToWasmCode(WasmCodeWrapper,
+ ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
+ Isolate* isolate_;
Address new_wasm_context_address = 0;
- Address old_globals_start = 0;
- Address new_globals_start = 0;
-
uint32_t old_function_table_size = 0;
uint32_t new_function_table_size = 0;
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.cc b/deps/v8/src/wasm/wasm-code-wrapper.cc
new file mode 100644
index 0000000000..28a96d16bf
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-code-wrapper.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-code-wrapper.h"
+
+#include "src/objects.h"
+#include "src/objects/code.h"
+
+namespace v8 {
+namespace internal {
+
+// When constructing, we check the flag. After that, we just
+// check using the member.
+WasmCodeWrapper::WasmCodeWrapper(Handle<Code> code) {
+ DCHECK(!FLAG_wasm_jit_to_native);
+ code_ptr_.code_handle_ = code.location();
+}
+
+WasmCodeWrapper::WasmCodeWrapper(const wasm::WasmCode* code) {
+ DCHECK(FLAG_wasm_jit_to_native);
+ code_ptr_.wasm_code_ = code;
+}
+
+Handle<Code> WasmCodeWrapper::GetCode() const {
+ DCHECK(IsCodeObject());
+ return Handle<Code>(code_ptr_.code_handle_);
+}
+
+const wasm::WasmCode* WasmCodeWrapper::GetWasmCode() const {
+ DCHECK(!IsCodeObject());
+ return code_ptr_.wasm_code_;
+}
+
+bool WasmCodeWrapper::IsCodeObject() const { return !FLAG_wasm_jit_to_native; }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.h b/deps/v8/src/wasm/wasm-code-wrapper.h
new file mode 100644
index 0000000000..f80aee8056
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-code-wrapper.h
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_WASM_CODE_WRAPPER_H_
+#define V8_WASM_CODE_WRAPPER_H_
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+class WasmCode;
+} // namespace wasm
+
+class Code;
+
+// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
+class WasmCodeWrapper {
+ public:
+ WasmCodeWrapper() {}
+
+ explicit WasmCodeWrapper(Handle<Code> code);
+ explicit WasmCodeWrapper(const wasm::WasmCode* code);
+ Handle<Code> GetCode() const;
+ const wasm::WasmCode* GetWasmCode() const;
+ bool is_null() const { return code_ptr_.wasm_code_ == nullptr; }
+ bool IsCodeObject() const;
+
+ private:
+ union {
+ const wasm::WasmCode* wasm_code_;
+ Code** code_handle_;
+ } code_ptr_ = {};
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_WASM_CODE_WRAPPER_H_
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 79c784a0f7..49ca995f5d 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -136,36 +136,12 @@ class InterpreterHandle {
return {bytes_str->GetChars(), static_cast<size_t>(bytes_str->length())};
}
- static uint32_t GetMemSize(WasmDebugInfo* debug_info) {
- DisallowHeapAllocation no_gc;
- return debug_info->wasm_instance()->has_memory_object()
- ? debug_info->wasm_instance()->wasm_context()->mem_size
- : 0;
- }
-
- static byte* GetMemStart(WasmDebugInfo* debug_info) {
- DisallowHeapAllocation no_gc;
- return debug_info->wasm_instance()->has_memory_object()
- ? debug_info->wasm_instance()->wasm_context()->mem_start
- : nullptr;
- }
-
- static byte* GetGlobalsStart(WasmDebugInfo* debug_info) {
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- debug_info->wasm_instance()->compiled_module();
- return reinterpret_cast<byte*>(compiled_module->has_globals_start()
- ? compiled_module->globals_start()
- : 0);
- }
-
public:
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: isolate_(isolate),
module_(debug_info->wasm_instance()->compiled_module()->module()),
interpreter_(isolate, module_, GetBytes(debug_info),
- GetGlobalsStart(debug_info), GetMemStart(debug_info),
- GetMemSize(debug_info)) {}
+ debug_info->wasm_instance()->wasm_context()->get()) {}
~InterpreterHandle() { DCHECK_EQ(0, activations_.size()); }
@@ -430,13 +406,6 @@ class InterpreterHandle {
return interpreter()->GetThread(0)->NumInterpretedCalls();
}
- void UpdateMemory(JSArrayBuffer* new_memory) {
- byte* mem_start = reinterpret_cast<byte*>(new_memory->backing_store());
- uint32_t mem_size;
- CHECK(new_memory->byte_length()->ToUint32(&mem_size));
- interpreter()->UpdateMemory(mem_start, mem_size);
- }
-
Handle<JSObject> GetGlobalScopeObject(wasm::InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = debug_info->GetIsolate();
@@ -445,10 +414,11 @@ class InterpreterHandle {
// TODO(clemensh): Add globals to the global scope.
Handle<JSObject> global_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
- if (instance->has_memory_buffer()) {
+ if (instance->has_memory_object()) {
Handle<String> name = isolate_->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("memory"));
- Handle<JSArrayBuffer> memory_buffer(instance->memory_buffer(), isolate_);
+ Handle<JSArrayBuffer> memory_buffer(
+ instance->memory_object()->array_buffer(), isolate_);
uint32_t byte_length;
CHECK(memory_buffer->byte_length()->ToUint32(&byte_length));
Handle<JSTypedArray> uint8_array = isolate_->factory()->NewJSTypedArray(
@@ -604,9 +574,11 @@ Handle<FixedArray> GetOrCreateInterpretedFunctions(
return new_arr;
}
-using CodeRelocationMap = IdentityMap<Handle<Code>, FreeStoreAllocationPolicy>;
+using CodeRelocationMap = std::map<Address, Address>;
+using CodeRelocationMapGC =
+ IdentityMap<Handle<Code>, FreeStoreAllocationPolicy>;
-void RedirectCallsitesInCode(Code* code, CodeRelocationMap& map) {
+void RedirectCallsitesInCodeGC(Code* code, CodeRelocationMapGC& map) {
DisallowHeapAllocation no_gc;
for (RelocIterator it(code, RelocInfo::kCodeTargetMask); !it.done();
it.next()) {
@@ -619,13 +591,40 @@ void RedirectCallsitesInCode(Code* code, CodeRelocationMap& map) {
}
}
-void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
- CodeRelocationMap& map) {
+void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
+ CodeRelocationMap* map) {
+ DisallowHeapAllocation no_gc;
+ for (RelocIterator it(code->instructions(), code->reloc_info(),
+ code->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL));
+ !it.done(); it.next()) {
+ Address target = it.rinfo()->target_address();
+ auto new_target = map->find(target);
+ if (new_target == map->end()) continue;
+ it.rinfo()->set_wasm_call_address(isolate, new_target->second);
+ }
+}
+
+void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
+ CodeRelocationMap* map) {
+ DisallowHeapAllocation no_gc;
+ for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ !it.done(); it.next()) {
+ Address target = it.rinfo()->js_to_wasm_address();
+ auto new_target = map->find(target);
+ if (new_target == map->end()) continue;
+ it.rinfo()->set_js_to_wasm_address(isolate, new_target->second);
+ }
+}
+
+void RedirectCallsitesInInstanceGC(Isolate* isolate,
+ WasmInstanceObject* instance,
+ CodeRelocationMapGC& map) {
DisallowHeapAllocation no_gc;
// Redirect all calls in wasm functions.
FixedArray* code_table = instance->compiled_module()->ptr_to_code_table();
for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
- RedirectCallsitesInCode(Code::cast(code_table->get(i)), map);
+ RedirectCallsitesInCodeGC(Code::cast(code_table->get(i)), map);
}
// TODO(6668): Find instances that imported our code and also patch those.
@@ -636,7 +635,29 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
if (weak_function->cleared()) continue;
Code* code = JSFunction::cast(weak_function->value())->code();
- RedirectCallsitesInCode(code, map);
+ RedirectCallsitesInCodeGC(code, map);
+ }
+}
+
+void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
+ CodeRelocationMap* map) {
+ DisallowHeapAllocation no_gc;
+ // Redirect all calls in wasm functions.
+ for (uint32_t i = 0, e = GetNumFunctions(instance); i < e; ++i) {
+ wasm::WasmCode* code =
+ instance->compiled_module()->GetNativeModule()->GetCode(i);
+ RedirectCallsitesInCode(isolate, code, map);
+ }
+ // TODO(6668): Find instances that imported our code and also patch those.
+
+ // Redirect all calls in exported functions.
+ FixedArray* weak_exported_functions =
+ instance->compiled_module()->ptr_to_weak_exported_functions();
+ for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
+ WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
+ if (weak_function->cleared()) continue;
+ Code* code = JSFunction::cast(weak_function->value())->code();
+ RedirectCallsitesInJSWrapperCode(isolate, code, map);
}
}
@@ -700,8 +721,14 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
Handle<FixedArray> interpreted_functions =
GetOrCreateInterpretedFunctions(isolate, debug_info);
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
+ wasm::NativeModule* native_module =
+ instance->compiled_module()->GetNativeModule();
+ CodeRelocationMap code_to_relocate;
+
Handle<FixedArray> code_table = instance->compiled_module()->code_table();
- CodeRelocationMap code_to_relocate(isolate->heap());
+ CodeRelocationMapGC code_to_relocate_gc(isolate->heap());
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (int func_index : func_indexes) {
DCHECK_LE(0, func_index);
DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
@@ -712,13 +739,30 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
isolate, func_index,
instance->compiled_module()->module()->functions[func_index].sig,
instance);
-
- Code* old_code = Code::cast(code_table->get(func_index));
- interpreted_functions->set(func_index, *new_code);
- DCHECK_NULL(code_to_relocate.Find(old_code));
- code_to_relocate.Set(old_code, new_code);
+ if (FLAG_wasm_jit_to_native) {
+ const wasm::WasmCode* wasm_new_code =
+ native_module->AddInterpreterWrapper(new_code, func_index);
+ const wasm::WasmCode* old_code =
+ native_module->GetCode(static_cast<uint32_t>(func_index));
+ Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
+ wasm_new_code->instructions().start(), TENURED);
+ interpreted_functions->set(func_index, *foreign_holder);
+ DCHECK_EQ(0, code_to_relocate.count(old_code->instructions().start()));
+ code_to_relocate.insert(
+ std::make_pair(old_code->instructions().start(),
+ wasm_new_code->instructions().start()));
+ } else {
+ Code* old_code = Code::cast(code_table->get(func_index));
+ interpreted_functions->set(func_index, *new_code);
+ DCHECK_NULL(code_to_relocate_gc.Find(old_code));
+ code_to_relocate_gc.Set(old_code, new_code);
+ }
+ }
+ if (FLAG_wasm_jit_to_native) {
+ RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
+ } else {
+ RedirectCallsitesInInstanceGC(isolate, *instance, code_to_relocate_gc);
}
- RedirectCallsitesInInstance(isolate, *instance, code_to_relocate);
}
void WasmDebugInfo::PrepareStep(StepAction step_action) {
@@ -752,12 +796,6 @@ uint64_t WasmDebugInfo::NumInterpretedCalls() {
return handle ? handle->NumInterpretedCalls() : 0;
}
-void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
- auto* interp_handle = GetInterpreterHandleOrNull(this);
- if (!interp_handle) return;
- interp_handle->UpdateMemory(new_memory);
-}
-
// static
Handle<JSObject> WasmDebugInfo::GetScopeDetails(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
@@ -816,8 +854,9 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
isolate->factory()->NewSharedFunctionInfo(name, new_entry_code, false);
shared->set_internal_formal_parameter_count(
compiler::CWasmEntryParameters::kNumParameters);
- Handle<JSFunction> new_entry = isolate->factory()->NewFunction(
- isolate->sloppy_function_map(), name, new_entry_code);
+ NewFunctionArgs args = NewFunctionArgs::ForWasm(
+ name, new_entry_code, isolate->sloppy_function_map());
+ Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
new_entry->set_context(
*debug_info->wasm_instance()->compiled_module()->native_context());
new_entry->set_shared(*shared);
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 93a84583b9..238785ca3c 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -201,12 +201,12 @@ int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
}
uint32_t word32_ctz_wrapper(uint32_t* input) {
- return static_cast<uint32_t>(base::bits::CountTrailingZeros32(*input));
+ return static_cast<uint32_t>(base::bits::CountTrailingZeros(*input));
}
uint32_t word64_ctz_wrapper(uint64_t* input) {
return static_cast<uint32_t>(
- base::bits::CountTrailingZeros64(ReadUnalignedValue<uint64_t>(input)));
+ base::bits::CountTrailingZeros(ReadUnalignedValue<uint64_t>(input)));
}
uint32_t word32_popcnt_wrapper(uint32_t* input) {
diff --git a/deps/v8/src/wasm/wasm-heap.cc b/deps/v8/src/wasm/wasm-heap.cc
index b7d13b067f..e111ec55f5 100644
--- a/deps/v8/src/wasm/wasm-heap.cc
+++ b/deps/v8/src/wasm/wasm-heap.cc
@@ -4,10 +4,68 @@
#include "src/wasm/wasm-heap.h"
+#include "src/assembler-inl.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/disassembler.h"
+#include "src/globals.h"
+#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-objects.h"
+
+#define TRACE_HEAP(...) \
+ do { \
+ if (FLAG_wasm_trace_native_heap) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
namespace wasm {
+namespace {
+size_t native_module_ids = 0;
+
+#if V8_TARGET_ARCH_X64
+#define __ masm->
+constexpr bool kModuleCanAllocateMoreMemory = false;
+
+void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
+ __ movq(kScratchRegister, reinterpret_cast<uint64_t>(target));
+ __ jmp(kScratchRegister);
+}
+#undef __
+#else
+const bool kModuleCanAllocateMoreMemory = true;
+#endif
+
+void PatchTrampolineAndStubCalls(
+ const WasmCode* original_code, const WasmCode* new_code,
+ const std::unordered_map<Address, Address, AddressHasher>& reverse_lookup) {
+ RelocIterator orig_it(
+ original_code->instructions(), original_code->reloc_info(),
+ original_code->constant_pool(), RelocInfo::kCodeTargetMask);
+ for (RelocIterator it(new_code->instructions(), new_code->reloc_info(),
+ new_code->constant_pool(), RelocInfo::kCodeTargetMask);
+ !it.done(); it.next(), orig_it.next()) {
+ Address old_target = orig_it.rinfo()->target_address();
+#if V8_TARGET_ARCH_X64
+ auto found = reverse_lookup.find(old_target);
+ DCHECK(found != reverse_lookup.end());
+ Address new_target = found->second;
+#else
+ Address new_target = old_target;
+#endif
+ it.rinfo()->set_target_address(nullptr, new_target, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ }
+}
+} // namespace
+
DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
ranges_.push_back({start, end});
}
@@ -96,6 +154,733 @@ DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
return ret;
}
+Address WasmCode::constant_pool() const {
+ if (FLAG_enable_embedded_constant_pool) {
+ if (constant_pool_offset_ < instructions().size()) {
+ return instructions().start() + constant_pool_offset_;
+ }
+ }
+ return nullptr;
+}
+
+size_t WasmCode::trap_handler_index() const {
+ CHECK(HasTrapHandlerIndex());
+ return static_cast<size_t>(trap_handler_index_);
+}
+
+void WasmCode::set_trap_handler_index(size_t value) {
+ trap_handler_index_ = value;
+}
+
+bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
+
+void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }
+
+// TODO(mtrofin): rework the dependency on isolate and code in
+// Disassembler::Decode.
+void WasmCode::Disassemble(Isolate* isolate, const char* name,
+ std::ostream& os) const {
+ os << name << std::endl;
+ Disassembler::Decode(isolate, &os, instructions().start(),
+ instructions().end(), nullptr);
+}
+
+void WasmCode::Print(Isolate* isolate) const {
+ OFStream os(stdout);
+ Disassemble(isolate, "", os);
+}
+
+WasmCode::~WasmCode() {
+ // Depending on finalizer order, the WasmCompiledModule finalizer may be
+ // called first, case in which we release here. If the InstanceFinalizer is
+ // called first, the handlers will be cleared in Reset, as-if the NativeModule
+ // may be later used again (which would be the case if the WasmCompiledModule
+ // were still held by a WasmModuleObject)
+ if (HasTrapHandlerIndex()) {
+ CHECK_LT(trap_handler_index(),
+ static_cast<size_t>(std::numeric_limits<int>::max()));
+ trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
+ }
+}
+
+NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
+ bool can_request_more, VirtualMemory* mem,
+ WasmCodeManager* code_manager)
+ : instance_id(native_module_ids++),
+ code_table_(num_functions),
+ num_imported_functions_(num_imports),
+ free_memory_(reinterpret_cast<Address>(mem->address()),
+ reinterpret_cast<Address>(mem->end())),
+ wasm_code_manager_(code_manager),
+ can_request_more_memory_(can_request_more) {
+ VirtualMemory my_mem;
+ owned_memory_.push_back(my_mem);
+ owned_memory_.back().TakeControl(mem);
+ owned_code_.reserve(num_functions);
+}
+
+void NativeModule::ResizeCodeTableForTest(size_t last_index) {
+ size_t new_size = last_index + 1;
+ if (new_size > FunctionCount()) {
+ Isolate* isolate = compiled_module()->GetIsolate();
+ code_table_.resize(new_size);
+ int grow_by = static_cast<int>(new_size) -
+ compiled_module()->source_positions()->length();
+ compiled_module()->set_source_positions(
+ isolate->factory()->CopyFixedArrayAndGrow(
+ compiled_module()->source_positions(), grow_by, TENURED));
+ compiled_module()->set_handler_table(
+ isolate->factory()->CopyFixedArrayAndGrow(
+ compiled_module()->handler_table(), grow_by, TENURED));
+ }
+}
+
+WasmCode* NativeModule::GetCode(uint32_t index) const {
+ return code_table_[index];
+}
+
+uint32_t NativeModule::FunctionCount() const {
+ DCHECK_LE(code_table_.size(), std::numeric_limits<uint32_t>::max());
+ return static_cast<uint32_t>(code_table_.size());
+}
+
+WasmCode* NativeModule::AddOwnedCode(
+ Vector<const byte> orig_instructions,
+ std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
+ Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
+ uint32_t stack_slots, size_t safepoint_table_offset,
+ std::shared_ptr<ProtectedInstructions> protected_instructions,
+ bool is_liftoff) {
+ // both allocation and insertion in owned_code_ happen in the same critical
+ // section, thus ensuring owned_code_'s elements are rarely if ever moved.
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ Address executable_buffer = AllocateForCode(orig_instructions.size());
+ if (executable_buffer == nullptr) return nullptr;
+ memcpy(executable_buffer, orig_instructions.start(),
+ orig_instructions.size());
+ std::unique_ptr<WasmCode> code(new WasmCode(
+ {executable_buffer, orig_instructions.size()}, std::move(reloc_info),
+ reloc_size, this, index, kind, constant_pool_offset, stack_slots,
+ safepoint_table_offset, protected_instructions, is_liftoff));
+ WasmCode* ret = code.get();
+
+ // TODO(mtrofin): We allocate in increasing address order, and
+ // even if we end up with segmented memory, we may end up only with a few
+ // large moves - if, for example, a new segment is below the current ones.
+ auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
+ code, owned_code_comparer_);
+ owned_code_.insert(insert_before, std::move(code));
+ return ret;
+}
+
+WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
+ uint32_t index) {
+ WasmCode* ret = AddAnonymousCode(code, kind);
+ SetCodeTable(index, ret);
+ ret->index_ = Just(index);
+ compiled_module()->ptr_to_source_positions()->set(
+ static_cast<int>(index), code->source_position_table());
+ compiled_module()->ptr_to_handler_table()->set(static_cast<int>(index),
+ code->handler_table());
+ return ret;
+}
+
+WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
+ uint32_t index) {
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::InterpreterStub);
+ ret->index_ = Just(index);
+ return ret;
+}
+
+WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
+ DCHECK_NULL(lazy_builtin_);
+ lazy_builtin_ = AddAnonymousCode(code, WasmCode::LazyStub);
+
+ for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
+ SetCodeTable(i, lazy_builtin_);
+ }
+
+ return lazy_builtin_;
+}
+
+WasmCompiledModule* NativeModule::compiled_module() const {
+ return *compiled_module_;
+}
+
+void NativeModule::SetCompiledModule(
+ Handle<WasmCompiledModule> compiled_module) {
+ DCHECK(compiled_module_.is_null());
+ compiled_module_ = compiled_module;
+}
+
+WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
+ WasmCode::Kind kind) {
+ std::unique_ptr<byte[]> reloc_info;
+ if (code->relocation_size() > 0) {
+ reloc_info.reset(new byte[code->relocation_size()]);
+ memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
+ }
+ WasmCode* ret = AddOwnedCode(
+ {code->instruction_start(),
+ static_cast<size_t>(code->instruction_size())},
+ std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
+ Nothing<uint32_t>(), kind, code->constant_pool_offset(),
+ (code->is_turbofanned() ? code->stack_slots() : 0),
+ (code->is_turbofanned() ? code->safepoint_table_offset() : 0), {});
+ if (ret == nullptr) return nullptr;
+ intptr_t delta = ret->instructions().start() - code->instruction_start();
+ int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+
+ RelocIterator orig_it(*code, mask);
+ for (RelocIterator it(ret->instructions(), ret->reloc_info(),
+ ret->constant_pool(), mask);
+ !it.done(); it.next(), orig_it.next()) {
+ if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
+ Code* call_target =
+ Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
+ it.rinfo()->set_target_address(nullptr,
+ GetLocalAddressFor(handle(call_target)),
+ SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } else {
+ if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
+ DCHECK(Heap::IsImmovable(it.rinfo()->target_object()));
+ } else {
+ it.rinfo()->apply(delta);
+ }
+ }
+ }
+ return ret;
+}
+
+WasmCode* NativeModule::AddCode(
+ const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
+ size_t safepoint_table_offset,
+ std::shared_ptr<ProtectedInstructions> protected_instructions,
+ bool is_liftoff) {
+ std::unique_ptr<byte[]> reloc_info;
+ if (desc.reloc_size) {
+ reloc_info.reset(new byte[desc.reloc_size]);
+ memcpy(reloc_info.get(), desc.buffer + desc.buffer_size - desc.reloc_size,
+ desc.reloc_size);
+ }
+ TurboAssembler* origin = reinterpret_cast<TurboAssembler*>(desc.origin);
+ WasmCode* ret = AddOwnedCode(
+ {desc.buffer, static_cast<size_t>(desc.instr_size)},
+ std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
+ WasmCode::Function, desc.instr_size - desc.constant_pool_size,
+ frame_slots, safepoint_table_offset, protected_instructions, is_liftoff);
+ if (ret == nullptr) return nullptr;
+
+ SetCodeTable(index, ret);
+ // TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kApplyMask;
+ // Needed to find target_object and runtime_entry on X64
+
+ AllowDeferredHandleDereference embedding_raw_address;
+ for (RelocIterator it(ret->instructions(), ret->reloc_info(),
+ ret->constant_pool(), mode_mask);
+ !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
+ DCHECK_EQ(*p, p->GetIsolate()->heap()->undefined_value());
+ it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ // rewrite code handles to direct pointers to the first instruction in the
+ // code object
+ Handle<Object> p = it.rinfo()->target_object_handle(origin);
+ Code* code = Code::cast(*p);
+ it.rinfo()->set_target_address(nullptr, GetLocalAddressFor(handle(code)),
+ SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ Address p = it.rinfo()->target_runtime_entry(origin);
+ it.rinfo()->set_target_runtime_entry(
+ origin->isolate(), p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } else {
+ intptr_t delta = ret->instructions().start() - desc.buffer;
+ it.rinfo()->apply(delta);
+ }
+ }
+ return ret;
+}
+
+#if V8_TARGET_ARCH_X64
+Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
+ MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
+ Address dest = code->instruction_start();
+ GenerateJumpTrampoline(&masm, dest);
+ CodeDesc code_desc;
+ masm.GetCode(nullptr, &code_desc);
+ WasmCode* wasm_code = AddOwnedCode(
+ {code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
+ Nothing<uint32_t>(), WasmCode::Trampoline, 0, 0, 0, {});
+ if (wasm_code == nullptr) return nullptr;
+ Address ret = wasm_code->instructions().start();
+ trampolines_.emplace(std::make_pair(dest, ret));
+ return ret;
+}
+#else
+Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
+ Address ret = code->instruction_start();
+ trampolines_.insert(std::make_pair(ret, ret));
+ return ret;
+}
+#endif
+
+Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
+ if (!Heap::IsImmovable(*code)) {
+ DCHECK(code->kind() == Code::STUB &&
+ CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI);
+ uint32_t key = code->stub_key();
+ auto copy = stubs_.find(key);
+ if (copy == stubs_.end()) {
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::CopiedStub);
+ copy = stubs_.emplace(std::make_pair(key, ret)).first;
+ }
+ return copy->second->instructions().start();
+ } else {
+ Address index = code->instruction_start();
+ auto trampoline_iter = trampolines_.find(index);
+ if (trampoline_iter == trampolines_.end()) {
+ return CreateTrampolineTo(code);
+ } else {
+ return trampoline_iter->second;
+ }
+ }
+}
+
+WasmCode* NativeModule::GetExportedWrapper(uint32_t index) {
+ auto found = exported_wasm_to_wasm_wrappers_.find(index);
+ if (found != exported_wasm_to_wasm_wrappers_.end()) {
+ return found->second;
+ }
+ return nullptr;
+}
+
+WasmCode* NativeModule::AddExportedWrapper(Handle<Code> code, uint32_t index) {
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::WasmToWasmWrapper);
+ ret->index_ = Just(index);
+ exported_wasm_to_wasm_wrappers_.insert(std::make_pair(index, ret));
+ return ret;
+}
+
+void NativeModule::LinkAll() {
+ for (uint32_t index = 0; index < code_table_.size(); ++index) {
+ Link(index);
+ }
+}
+
+void NativeModule::Link(uint32_t index) {
+ WasmCode* code = code_table_[index];
+ // skip imports
+ if (!code) return;
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL);
+ for (RelocIterator it(code->instructions(), code->reloc_info(),
+ code->constant_pool(), mode_mask);
+ !it.done(); it.next()) {
+ uint32_t index =
+ *(reinterpret_cast<uint32_t*>(it.rinfo()->target_address_address()));
+ const WasmCode* target = GetCode(index);
+ if (target == nullptr) continue;
+ Address target_addr = target->instructions().start();
+ DCHECK_NOT_NULL(target);
+ it.rinfo()->set_wasm_call_address(nullptr, target_addr,
+ ICacheFlushMode::SKIP_ICACHE_FLUSH);
+ }
+}
+
+Address NativeModule::AllocateForCode(size_t size) {
+ // this happens under a lock assumed by the caller.
+ size = RoundUp(size, kCodeAlignment);
+ DisjointAllocationPool mem = free_memory_.Allocate(size);
+ if (mem.IsEmpty()) {
+ if (!can_request_more_memory_) return nullptr;
+
+ Address hint = owned_memory_.empty()
+ ? nullptr
+ : reinterpret_cast<Address>(owned_memory_.back().end());
+ VirtualMemory empty_mem;
+ owned_memory_.push_back(empty_mem);
+ VirtualMemory& new_mem = owned_memory_.back();
+ wasm_code_manager_->TryAllocate(size, &new_mem, hint);
+ if (!new_mem.IsReserved()) return nullptr;
+ DisjointAllocationPool mem_pool(
+ reinterpret_cast<Address>(new_mem.address()),
+ reinterpret_cast<Address>(new_mem.end()));
+ wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
+
+ free_memory_.Merge(std::move(mem_pool));
+ mem = free_memory_.Allocate(size);
+ if (mem.IsEmpty()) return nullptr;
+ }
+ Address ret = mem.ranges().front().first;
+ Address end = ret + size;
+ Address commit_start = RoundUp(ret, base::OS::AllocatePageSize());
+ Address commit_end = RoundUp(end, base::OS::AllocatePageSize());
+ // {commit_start} will be either ret or the start of the next page.
+ // {commit_end} will be the start of the page after the one in which
+ // the allocation ends.
+ // We start from an aligned start, and we know we allocated vmem in
+ // page multiples.
+ // We just need to commit what's not committed. The page in which we
+ // start is already committed (or we start at the beginning of a page).
+ // The end needs to be committed all through the end of the page.
+ if (commit_start < commit_end) {
+#if V8_OS_WIN
+ // On Windows, we cannot commit a range that straddles different
+ // reservations of virtual memory. Because we bump-allocate, and because, if
+ // we need more memory, we append that memory at the end of the
+ // owned_memory_ list, we traverse that list in reverse order to find the
+ // reservation(s) that guide how to chunk the region to commit.
+ for (auto it = owned_memory_.crbegin(), rend = owned_memory_.crend();
+ it != rend && commit_start < commit_end; ++it) {
+ if (commit_end > it->end() || it->address() >= commit_end) continue;
+ Address start =
+ std::max(commit_start, reinterpret_cast<Address>(it->address()));
+ size_t commit_size = static_cast<size_t>(commit_end - start);
+ DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
+ if (!wasm_code_manager_->Commit(start, commit_size)) {
+ return nullptr;
+ }
+ committed_memory_ += commit_size;
+ commit_end = start;
+ }
+#else
+ size_t commit_size = static_cast<size_t>(commit_end - commit_start);
+ DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
+ if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
+ return nullptr;
+ }
+ committed_memory_ += commit_size;
+#endif
+ }
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(ret), kCodeAlignment));
+ allocated_memory_.Merge(std::move(mem));
+ TRACE_HEAP("ID: %zu. Code alloc: %p,+%zu\n", instance_id,
+ reinterpret_cast<void*>(ret), size);
+ return ret;
+}
+
+WasmCode* NativeModule::Lookup(Address pc) {
+ if (owned_code_.empty()) return nullptr;
+ // Make a fake WasmCode temp, to look into owned_code_
+ std::unique_ptr<WasmCode> temp(new WasmCode(pc));
+ auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), temp,
+ owned_code_comparer_);
+ if (iter == owned_code_.begin()) return nullptr;
+ --iter;
+ WasmCode* candidate = (*iter).get();
+ DCHECK_NOT_NULL(candidate);
+ if (candidate->instructions().start() <= pc &&
+ pc < candidate->instructions().start() +
+ candidate->instructions().size()) {
+ return candidate;
+ }
+ return nullptr;
+}
+
+WasmCode* NativeModule::CloneLazyBuiltinInto(uint32_t index) {
+ DCHECK_NOT_NULL(lazy_builtin());
+ WasmCode* ret = CloneCode(lazy_builtin());
+ SetCodeTable(index, ret);
+ ret->index_ = Just(index);
+ return ret;
+}
+
+bool NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
+ for (auto& pair : other->trampolines_) {
+ Address key = pair.first;
+ Address local =
+ GetLocalAddressFor(handle(Code::GetCodeFromTargetAddress(key)));
+ if (local == nullptr) return false;
+ trampolines_.emplace(std::make_pair(key, local));
+ }
+ for (auto& pair : other->stubs_) {
+ uint32_t key = pair.first;
+ WasmCode* clone = CloneCode(pair.second);
+ if (!clone) return false;
+ stubs_.emplace(std::make_pair(key, clone));
+ }
+ return true;
+}
+
+WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
+ std::unique_ptr<byte[]> reloc_info;
+ if (original_code->reloc_info().size() > 0) {
+ reloc_info.reset(new byte[original_code->reloc_info().size()]);
+ memcpy(reloc_info.get(), original_code->reloc_info().start(),
+ original_code->reloc_info().size());
+ }
+ WasmCode* ret = AddOwnedCode(
+ original_code->instructions(), std::move(reloc_info),
+ original_code->reloc_info().size(), original_code->index_,
+ original_code->kind(), original_code->constant_pool_offset_,
+ original_code->stack_slots(), original_code->safepoint_table_offset_,
+ original_code->protected_instructions_);
+ if (ret == nullptr) return nullptr;
+ if (!ret->IsAnonymous()) {
+ SetCodeTable(ret->index(), ret);
+ }
+ intptr_t delta =
+ ret->instructions().start() - original_code->instructions().start();
+ for (RelocIterator it(ret->instructions(), ret->reloc_info(),
+ ret->constant_pool(), RelocInfo::kApplyMask);
+ !it.done(); it.next()) {
+ it.rinfo()->apply(delta);
+ }
+ return ret;
+}
+
+void NativeModule::SetCodeTable(uint32_t index, wasm::WasmCode* code) {
+ code_table_[index] = code;
+}
+
+NativeModule::~NativeModule() {
+ TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
+ wasm_code_manager_->FreeNativeModuleMemories(this);
+}
+
+WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
+ : isolate_(isolate) {
+ DCHECK_LE(max_committed, kMaxWasmCodeMemory);
+ remaining_uncommitted_.SetValue(max_committed);
+}
+
+bool WasmCodeManager::Commit(Address start, size_t size) {
+ DCHECK(
+ IsAligned(reinterpret_cast<size_t>(start), base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(size, base::OS::AllocatePageSize()));
+ if (size > static_cast<size_t>(std::numeric_limits<intptr_t>::max())) {
+ return false;
+ }
+ // reserve the size.
+ intptr_t new_value = remaining_uncommitted_.Decrement(size);
+ if (new_value < 0) {
+ remaining_uncommitted_.Increment(size);
+ return false;
+ }
+ // TODO(v8:7105) Enable W^X instead of setting W|X permissions below.
+ bool ret = base::OS::SetPermissions(
+ start, size, base::OS::MemoryPermission::kReadWriteExecute);
+ if (!ret) {
+ // Highly unlikely.
+ remaining_uncommitted_.Increment(size);
+ return false;
+ }
+ // This API assumes main thread
+ isolate_->AdjustAmountOfExternalAllocatedMemory(size);
+ if (WouldGCHelp()) {
+ // This API does not assume main thread, and would schedule
+ // a GC if called from a different thread, instead of synchronously
+ // doing one.
+ isolate_->MemoryPressureNotification(MemoryPressureLevel::kCritical);
+ }
+ return ret;
+}
+
+bool WasmCodeManager::WouldGCHelp() const {
+ // If all we have is one module, or none, no GC would help.
+ // GC would help if there's some remaining native modules that
+ // would be collected.
+ if (active_ <= 1) return false;
+ // We have an expectation on the largest size a native function
+ // may have.
+ constexpr size_t kMaxNativeFunction = 32 * MB;
+ intptr_t remaining = remaining_uncommitted_.Value();
+ DCHECK_GE(remaining, 0);
+ return static_cast<size_t>(remaining) < kMaxNativeFunction;
+}
+
+void WasmCodeManager::AssignRanges(void* start, void* end,
+ NativeModule* native_module) {
+ lookup_map_.insert(std::make_pair(
+ reinterpret_cast<Address>(start),
+ std::make_pair(reinterpret_cast<Address>(end), native_module)));
+}
+
+void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
+ DCHECK_GT(size, 0);
+ size = RoundUp(size, base::OS::AllocatePageSize());
+ if (hint == nullptr) hint = base::OS::GetRandomMmapAddr();
+
+ if (!AlignedAllocVirtualMemory(
+ size, static_cast<size_t>(base::OS::AllocatePageSize()), hint, ret)) {
+ DCHECK(!ret->IsReserved());
+ }
+ TRACE_HEAP("VMem alloc: %p:%p (%zu)\n", ret->address(), ret->end(),
+ ret->size());
+}
+
+size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
+ // TODO(mtrofin): this should pick up its 'maximal code range size'
+ // from something embedder-provided
+ if (kRequiresCodeRange) return kMaxWasmCodeMemory;
+ DCHECK(kModuleCanAllocateMoreMemory);
+ size_t ret = base::OS::AllocatePageSize();
+ // a ballpark guesstimate on native inflation factor.
+ constexpr size_t kMultiplier = 4;
+
+ for (auto& function : module.functions) {
+ ret += kMultiplier * function.code.length();
+ }
+ return ret;
+}
+
+std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
+ const WasmModule& module) {
+ size_t code_size = GetAllocationChunk(module);
+ return NewNativeModule(
+ code_size, static_cast<uint32_t>(module.functions.size()),
+ module.num_imported_functions, kModuleCanAllocateMoreMemory);
+}
+
+std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
+ size_t size_estimate, uint32_t num_functions,
+ uint32_t num_imported_functions, bool can_request_more) {
+ VirtualMemory mem;
+ TryAllocate(size_estimate, &mem);
+ if (mem.IsReserved()) {
+ void* start = mem.address();
+ size_t size = mem.size();
+ void* end = mem.end();
+ std::unique_ptr<NativeModule> ret(new NativeModule(
+ num_functions, num_imported_functions, can_request_more, &mem, this));
+ TRACE_HEAP("New Module: ID:%zu. Mem: %p,+%zu\n", ret->instance_id, start,
+ size);
+ AssignRanges(start, end, ret.get());
+ ++active_;
+ return ret;
+ }
+
+ return nullptr;
+}
+
+std::unique_ptr<NativeModule> NativeModule::Clone() {
+ std::unique_ptr<NativeModule> ret = wasm_code_manager_->NewNativeModule(
+ owned_memory_.front().size(), FunctionCount(), num_imported_functions(),
+ can_request_more_memory_);
+ TRACE_HEAP("%zu cloned from %zu\n", ret->instance_id, instance_id);
+ if (!ret) return ret;
+
+ if (lazy_builtin() != nullptr) {
+ ret->lazy_builtin_ = ret->CloneCode(lazy_builtin());
+ }
+
+ if (!ret->CloneTrampolinesAndStubs(this)) return nullptr;
+
+ std::unordered_map<Address, Address, AddressHasher> reverse_lookup;
+ for (auto& pair : trampolines_) {
+ Address old_dest = pair.second;
+ auto local = ret->trampolines_.find(pair.first);
+ DCHECK(local != ret->trampolines_.end());
+ Address new_dest = local->second;
+ reverse_lookup.emplace(old_dest, new_dest);
+ }
+
+ for (auto& pair : stubs_) {
+ Address old_dest = pair.second->instructions().start();
+ auto local = ret->stubs_.find(pair.first);
+ DCHECK(local != ret->stubs_.end());
+ Address new_dest = local->second->instructions().start();
+ reverse_lookup.emplace(old_dest, new_dest);
+ }
+
+ for (auto& pair : ret->stubs_) {
+ WasmCode* new_stub = pair.second;
+ WasmCode* old_stub = stubs_.find(pair.first)->second;
+ PatchTrampolineAndStubCalls(old_stub, new_stub, reverse_lookup);
+ }
+ if (lazy_builtin_ != nullptr) {
+ PatchTrampolineAndStubCalls(lazy_builtin_, ret->lazy_builtin_,
+ reverse_lookup);
+ }
+
+ for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
+ const WasmCode* original_code = GetCode(i);
+ switch (original_code->kind()) {
+ case WasmCode::LazyStub: {
+ if (original_code->IsAnonymous()) {
+ ret->SetCodeTable(i, ret->lazy_builtin());
+ } else {
+ if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
+ }
+ } break;
+ case WasmCode::Function: {
+ WasmCode* new_code = ret->CloneCode(original_code);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ ret->specialization_data_ = specialization_data_;
+ return ret;
+}
+
+void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
+ DCHECK_GE(active_, 1);
+ --active_;
+ TRACE_HEAP("Freeing %zu\n", native_module->instance_id);
+ for (auto& vmem : native_module->owned_memory_) {
+ lookup_map_.erase(reinterpret_cast<Address>(vmem.address()));
+ Free(&vmem);
+ DCHECK(!vmem.IsReserved());
+ }
+ // No need to tell the GC anything if we're destroying the heap,
+ // which we currently indicate by having the isolate_ as null
+ if (isolate_ == nullptr) return;
+ size_t freed_mem = native_module->committed_memory_;
+ DCHECK(IsAligned(freed_mem, base::OS::AllocatePageSize()));
+ remaining_uncommitted_.Increment(freed_mem);
+ isolate_->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<int64_t>(freed_mem));
+}
+
+// TODO(wasm): We can make this more efficient if needed. For
+// example, we can preface the first instruction with a pointer to
+// the WasmCode. In the meantime, we have a separate API so we can
+// easily identify those places where we know we have the first
+// instruction PC.
+WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
+ return LookupCode(pc);
+}
+
+WasmCode* WasmCodeManager::LookupCode(Address pc) const {
+ if (lookup_map_.empty()) return nullptr;
+
+ auto iter = lookup_map_.upper_bound(pc);
+ if (iter == lookup_map_.begin()) return nullptr;
+ --iter;
+ Address range_start = iter->first;
+ Address range_end = iter->second.first;
+ NativeModule* candidate = iter->second.second;
+
+ DCHECK_NOT_NULL(candidate);
+ if (range_start <= pc && pc < range_end) {
+ return candidate->Lookup(pc);
+ }
+ return nullptr;
+}
+
+void WasmCodeManager::Free(VirtualMemory* mem) {
+ DCHECK(mem->IsReserved());
+ void* start = mem->address();
+ void* end = mem->end();
+ size_t size = mem->size();
+ mem->Free();
+ TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
+}
+
+intptr_t WasmCodeManager::remaining_uncommitted() const {
+ return remaining_uncommitted_.Value();
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
+#undef TRACE_HEAP
diff --git a/deps/v8/src/wasm/wasm-heap.h b/deps/v8/src/wasm/wasm-heap.h
index 60cbfb14ba..9775f18b9b 100644
--- a/deps/v8/src/wasm/wasm-heap.h
+++ b/deps/v8/src/wasm/wasm-heap.h
@@ -5,15 +5,37 @@
#ifndef V8_WASM_HEAP_H_
#define V8_WASM_HEAP_H_
+#include <functional>
#include <list>
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
#include "src/base/macros.h"
+#include "src/handles.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
namespace v8 {
+class Isolate;
namespace internal {
+
+struct CodeDesc;
+class Code;
+class WasmCompiledModule;
+
namespace wasm {
+using GlobalHandleAddress = Address;
+class NativeModule;
+struct WasmModule;
+
+struct AddressHasher {
+ size_t operator()(const Address& addr) const {
+ return std::hash<intptr_t>()(reinterpret_cast<intptr_t>(addr));
+ }
+};
+
// Sorted, disjoint and non-overlapping memory ranges. A range is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
@@ -60,6 +82,307 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
};
+using ProtectedInstructions =
+ std::vector<trap_handler::ProtectedInstructionData>;
+
+class V8_EXPORT_PRIVATE WasmCode final {
+ public:
+ enum Kind {
+ Function,
+ WasmToWasmWrapper,
+ WasmToJsWrapper,
+ LazyStub,
+ InterpreterStub,
+ CopiedStub,
+ Trampoline
+ };
+
+ Vector<byte> instructions() const { return instructions_; }
+ Vector<const byte> reloc_info() const {
+ return {reloc_info_.get(), reloc_size_};
+ }
+
+ uint32_t index() const { return index_.ToChecked(); }
+ // Anonymous functions are functions that don't carry an index, like
+ // trampolines.
+ bool IsAnonymous() const { return index_.IsNothing(); }
+ Kind kind() const { return kind_; }
+ NativeModule* owner() const { return owner_; }
+ Address constant_pool() const;
+ size_t constant_pool_offset() const { return constant_pool_offset_; }
+ size_t safepoint_table_offset() const { return safepoint_table_offset_; }
+ uint32_t stack_slots() const { return stack_slots_; }
+ bool is_liftoff() const { return is_liftoff_; }
+
+ size_t trap_handler_index() const;
+ void set_trap_handler_index(size_t);
+ bool HasTrapHandlerIndex() const;
+ void ResetTrapHandlerIndex();
+
+ const ProtectedInstructions& protected_instructions() const {
+ return *protected_instructions_.get();
+ }
+
+ void Disassemble(Isolate* isolate, const char* name, std::ostream& os) const;
+ void Print(Isolate* isolate) const;
+
+ ~WasmCode();
+
+ private:
+ friend class NativeModule;
+ friend class NativeModuleDeserializer;
+
+ // A constructor used just for implementing Lookup.
+ WasmCode(Address pc) : instructions_(pc, 0), index_(Nothing<uint32_t>()) {}
+
+ WasmCode(Vector<byte> instructions,
+ std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
+ NativeModule* owner, Maybe<uint32_t> index, Kind kind,
+ size_t constant_pool_offset, uint32_t stack_slots,
+ size_t safepoint_table_offset,
+ std::shared_ptr<ProtectedInstructions> protected_instructions,
+ bool is_liftoff = false)
+ : instructions_(instructions),
+ reloc_info_(std::move(reloc_info)),
+ reloc_size_(reloc_size),
+ owner_(owner),
+ index_(index),
+ kind_(kind),
+ constant_pool_offset_(constant_pool_offset),
+ stack_slots_(stack_slots),
+ safepoint_table_offset_(safepoint_table_offset),
+ protected_instructions_(protected_instructions),
+ is_liftoff_(is_liftoff) {}
+
+ WasmCode(const WasmCode&) = delete;
+ WasmCode& operator=(const WasmCode&) = delete;
+
+ Vector<byte> instructions_;
+ std::unique_ptr<const byte[]> reloc_info_;
+ size_t reloc_size_ = 0;
+ NativeModule* owner_ = nullptr;
+ Maybe<uint32_t> index_;
+ Kind kind_;
+ size_t constant_pool_offset_ = 0;
+ uint32_t stack_slots_ = 0;
+ // we care about safepoint data for wasm-to-js functions,
+ // since there may be stack/register tagged values for large number
+ // conversions.
+ size_t safepoint_table_offset_ = 0;
+ intptr_t trap_handler_index_ = -1;
+ std::shared_ptr<ProtectedInstructions> protected_instructions_;
+ bool is_liftoff_;
+};
+
+class WasmCodeManager;
+
+// Note that we currently need to add code on the main thread, because we may
+// trigger a GC if we believe there's a chance the GC would clear up native
+// modules. The code is ready for concurrency otherwise, we just need to be
+// careful about this GC consideration. See WouldGCHelp and
+// WasmCodeManager::Commit.
+class V8_EXPORT_PRIVATE NativeModule final {
+ public:
+ std::unique_ptr<NativeModule> Clone();
+
+ WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
+ size_t safepoint_table_offset,
+ std::shared_ptr<ProtectedInstructions>,
+ bool is_liftoff = false);
+
+ // A way to copy over JS-allocated code. This is because we compile
+ // certain wrappers using a different pipeline.
+ WasmCode* AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, uint32_t index);
+
+ // Add an interpreter wrapper. For the same reason as AddCodeCopy, we
+ // currently compile these using a different pipeline and we can't get a
+ // CodeDesc here. When adding interpreter wrappers, we do not insert them in
+ // the code_table, however, we let them self-identify as the {index} function
+ WasmCode* AddInterpreterWrapper(Handle<Code> code, uint32_t index);
+
+ // When starting lazy compilation, provide the WasmLazyCompile builtin by
+ // calling SetLazyBuiltin. It will initialize the code table with it, and the
+ // lazy_builtin_ field. The latter is used when creating entries for exported
+ // functions and indirect callable functions, so that they may be identified
+ // by the runtime.
+ WasmCode* SetLazyBuiltin(Handle<Code> code);
+
+ // ExportedWrappers are WasmToWasmWrappers for functions placed on import
+ // tables. We construct them as-needed.
+ WasmCode* GetExportedWrapper(uint32_t index);
+ WasmCode* AddExportedWrapper(Handle<Code> code, uint32_t index);
+
+ // FunctionCount is WasmModule::functions.size().
+ uint32_t FunctionCount() const;
+ WasmCode* GetCode(uint32_t index) const;
+
+ WasmCode* lazy_builtin() const { return lazy_builtin_; }
+
+ // We special-case lazy cloning because we currently rely on making copies
+ // of the lazy builtin, to be able to identify, in the runtime, which function
+ // the lazy builtin is a placeholder of. If we used trampolines, we would call
+ // the runtime function from a common pc. We could, then, figure who the
+ // caller was if the trampolines called rather than jumped to the common
+ // builtin. The logic for seeking though frames would change, though.
+ // TODO(mtrofin): perhaps we can do exactly that - either before or after
+ // this change.
+ WasmCode* CloneLazyBuiltinInto(uint32_t);
+
+ // For cctests, where we build both WasmModule and the runtime objects
+ // on the fly, and bypass the instance builder pipeline.
+ void ResizeCodeTableForTest(size_t);
+ void LinkAll();
+ void Link(uint32_t index);
+
+ // TODO(mtrofin): needed until we sort out exception handlers and
+ // source positions, which are still on the GC-heap.
+ WasmCompiledModule* compiled_module() const;
+ void SetCompiledModule(Handle<WasmCompiledModule>);
+
+ // Shorthand accessors to the specialization data content.
+ std::vector<wasm::GlobalHandleAddress>& function_tables() {
+ return specialization_data_.function_tables;
+ }
+ std::vector<wasm::GlobalHandleAddress>& signature_tables() {
+ return specialization_data_.signature_tables;
+ }
+
+ std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
+ return specialization_data_.empty_function_tables;
+ }
+ std::vector<wasm::GlobalHandleAddress>& empty_signature_tables() {
+ return specialization_data_.empty_signature_tables;
+ }
+
+ uint32_t num_imported_functions() const { return num_imported_functions_; }
+ size_t num_function_tables() const {
+ return specialization_data_.empty_function_tables.size();
+ }
+
+ size_t committed_memory() const { return committed_memory_; }
+ const size_t instance_id = 0;
+ ~NativeModule();
+
+ private:
+ friend class WasmCodeManager;
+ friend class NativeModuleSerializer;
+ friend class NativeModuleDeserializer;
+
+ struct WasmCodeUniquePtrComparer {
+ bool operator()(const std::unique_ptr<WasmCode>& a,
+ const std::unique_ptr<WasmCode>& b) {
+ DCHECK(a);
+ DCHECK(b);
+ return a->instructions().start() < b->instructions().start();
+ }
+ };
+
+ static base::AtomicNumber<uint32_t> next_id_;
+ NativeModule(const NativeModule&) = delete;
+ NativeModule& operator=(const NativeModule&) = delete;
+ NativeModule(uint32_t num_functions, uint32_t num_imports,
+ bool can_request_more, VirtualMemory* vmem,
+ WasmCodeManager* code_manager);
+
+ WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
+ Address AllocateForCode(size_t size);
+
+ // Primitive for adding code to the native module. All code added to a native
+ // module is owned by that module. Various callers get to decide on how the
+ // code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
+ // whether it has an index or is anonymous, etc.
+ WasmCode* AddOwnedCode(Vector<const byte> orig_instructions,
+ std::unique_ptr<const byte[]>&& reloc_info,
+ size_t reloc_size, Maybe<uint32_t> index,
+ WasmCode::Kind kind, size_t constant_pool_offset,
+ uint32_t stack_slots, size_t safepoint_table_offset,
+ std::shared_ptr<ProtectedInstructions>,
+ bool is_liftoff = false);
+ void SetCodeTable(uint32_t, wasm::WasmCode*);
+ WasmCode* CloneCode(const WasmCode*);
+ bool CloneTrampolinesAndStubs(const NativeModule* other);
+ WasmCode* Lookup(Address);
+ Address GetLocalAddressFor(Handle<Code>);
+ Address CreateTrampolineTo(Handle<Code>);
+
+ std::vector<std::unique_ptr<WasmCode>> owned_code_;
+ std::unordered_map<uint32_t, WasmCode*> exported_wasm_to_wasm_wrappers_;
+
+ WasmCodeUniquePtrComparer owned_code_comparer_;
+
+ std::vector<WasmCode*> code_table_;
+ uint32_t num_imported_functions_;
+
+ std::unordered_map<Address, Address, AddressHasher> trampolines_;
+ std::unordered_map<uint32_t, WasmCode*> stubs_;
+
+ DisjointAllocationPool free_memory_;
+ DisjointAllocationPool allocated_memory_;
+ std::list<VirtualMemory> owned_memory_;
+ WasmCodeManager* wasm_code_manager_;
+ wasm::WasmCode* lazy_builtin_ = nullptr;
+ base::Mutex allocation_mutex_;
+ Handle<WasmCompiledModule> compiled_module_;
+ size_t committed_memory_ = 0;
+ bool can_request_more_memory_;
+
+ // Specialization data that needs to be serialized and cloned.
+ // Keeping it groupped together because it makes cloning of all these
+ // elements a 1 line copy.
+ struct {
+ std::vector<wasm::GlobalHandleAddress> function_tables;
+ std::vector<wasm::GlobalHandleAddress> signature_tables;
+ std::vector<wasm::GlobalHandleAddress> empty_function_tables;
+ std::vector<wasm::GlobalHandleAddress> empty_signature_tables;
+ } specialization_data_;
+};
+
+class V8_EXPORT_PRIVATE WasmCodeManager final {
+ public:
+ // The only reason we depend on Isolate is to report native memory used
+ // and held by a GC-ed object. We'll need to mitigate that when we
+ // start sharing wasm heaps.
+ WasmCodeManager(v8::Isolate*, size_t max_committed);
+ // Create a new NativeModule. The caller is responsible for its
+ // lifetime. The native module will be given some memory for code,
+ // which will be page size aligned. The size of the initial memory
+ // is determined with a heuristic based on the total size of wasm
+ // code. The native module may later request more memory.
+ std::unique_ptr<NativeModule> NewNativeModule(const WasmModule&);
+ std::unique_ptr<NativeModule> NewNativeModule(size_t memory_estimate,
+ uint32_t num_functions,
+ uint32_t num_imported_functions,
+ bool can_request_more);
+
+ WasmCode* LookupCode(Address pc) const;
+ WasmCode* GetCodeFromStartAddress(Address pc) const;
+ intptr_t remaining_uncommitted() const;
+
+ private:
+ friend class NativeModule;
+
+ WasmCodeManager(const WasmCodeManager&) = delete;
+ WasmCodeManager& operator=(const WasmCodeManager&) = delete;
+ void TryAllocate(size_t size, VirtualMemory*, void* hint = nullptr);
+ bool Commit(Address, size_t);
+ // Currently, we uncommit a whole module, so all we need is account
+ // for the freed memory size. We do that in FreeNativeModuleMemories.
+ // There's no separate Uncommit.
+
+ void FreeNativeModuleMemories(NativeModule*);
+ void Free(VirtualMemory* mem);
+ void AssignRanges(void* start, void* end, NativeModule*);
+ size_t GetAllocationChunk(const WasmModule& module);
+ bool WouldGCHelp() const;
+
+ std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
+ // count of NativeModules not yet collected. Helps determine if it's
+ // worth requesting a GC on memory pressure.
+ size_t active_ = 0;
+ base::AtomicNumber<intptr_t> remaining_uncommitted_;
+ v8::Isolate* isolate_;
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 4269e18c8f..80d56a05f8 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -2,15 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <atomic>
#include <type_traits>
#include "src/wasm/wasm-interpreter.h"
#include "src/assembler-inl.h"
+#include "src/boxed-float.h"
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
#include "src/identity-map.h"
#include "src/objects-inl.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
@@ -122,7 +125,9 @@ namespace wasm {
V(I32AsmjsDivS, int32_t) \
V(I32AsmjsDivU, uint32_t) \
V(I32AsmjsRemS, int32_t) \
- V(I32AsmjsRemU, uint32_t)
+ V(I32AsmjsRemU, uint32_t) \
+ V(F32CopySign, Float32) \
+ V(F64CopySign, Float64)
#define FOREACH_OTHER_UNOP(V) \
V(I32Clz, uint32_t) \
@@ -133,14 +138,14 @@ namespace wasm {
V(I64Ctz, uint64_t) \
V(I64Popcnt, uint64_t) \
V(I64Eqz, uint64_t) \
- V(F32Abs, float) \
- V(F32Neg, float) \
+ V(F32Abs, Float32) \
+ V(F32Neg, Float32) \
V(F32Ceil, float) \
V(F32Floor, float) \
V(F32Trunc, float) \
V(F32NearestInt, float) \
- V(F64Abs, double) \
- V(F64Neg, double) \
+ V(F64Abs, Float64) \
+ V(F64Neg, Float64) \
V(F64Ceil, double) \
V(F64Floor, double) \
V(F64Trunc, double) \
@@ -177,26 +182,8 @@ namespace wasm {
namespace {
-// CachedInstanceInfo encapsulates globals and memory buffer runtime information
-// for a wasm instance. The interpreter caches that information when
-// constructed, copying it from the {WasmInstanceObject}. It expects it be
-// notified on changes to it, e.g. {GrowMemory}. We cache it because interpreter
-// perf is sensitive to accesses to this information.
-//
-// TODO(wasm): other runtime information, such as indirect function table, or
-// code table (incl. imports) is currently handled separately. Consider
-// unifying, if possible, with {ModuleEnv}.
-
-struct CachedInstanceInfo {
- CachedInstanceInfo(byte* globals, byte* mem, uint32_t size)
- : globals_start(globals), mem_start(mem), mem_size(size) {}
- // We do not expect the location of the globals buffer to
- // change for an instance.
- byte* const globals_start = nullptr;
- // The memory buffer may change because of GrowMemory
- byte* mem_start = nullptr;
- uint32_t mem_size = 0;
-};
+constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
+constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) {
@@ -324,8 +311,9 @@ inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
return JSMax(a, b);
}
-inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
- return copysignf(a, b);
+inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
+ return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
+ (b.get_bits() & kFloat32SignBitMask));
}
inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
@@ -336,8 +324,9 @@ inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
return JSMax(a, b);
}
-inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
- return copysign(a, b);
+inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
+ return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
+ (b.get_bits() & kFloat64SignBitMask));
}
inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
@@ -381,11 +370,11 @@ inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
}
int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros32(val);
+ return base::bits::CountLeadingZeros(val);
}
uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros32(val);
+ return base::bits::CountTrailingZeros(val);
}
uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
@@ -397,11 +386,11 @@ inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
}
int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros64(val);
+ return base::bits::CountLeadingZeros(val);
}
inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros64(val);
+ return base::bits::CountTrailingZeros(val);
}
inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
@@ -412,12 +401,12 @@ inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
return val == 0 ? 1 : 0;
}
-inline float ExecuteF32Abs(float a, TrapReason* trap) {
- return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
+inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
+ return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
}
-inline float ExecuteF32Neg(float a, TrapReason* trap) {
- return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
+inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
+ return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
}
inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
@@ -435,12 +424,12 @@ inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
return result;
}
-inline double ExecuteF64Abs(double a, TrapReason* trap) {
- return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
+inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
+ return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
}
-inline double ExecuteF64Neg(double a, TrapReason* trap) {
- return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
+inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
+ return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
}
inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
@@ -578,8 +567,8 @@ inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
return static_cast<float>(a);
}
-inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
- return bit_cast<float>(a);
+inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
+ return Float32::FromBits(a);
}
inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
@@ -606,31 +595,16 @@ inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
return static_cast<double>(a);
}
-inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
- return bit_cast<double>(a);
+inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
+ return Float64::FromBits(a);
}
inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
- return a.to_unchecked<int32_t>();
+ return a.to_f32_boxed().get_bits();
}
inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
- return a.to_unchecked<int64_t>();
-}
-
-inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
- MaybeHandle<WasmInstanceObject> instance_obj,
- CachedInstanceInfo* mem_info) {
- Handle<WasmInstanceObject> instance = instance_obj.ToHandleChecked();
- Isolate* isolate = instance->GetIsolate();
- int32_t ret = WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
-
- // Ensure the effects of GrowMemory have been observed by the interpreter.
- // See {UpdateMemory}. In all cases, we are in agreement with the runtime
- // object's view.
- DCHECK_EQ(mem_info->mem_size, instance->wasm_context()->mem_size);
- DCHECK_EQ(mem_info->mem_start, instance->wasm_context()->mem_start);
- return ret;
+ return a.to_f64_boxed().get_bits();
}
enum InternalOpcode {
@@ -653,17 +627,29 @@ const char* OpcodeName(uint32_t val) {
// Unwrap a wasm to js wrapper, return the callable heap object.
// If the wrapper would throw a TypeError, return a null handle.
Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
- Handle<Code> js_wrapper) {
- DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, js_wrapper->kind());
- Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
- DCHECK_EQ(2, deopt_data->length());
- intptr_t js_imports_table_loc = static_cast<intptr_t>(
- HeapNumber::cast(deopt_data->get(0))->value_as_bits());
- Handle<FixedArray> js_imports_table(
- reinterpret_cast<FixedArray**>(js_imports_table_loc));
+ WasmCodeWrapper wrapper) {
+ Handle<FixedArray> js_imports_table;
int index = 0;
- CHECK(deopt_data->get(1)->ToInt32(&index));
- DCHECK_GT(js_imports_table->length(), index);
+ if (wrapper.IsCodeObject()) {
+ Handle<Code> js_wrapper = wrapper.GetCode();
+ DCHECK(Code::WASM_TO_JS_FUNCTION == js_wrapper->kind());
+ Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
+ DCHECK_EQ(2, deopt_data->length());
+ intptr_t js_imports_table_loc = static_cast<intptr_t>(
+ HeapNumber::cast(deopt_data->get(0))->value_as_bits());
+ js_imports_table = Handle<FixedArray>(
+ reinterpret_cast<FixedArray**>(js_imports_table_loc));
+ CHECK(deopt_data->get(1)->ToInt32(&index));
+ DCHECK_GT(js_imports_table->length(), index);
+ } else {
+ const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
+ DCHECK_EQ(wasm::WasmCode::WasmToJsWrapper, wasm_code->kind());
+ js_imports_table = Handle<FixedArray>(wasm_code->owner()
+ ->compiled_module()
+ ->owning_instance()
+ ->js_imports_table());
+ index = 1 + 3 * static_cast<int>(wasm_code->index());
+ }
Handle<Object> obj(js_imports_table->get(index), isolate);
if (obj->IsCallable()) {
return Handle<HeapObject>::cast(obj);
@@ -806,6 +792,7 @@ class SideTable : public ZoneObject {
for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
bool unreachable = control_stack.back().unreachable;
if (unreachable) {
TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
@@ -826,25 +813,35 @@ class SideTable : public ZoneObject {
case kExprBlock:
case kExprLoop: {
bool is_loop = opcode == kExprLoop;
- BlockTypeOperand<false> operand(&i, i.pc());
- TRACE("control @%u: %s, arity %d\n", i.pc_offset(),
- is_loop ? "Loop" : "Block", operand.arity);
+ BlockTypeOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ if (operand.type == kWasmVar) {
+ operand.sig = module->signatures[operand.sig_index];
+ }
+ TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
+ is_loop ? "Loop" : "Block",
+ operand.in_arity(), operand.out_arity());
CLabel* label = CLabel::New(&control_transfer_zone, stack_height,
- is_loop ? 0 : operand.arity);
- control_stack.emplace_back(i.pc(), label, operand.arity);
+ is_loop ? operand.in_arity()
+ : operand.out_arity());
+ control_stack.emplace_back(i.pc(), label, operand.out_arity());
copy_unreachable();
if (is_loop) label->Bind(i.pc());
break;
}
case kExprIf: {
- TRACE("control @%u: If\n", i.pc_offset());
- BlockTypeOperand<false> operand(&i, i.pc());
+ BlockTypeOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ if (operand.type == kWasmVar) {
+ operand.sig = module->signatures[operand.sig_index];
+ }
+ TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
+ operand.in_arity(), operand.out_arity());
CLabel* end_label =
- CLabel::New(&control_transfer_zone, stack_height, operand.arity);
+ CLabel::New(&control_transfer_zone, stack_height,
+ operand.out_arity());
CLabel* else_label =
CLabel::New(&control_transfer_zone, stack_height, 0);
control_stack.emplace_back(i.pc(), end_label, else_label,
- operand.arity);
+ operand.out_arity());
copy_unreachable();
if (!unreachable) else_label->Ref(i.pc(), stack_height);
break;
@@ -880,22 +877,22 @@ class SideTable : public ZoneObject {
break;
}
case kExprBr: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrTable: {
- BranchTableOperand<false> operand(&i, i.pc());
- BranchTableIterator<false> iterator(&i, operand);
+ BranchTableOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ BranchTableIterator<Decoder::kNoValidate> iterator(&i, operand);
TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
operand.table_count);
if (!unreachable) {
@@ -997,7 +994,15 @@ class CodeMap {
: MaybeHandle<WasmInstanceObject>();
}
- Code* GetImportedFunction(uint32_t function_index) {
+ const wasm::WasmCode* GetImportedFunction(uint32_t function_index) {
+ DCHECK(has_instance());
+ DCHECK_GT(module_->num_imported_functions, function_index);
+ const wasm::NativeModule* native_module =
+ instance()->compiled_module()->GetNativeModule();
+ return native_module->GetCode(function_index);
+ }
+
+ Code* GetImportedFunctionGC(uint32_t function_index) {
DCHECK(has_instance());
DCHECK_GT(module_->num_imported_functions, function_index);
FixedArray* code_table = instance()->compiled_module()->ptr_to_code_table();
@@ -1080,12 +1085,14 @@ Handle<Object> WasmValueToNumber(Factory* factory, WasmValue val,
// Convert JS value to WebAssembly, spec here:
// https://github.com/WebAssembly/design/blob/master/JS.md#towebassemblyvalue
+// Return WasmValue() (i.e. of type kWasmStmt) on failure. In that case, an
+// exception will be pending on the isolate.
WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
wasm::ValueType type) {
switch (type) {
case kWasmI32: {
MaybeHandle<Object> maybe_i32 = Object::ToInt32(isolate, value);
- // TODO(clemensh): Handle failure here (unwind).
+ if (maybe_i32.is_null()) return {};
int32_t value;
CHECK(maybe_i32.ToHandleChecked()->ToInt32(&value));
return WasmValue(value);
@@ -1095,13 +1102,13 @@ WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
UNREACHABLE();
case kWasmF32: {
MaybeHandle<Object> maybe_number = Object::ToNumber(value);
- // TODO(clemensh): Handle failure here (unwind).
+ if (maybe_number.is_null()) return {};
return WasmValue(
static_cast<float>(maybe_number.ToHandleChecked()->Number()));
}
case kWasmF64: {
MaybeHandle<Object> maybe_number = Object::ToNumber(value);
- // TODO(clemensh): Handle failure here (unwind).
+ if (maybe_number.is_null()) return {};
return WasmValue(maybe_number.ToHandleChecked()->Number());
}
default:
@@ -1111,6 +1118,42 @@ WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
}
}
+// Like a static_cast from src to dst, but specialized for boxed floats.
+template <typename dst, typename src>
+struct converter {
+ dst operator()(src val) const { return static_cast<dst>(val); }
+};
+template <>
+struct converter<Float64, uint64_t> {
+ Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
+};
+template <>
+struct converter<Float32, uint32_t> {
+ Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
+};
+template <>
+struct converter<uint64_t, Float64> {
+ uint64_t operator()(Float64 val) const { return val.get_bits(); }
+};
+template <>
+struct converter<uint32_t, Float32> {
+ uint32_t operator()(Float32 val) const { return val.get_bits(); }
+};
+
+template <typename T>
+V8_INLINE bool has_nondeterminism(T val) {
+ static_assert(!std::is_floating_point<T>::value, "missing specialization");
+ return false;
+}
+template <>
+V8_INLINE bool has_nondeterminism<float>(float val) {
+ return std::isnan(val);
+}
+template <>
+V8_INLINE bool has_nondeterminism<double>(double val) {
+ return std::isnan(val);
+}
+
// Responsible for executing code directly.
class ThreadImpl {
struct Activation {
@@ -1120,10 +1163,9 @@ class ThreadImpl {
};
public:
- ThreadImpl(Zone* zone, CodeMap* codemap,
- CachedInstanceInfo* cached_instance_info)
+ ThreadImpl(Zone* zone, CodeMap* codemap, WasmContext* wasm_context)
: codemap_(codemap),
- cached_instance_info_(cached_instance_info),
+ wasm_context_(wasm_context),
zone_(zone),
frames_(zone),
activations_(zone) {}
@@ -1247,7 +1289,7 @@ class ThreadImpl {
WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
Isolate* isolate) {
DCHECK(isolate->has_pending_exception());
- // TODO(wasm): Add wasm exception handling (would return true).
+ // TODO(wasm): Add wasm exception handling (would return HANDLED).
USE(isolate->pending_exception());
TRACE("----- UNWIND -----\n");
DCHECK_LT(0, activations_.size());
@@ -1283,7 +1325,7 @@ class ThreadImpl {
friend class InterpretedFrameImpl;
CodeMap* codemap_;
- CachedInstanceInfo* const cached_instance_info_;
+ WasmContext* wasm_context_;
Zone* zone_;
WasmValue* stack_start_ = nullptr; // Start of allocated stack space.
WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
@@ -1372,11 +1414,13 @@ class ThreadImpl {
pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
switch (code->orig_start[pc]) {
case kExprCallFunction: {
- CallFunctionOperand<false> operand(decoder, code->at(pc));
+ CallFunctionOperand<Decoder::kNoValidate> operand(decoder,
+ code->at(pc));
return pc + 1 + operand.length;
}
case kExprCallIndirect: {
- CallIndirectOperand<false> operand(decoder, code->at(pc));
+ CallIndirectOperand<Decoder::kNoValidate> operand(decoder,
+ code->at(pc));
return pc + 1 + operand.length;
}
default:
@@ -1432,28 +1476,35 @@ class ThreadImpl {
// ^ 0 ^ sp_
DCHECK_LE(dest, sp_);
DCHECK_LE(dest + arity, sp_);
- if (arity) memcpy(dest, sp_ - arity, arity * sizeof(*sp_));
+ if (arity) memmove(dest, sp_ - arity, arity * sizeof(*sp_));
sp_ = dest + arity;
}
template <typename mtype>
- inline bool BoundsCheck(uint32_t mem_size, uint32_t offset, uint32_t index) {
- return sizeof(mtype) <= mem_size && offset <= mem_size - sizeof(mtype) &&
- index <= mem_size - sizeof(mtype) - offset;
+ inline byte* BoundsCheckMem(uint32_t offset, uint32_t index) {
+ uint32_t mem_size = wasm_context_->mem_size;
+ if (sizeof(mtype) > mem_size) return nullptr;
+ if (offset > (mem_size - sizeof(mtype))) return nullptr;
+ if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
+ // Compute the effective address of the access, making sure to condition
+ // the index even in the in-bounds case.
+ return wasm_context_->mem_start + offset +
+ (index & wasm_context_->mem_mask);
}
template <typename ctype, typename mtype>
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
MachineRepresentation rep) {
- MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
+ MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
+ sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
- if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, operand.offset,
- index)) {
+ byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
+ if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- byte* addr = cached_instance_info_->mem_start + operand.offset + index;
- WasmValue result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
+ WasmValue result(
+ converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
Push(result);
len = 1 + operand.length;
@@ -1462,7 +1513,7 @@ class ThreadImpl {
tracing::TraceMemoryOperation(
tracing::kWasmInterpreted, false, rep, operand.offset + index,
code->function->func_index, static_cast<int>(pc),
- cached_instance_info_->mem_start);
+ wasm_context_->mem_start);
}
return true;
@@ -1471,35 +1522,114 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
MachineRepresentation rep) {
- MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
- WasmValue val = Pop();
+ MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
+ sizeof(ctype));
+ ctype val = Pop().to<ctype>();
uint32_t index = Pop().to<uint32_t>();
- if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, operand.offset,
- index)) {
+ byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
+ if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- byte* addr = cached_instance_info_->mem_start + operand.offset + index;
- WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>()));
+ WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
len = 1 + operand.length;
- if (std::is_same<float, ctype>::value) {
- possible_nondeterminism_ |= std::isnan(val.to<float>());
- } else if (std::is_same<double, ctype>::value) {
- possible_nondeterminism_ |= std::isnan(val.to<double>());
- }
-
if (FLAG_wasm_trace_memory) {
tracing::TraceMemoryOperation(
tracing::kWasmInterpreted, true, rep, operand.offset + index,
code->function->func_index, static_cast<int>(pc),
- cached_instance_info_->mem_start);
+ wasm_context_->mem_start);
}
return true;
}
+ template <typename type>
+ bool ExtractAtomicBinOpParams(Decoder* decoder, InterpreterCode* code,
+ Address& address, pc_t pc, type& val,
+ int& len) {
+ MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc + 1),
+ sizeof(type));
+ val = Pop().to<uint32_t>();
+ uint32_t index = Pop().to<uint32_t>();
+ address = BoundsCheckMem<type>(operand.offset, index);
+ if (!address) {
+ DoTrap(kTrapMemOutOfBounds, pc);
+ return false;
+ }
+ len = 2 + operand.length;
+ return true;
+ }
+
+ bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
+ InterpreterCode* code, pc_t pc, int& len) {
+ WasmValue result;
+ switch (opcode) {
+// TODO(gdeepti): Remove work-around when the bots are upgraded to a more
+// recent gcc version. The gcc bots (Android ARM, linux) currently use
+// gcc 4.8, in which atomics are insufficiently supported, also Bug#58016
+// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016)
+#if __GNUG__ && __GNUC__ < 5
+#define ATOMIC_BINOP_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
+ return false; \
+ } \
+ result = WasmValue( \
+ __##operation(reinterpret_cast<type*>(addr), val, __ATOMIC_SEQ_CST)); \
+ break; \
+ }
+#else
+#define ATOMIC_BINOP_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<std::type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<std::" #type \
+ ">, and " #type); \
+ result = WasmValue( \
+ std::operation(reinterpret_cast<std::atomic<std::type>*>(addr), val)); \
+ break; \
+ }
+#endif
+ ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
+#if __GNUG__ && __GNUC__ < 5
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange_n);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange_n);
+ ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange_n);
+#else
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
+#endif
+#undef ATOMIC_BINOP_CASE
+ default:
+ return false;
+ }
+ Push(result);
+ return true;
+ }
+
// Check if our control stack (frames_) exceeds the limit. Trigger stack
// overflow if it does, and unwinding the current frame.
// Returns true if execution can continue, false if the current activation was
@@ -1507,8 +1637,15 @@ class ThreadImpl {
// Do call this function immediately *after* pushing a new frame. The pc of
// the top frame will be reset to 0 if the stack check fails.
bool DoStackCheck() WARN_UNUSED_RESULT {
- // Sum up the size of all dynamically growing structures.
- if (V8_LIKELY(frames_.size() <= kV8MaxWasmInterpretedStackSize)) {
+ // The goal of this stack check is not to prevent actual stack overflows,
+ // but to simulate stack overflows during the execution of compiled code.
+ // That is why this function uses FLAG_stack_size, even though the value
+ // stack actually lies in zone memory.
+ const size_t stack_size_limit = FLAG_stack_size * KB;
+ // Sum up the value stack size and the control stack size.
+ const size_t current_stack_size =
+ (sp_ - stack_start_) + frames_.size() * sizeof(Frame);
+ if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
return true;
}
if (!codemap()->has_instance()) {
@@ -1551,16 +1688,22 @@ class ThreadImpl {
// Do first check for a breakpoint, in order to set hit_break correctly.
const char* skip = " ";
int len = 1;
- byte opcode = code->start[pc];
- byte orig = opcode;
- if (V8_UNLIKELY(opcode == kInternalBreakpoint)) {
+ byte orig = code->start[pc];
+ WasmOpcode opcode = static_cast<WasmOpcode>(orig);
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
+ }
+ if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
orig = code->orig_start[pc];
+ if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
+ opcode =
+ static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
+ }
if (SkipBreakpoint(code, pc)) {
// skip breakpoint by switching on original code.
skip = "[skip] ";
} else {
- TRACE("@%-3zu: [break] %-24s:", pc,
- WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+ TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
TraceValueStack();
TRACE("\n");
hit_break = true;
@@ -1573,8 +1716,7 @@ class ThreadImpl {
if (max > 0) --max;
USE(skip);
- TRACE("@%-3zu: %s%-24s:", pc, skip,
- WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+ TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
TraceValueStack();
TRACE("\n");
@@ -1592,17 +1734,20 @@ class ThreadImpl {
case kExprNop:
break;
case kExprBlock: {
- BlockTypeOperand<false> operand(&decoder, code->at(pc));
+ BlockTypeOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
len = 1 + operand.length;
break;
}
case kExprLoop: {
- BlockTypeOperand<false> operand(&decoder, code->at(pc));
+ BlockTypeOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
len = 1 + operand.length;
break;
}
case kExprIf: {
- BlockTypeOperand<false> operand(&decoder, code->at(pc));
+ BlockTypeOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -1628,13 +1773,15 @@ class ThreadImpl {
break;
}
case kExprBr: {
- BreakDepthOperand<false> operand(&decoder, code->at(pc));
+ BreakDepthOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
len = DoBreak(code, pc, operand.depth);
TRACE(" br => @%zu\n", pc + len);
break;
}
case kExprBrIf: {
- BreakDepthOperand<false> operand(&decoder, code->at(pc));
+ BreakDepthOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -1647,8 +1794,9 @@ class ThreadImpl {
break;
}
case kExprBrTable: {
- BranchTableOperand<false> operand(&decoder, code->at(pc));
- BranchTableIterator<false> iterator(&decoder, operand);
+ BranchTableOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
+ BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, operand);
uint32_t key = Pop().to<uint32_t>();
uint32_t depth = 0;
if (key >= operand.table_count) key = operand.table_count;
@@ -1673,44 +1821,47 @@ class ThreadImpl {
break;
}
case kExprI32Const: {
- ImmI32Operand<false> operand(&decoder, code->at(pc));
+ ImmI32Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
- ImmI64Operand<false> operand(&decoder, code->at(pc));
+ ImmI64Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
- ImmF32Operand<false> operand(&decoder, code->at(pc));
+ ImmF32Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
- ImmF64Operand<false> operand(&decoder, code->at(pc));
+ ImmF64Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprGetLocal: {
- LocalIndexOperand<false> operand(&decoder, code->at(pc));
+ LocalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
Push(GetStackValue(frames_.back().sp + operand.index));
len = 1 + operand.length;
break;
}
case kExprSetLocal: {
- LocalIndexOperand<false> operand(&decoder, code->at(pc));
+ LocalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue val = Pop();
SetStackValue(frames_.back().sp + operand.index, val);
len = 1 + operand.length;
break;
}
case kExprTeeLocal: {
- LocalIndexOperand<false> operand(&decoder, code->at(pc));
+ LocalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue val = Pop();
SetStackValue(frames_.back().sp + operand.index, val);
Push(val);
@@ -1722,7 +1873,8 @@ class ThreadImpl {
break;
}
case kExprCallFunction: {
- CallFunctionOperand<false> operand(&decoder, code->at(pc));
+ CallFunctionOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
InterpreterCode* target = codemap()->GetCode(operand.index);
if (target->function->imported) {
CommitPc(pc);
@@ -1754,7 +1906,8 @@ class ThreadImpl {
continue; // don't bump pc
} break;
case kExprCallIndirect: {
- CallIndirectOperand<false> operand(&decoder, code->at(pc));
+ CallIndirectOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
@@ -1781,9 +1934,10 @@ class ThreadImpl {
}
} break;
case kExprGetGlobal: {
- GlobalIndexOperand<false> operand(&decoder, code->at(pc));
+ GlobalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
- byte* ptr = cached_instance_info_->globals_start + global->offset;
+ byte* ptr = wasm_context_->globals_start + global->offset;
WasmValue val;
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
@@ -1800,9 +1954,10 @@ class ThreadImpl {
break;
}
case kExprSetGlobal: {
- GlobalIndexOperand<false> operand(&decoder, code->at(pc));
+ GlobalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
- byte* ptr = cached_instance_info_->globals_start + global->offset;
+ byte* ptr = wasm_context_->globals_start + global->offset;
WasmValue val = Pop();
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
@@ -1838,8 +1993,8 @@ class ThreadImpl {
LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
- LOAD_CASE(F32LoadMem, float, float, kFloat32);
- LOAD_CASE(F64LoadMem, double, double, kFloat64);
+ LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
+ LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
#undef LOAD_CASE
#define STORE_CASE(name, ctype, mtype, rep) \
@@ -1857,23 +2012,23 @@ class ThreadImpl {
STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
- STORE_CASE(F32StoreMem, float, float, kFloat32);
- STORE_CASE(F64StoreMem, double, double, kFloat64);
+ STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
+ STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
#undef STORE_CASE
-#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
- case kExpr##name: { \
- uint32_t index = Pop().to<uint32_t>(); \
- ctype result; \
- if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, 0, index)) { \
- result = defval; \
- } else { \
- byte* addr = cached_instance_info_->mem_start + index; \
- /* TODO(titzer): alignment for asmjs load mem? */ \
- result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
- } \
- Push(WasmValue(result)); \
- break; \
+#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
+ case kExpr##name: { \
+ uint32_t index = Pop().to<uint32_t>(); \
+ ctype result; \
+ byte* addr = BoundsCheckMem<mtype>(0, index); \
+ if (!addr) { \
+ result = defval; \
+ } else { \
+ /* TODO(titzer): alignment for asmjs load mem? */ \
+ result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
+ } \
+ Push(WasmValue(result)); \
+ break; \
}
ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
@@ -1890,9 +2045,8 @@ class ThreadImpl {
case kExpr##name: { \
WasmValue val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
- if (BoundsCheck<mtype>(cached_instance_info_->mem_size, 0, index)) { \
- byte* addr = cached_instance_info_->mem_start + index; \
- /* TODO(titzer): alignment for asmjs store mem? */ \
+ byte* addr = BoundsCheckMem<mtype>(0, index); \
+ if (addr) { \
*(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
} \
Push(val); \
@@ -1906,16 +2060,23 @@ class ThreadImpl {
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
case kExprGrowMemory: {
- MemoryIndexOperand<false> operand(&decoder, code->at(pc));
+ MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Push(WasmValue(ExecuteGrowMemory(
- delta_pages, codemap_->maybe_instance(), cached_instance_info_)));
+ Handle<WasmInstanceObject> instance =
+ codemap()->maybe_instance().ToHandleChecked();
+ DCHECK_EQ(wasm_context_, instance->wasm_context()->get());
+ Isolate* isolate = instance->GetIsolate();
+ int32_t result =
+ WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
+ Push(WasmValue(result));
len = 1 + operand.length;
break;
}
case kExprMemorySize: {
- MemoryIndexOperand<false> operand(&decoder, code->at(pc));
- Push(WasmValue(static_cast<uint32_t>(cached_instance_info_->mem_size /
+ MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
+ Push(WasmValue(static_cast<uint32_t>(wasm_context_->mem_size /
WasmModule::kPageSize)));
len = 1 + operand.length;
break;
@@ -1926,69 +2087,53 @@ class ThreadImpl {
case kExprI32ReinterpretF32: {
WasmValue val = Pop();
Push(WasmValue(ExecuteI32ReinterpretF32(val)));
- possible_nondeterminism_ |= std::isnan(val.to<float>());
break;
}
case kExprI64ReinterpretF64: {
WasmValue val = Pop();
Push(WasmValue(ExecuteI64ReinterpretF64(val)));
- possible_nondeterminism_ |= std::isnan(val.to<double>());
break;
}
+ case kAtomicPrefix: {
+ if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
+ break;
+ }
+
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
case kExpr##name: { \
WasmValue rval = Pop(); \
WasmValue lval = Pop(); \
- WasmValue result(lval.to<ctype>() op rval.to<ctype>()); \
- Push(result); \
+ auto result = lval.to<ctype>() op rval.to<ctype>(); \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ Push(WasmValue(result)); \
break; \
}
FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP
-#define EXECUTE_OTHER_BINOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- volatile ctype rval = Pop().to<ctype>(); \
- volatile ctype lval = Pop().to<ctype>(); \
- WasmValue result(Execute##name(lval, rval, &trap)); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(result); \
- break; \
+#define EXECUTE_OTHER_BINOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ ctype rval = Pop().to<ctype>(); \
+ ctype lval = Pop().to<ctype>(); \
+ auto result = Execute##name(lval, rval, &trap); \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(WasmValue(result)); \
+ break; \
}
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
- case kExprF32CopySign: {
- // Handle kExprF32CopySign separately because it may introduce
- // observable non-determinism.
- TrapReason trap = kTrapCount;
- volatile float rval = Pop().to<float>();
- volatile float lval = Pop().to<float>();
- WasmValue result(ExecuteF32CopySign(lval, rval, &trap));
- Push(result);
- possible_nondeterminism_ |= std::isnan(rval);
- break;
- }
- case kExprF64CopySign: {
- // Handle kExprF32CopySign separately because it may introduce
- // observable non-determinism.
- TrapReason trap = kTrapCount;
- volatile double rval = Pop().to<double>();
- volatile double lval = Pop().to<double>();
- WasmValue result(ExecuteF64CopySign(lval, rval, &trap));
- Push(result);
- possible_nondeterminism_ |= std::isnan(rval);
- break;
- }
-#define EXECUTE_OTHER_UNOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- volatile ctype val = Pop().to<ctype>(); \
- WasmValue result(Execute##name(val, &trap)); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(result); \
- break; \
+#define EXECUTE_OTHER_UNOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ ctype val = Pop().to<ctype>(); \
+ auto result = Execute##name(val, &trap); \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(WasmValue(result)); \
+ break; \
}
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP
@@ -2000,7 +2145,7 @@ class ThreadImpl {
}
#ifdef DEBUG
- if (!WasmOpcodes::IsControlOpcode(static_cast<WasmOpcode>(opcode))) {
+ if (!WasmOpcodes::IsControlOpcode(opcode)) {
DCHECK_EQ(expected_new_stack_height, StackHeight());
}
#endif
@@ -2118,9 +2263,8 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
- // TODO(clemensh): Remove this, call JS via existing wasm-to-js wrapper, using
- // CallExternalWasmFunction.
- ExternalCallResult CallExternalJSFunction(Isolate* isolate, Handle<Code> code,
+ ExternalCallResult CallExternalJSFunction(Isolate* isolate,
+ WasmCodeWrapper code,
FunctionSig* signature) {
Handle<HeapObject> target = UnwrapWasmToJSWrapper(isolate, code);
@@ -2166,13 +2310,16 @@ class ThreadImpl {
if (signature->return_count() > 0) {
// TODO(wasm): Handle multiple returns.
DCHECK_EQ(1, signature->return_count());
- Push(ToWebAssemblyValue(isolate, retval, signature->GetReturn()));
+ WasmValue value =
+ ToWebAssemblyValue(isolate, retval, signature->GetReturn());
+ if (value.type() == kWasmStmt) return TryHandleException(isolate);
+ Push(value);
}
return {ExternalCallResult::EXTERNAL_RETURNED};
}
ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
- Handle<Code> code,
+ WasmCodeWrapper code,
FunctionSig* sig) {
Handle<WasmDebugInfo> debug_info(codemap()->instance()->debug_info(),
isolate);
@@ -2216,6 +2363,16 @@ class ThreadImpl {
offset += param_size;
}
+ // Ensure that there is enough space in the arg_buffer to hold the return
+ // value(s).
+ uint32_t return_size = 0;
+ for (ValueType t : sig->returns()) {
+ return_size += 1 << ElementSizeLog2Of(t);
+ }
+ if (arg_buffer.size() < return_size) {
+ arg_buffer.resize(return_size);
+ }
+
// Wrap the arg_buffer data pointer in a handle. As this is an aligned
// pointer, to the GC it will look like a Smi.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
@@ -2223,13 +2380,26 @@ class ThreadImpl {
DCHECK(!arg_buffer_obj->IsHeapObject());
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- args[compiler::CWasmEntryParameters::kCodeObject] = code;
+ args[compiler::CWasmEntryParameters::kCodeObject] =
+ code.IsCodeObject()
+ ? Handle<Object>::cast(code.GetCode())
+ : Handle<Object>::cast(isolate->factory()->NewForeign(
+ code.GetWasmCode()->instructions().start(), TENURED));
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
+ trap_handler::SetThreadInWasm();
MaybeHandle<Object> maybe_retval =
Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
- if (maybe_retval.is_null()) return TryHandleException(isolate);
+ TRACE(" => External wasm function returned%s\n",
+ maybe_retval.is_null() ? " with exception" : "");
+
+ if (maybe_retval.is_null()) {
+ DCHECK(!trap_handler::IsThreadInWasm());
+ return TryHandleException(isolate);
+ }
+
+ trap_handler::ClearThreadInWasm();
// Pop arguments off the stack.
sp_ -= num_args;
@@ -2262,21 +2432,38 @@ class ThreadImpl {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
- if (code->kind() == Code::WASM_FUNCTION) {
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_EQ(2, deopt_data->length());
- WasmInstanceObject* target_instance =
- WasmInstanceObject::cast(WeakCell::cast(deopt_data->get(0))->value());
- if (target_instance != codemap()->instance()) {
- return CallExternalWasmFunction(isolate, code, signature);
+ if (code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_WASM_FUNCTION) {
+ auto func_info = GetWasmFunctionInfo(isolate, code);
+ if (*func_info.instance.ToHandleChecked() != codemap()->instance()) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
}
- int target_func_idx = Smi::ToInt(deopt_data->get(1));
- DCHECK_LE(0, target_func_idx);
+ DCHECK_LE(0, func_info.func_index);
return {ExternalCallResult::INTERNAL,
- codemap()->GetCode(target_func_idx)};
+ codemap()->GetCode(func_info.func_index)};
}
- return CallExternalJSFunction(isolate, code, signature);
+ return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
+ }
+
+ ExternalCallResult CallWasmCode(Isolate* isolate, const wasm::WasmCode* code,
+ FunctionSig* signature) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+
+ if (code->kind() == wasm::WasmCode::Function) {
+ DCHECK_EQ(*code->owner()->compiled_module()->owning_instance(),
+ codemap()->instance());
+ return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
+ }
+ if (code->kind() == wasm::WasmCode::WasmToJsWrapper) {
+ return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
+ } else if (code->kind() == wasm::WasmCode::WasmToWasmWrapper) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
+ }
+ return {ExternalCallResult::INVALID_FUNC};
}
ExternalCallResult CallImportedFunction(uint32_t function_index) {
@@ -2285,17 +2472,36 @@ class ThreadImpl {
Isolate* isolate = codemap()->instance()->GetIsolate();
HandleScope handle_scope(isolate);
- Handle<Code> target(codemap()->GetImportedFunction(function_index),
- isolate);
- return CallCodeObject(isolate, target,
+ if (FLAG_wasm_jit_to_native) {
+ const wasm::WasmCode* target =
+ codemap()->GetImportedFunction(function_index);
+ return CallWasmCode(isolate, target,
codemap()->module()->functions[function_index].sig);
+ } else {
+ Handle<Code> target(codemap()->GetImportedFunctionGC(function_index),
+ isolate);
+ return CallCodeObject(isolate, target,
+ codemap()->module()->functions[function_index].sig);
+ }
}
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- if (!codemap()->has_instance() ||
- !codemap()->instance()->compiled_module()->has_function_tables()) {
+ bool no_func_tables = !codemap()->has_instance();
+ if (FLAG_wasm_jit_to_native) {
+ no_func_tables = no_func_tables || codemap()
+ ->instance()
+ ->compiled_module()
+ ->GetNativeModule()
+ ->function_tables()
+ .empty();
+ } else {
+ no_func_tables =
+ no_func_tables ||
+ !codemap()->instance()->compiled_module()->has_function_tables();
+ }
+ if (no_func_tables) {
// No instance. Rely on the information stored in the WasmModule.
// TODO(wasm): This is only needed for testing. Refactor testing to use
// the same paths as production.
@@ -2304,13 +2510,12 @@ class ThreadImpl {
if (!code) return {ExternalCallResult::INVALID_FUNC};
if (code->function->sig_index != sig_index) {
// If not an exact match, we have to do a canonical check.
- // TODO(titzer): make this faster with some kind of caching?
- const WasmIndirectFunctionTable* table =
- &module()->function_tables[table_index];
- int function_key = table->map.Find(code->function->sig);
- if (function_key < 0 ||
- (function_key !=
- table->map.Find(module()->signatures[sig_index]))) {
+ int function_canonical_id =
+ module()->signature_ids[code->function->sig_index];
+ int expected_canonical_id = module()->signature_ids[sig_index];
+ DCHECK_EQ(function_canonical_id,
+ module()->signature_map.Find(code->function->sig));
+ if (function_canonical_id != expected_canonical_id) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
}
@@ -2321,53 +2526,92 @@ class ThreadImpl {
codemap()->instance()->compiled_module();
Isolate* isolate = compiled_module->GetIsolate();
- Code* target;
+ const wasm::WasmCode* target = nullptr;
+ Code* target_gc = nullptr;
{
DisallowHeapAllocation no_gc;
// Get function to be called directly from the live instance to see latest
// changes to the tables.
// Canonicalize signature index.
- // TODO(titzer): make this faster with some kind of caching?
- const WasmIndirectFunctionTable* table =
- &module()->function_tables[table_index];
- FunctionSig* sig = module()->signatures[sig_index];
- uint32_t canonical_sig_index = table->map.Find(sig);
-
- // Check signature.
- FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
- if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- // Reconstitute the global handle to sig_table, and, further below,
- // to the function table, from the address stored in the
- // respective table of tables.
- int table_index_as_int = static_cast<int>(table_index);
- Handle<FixedArray> sig_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(sig_tables, table_index_as_int)));
- if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
+ uint32_t canonical_sig_index = module()->signature_ids[sig_index];
+ DCHECK_EQ(canonical_sig_index,
+ module()->signature_map.Find(module()->signatures[sig_index]));
+
+ if (!FLAG_wasm_jit_to_native) {
+ // Check signature.
+ FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
+ if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ // Reconstitute the global handle to sig_table, and, further below,
+ // to the function table, from the address stored in the
+ // respective table of tables.
+ int table_index_as_int = static_cast<int>(table_index);
+ Handle<FixedArray> sig_table(reinterpret_cast<FixedArray**>(
+ WasmCompiledModule::GetTableValue(sig_tables, table_index_as_int)));
+ if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ int found_sig =
+ Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
+ }
+
+ // Get code object.
+ FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
+ DCHECK_EQ(sig_tables->length(), fun_tables->length());
+ Handle<FixedArray> fun_table(reinterpret_cast<FixedArray**>(
+ WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int)));
+ DCHECK_EQ(sig_table->length(), fun_table->length());
+ target_gc = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ } else {
+ // Check signature.
+ std::vector<GlobalHandleAddress>& sig_tables =
+ compiled_module->GetNativeModule()->signature_tables();
+ if (table_index >= sig_tables.size()) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ // Reconstitute the global handle to sig_table, and, further below,
+ // to the function table, from the address stored in the
+ // respective table of tables.
+ int table_index_as_int = static_cast<int>(table_index);
+ Handle<FixedArray> sig_table(
+ reinterpret_cast<FixedArray**>(sig_tables[table_index_as_int]));
+ if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ int found_sig =
+ Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
+ }
- // Get code object.
- FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
- DCHECK_EQ(sig_tables->length(), fun_tables->length());
- Handle<FixedArray> fun_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int)));
- DCHECK_EQ(sig_table->length(), fun_table->length());
- target = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ // Get code object.
+ std::vector<GlobalHandleAddress>& fun_tables =
+ compiled_module->GetNativeModule()->function_tables();
+ DCHECK_EQ(sig_tables.size(), fun_tables.size());
+ Handle<FixedArray> fun_table(
+ reinterpret_cast<FixedArray**>(fun_tables[table_index_as_int]));
+ DCHECK_EQ(sig_table->length(), fun_table->length());
+ Address first_instr =
+ Foreign::cast(fun_table->get(static_cast<int>(entry_index)))
+ ->foreign_address();
+ target =
+ isolate->wasm_code_manager()->GetCodeFromStartAddress(first_instr);
+ }
}
// Call the code object. Use a new HandleScope to avoid leaking /
// accumulating handles in the outer scope.
HandleScope handle_scope(isolate);
- FunctionSig* signature =
- &codemap()->module()->signatures[table_index][sig_index];
- return CallCodeObject(isolate, handle(target, isolate), signature);
+ FunctionSig* signature = module()->signatures[sig_index];
+ if (FLAG_wasm_jit_to_native) {
+ return CallWasmCode(isolate, target, signature);
+ } else {
+ return CallCodeObject(isolate, handle(target_gc, isolate), signature);
+ }
}
inline Activation current_activation() {
@@ -2559,9 +2803,6 @@ uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
//============================================================================
class WasmInterpreterInternals : public ZoneObject {
public:
- // We cache the memory information of the debugged instance here, and all
- // threads (currently, one) share it and update it in case of {GrowMemory}.
- CachedInstanceInfo cached_instance_info_;
// Create a copy of the module bytes for the interpreter, since the passed
// pointer might be invalidated after constructing the interpreter.
const ZoneVector<uint8_t> module_bytes_;
@@ -2571,13 +2812,11 @@ class WasmInterpreterInternals : public ZoneObject {
WasmInterpreterInternals(Isolate* isolate, Zone* zone,
const WasmModule* module,
const ModuleWireBytes& wire_bytes,
- byte* globals_start, byte* mem_start,
- uint32_t mem_size)
- : cached_instance_info_(globals_start, mem_start, mem_size),
- module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
+ WasmContext* wasm_context)
+ : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
codemap_(isolate, module, module_bytes_.data(), zone),
threads_(zone) {
- threads_.emplace_back(zone, &codemap_, &cached_instance_info_);
+ threads_.emplace_back(zone, &codemap_, wasm_context);
}
};
@@ -2586,12 +2825,10 @@ class WasmInterpreterInternals : public ZoneObject {
//============================================================================
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
- byte* globals_start, byte* mem_start,
- uint32_t mem_size)
+ WasmContext* wasm_context)
: zone_(isolate->allocator(), ZONE_NAME),
internals_(new (&zone_) WasmInterpreterInternals(
- isolate, &zone_, module, wire_bytes, globals_start, mem_start,
- mem_size)) {}
+ isolate, &zone_, module, wire_bytes, wasm_context)) {}
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
@@ -2643,14 +2880,6 @@ WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
return ToThread(&internals_->threads_[id]);
}
-void WasmInterpreter::UpdateMemory(byte* mem_start, uint32_t mem_size) {
- // We assume one thread. Things are likely to be more complicated than this
- // in a multi-threaded case.
- DCHECK_EQ(1, internals_->threads_.size());
- internals_->cached_instance_info_.mem_start = mem_start;
- internals_->cached_instance_info_.mem_size = mem_size;
-}
-
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
internals_->codemap_.AddFunction(function, nullptr, nullptr);
}
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index c317ab2f7f..cdfa74cfad 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -16,6 +16,7 @@ class AccountingAllocator;
namespace internal {
class WasmInstanceObject;
+struct WasmContext;
namespace wasm {
@@ -172,8 +173,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
};
WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes, byte* globals_start,
- byte* mem_start, uint32_t mem_size);
+ const ModuleWireBytes& wire_bytes, WasmContext* wasm_context);
~WasmInterpreter();
//==========================================================================
@@ -199,11 +199,6 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
Thread* GetThread(int id);
//==========================================================================
- // Update the cached module env memory parameters after a grow memory event.
- //==========================================================================
- void UpdateMemory(byte* mem_start, uint32_t mem_size);
-
- //==========================================================================
// Testing functionality.
//==========================================================================
// Manually adds a function to this interpreter. The func_index of the
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 6a017365aa..03cc26e017 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/wasm/wasm-js.h"
+
#include "src/api-natives.h"
#include "src/api.h"
#include "src/assert-scope.h"
@@ -13,11 +15,10 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/parse-info.h"
-
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-api.h"
-#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
@@ -192,6 +193,10 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Module must be invoked with 'new'");
+ return;
+ }
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
thrower.CompileError("Wasm code generation disallowed by embedder");
return;
@@ -342,14 +347,20 @@ void WebAssemblyInstantiateToPairCallback(
// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
MicrotasksScope does_not_run_microtasks(isolate,
MicrotasksScope::kDoNotRunMicrotasks);
HandleScope scope(args.GetIsolate());
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (i_isolate->wasm_instance_callback()(args)) return;
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
+ return;
+ }
GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -368,6 +379,8 @@ void WebAssemblyInstantiateStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
// we use i_isolate in DCHECKS in the ASSIGN statements.
USE(i_isolate);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
@@ -397,6 +410,8 @@ void WebAssemblyInstantiateStreaming(
void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
i::wasm::ScheduledErrorThrower thrower(i_isolate,
@@ -477,6 +492,10 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Table must be invoked with 'new'");
+ return;
+ }
if (!args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a table descriptor");
return;
@@ -530,6 +549,10 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Memory must be invoked with 'new'");
+ return;
+ }
if (!args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a memory descriptor");
return;
@@ -586,7 +609,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
if (buffer->is_shared()) {
Maybe<bool> result =
- buffer->SetIntegrityLevel(buffer, i::FROZEN, i::Object::DONT_THROW);
+ buffer->SetIntegrityLevel(buffer, i::FROZEN, i::kDontThrow);
if (!result.FromJust()) {
thrower.TypeError(
"Status of setting SetIntegrityLevel of buffer is false.");
@@ -617,8 +640,8 @@ void WebAssemblyInstanceGetExports(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Instance.exports()");
+ i::wasm::ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.Instance.exports()");
EXTRACT_THIS(receiver, WasmInstanceObject);
i::Handle<i::JSObject> exports_object(receiver->exports_object());
args.GetReturnValue().Set(Utils::ToLocal(exports_object));
@@ -714,6 +737,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Parameter 1.
i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
+ // TODO(titzer): use WasmExportedFunction::IsWasmExportedFunction() here.
if (!value->IsNull(i_isolate) &&
(!value->IsJSFunction() ||
i::Handle<i::JSFunction>::cast(value)->code()->kind() !=
@@ -804,7 +828,7 @@ void WebAssemblyMemoryGetBuffer(
// buffer are out of sync, handle that here when bounds checks, and Grow
// are handled correctly.
Maybe<bool> result =
- buffer->SetIntegrityLevel(buffer, i::FROZEN, i::Object::DONT_THROW);
+ buffer->SetIntegrityLevel(buffer, i::FROZEN, i::kDontThrow);
if (!result.FromJust()) {
thrower.TypeError(
"Status of setting SetIntegrityLevel of buffer is false.");
@@ -870,8 +894,9 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup WebAssembly
Handle<String> name = v8_str(isolate, "WebAssembly");
- Handle<JSFunction> cons = factory->NewFunction(isolate->strict_function_map(),
- name, MaybeHandle<Code>());
+ NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate->strict_function_map(), LanguageMode::kStrict);
+ Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 0e10688cd6..f298fd3fe1 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -29,7 +29,7 @@ constexpr size_t kV8MaxWasmDataSegments = 100000;
constexpr size_t kV8MaxWasmMemoryPages = 32767; // ~ 2 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
-constexpr size_t kV8MaxWasmFunctionSize = 128 * 1024;
+constexpr size_t kV8MaxWasmFunctionSize = 7654321;
constexpr size_t kV8MaxWasmFunctionLocals = 50000;
constexpr size_t kV8MaxWasmFunctionParams = 1000;
constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
@@ -43,14 +43,14 @@ constexpr size_t kV8MaxWasmMemories = 1;
constexpr size_t kSpecMaxWasmMemoryPages = 65536;
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
+// TODO(titzer): move WASM page size constant here.
+constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * 65536;
+
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
std::numeric_limits<uint32_t>::max()) // maximum base value
+ std::numeric_limits<uint32_t>::max(); // maximum index value
-// Limit the control stack size of the C++ wasm interpreter.
-constexpr size_t kV8MaxWasmInterpretedStackSize = 64 * 1024;
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 4ddda98189..9f037c898d 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -27,7 +27,7 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
DCHECK_EQ(0, size % base::OS::CommitPageSize());
- // AllocateGuarded makes the whole region inaccessible by default.
+ // The Reserve makes the whole region inaccessible by default.
allocation_base =
isolate->array_buffer_allocator()->Reserve(allocation_length);
if (allocation_base == nullptr) {
@@ -45,10 +45,15 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
return memory;
} else {
+ // TODO(titzer): use guard regions for minicage and merge with above code.
+ CHECK_LE(size, kV8MaxWasmMemoryBytes);
+ allocation_length =
+ base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
void* memory =
- size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
+ size == 0
+ ? nullptr
+ : isolate->array_buffer_allocator()->Allocate(allocation_length);
allocation_base = memory;
- allocation_length = size;
return memory;
}
}
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 1054795f70..2676f3ade7 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -7,7 +7,7 @@
#include "src/flags.h"
#include "src/handles.h"
-#include "src/objects.h"
+#include "src/objects/js-array.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 997496bb29..407ef08700 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -389,7 +389,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer.write_u8(1); // table count
buffer.write_u8(kWasmAnyFunctionTypeForm);
- buffer.write_u8(kResizableMaximumFlag);
+ buffer.write_u8(kHasMaximumFlag);
buffer.write_size(indirect_functions_.size());
buffer.write_size(indirect_functions_.size());
FixupSection(buffer, start);
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 2c8266592a..bfeeb0fbff 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -8,19 +8,20 @@
#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/code-stubs.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
-
-#include "src/compiler/wasm-compiler.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -54,8 +55,8 @@ constexpr const char* WasmException::kRuntimeIdStr;
// static
constexpr const char* WasmException::kRuntimeValuesStr;
-void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
- Handle<FixedArray> code_table) {
+void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
+ Handle<FixedArray> code_table) {
DisallowHeapAllocation no_gc;
std::vector<trap_handler::ProtectedInstructionData> unpacked;
@@ -76,29 +77,69 @@ void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
byte* base = code->entry();
- const int mode_mask =
- RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
- for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ FixedArray* protected_instructions = code->protected_instructions();
+ DCHECK(protected_instructions != nullptr);
+ for (int i = 0; i < protected_instructions->length();
+ i += Code::kTrapDataSize) {
trap_handler::ProtectedInstructionData data;
- data.instr_offset = static_cast<uint32_t>(it.rinfo()->data());
- data.landing_offset = static_cast<uint32_t>(it.rinfo()->pc() - base);
- // Check that now over-/underflow happened.
- DCHECK_EQ(it.rinfo()->data(), data.instr_offset);
- DCHECK_EQ(it.rinfo()->pc() - base, data.landing_offset);
+ data.instr_offset =
+ protected_instructions
+ ->GetValueChecked<Smi>(isolate, i + Code::kTrapCodeOffset)
+ ->value();
+ data.landing_offset =
+ protected_instructions
+ ->GetValueChecked<Smi>(isolate, i + Code::kTrapLandingOffset)
+ ->value();
unpacked.emplace_back(data);
}
+
if (unpacked.empty()) continue;
- int size = code->CodeSize();
- const int index = RegisterHandlerData(reinterpret_cast<void*>(base), size,
+ const int index = RegisterHandlerData(base, code->instruction_size(),
unpacked.size(), &unpacked[0]);
+
unpacked.clear();
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+
// TODO(eholk): if index is negative, fail.
DCHECK_LE(0, index);
code->set_trap_handler_index(Smi::FromInt(index));
}
}
+void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
+ wasm::NativeModule* native_module) {
+ DisallowHeapAllocation no_gc;
+
+ for (uint32_t i = native_module->num_imported_functions(),
+ e = native_module->FunctionCount();
+ i < e; ++i) {
+ wasm::WasmCode* code = native_module->GetCode(i);
+
+ if (code == nullptr || code->kind() != wasm::WasmCode::Function) {
+ continue;
+ }
+
+ if (code->HasTrapHandlerIndex()) continue;
+
+ Address base = code->instructions().start();
+
+ size_t size = code->instructions().size();
+ const int index =
+ RegisterHandlerData(base, size, code->protected_instructions().size(),
+ code->protected_instructions().data());
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+
+ // TODO(eholk): if index is negative, fail.
+ CHECK_LE(0, index);
+ code->set_trap_handler_index(static_cast<size_t>(index));
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
if (name.function_->name.is_set()) {
@@ -129,50 +170,56 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
return nullptr;
}
-Handle<Code> UnwrapExportWrapper(Handle<JSFunction> export_wrapper) {
- Handle<Code> export_wrapper_code = handle(export_wrapper->code());
- DCHECK_EQ(export_wrapper_code->kind(), Code::JS_TO_WASM_FUNCTION);
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
- DCHECK(!it.done());
- Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() != Code::WASM_FUNCTION &&
- target->kind() != Code::WASM_TO_JS_FUNCTION &&
- target->kind() != Code::WASM_INTERPRETER_ENTRY)
- continue;
-// There should only be this one call to wasm code.
-#ifdef DEBUG
- for (it.next(); !it.done(); it.next()) {
- Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- DCHECK(code->kind() != Code::WASM_FUNCTION &&
- code->kind() != Code::WASM_TO_JS_FUNCTION &&
- code->kind() != Code::WASM_INTERPRETER_ENTRY);
+Handle<Object> GetOrCreateIndirectCallWrapper(
+ Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
+ WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig) {
+ Address new_context_address =
+ reinterpret_cast<Address>(owning_instance->wasm_context()->get());
+ if (!wasm_code.IsCodeObject()) {
+ DCHECK_NE(wasm_code.GetWasmCode()->kind(),
+ wasm::WasmCode::WasmToWasmWrapper);
+ wasm::NativeModule* native_module = wasm_code.GetWasmCode()->owner();
+ // The only reason we pass owning_instance is for the GC case. Check
+ // that the values match.
+ DCHECK_EQ(owning_instance->compiled_module()->GetNativeModule(),
+ native_module);
+ // We create the wrapper on the module exporting the function. This
+ // wrapper will only be called as indirect call.
+ wasm::WasmCode* exported_wrapper =
+ native_module->GetExportedWrapper(wasm_code.GetWasmCode()->index());
+ if (exported_wrapper == nullptr) {
+ Handle<Code> new_wrapper = compiler::CompileWasmToWasmWrapper(
+ isolate, wasm_code, sig, new_context_address);
+ exported_wrapper = native_module->AddExportedWrapper(
+ new_wrapper, wasm_code.GetWasmCode()->index());
}
-#endif
- return handle(target);
+ Address target = exported_wrapper->instructions().start();
+ return isolate->factory()->NewForeign(target, TENURED);
}
- UNREACHABLE();
+ Handle<Code> code = compiler::CompileWasmToWasmWrapper(
+ isolate, wasm_code, sig, new_context_address);
+ AttachWasmFunctionInfo(isolate, code, owning_instance,
+ static_cast<int>(index));
+ return code;
}
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, WasmFunction* function,
- Handle<Code> code) {
+ Handle<Object> code_or_foreign) {
DCHECK_EQ(0, dispatch_tables->length() % 4);
for (int i = 0; i < dispatch_tables->length(); i += 4) {
- int table_index = Smi::ToInt(dispatch_tables->get(i + 1));
Handle<FixedArray> function_table(
FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
Handle<FixedArray> signature_table(
FixedArray::cast(dispatch_tables->get(i + 3)), isolate);
if (function) {
- // TODO(titzer): the signature might need to be copied to avoid
- // a dangling pointer in the signature map.
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- auto& func_table = instance->module()->function_tables[table_index];
- uint32_t sig_index = func_table.map.FindOrInsert(function->sig);
- signature_table->set(index, Smi::FromInt(static_cast<int>(sig_index)));
- function_table->set(index, *code);
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ auto sig_index = instance->module()->signature_map.Find(function->sig);
+ signature_table->set(index, Smi::FromInt(sig_index));
+ function_table->set(index, *code_or_foreign);
} else {
signature_table->set(index, Smi::FromInt(-1));
function_table->set(index, Smi::kZero);
@@ -185,8 +232,14 @@ bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// separate callback that includes information about the module about to be
// compiled. For the time being, pass an empty string as placeholder for the
// sources.
- return isolate->allow_code_gen_callback() == nullptr ||
- isolate->allow_code_gen_callback()(
+ if (auto wasm_codegen_callback = isolate->allow_wasm_code_gen_callback()) {
+ return wasm_codegen_callback(
+ v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(isolate->factory()->empty_string()));
+ }
+ auto codegen_callback = isolate->allow_code_gen_callback();
+ return codegen_callback == nullptr ||
+ codegen_callback(
v8::Utils::ToLocal(context),
v8::Utils::ToLocal(isolate->factory()->empty_string()));
}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index a45d421ee8..e44ca995b0 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -15,6 +15,7 @@
#include "src/wasm/decoder.h"
#include "src/wasm/signature-map.h"
+#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
@@ -98,7 +99,6 @@ struct WasmIndirectFunctionTable {
std::vector<int32_t> values; // function table, -1 indicating invalid.
bool imported = false; // true if imported.
bool exported = false; // true if exported.
- SignatureMap map; // canonicalizing map for sig indexes.
};
// Static representation of how to initialize a table.
@@ -157,7 +157,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_exported_functions = 0;
WireBytesRef name = {0, 0};
// TODO(wasm): Add url here, for spec'ed location information.
- std::vector<FunctionSig*> signatures;
+ std::vector<FunctionSig*> signatures; // by signature index
+ std::vector<uint32_t> signature_ids; // by signature index
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmIndirectFunctionTable> function_tables;
@@ -165,6 +166,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmExport> export_table;
std::vector<WasmException> exceptions;
std::vector<WasmTableInit> table_inits;
+ SignatureMap signature_map; // canonicalizing map for signature indexes.
WasmModule() : WasmModule(nullptr) {}
WasmModule(std::unique_ptr<Zone> owned);
@@ -208,7 +210,7 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
// Get a string stored in the module bytes representing a name.
WasmName GetNameOrNull(WireBytesRef ref) const {
- if (!ref.is_set()) return {NULL, 0}; // no name.
+ if (!ref.is_set()) return {nullptr, 0}; // no name.
CHECK(BoundsCheck(ref.offset(), ref.length()));
return Vector<const char>::cast(
module_bytes_.SubVector(ref.offset(), ref.end_offset()));
@@ -279,16 +281,22 @@ Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
// to the wrapped wasm function; in all other cases, return nullptr.
// The returned pointer is owned by the wasm instance target belongs to. The
// result is alive as long as the instance exists.
+// TODO(titzer): move this to WasmExportedFunction.
WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
-// {export_wrapper} is known to be an export.
-Handle<Code> UnwrapExportWrapper(Handle<JSFunction> export_wrapper);
-
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- int index, WasmFunction* function, Handle<Code> code);
+ int index, WasmFunction* function,
+ Handle<Object> code_or_foreign);
+
+Handle<Object> GetOrCreateIndirectCallWrapper(
+ Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
+ WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
+
+void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
+ Handle<FixedArray> code_table);
void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
- Handle<FixedArray> code_table);
+ wasm::NativeModule* native_module);
const char* ExternalKindName(WasmExternalKind);
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index c435fc7913..27f7d68d17 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -39,21 +39,21 @@ ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray,
kInstancesOffset)
-ACCESSORS(WasmMemoryObject, wasm_context, Managed<WasmContext>,
- kWasmContextOffset)
// WasmInstanceObject
+ACCESSORS(WasmInstanceObject, wasm_context, Managed<WasmContext>,
+ kWasmContextOffset)
ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
kCompiledModuleOffset)
ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
kMemoryObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, JSArrayBuffer,
- kMemoryBufferOffset)
ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
kGlobalsBufferOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
+ kTableObjectOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
kFunctionTablesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
@@ -151,29 +151,6 @@ FORWARD_SHARED(bool, is_asm_js)
return handle(TYPE::cast(weak_##NAME()->value())); \
}
-#define WCM_LARGE_NUMBER(TYPE, NAME) \
- TYPE WasmCompiledModule::NAME() const { \
- Object* value = get(kID_##NAME); \
- DCHECK(value->IsMutableHeapNumber()); \
- return static_cast<TYPE>(HeapNumber::cast(value)->value()); \
- } \
- \
- void WasmCompiledModule::set_##NAME(TYPE value) { \
- Object* number = get(kID_##NAME); \
- DCHECK(number->IsMutableHeapNumber()); \
- HeapNumber::cast(number)->set_value(static_cast<double>(value)); \
- } \
- \
- void WasmCompiledModule::recreate_##NAME(Handle<WasmCompiledModule> obj, \
- Factory* factory, TYPE init_val) { \
- Handle<HeapNumber> number = factory->NewHeapNumber( \
- static_cast<double>(init_val), MutableMode::MUTABLE, TENURED); \
- obj->set(kID_##NAME, *number); \
- } \
- bool WasmCompiledModule::has_##NAME() const { \
- return get(kID_##NAME)->IsMutableHeapNumber(); \
- }
-
#define DEFINITION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
WCM_PROPERTY_TABLE(DEFINITION)
#undef DECLARATION
@@ -192,11 +169,6 @@ bool WasmTableObject::has_maximum_length() {
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
-Address WasmCompiledModule::GetGlobalsStartOrNull() const {
- return has_globals_start() ? reinterpret_cast<Address>(globals_start())
- : nullptr;
-}
-
void WasmCompiledModule::ReplaceCodeTableForTesting(
Handle<FixedArray> testing_table) {
set_code_table(testing_table);
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 012aa6644b..565f38a9e7 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -11,6 +11,7 @@
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
@@ -62,25 +63,17 @@ class CompiledModulesIterator
void Advance() {
DCHECK(!current_.is_null());
if (!is_backwards_) {
- if (current_->has_weak_next_instance()) {
- WeakCell* weak_next = current_->ptr_to_weak_next_instance();
- if (!weak_next->cleared()) {
- current_ =
- handle(WasmCompiledModule::cast(weak_next->value()), isolate_);
- return;
- }
+ if (current_->has_next_instance()) {
+ current_ = current_->next_instance();
+ return;
}
// No more modules in next-links, now try the previous-links.
is_backwards_ = true;
current_ = start_module_;
}
- if (current_->has_weak_prev_instance()) {
- WeakCell* weak_prev = current_->ptr_to_weak_prev_instance();
- if (!weak_prev->cleared()) {
- current_ =
- handle(WasmCompiledModule::cast(weak_prev->value()), isolate_);
- return;
- }
+ if (current_->has_prev_instance()) {
+ current_ = current_->prev_instance();
+ return;
}
current_ = Handle<WasmCompiledModule>::null();
}
@@ -158,6 +151,14 @@ bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
}
#endif // DEBUG
+void CompiledModuleFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ WasmCompiledModule* compiled_module = WasmCompiledModule::cast(*p);
+ compiled_module->reset_native_module();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+}
+
} // namespace
Handle<WasmModuleObject> WasmModuleObject::New(
@@ -179,8 +180,8 @@ void WasmModuleObject::ValidateStateForTesting(
WasmCompiledModule* compiled_module = module_obj->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(!compiled_module->has_weak_prev_instance());
- CHECK(!compiled_module->has_weak_next_instance());
+ CHECK(!compiled_module->has_prev_instance());
+ CHECK(!compiled_module->has_next_instance());
CHECK(!compiled_module->has_weak_owning_instance());
}
@@ -233,6 +234,8 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
}
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % 4);
uint32_t old_size = functions()->length();
@@ -258,7 +261,27 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
dispatch_tables->set(i + 3, *new_signature_table);
// Patch the code of the respective instance.
- {
+ if (FLAG_wasm_jit_to_native) {
+ DisallowHeapAllocation no_gc;
+ wasm::CodeSpecialization code_specialization(isolate,
+ &specialization_zone);
+ WasmInstanceObject* instance =
+ WasmInstanceObject::cast(dispatch_tables->get(i));
+ WasmCompiledModule* compiled_module = instance->compiled_module();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ GlobalHandleAddress old_function_table_addr =
+ native_module->function_tables()[table_index];
+ GlobalHandleAddress old_signature_table_addr =
+ native_module->signature_tables()[table_index];
+ code_specialization.PatchTableSize(old_size, old_size + count);
+ code_specialization.RelocatePointer(old_function_table_addr,
+ new_function_table_addr);
+ code_specialization.RelocatePointer(old_signature_table_addr,
+ new_signature_table_addr);
+ code_specialization.ApplyToWholeInstance(instance);
+ native_module->function_tables()[table_index] = new_function_table_addr;
+ native_module->signature_tables()[table_index] = new_signature_table_addr;
+ } else {
DisallowHeapAllocation no_gc;
wasm::CodeSpecialization code_specialization(isolate,
&specialization_zone);
@@ -294,18 +317,24 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
WasmFunction* wasm_function = nullptr;
- Handle<Code> code = Handle<Code>::null();
+ Handle<Object> code = Handle<Object>::null();
Handle<Object> value = isolate->factory()->null_value();
if (!function.is_null()) {
+ auto exported_function = Handle<WasmExportedFunction>::cast(function);
wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
// The verification that {function} is an export was done
// by the caller.
DCHECK_NOT_NULL(wasm_function);
- code = wasm::UnwrapExportWrapper(function);
- value = Handle<Object>::cast(function);
+ value = function;
+ // TODO(titzer): Make JSToWasm wrappers just call the WASM to WASM wrapper,
+ // and then we can just reuse the WASM to WASM wrapper.
+ WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
+ code = wasm::GetOrCreateIndirectCallWrapper(
+ isolate, handle(exported_function->instance()), wasm_code,
+ exported_function->function_index(), wasm_function->sig);
}
-
UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
array->set(index, *value);
}
@@ -361,45 +390,44 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
- instance->set_memory_buffer(*buffer);
- if (instance->has_debug_info()) {
- instance->debug_info()->UpdateMemory(*buffer);
+ auto wasm_context = instance->wasm_context()->get();
+ wasm_context->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
+ buffer->byte_length()->Number());
+#if DEBUG
+ // To flush out bugs earlier, in DEBUG mode, check that all pages of the
+ // memory are accessible by reading and writing one byte on each page.
+ for (uint32_t offset = 0; offset < wasm_context->mem_size;
+ offset += WasmModule::kPageSize) {
+ byte val = wasm_context->mem_start[offset];
+ wasm_context->mem_start[offset] = val;
}
-}
-
-void UpdateWasmContext(WasmContext* wasm_context,
- Handle<JSArrayBuffer> buffer) {
- uint32_t new_mem_size = buffer->byte_length()->Number();
- Address new_mem_start = static_cast<Address>(buffer->backing_store());
- DCHECK_NOT_NULL(new_mem_start);
- wasm_context->mem_start = new_mem_start;
- wasm_context->mem_size = new_mem_size;
+#endif
}
} // namespace
-Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- int32_t maximum) {
+Handle<WasmMemoryObject> WasmMemoryObject::New(
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
+ int32_t maximum) {
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor());
auto memory_obj = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, TENURED));
- auto wasm_context = Managed<WasmContext>::Allocate(isolate);
- if (buffer.is_null()) {
- const bool enable_guard_regions = trap_handler::UseTrapHandler();
+
+ Handle<JSArrayBuffer> buffer;
+ if (maybe_buffer.is_null()) {
+ // If no buffer was provided, create a 0-length one.
buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
- enable_guard_regions);
- wasm_context->get()->mem_size = 0;
- wasm_context->get()->mem_start = nullptr;
+ trap_handler::UseTrapHandler());
} else {
- CHECK(buffer->byte_length()->ToUint32(&wasm_context->get()->mem_size));
- wasm_context->get()->mem_start =
- static_cast<Address>(buffer->backing_store());
+ buffer = maybe_buffer.ToHandleChecked();
+ // Paranoid check that the buffer size makes sense.
+ uint32_t mem_size = 0;
+ CHECK(buffer->byte_length()->ToUint32(&mem_size));
}
memory_obj->set_array_buffer(*buffer);
memory_obj->set_maximum_pages(maximum);
- memory_obj->set_wasm_context(*wasm_context);
+
return memory_obj;
}
@@ -419,6 +447,8 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
Handle<WeakFixedArray> new_instances =
WeakFixedArray::Add(old_instances, instance);
memory->set_instances(*new_instances);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate);
+ SetInstanceMemory(isolate, instance, buffer);
}
void WasmMemoryObject::RemoveInstance(Isolate* isolate,
@@ -463,29 +493,19 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
if (!old_buffer->is_growable()) return -1;
uint32_t old_size = 0;
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
+ DCHECK_EQ(0, old_size % WasmModule::kPageSize);
Handle<JSArrayBuffer> new_buffer;
// Return current size if grow by 0.
- if (pages == 0) {
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
- return old_size / WasmModule::kPageSize;
- }
+ if (pages == 0) return old_size / WasmModule::kPageSize;
- uint32_t maximum_pages;
+ uint32_t maximum_pages = FLAG_wasm_max_mem_pages;
if (memory_object->has_maximum_pages()) {
maximum_pages = Min(FLAG_wasm_max_mem_pages,
static_cast<uint32_t>(memory_object->maximum_pages()));
- } else {
- maximum_pages = FLAG_wasm_max_mem_pages;
}
new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
if (new_buffer.is_null()) return -1;
- // Verify that the values we will change are actually the ones we expect.
- DCHECK_EQ(memory_object->wasm_context()->get()->mem_size, old_size);
- DCHECK_EQ(memory_object->wasm_context()->get()->mem_start,
- static_cast<Address>(old_buffer->backing_store()));
- UpdateWasmContext(memory_object->wasm_context()->get(), new_buffer);
-
if (memory_object->has_instances()) {
Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
for (int i = 0; i < instances->Length(); i++) {
@@ -497,7 +517,6 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
}
}
memory_object->set_array_buffer(*new_buffer);
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
}
@@ -505,11 +524,6 @@ WasmModuleObject* WasmInstanceObject::module_object() {
return *compiled_module()->wasm_module();
}
-WasmContext* WasmInstanceObject::wasm_context() {
- DCHECK(has_memory_object());
- return memory_object()->wasm_context()->get();
-}
-
WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
@@ -530,13 +544,18 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
Handle<WasmInstanceObject> instance(
reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
+ auto wasm_context = Managed<WasmContext>::Allocate(isolate);
+ wasm_context->get()->SetRawMemory(nullptr, 0);
+ wasm_context->get()->globals_start = nullptr;
+ instance->set_wasm_context(*wasm_context);
+
instance->set_compiled_module(*compiled_module);
return instance;
}
int32_t WasmInstanceObject::GetMemorySize() {
- if (!has_memory_buffer()) return 0;
- uint32_t bytes = memory_buffer()->byte_length()->Number();
+ if (!has_memory_object()) return 0;
+ uint32_t bytes = memory_object()->array_buffer()->byte_length()->Number();
DCHECK_EQ(0, bytes % WasmModule::kPageSize);
return bytes / WasmModule::kPageSize;
}
@@ -567,7 +586,20 @@ uint32_t WasmInstanceObject::GetMaxMemoryPages() {
return FLAG_wasm_max_mem_pages;
}
-WasmInstanceObject* WasmInstanceObject::GetOwningInstance(Code* code) {
+WasmInstanceObject* WasmInstanceObject::GetOwningInstance(
+ const wasm::WasmCode* code) {
+ DisallowHeapAllocation no_gc;
+ Object* weak_link = nullptr;
+ DCHECK(code->kind() == wasm::WasmCode::Function ||
+ code->kind() == wasm::WasmCode::InterpreterStub);
+ weak_link = code->owner()->compiled_module()->ptr_to_weak_owning_instance();
+ DCHECK(weak_link->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(weak_link);
+ if (cell->cleared()) return nullptr;
+ return WasmInstanceObject::cast(cell->value());
+}
+
+WasmInstanceObject* WasmInstanceObject::GetOwningInstanceGC(Code* code) {
DisallowHeapAllocation no_gc;
DCHECK(code->kind() == Code::WASM_FUNCTION ||
code->kind() == Code::WASM_INTERPRETER_ENTRY);
@@ -591,16 +623,16 @@ void WasmInstanceObject::ValidateInstancesChainForTesting(
Object* prev = nullptr;
int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
WasmCompiledModule* current_instance = compiled_module;
- while (current_instance->has_weak_next_instance()) {
- CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
- current_instance->ptr_to_weak_prev_instance()->value() == prev);
+ while (current_instance->has_next_instance()) {
+ CHECK((prev == nullptr && !current_instance->has_prev_instance()) ||
+ current_instance->ptr_to_prev_instance() == prev);
CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
CHECK(current_instance->ptr_to_weak_owning_instance()
->value()
->IsWasmInstanceObject());
prev = current_instance;
- current_instance = WasmCompiledModule::cast(
- current_instance->ptr_to_weak_next_instance()->value());
+ current_instance =
+ WasmCompiledModule::cast(current_instance->ptr_to_next_instance());
++found_instances;
CHECK_LE(found_instances, instance_count);
}
@@ -668,8 +700,9 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
shared->set_length(arity);
shared->set_internal_formal_parameter_count(arity);
- Handle<JSFunction> js_function = isolate->factory()->NewFunction(
- isolate->sloppy_function_map(), name, export_wrapper);
+ NewFunctionArgs args = NewFunctionArgs::ForWasm(
+ name, export_wrapper, isolate->sloppy_function_map());
+ Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
js_function->set_shared(*shared);
Handle<Symbol> instance_symbol(isolate->factory()->wasm_instance_symbol());
@@ -683,6 +716,49 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
return Handle<WasmExportedFunction>::cast(js_function);
}
+WasmCodeWrapper WasmExportedFunction::GetWasmCode() {
+ DisallowHeapAllocation no_gc;
+ Handle<Code> export_wrapper_code = handle(this->code());
+ DCHECK_EQ(export_wrapper_code->kind(), Code::JS_TO_WASM_FUNCTION);
+ int mask =
+ RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
+ : RelocInfo::CODE_TARGET);
+ auto IsWasmFunctionCode = [](Code* code) {
+ return code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ code->kind() == Code::WASM_TO_WASM_FUNCTION ||
+ code->kind() == Code::WASM_INTERPRETER_ENTRY ||
+ code->builtin_index() == Builtins::kWasmCompileLazy;
+ };
+
+ for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
+ DCHECK(!it.done());
+ WasmCodeWrapper target;
+ if (FLAG_wasm_jit_to_native) {
+ target = WasmCodeWrapper(GetIsolate()->wasm_code_manager()->LookupCode(
+ it.rinfo()->js_to_wasm_address()));
+ } else {
+ Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (!IsWasmFunctionCode(code)) continue;
+ target = WasmCodeWrapper(handle(code));
+ }
+// There should only be this one call to wasm code.
+#ifdef DEBUG
+ for (it.next(); !it.done(); it.next()) {
+ if (FLAG_wasm_jit_to_native) {
+ UNREACHABLE();
+ } else {
+ Code* code =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ DCHECK(!IsWasmFunctionCode(code));
+ }
+ }
+#endif
+ return target;
+ }
+ UNREACHABLE();
+}
+
bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
if (!object->IsFixedArray()) return false;
FixedArray* arr = FixedArray::cast(object);
@@ -916,8 +992,8 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
- Isolate* isolate, Handle<WasmSharedModuleData> shared,
- Handle<FixedArray> code_table, Handle<FixedArray> export_wrappers,
+ Isolate* isolate, WasmModule* module, Handle<FixedArray> code_table,
+ Handle<FixedArray> export_wrappers,
const std::vector<GlobalHandleAddress>& function_tables,
const std::vector<GlobalHandleAddress>& signature_tables) {
DCHECK_EQ(function_tables.size(), signature_tables.size());
@@ -926,40 +1002,75 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
// WasmCompiledModule::cast would fail since fields are not set yet.
Handle<WasmCompiledModule> compiled_module(
reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
- compiled_module->InitId();
- compiled_module->set_shared(shared);
compiled_module->set_native_context(isolate->native_context());
- compiled_module->set_code_table(code_table);
- compiled_module->set_export_wrappers(export_wrappers);
- // TODO(mtrofin): we copy these because the order of finalization isn't
- // reliable, and we need these at Reset (which is called at
- // finalization). If the order were reliable, and top-down, we could instead
- // just get them from shared().
- compiled_module->set_initial_pages(shared->module()->initial_pages);
- compiled_module->set_num_imported_functions(
- shared->module()->num_imported_functions);
-
- int num_function_tables = static_cast<int>(function_tables.size());
- if (num_function_tables > 0) {
- Handle<FixedArray> st =
- isolate->factory()->NewFixedArray(num_function_tables, TENURED);
- Handle<FixedArray> ft =
- isolate->factory()->NewFixedArray(num_function_tables, TENURED);
- for (int i = 0; i < num_function_tables; ++i) {
- size_t index = static_cast<size_t>(i);
- SetTableValue(isolate, ft, i, function_tables[index]);
- SetTableValue(isolate, st, i, signature_tables[index]);
+ if (!FLAG_wasm_jit_to_native) {
+ compiled_module->InitId();
+ compiled_module->set_native_context(isolate->native_context());
+ compiled_module->set_code_table(code_table);
+ compiled_module->set_export_wrappers(export_wrappers);
+ // TODO(mtrofin): we copy these because the order of finalization isn't
+ // reliable, and we need these at Reset (which is called at
+ // finalization). If the order were reliable, and top-down, we could instead
+ // just get them from shared().
+ compiled_module->set_initial_pages(module->initial_pages);
+ compiled_module->set_num_imported_functions(module->num_imported_functions);
+
+ int num_function_tables = static_cast<int>(function_tables.size());
+ if (num_function_tables > 0) {
+ Handle<FixedArray> st =
+ isolate->factory()->NewFixedArray(num_function_tables, TENURED);
+ Handle<FixedArray> ft =
+ isolate->factory()->NewFixedArray(num_function_tables, TENURED);
+ for (int i = 0; i < num_function_tables; ++i) {
+ size_t index = static_cast<size_t>(i);
+ SetTableValue(isolate, ft, i, function_tables[index]);
+ SetTableValue(isolate, st, i, signature_tables[index]);
+ }
+ // TODO(wasm): setting the empty tables here this way is OK under the
+ // assumption that we compile and then instantiate. It needs rework if we
+ // do direct instantiation. The empty tables are used as a default when
+ // resetting the compiled module.
+ compiled_module->set_signature_tables(st);
+ compiled_module->set_empty_signature_tables(st);
+ compiled_module->set_function_tables(ft);
+ compiled_module->set_empty_function_tables(ft);
}
- // TODO(wasm): setting the empty tables here this way is OK under the
- // assumption that we compile and then instantiate. It needs rework if we do
- // direct instantiation. The empty tables are used as a default when
- // resetting the compiled module.
- compiled_module->set_signature_tables(st);
- compiled_module->set_empty_signature_tables(st);
- compiled_module->set_function_tables(ft);
- compiled_module->set_empty_function_tables(ft);
+ } else {
+ if (!export_wrappers.is_null()) {
+ compiled_module->set_export_wrappers(export_wrappers);
+ }
+ wasm::NativeModule* native_module = nullptr;
+ {
+ std::unique_ptr<wasm::NativeModule> native_module_ptr =
+ isolate->wasm_code_manager()->NewNativeModule(*module);
+ native_module = native_module_ptr.release();
+ Handle<Foreign> native_module_wrapper =
+ Managed<wasm::NativeModule>::From(isolate, native_module);
+ compiled_module->set_native_module(native_module_wrapper);
+ Handle<WasmCompiledModule> weak_link =
+ isolate->global_handles()->Create(*compiled_module);
+ GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
+ Handle<Object>::cast(weak_link).location(),
+ &CompiledModuleFinalizer,
+ v8::WeakCallbackType::kFinalizer);
+ compiled_module->GetNativeModule()->SetCompiledModule(weak_link);
+ }
+ // This is here just because it's easier for APIs that need to work with
+ // either code_table or native_module. Otherwise we need to check if
+ // has_code_table and pass undefined.
+ compiled_module->set_code_table(code_table);
+
+ native_module->function_tables() = function_tables;
+ native_module->signature_tables() = signature_tables;
+ native_module->empty_function_tables() = function_tables;
+ native_module->empty_signature_tables() = signature_tables;
+
+ int function_count = static_cast<int>(module->functions.size());
+ compiled_module->set_handler_table(
+ isolate->factory()->NewFixedArray(function_count, TENURED));
+ compiled_module->set_source_positions(
+ isolate->factory()->NewFixedArray(function_count, TENURED));
}
-
// TODO(mtrofin): copy the rest of the specialization parameters over.
// We're currently OK because we're only using defaults.
return compiled_module;
@@ -967,19 +1078,41 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
Handle<WasmCompiledModule> WasmCompiledModule::Clone(
Isolate* isolate, Handle<WasmCompiledModule> module) {
- Handle<FixedArray> code_copy =
- isolate->factory()->CopyFixedArray(module->code_table());
+ Handle<FixedArray> code_copy;
+ if (!FLAG_wasm_jit_to_native) {
+ code_copy = isolate->factory()->CopyFixedArray(module->code_table());
+ }
Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
isolate->factory()->CopyFixedArray(module));
- ret->InitId();
- ret->set_code_table(code_copy);
ret->reset_weak_owning_instance();
- ret->reset_weak_next_instance();
- ret->reset_weak_prev_instance();
+ ret->reset_next_instance();
+ ret->reset_prev_instance();
ret->reset_weak_exported_functions();
- if (ret->has_globals_start()) {
- WasmCompiledModule::recreate_globals_start(ret, isolate->factory(),
- ret->globals_start());
+ if (!FLAG_wasm_jit_to_native) {
+ ret->InitId();
+ ret->set_code_table(code_copy);
+ return ret;
+ }
+
+ std::unique_ptr<wasm::NativeModule> native_module =
+ module->GetNativeModule()->Clone();
+ // construct the wrapper in 2 steps, because its construction may trigger GC,
+ // which would shift the this pointer in set_native_module.
+ Handle<Foreign> native_module_wrapper =
+ Managed<wasm::NativeModule>::From(isolate, native_module.release());
+ ret->set_native_module(native_module_wrapper);
+ Handle<WasmCompiledModule> weak_link =
+ isolate->global_handles()->Create(*ret);
+ GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
+ Handle<Object>::cast(weak_link).location(),
+ &CompiledModuleFinalizer,
+ v8::WeakCallbackType::kFinalizer);
+ ret->GetNativeModule()->SetCompiledModule(weak_link);
+
+ if (module->has_lazy_compile_data()) {
+ Handle<FixedArray> lazy_comp_data = isolate->factory()->NewFixedArray(
+ module->lazy_compile_data()->length(), TENURED);
+ ret->set_lazy_compile_data(lazy_comp_data);
}
return ret;
}
@@ -1005,8 +1138,13 @@ Address WasmCompiledModule::GetTableValue(FixedArray* table, int index) {
return reinterpret_cast<Address>(static_cast<size_t>(value));
}
-void WasmCompiledModule::Reset(Isolate* isolate,
- WasmCompiledModule* compiled_module) {
+wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
+ if (!has_native_module()) return nullptr;
+ return Managed<wasm::NativeModule>::cast(ptr_to_native_module())->get();
+}
+
+void WasmCompiledModule::ResetGCModel(Isolate* isolate,
+ WasmCompiledModule* compiled_module) {
DisallowHeapAllocation no_gc;
TRACE("Resetting %d\n", compiled_module->instance_id());
Object* undefined = *isolate->factory()->undefined_value();
@@ -1017,13 +1155,6 @@ void WasmCompiledModule::Reset(Isolate* isolate,
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
- if (compiled_module->has_globals_start()) {
- Address globals_start =
- reinterpret_cast<Address>(compiled_module->globals_start());
- code_specialization.RelocateGlobals(globals_start, nullptr);
- compiled_module->set_globals_start(0);
- }
-
// Reset function tables.
if (compiled_module->has_function_tables()) {
FixedArray* function_tables = compiled_module->ptr_to_function_tables();
@@ -1051,6 +1182,8 @@ void WasmCompiledModule::Reset(Isolate* isolate,
}
}
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
FixedArray* functions = FixedArray::cast(fct_obj);
for (int i = compiled_module->num_imported_functions(),
end = functions->length();
@@ -1066,8 +1199,8 @@ void WasmCompiledModule::Reset(Isolate* isolate,
}
break;
}
- bool changed =
- code_specialization.ApplyToWasmCode(code, SKIP_ICACHE_FLUSH);
+ bool changed = code_specialization.ApplyToWasmCode(
+ WasmCodeWrapper(handle(code)), SKIP_ICACHE_FLUSH);
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
// above.
if (changed) {
@@ -1086,16 +1219,79 @@ void WasmCompiledModule::InitId() {
#endif
}
-void WasmCompiledModule::SetGlobalsStartAddressFrom(
- Factory* factory, Handle<WasmCompiledModule> compiled_module,
- Handle<JSArrayBuffer> buffer) {
- DCHECK(!buffer.is_null());
- size_t start_address = reinterpret_cast<size_t>(buffer->backing_store());
- if (!compiled_module->has_globals_start()) {
- WasmCompiledModule::recreate_globals_start(compiled_module, factory,
- start_address);
- } else {
- compiled_module->set_globals_start(start_address);
+void WasmCompiledModule::Reset(Isolate* isolate,
+ WasmCompiledModule* compiled_module) {
+ DisallowHeapAllocation no_gc;
+ compiled_module->reset_prev_instance();
+ compiled_module->reset_next_instance();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ if (native_module == nullptr) return;
+ TRACE("Resetting %zu\n", native_module->instance_id);
+ if (trap_handler::UseTrapHandler()) {
+ for (uint32_t i = native_module->num_imported_functions(),
+ e = native_module->FunctionCount();
+ i < e; ++i) {
+ wasm::WasmCode* wasm_code = native_module->GetCode(i);
+ if (wasm_code->HasTrapHandlerIndex()) {
+ CHECK_LT(wasm_code->trap_handler_index(),
+ static_cast<size_t>(std::numeric_limits<int>::max()));
+ trap_handler::ReleaseHandlerData(
+ static_cast<int>(wasm_code->trap_handler_index()));
+ wasm_code->ResetTrapHandlerIndex();
+ }
+ }
+ }
+
+ // Patch code to update memory references, global references, and function
+ // table references.
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
+
+ if (compiled_module->has_lazy_compile_data()) {
+ for (int i = 0, e = compiled_module->lazy_compile_data()->length(); i < e;
+ ++i) {
+ compiled_module->lazy_compile_data()->set(
+ i, isolate->heap()->undefined_value());
+ }
+ }
+ // Reset function tables.
+ if (native_module->function_tables().size() > 0) {
+ std::vector<GlobalHandleAddress>& function_tables =
+ native_module->function_tables();
+ std::vector<GlobalHandleAddress>& signature_tables =
+ native_module->signature_tables();
+ std::vector<GlobalHandleAddress>& empty_function_tables =
+ native_module->empty_function_tables();
+ std::vector<GlobalHandleAddress>& empty_signature_tables =
+ native_module->empty_signature_tables();
+
+ if (function_tables != empty_function_tables) {
+ DCHECK_EQ(function_tables.size(), empty_function_tables.size());
+ for (size_t i = 0, e = function_tables.size(); i < e; ++i) {
+ code_specialization.RelocatePointer(function_tables[i],
+ empty_function_tables[i]);
+ code_specialization.RelocatePointer(signature_tables[i],
+ empty_signature_tables[i]);
+ }
+ native_module->function_tables() = empty_function_tables;
+ native_module->signature_tables() = empty_signature_tables;
+ }
+ }
+
+ for (uint32_t i = native_module->num_imported_functions(),
+ end = native_module->FunctionCount();
+ i < end; ++i) {
+ wasm::WasmCode* code = native_module->GetCode(i);
+ // Skip lazy compile stubs.
+ if (code == nullptr || code->kind() != wasm::WasmCode::Function) continue;
+ bool changed = code_specialization.ApplyToWasmCode(WasmCodeWrapper(code),
+ SKIP_ICACHE_FLUSH);
+ // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
+ // above.
+ if (changed) {
+ Assembler::FlushICache(isolate, code->instructions().start(),
+ code->instructions().size());
+ }
}
}
@@ -1129,7 +1325,6 @@ bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
if (!obj->IsFixedArray()) return false;
FixedArray* arr = FixedArray::cast(obj);
if (arr->length() != PropertyIndices::Count) return false;
- Isolate* isolate = arr->GetIsolate();
#define WCM_CHECK_TYPE(NAME, TYPE_CHECK) \
do { \
Object* obj = arr->get(kID_##NAME); \
@@ -1145,16 +1340,13 @@ bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
#define WCM_CHECK_CONST_OBJECT(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
#define WCM_CHECK_WASM_OBJECT(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, TYPE::Is##TYPE(obj))
+ WCM_CHECK_TYPE(NAME, obj->IsFixedArray() || obj->IsUndefined(isolate))
#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT(WeakCell, NAME)
#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsSmi())
#define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
#define WCM_CHECK_SMALL_CONST_NUMBER(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, obj->IsSmi())
-#define WCM_CHECK_LARGE_NUMBER(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsMutableHeapNumber())
- WCM_PROPERTY_TABLE(WCM_CHECK)
#undef WCM_CHECK_TYPE
#undef WCM_CHECK_OBJECT
#undef WCM_CHECK_CONST_OBJECT
@@ -1163,7 +1355,6 @@ bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
#undef WCM_CHECK_SMALL_NUMBER
#undef WCM_CHECK
#undef WCM_CHECK_SMALL_CONST_NUMBER
-#undef WCM_CHECK_LARGE_NUMBER
// All checks passed.
return true;
@@ -1173,16 +1364,46 @@ void WasmCompiledModule::PrintInstancesChain() {
#if DEBUG
if (!FLAG_trace_wasm_instances) return;
for (WasmCompiledModule* current = this; current != nullptr;) {
- PrintF("->%d", current->instance_id());
- if (!current->has_weak_next_instance()) break;
- DCHECK(!current->ptr_to_weak_next_instance()->cleared());
- current =
- WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
+ if (FLAG_wasm_jit_to_native) {
+ PrintF("->%zu", current->GetNativeModule()->instance_id);
+ } else {
+ PrintF("->%d", current->instance_id());
+ }
+ if (!current->has_next_instance()) break;
+ current = current->ptr_to_next_instance();
}
PrintF("\n");
#endif
}
+void WasmCompiledModule::InsertInChain(WasmModuleObject* module) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* original = module->compiled_module();
+ set_ptr_to_next_instance(original);
+ original->set_ptr_to_prev_instance(this);
+ set_weak_wasm_module(original->weak_wasm_module());
+}
+
+void WasmCompiledModule::RemoveFromChain() {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = GetIsolate();
+
+ Object* next = get(kID_next_instance);
+ Object* prev = get(kID_prev_instance);
+
+ if (!prev->IsUndefined(isolate)) {
+ WasmCompiledModule::cast(prev)->set(kID_next_instance, next);
+ }
+ if (!next->IsUndefined(isolate)) {
+ WasmCompiledModule::cast(next)->set(kID_prev_instance, prev);
+ }
+}
+
+void WasmCompiledModule::OnWasmModuleDecodingComplete(
+ Handle<WasmSharedModuleData> shared) {
+ set_shared(shared);
+}
+
void WasmCompiledModule::ReinitializeAfterDeserialization(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
// This method must only be called immediately after deserialization.
@@ -1191,31 +1412,48 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
Handle<WasmSharedModuleData> shared(
static_cast<WasmSharedModuleData*>(compiled_module->get(kID_shared)),
isolate);
- DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
- WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
- int function_table_count =
- static_cast<int>(compiled_module->module()->function_tables.size());
+ if (!FLAG_wasm_jit_to_native) {
+ DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+ WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
+ }
+ size_t function_table_count =
+ compiled_module->module()->function_tables.size();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+
if (function_table_count > 0) {
// The tables are of the right size, but contain bogus global handle
// addresses. Produce new global handles for the empty tables, then reset,
// which will relocate the code. We end up with a WasmCompiledModule as-if
// it were just compiled.
- DCHECK(compiled_module->has_function_tables());
- DCHECK(compiled_module->has_signature_tables());
- DCHECK(compiled_module->has_empty_signature_tables());
- DCHECK(compiled_module->has_empty_function_tables());
-
- for (int i = 0; i < function_table_count; ++i) {
+ if (!FLAG_wasm_jit_to_native) {
+ DCHECK(compiled_module->has_function_tables());
+ DCHECK(compiled_module->has_signature_tables());
+ DCHECK(compiled_module->has_empty_signature_tables());
+ DCHECK(compiled_module->has_empty_function_tables());
+ } else {
+ DCHECK_GT(native_module->function_tables().size(), 0);
+ DCHECK_GT(native_module->signature_tables().size(), 0);
+ DCHECK_EQ(native_module->empty_signature_tables().size(),
+ native_module->function_tables().size());
+ DCHECK_EQ(native_module->empty_function_tables().size(),
+ native_module->function_tables().size());
+ }
+ for (size_t i = 0; i < function_table_count; ++i) {
Handle<Object> global_func_table_handle =
isolate->global_handles()->Create(isolate->heap()->undefined_value());
Handle<Object> global_sig_table_handle =
isolate->global_handles()->Create(isolate->heap()->undefined_value());
GlobalHandleAddress new_func_table = global_func_table_handle.address();
GlobalHandleAddress new_sig_table = global_sig_table_handle.address();
- SetTableValue(isolate, compiled_module->empty_function_tables(), i,
- new_func_table);
- SetTableValue(isolate, compiled_module->empty_signature_tables(), i,
- new_sig_table);
+ if (!FLAG_wasm_jit_to_native) {
+ SetTableValue(isolate, compiled_module->empty_function_tables(),
+ static_cast<int>(i), new_func_table);
+ SetTableValue(isolate, compiled_module->empty_signature_tables(),
+ static_cast<int>(i), new_sig_table);
+ } else {
+ native_module->empty_function_tables()[i] = new_func_table;
+ native_module->empty_signature_tables()[i] = new_sig_table;
+ }
}
}
@@ -1575,16 +1813,44 @@ MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
}
-Handle<Code> WasmCompiledModule::CompileLazy(
- Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
- int offset, int func_index, bool patch_caller) {
- isolate->set_context(*instance->compiled_module()->native_context());
- Object* orch_obj =
- instance->compiled_module()->shared()->lazy_compilation_orchestrator();
- auto* orch =
- Managed<wasm::LazyCompilationOrchestrator>::cast(orch_obj)->get();
- return orch->CompileLazy(isolate, instance, caller, offset, func_index,
- patch_caller);
+void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
+ MaybeHandle<WeakCell> weak_instance,
+ int func_index) {
+ DCHECK(weak_instance.is_null() ||
+ weak_instance.ToHandleChecked()->value()->IsWasmInstanceObject());
+ Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
+ if (!weak_instance.is_null()) {
+ // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
+ deopt_data->set(0, *weak_instance.ToHandleChecked());
+ }
+ deopt_data->set(1, Smi::FromInt(func_index));
+
+ code->set_deoptimization_data(*deopt_data);
+}
+
+void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
+ MaybeHandle<WasmInstanceObject> instance,
+ int func_index) {
+ MaybeHandle<WeakCell> weak_instance;
+ if (!instance.is_null()) {
+ weak_instance = isolate->factory()->NewWeakCell(instance.ToHandleChecked());
+ }
+ AttachWasmFunctionInfo(isolate, code, weak_instance, func_index);
+}
+
+WasmFunctionInfo GetWasmFunctionInfo(Isolate* isolate, Handle<Code> code) {
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK_LE(2, deopt_data->length());
+ MaybeHandle<WasmInstanceObject> instance;
+ Object* maybe_weak_instance = deopt_data->get(0);
+ if (maybe_weak_instance->IsWeakCell()) {
+ Object* maybe_instance = WeakCell::cast(maybe_weak_instance)->value();
+ if (maybe_instance) {
+ instance = handle(WasmInstanceObject::cast(maybe_instance), isolate);
+ }
+ }
+ int func_index = Smi::ToInt(deopt_data->get(1));
+ return {instance, func_index};
}
#undef TRACE
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 86a7913d7a..ec53b8ac2a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -5,14 +5,15 @@
#ifndef V8_WASM_OBJECTS_H_
#define V8_WASM_OBJECTS_H_
+#include "src/base/bits.h"
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
#include "src/managed.h"
#include "src/objects.h"
#include "src/objects/script.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
#include "src/heap/heap.h"
@@ -23,6 +24,8 @@ namespace v8 {
namespace internal {
namespace wasm {
class InterpretedFrame;
+class NativeModule;
+class WasmCode;
class WasmInterpreter;
struct WasmModule;
class SignatureMap;
@@ -58,9 +61,22 @@ class WasmInstanceObject;
// grow_memory). The address of the WasmContext is provided to the wasm entry
// functions using a RelocatableIntPtrConstant, then the address is passed as
// parameter to the other wasm functions.
+// Note that generated code can directly read from instances of this struct.
struct WasmContext {
- byte* mem_start;
- uint32_t mem_size;
+ byte* mem_start = nullptr;
+ uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
+ uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
+ byte* globals_start = nullptr;
+
+ inline void SetRawMemory(void* mem_start, size_t mem_size) {
+ DCHECK_LE(mem_size, std::min(wasm::kV8MaxWasmMemoryPages,
+ wasm::kSpecMaxWasmMemoryPages) *
+ wasm::WasmModule::kPageSize);
+ this->mem_start = static_cast<byte*>(mem_start);
+ this->mem_size = static_cast<uint32_t>(mem_size);
+ this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
+ DCHECK_LE(mem_size, this->mem_mask + 1);
+ }
};
// Representation of a WebAssembly.Module JavaScript-level object.
@@ -157,9 +173,8 @@ class WasmMemoryObject : public JSObject {
uint32_t current_pages();
inline bool has_maximum_pages();
- static Handle<WasmMemoryObject> New(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- int32_t maximum);
+ V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
static void SetupNewBufferWithSameBackingStore(
@@ -171,12 +186,13 @@ class WasmInstanceObject : public JSObject {
public:
DECL_CAST(WasmInstanceObject)
+ DECL_ACCESSORS(wasm_context, Managed<WasmContext>)
DECL_ACCESSORS(compiled_module, WasmCompiledModule)
DECL_ACCESSORS(exports_object, JSObject)
DECL_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject)
- DECL_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer)
DECL_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
+ DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
DECL_OPTIONAL_ACCESSORS(function_tables, FixedArray)
DECL_OPTIONAL_ACCESSORS(signature_tables, FixedArray)
@@ -185,12 +201,13 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(js_imports_table, FixedArray)
enum { // --
+ kWasmContextIndex,
kCompiledModuleIndex,
kExportsObjectIndex,
kMemoryObjectIndex,
- kMemoryBufferIndex,
kGlobalsBufferIndex,
kDebugInfoIndex,
+ kTableObjectIndex,
kFunctionTablesIndex,
kSignatureTablesIndex,
kDirectlyCalledInstancesIndex,
@@ -199,19 +216,19 @@ class WasmInstanceObject : public JSObject {
};
DEF_SIZE(JSObject)
+ DEF_OFFSET(WasmContext)
DEF_OFFSET(CompiledModule)
DEF_OFFSET(ExportsObject)
DEF_OFFSET(MemoryObject)
- DEF_OFFSET(MemoryBuffer)
DEF_OFFSET(GlobalsBuffer)
DEF_OFFSET(DebugInfo)
+ DEF_OFFSET(TableObject)
DEF_OFFSET(FunctionTables)
DEF_OFFSET(SignatureTables)
DEF_OFFSET(DirectlyCalledInstances)
DEF_OFFSET(JsImportsTable)
WasmModuleObject* module_object();
- WasmContext* wasm_context();
V8_EXPORT_PRIVATE wasm::WasmModule* module();
// Get the debug info associated with the given wasm object.
@@ -230,7 +247,8 @@ class WasmInstanceObject : public JSObject {
// Assumed to be called with a code object associated to a wasm module
// instance. Intended to be called from runtime functions. Returns nullptr on
// failing to get owning instance.
- static WasmInstanceObject* GetOwningInstance(Code* code);
+ static WasmInstanceObject* GetOwningInstance(const wasm::WasmCode* code);
+ static WasmInstanceObject* GetOwningInstanceGC(Code* code);
static void ValidateInstancesChainForTesting(
Isolate* isolate, Handle<WasmModuleObject> module_obj,
@@ -255,7 +273,7 @@ class WasmExportedFunction : public JSFunction {
int func_index, int arity,
Handle<Code> export_wrapper);
- Handle<Code> GetWasmCode();
+ WasmCodeWrapper GetWasmCode();
};
// Information shared by all WasmCompiledModule objects for the same module.
@@ -307,9 +325,7 @@ class WasmSharedModuleData : public FixedArray {
Handle<SeqOneByteString> module_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table);
- private:
DECL_OPTIONAL_ACCESSORS(lazy_compilation_orchestrator, Foreign)
- friend class WasmCompiledModule;
};
// This represents the set of wasm compiled functions, together
@@ -321,10 +337,7 @@ class WasmSharedModuleData : public FixedArray {
// used as memory of a particular WebAssembly.Instance object. This
// information are then used at runtime to access memory / verify bounds
// check limits.
-// - bounds check limits, computed at compile time, relative to the
-// size of the memory.
// - the objects representing the function tables and signature tables
-// - raw pointer to the globals buffer.
//
// Even without instantiating, we need values for all of these parameters.
// We need to track these values to be able to create new instances and
@@ -332,11 +345,6 @@ class WasmSharedModuleData : public FixedArray {
// The design decisions for how we track these values is not too immediate,
// and it deserves a summary. The "tricky" ones are: memory, globals, and
// the tables (signature and functions).
-// The first 2 (memory & globals) are embedded as raw pointers to native
-// buffers. All we need to track them is the start addresses and, in the
-// case of memory, the size. We model all of them as HeapNumbers, because
-// we need to store size_t values (for addresses), and potentially full
-// 32 bit unsigned values for the size. Smis are 31 bits.
// For tables, we need to hold a reference to the JS Heap object, because
// we embed them as objects, and they may move.
class WasmCompiledModule : public FixedArray {
@@ -386,36 +394,35 @@ class WasmCompiledModule : public FixedArray {
public: \
inline Handle<TYPE> NAME() const;
-#define WCM_LARGE_NUMBER(TYPE, NAME) \
- public: \
- inline TYPE NAME() const; \
- inline void set_##NAME(TYPE value); \
- inline static void recreate_##NAME(Handle<WasmCompiledModule> obj, \
- Factory* factory, TYPE init_val); \
- inline bool has_##NAME() const;
-
// Add values here if they are required for creating new instances or
// for deserialization, and if they are serializable.
// By default, instance values go to WasmInstanceObject, however, if
// we embed the generated code with a value, then we track that value here.
-#define CORE_WCM_PROPERTY_TABLE(MACRO) \
- MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
- MACRO(OBJECT, Context, native_context) \
+#define CORE_WCM_PROPERTY_TABLE(MACRO) \
+ MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
+ MACRO(OBJECT, Context, native_context) \
+ MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
+ MACRO(OBJECT, FixedArray, weak_exported_functions) \
+ MACRO(WASM_OBJECT, WasmCompiledModule, next_instance) \
+ MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
+ MACRO(WEAK_LINK, WasmInstanceObject, owning_instance) \
+ MACRO(WEAK_LINK, WasmModuleObject, wasm_module) \
+ MACRO(OBJECT, FixedArray, handler_table) \
+ MACRO(OBJECT, FixedArray, source_positions) \
+ MACRO(OBJECT, Foreign, native_module) \
+ MACRO(OBJECT, FixedArray, lazy_compile_data)
+
+#define GC_WCM_PROPERTY_TABLE(MACRO) \
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
MACRO(CONST_OBJECT, FixedArray, code_table) \
- MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
- MACRO(OBJECT, FixedArray, weak_exported_functions) \
MACRO(OBJECT, FixedArray, function_tables) \
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_signature_tables) \
- MACRO(LARGE_NUMBER, size_t, globals_start) \
- MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages) \
- MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
- MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
- MACRO(WEAK_LINK, JSObject, owning_instance) \
- MACRO(WEAK_LINK, WasmModuleObject, wasm_module)
+ MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages)
+// TODO(mtrofin): this is unnecessary when we stop needing
+// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
#if DEBUG
#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_CONST_NUMBER, uint32_t, instance_id)
#else
@@ -427,6 +434,7 @@ class WasmCompiledModule : public FixedArray {
#define WCM_PROPERTY_TABLE(MACRO) \
CORE_WCM_PROPERTY_TABLE(MACRO) \
+ GC_WCM_PROPERTY_TABLE(MACRO) \
DEBUG_ONLY_TABLE(MACRO)
private:
@@ -438,8 +446,8 @@ class WasmCompiledModule : public FixedArray {
public:
static Handle<WasmCompiledModule> New(
- Isolate* isolate, Handle<WasmSharedModuleData> shared,
- Handle<FixedArray> code_table, Handle<FixedArray> export_wrappers,
+ Isolate* isolate, wasm::WasmModule* module, Handle<FixedArray> code_table,
+ Handle<FixedArray> export_wrappers,
const std::vector<wasm::GlobalHandleAddress>& function_tables,
const std::vector<wasm::GlobalHandleAddress>& signature_tables);
@@ -447,13 +455,15 @@ class WasmCompiledModule : public FixedArray {
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
- inline Address GetGlobalsStartOrNull() const;
+ // TODO(mtrofin): delete this when we don't need FLAG_wasm_jit_to_native
+ static void ResetGCModel(Isolate* isolate, WasmCompiledModule* module);
uint32_t default_mem_size() const;
- static void SetGlobalsStartAddressFrom(
- Factory* factory, Handle<WasmCompiledModule> compiled_module,
- Handle<JSArrayBuffer> buffer);
+ wasm::NativeModule* GetNativeModule() const;
+ void InsertInChain(WasmModuleObject*);
+ void RemoveFromChain();
+ void OnWasmModuleDecodingComplete(Handle<WasmSharedModuleData>);
#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
WCM_PROPERTY_TABLE(DECLARATION)
@@ -555,23 +565,16 @@ class WasmCompiledModule : public FixedArray {
// FixedArray with all hit breakpoint objects.
MaybeHandle<FixedArray> CheckBreakPoints(int position);
- // Compile lazily the function called in the given caller code object at the
- // given offset.
- // If the called function cannot be determined from the caller (indirect
- // call / exported function), func_index must be set. Otherwise it can be -1.
- // If patch_caller is set, then all direct calls to functions which were
- // already lazily compiled are patched (at least the given call site).
- // Returns the Code to be called at the given call site.
- static Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int offset,
- int func_index, bool patch_caller);
-
- inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
+ inline void ReplaceCodeTableForTesting(
+ std::vector<wasm::WasmCode*>&& testing_table);
+ // TODO(mtrofin): following 4 unnecessary after we're done with
+ // FLAG_wasm_jit_to_native
static void SetTableValue(Isolate* isolate, Handle<FixedArray> table,
int index, Address value);
static void UpdateTableValue(FixedArray* table, int index, Address value);
static Address GetTableValue(FixedArray* table, int index);
+ inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
private:
void InitId();
@@ -652,10 +655,6 @@ class WasmDebugInfo : public FixedArray {
// Returns the number of calls / function frames executed in the interpreter.
uint64_t NumInterpretedCalls();
- // Update the memory view of the interpreter after executing GrowMemory in
- // compiled code.
- void UpdateMemory(JSArrayBuffer* new_memory);
-
// Get scope details for a specific interpreted frame.
// This returns a JSArray of length two: One entry for the global scope, one
// for the local scope. Both elements are JSArrays of size
@@ -677,6 +676,27 @@ class WasmDebugInfo : public FixedArray {
wasm::FunctionSig*);
};
+// Attach function information in the form of deoptimization data to the given
+// code object. This information will be used for generating stack traces,
+// calling imported functions in the interpreter, knowing which function to
+// compile in a lazy compile stub, and more. The deopt data will be a newly
+// allocated FixedArray of length 2, where the first element is a WeakCell
+// containing the WasmInstanceObject, and the second element is the function
+// index.
+// If calling this method repeatedly for the same instance, pass a WeakCell
+// directly in order to avoid creating many cells pointing to the same instance.
+void AttachWasmFunctionInfo(Isolate*, Handle<Code>,
+ MaybeHandle<WeakCell> weak_instance,
+ int func_index);
+void AttachWasmFunctionInfo(Isolate*, Handle<Code>,
+ MaybeHandle<WasmInstanceObject>, int func_index);
+
+struct WasmFunctionInfo {
+ MaybeHandle<WasmInstanceObject> instance;
+ int func_index;
+};
+WasmFunctionInfo GetWasmFunctionInfo(Isolate*, Handle<Code>);
+
#undef DECL_OOL_QUERY
#undef DECL_OOL_CAST
#undef DECL_GETTER
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 5f2507996d..5188d7801e 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -329,7 +329,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig) {
for (auto type : sig->all()) {
if (type == wasm::kWasmI64 || type == wasm::kWasmS128) return false;
}
- return true;
+ return sig->return_count() <= 1;
}
namespace {
@@ -353,59 +353,51 @@ constexpr const FunctionSig* kSimpleExprSigs[] = {
nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
#undef DECLARE_SIG_ENTRY
-// The following constexpr functions are used to initialize the constant arrays
-// defined below. They must have exactly one return statement, and no switch.
-constexpr WasmOpcodeSig GetOpcodeSigIndex(byte opcode) {
- return
+// gcc 4.7 - 4.9 has a bug which causes the constexpr attribute to get lost when
+// passing functions (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892). Hence
+// encapsulate these constexpr functions in functors.
+// TODO(clemensh): Remove this once we require gcc >= 5.0.
+
+struct GetOpcodeSigIndex {
+ constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- FOREACH_SIMPLE_OPCODE(CASE)
+ return FOREACH_SIMPLE_OPCODE(CASE) kSigEnum_None;
#undef CASE
- kSigEnum_None;
-}
+ }
+};
-constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
- return
+struct GetAsmJsOpcodeSigIndex {
+ constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- FOREACH_ASMJS_COMPAT_OPCODE(CASE)
+ return FOREACH_ASMJS_COMPAT_OPCODE(CASE) kSigEnum_None;
#undef CASE
- kSigEnum_None;
-}
+ }
+};
-constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
- return
+struct GetSimdOpcodeSigIndex {
+ constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
- FOREACH_SIMD_0_OPERAND_OPCODE(CASE)
+ return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) kSigEnum_None;
#undef CASE
- kSigEnum_None;
-}
+ }
+};
-constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
- return
+struct GetAtomicOpcodeSigIndex {
+ constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
- FOREACH_ATOMIC_OPCODE(CASE)
+ return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
#undef CASE
- kSigEnum_None;
}
+};
-// gcc 4.7 - 4.9 have a bug which prohibits marking the array constexpr
-// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892).
-// TODO(clemensh): Remove this once we require gcc >= 5.0.
-#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ == 4
-#define CONSTEXPR_IF_NOT_GCC_4
-#else
-#define CONSTEXPR_IF_NOT_GCC_4 constexpr
-#endif
-
-CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimpleExprSigTable =
- base::make_array<256>(GetOpcodeSigIndex);
-CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
- base::make_array<256>(GetAsmJsOpcodeSigIndex);
-CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
- base::make_array<256>(GetSimdOpcodeSigIndex);
-CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
- base::make_array<256>(GetAtomicOpcodeSigIndex);
-
-#undef CONSTEXPR_IF_NOT_GCC_4
+constexpr std::array<WasmOpcodeSig, 256> kSimpleExprSigTable =
+ base::make_array<256>(GetOpcodeSigIndex{});
+constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
+ base::make_array<256>(GetAsmJsOpcodeSigIndex{});
+constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
+ base::make_array<256>(GetSimdOpcodeSigIndex{});
+constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
+ base::make_array<256>(GetAtomicOpcodeSigIndex{});
} // namespace
@@ -413,6 +405,9 @@ FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
if (opcode >> 8 == kSimdPrefix) {
return const_cast<FunctionSig*>(
kSimpleExprSigs[kSimdExprSigTable[opcode & 0xff]]);
+ } else if (opcode >> 8 == kAtomicPrefix) {
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
} else {
DCHECK_GT(kSimpleExprSigTable.size(), opcode);
return const_cast<FunctionSig*>(
@@ -426,11 +421,6 @@ FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
kSimpleExprSigs[kSimpleAsmjsExprSigTable[opcode]]);
}
-FunctionSig* WasmOpcodes::AtomicSignature(WasmOpcode opcode) {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
-}
-
int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
switch (reason) {
#define TRAPREASON_TO_MESSAGE(name) \
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 2401e0446c..e8cb348b53 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -19,7 +19,7 @@ const uint32_t kWasmMagic = 0x6d736100;
const uint32_t kWasmVersion = 0x01;
// Binary encoding of local types.
-enum ValueTypeCode {
+enum ValueTypeCode : uint8_t {
kLocalVoid = 0x40,
kLocalI32 = 0x7f,
kLocalI64 = 0x7e,
@@ -28,9 +28,6 @@ enum ValueTypeCode {
kLocalS128 = 0x7b
};
-// Type code for multi-value block types.
-static const uint8_t kMultivalBlock = 0x41;
-
// We reuse the internal machine type to represent WebAssembly types.
// A typedef improves readability without adding a whole new type system.
using ValueType = MachineRepresentation;
@@ -541,7 +538,6 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static const char* OpcodeName(WasmOpcode opcode);
static FunctionSig* Signature(WasmOpcode opcode);
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
- static FunctionSig* AtomicSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
static bool IsControlOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
new file mode 100644
index 0000000000..337692b595
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -0,0 +1,687 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-serialization.h"
+
+#include "src/assembler-inl.h"
+#include "src/code-stubs.h"
+#include "src/external-reference-table.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/version.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+void SetRawTargetData(RelocInfo* rinfo, uint32_t value) {
+ if (rinfo->target_address_size() == sizeof(uint32_t)) {
+ *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = value;
+ return;
+ } else {
+ DCHECK_EQ(rinfo->target_address_size(), sizeof(intptr_t));
+ DCHECK_EQ(rinfo->target_address_size(), 8);
+ *(reinterpret_cast<intptr_t*>(rinfo->target_address_address())) =
+ static_cast<intptr_t>(value);
+ return;
+ }
+}
+
+class Writer {
+ public:
+ explicit Writer(Vector<byte> buffer) : buffer_(buffer) {}
+ template <typename T>
+ void Write(const T& value) {
+ if (FLAG_wasm_trace_serialization) {
+ OFStream os(stdout);
+ os << "wrote: " << (size_t)value << " sized: " << sizeof(T) << std::endl;
+ }
+ DCHECK_GE(buffer_.size(), sizeof(T));
+ memcpy(buffer_.start(), reinterpret_cast<const byte*>(&value), sizeof(T));
+ buffer_ = buffer_ + sizeof(T);
+ }
+
+ void WriteVector(const Vector<const byte> data) {
+ DCHECK_GE(buffer_.size(), data.size());
+ if (data.size() > 0) {
+ memcpy(buffer_.start(), data.start(), data.size());
+ buffer_ = buffer_ + data.size();
+ }
+ if (FLAG_wasm_trace_serialization) {
+ OFStream os(stdout);
+ os << "wrote vector of " << data.size() << " elements" << std::endl;
+ }
+ }
+ Vector<byte> current_buffer() const { return buffer_; }
+
+ private:
+ Vector<byte> buffer_;
+};
+
+class Reader {
+ public:
+ explicit Reader(Vector<const byte> buffer) : buffer_(buffer) {}
+
+ template <typename T>
+ T Read() {
+ DCHECK_GE(buffer_.size(), sizeof(T));
+ T ret;
+ memcpy(reinterpret_cast<byte*>(&ret), buffer_.start(), sizeof(T));
+ buffer_ = buffer_ + sizeof(T);
+ if (FLAG_wasm_trace_serialization) {
+ OFStream os(stdout);
+ os << "read: " << (size_t)ret << " sized: " << sizeof(T) << std::endl;
+ }
+ return ret;
+ }
+
+ Vector<const byte> GetSubvector(size_t size) {
+ Vector<const byte> ret = {buffer_.start(), size};
+ buffer_ = buffer_ + size;
+ return ret;
+ }
+
+ void ReadIntoVector(const Vector<byte> data) {
+ if (data.size() > 0) {
+ DCHECK_GE(buffer_.size(), data.size());
+ memcpy(data.start(), buffer_.start(), data.size());
+ buffer_ = buffer_ + data.size();
+ }
+ if (FLAG_wasm_trace_serialization) {
+ OFStream os(stdout);
+ os << "read vector of " << data.size() << " elements" << std::endl;
+ }
+ }
+
+ Vector<const byte> current_buffer() const { return buffer_; }
+
+ private:
+ Vector<const byte> buffer_;
+};
+
+} // namespace
+
+size_t WasmSerializedFormatVersion::GetVersionSize() { return kVersionSize; }
+
+bool WasmSerializedFormatVersion::WriteVersion(Isolate* isolate,
+ Vector<byte> buffer) {
+ if (buffer.size() < GetVersionSize()) return false;
+ Writer writer(buffer);
+ writer.Write(SerializedData::ComputeMagicNumber(
+ ExternalReferenceTable::instance(isolate)));
+ writer.Write(Version::Hash());
+ writer.Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
+ writer.Write(FlagList::Hash());
+ return true;
+}
+
+bool WasmSerializedFormatVersion::IsSupportedVersion(
+ Isolate* isolate, const Vector<const byte> buffer) {
+ if (buffer.size() < kVersionSize) return false;
+ byte version[kVersionSize];
+ CHECK(WriteVersion(isolate, {version, kVersionSize}));
+ if (memcmp(buffer.start(), version, kVersionSize) == 0) return true;
+ return false;
+}
+
+NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
+ const NativeModule* module)
+ : isolate_(isolate), native_module_(module) {
+ DCHECK_NOT_NULL(isolate_);
+ DCHECK_NOT_NULL(native_module_);
+ DCHECK_NULL(native_module_->lazy_builtin_);
+ // TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
+ // the unique ones, i.e. the cache.
+ ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate_);
+ for (uint32_t i = 0; i < table->size(); ++i) {
+ Address addr = table->address(i);
+ reference_table_lookup_.insert(std::make_pair(addr, i));
+ }
+ // defer populating the stub_lookup_ to when we buffer the stubs
+ for (auto pair : native_module_->trampolines_) {
+ v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
+ int builtin_index = code->builtin_index();
+ if (builtin_index >= 0) {
+ uint32_t tag = static_cast<uint32_t>(builtin_index);
+ builtin_lookup_.insert(std::make_pair(pair.second, tag));
+ }
+ }
+ BufferHeader();
+ state_ = Metadata;
+}
+
+size_t NativeModuleSerializer::MeasureHeader() const {
+ return sizeof(uint32_t) + // total wasm fct count
+ sizeof(
+ uint32_t) + // imported fcts - i.e. index of first wasm function
+ sizeof(uint32_t) + // table count
+ native_module_->specialization_data_.function_tables.size() *
+ 2 // 2 same-sized tables, containing pointers
+ * sizeof(GlobalHandleAddress);
+}
+
+void NativeModuleSerializer::BufferHeader() {
+ size_t metadata_size = MeasureHeader();
+ scratch_.resize(metadata_size);
+ remaining_ = {scratch_.data(), metadata_size};
+ Writer writer(remaining_);
+ writer.Write(native_module_->FunctionCount());
+ writer.Write(native_module_->num_imported_functions());
+ writer.Write(static_cast<uint32_t>(
+ native_module_->specialization_data_.function_tables.size()));
+ for (size_t i = 0,
+ e = native_module_->specialization_data_.function_tables.size();
+ i < e; ++i) {
+ writer.Write(native_module_->specialization_data_.function_tables[i]);
+ writer.Write(native_module_->specialization_data_.signature_tables[i]);
+ }
+}
+
+size_t NativeModuleSerializer::GetCodeHeaderSize() {
+ return sizeof(size_t) + // size of this section
+ sizeof(size_t) + // offset of constant pool
+ sizeof(size_t) + // offset of safepoint table
+ sizeof(uint32_t) + // stack slots
+ sizeof(size_t) + // code size
+ sizeof(size_t) + // reloc size
+ sizeof(uint32_t) + // handler size
+ sizeof(uint32_t) + // source positions size
+ sizeof(size_t) + // protected instructions size
+ sizeof(bool); // is_liftoff
+}
+
+size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
+ FixedArray* handler_table = GetHandlerTable(code);
+ ByteArray* source_positions = GetSourcePositions(code);
+ return GetCodeHeaderSize() + code->instructions().size() + // code
+ code->reloc_info().size() + // reloc info
+ (handler_table == nullptr
+ ? 0
+ : static_cast<uint32_t>(
+ handler_table->length())) + // handler table
+ (source_positions == nullptr
+ ? 0
+ : static_cast<uint32_t>(
+ source_positions->length())) + // source positions
+ code->protected_instructions().size() *
+ sizeof(trap_handler::ProtectedInstructionData);
+}
+
+size_t NativeModuleSerializer::Measure() const {
+ size_t ret = MeasureHeader() + MeasureCopiedStubs();
+ for (uint32_t i = native_module_->num_imported_functions(),
+ e = native_module_->FunctionCount();
+ i < e; ++i) {
+ ret += MeasureCode(native_module_->GetCode(i));
+ }
+ return ret;
+}
+
+size_t NativeModuleSerializer::DrainBuffer(Vector<byte> dest) {
+ size_t to_write = std::min(dest.size(), remaining_.size());
+ memcpy(dest.start(), remaining_.start(), to_write);
+ DCHECK_GE(remaining_.size(), to_write);
+ remaining_ = remaining_ + to_write;
+ return to_write;
+}
+
+size_t NativeModuleSerializer::MeasureCopiedStubs() const {
+ size_t ret = sizeof(uint32_t) + // number of stubs
+ native_module_->stubs_.size() * sizeof(uint32_t); // stub keys
+ for (auto pair : native_module_->trampolines_) {
+ v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
+ int builtin_index = code->builtin_index();
+ if (builtin_index < 0) ret += sizeof(uint32_t);
+ }
+ return ret;
+}
+
+void NativeModuleSerializer::BufferCopiedStubs() {
+ // We buffer all the stubs together, because they are very likely
+ // few and small. Each stub is buffered like a WasmCode would,
+ // and in addition prefaced by its stub key. The whole section is prefaced
+ // by the number of stubs.
+ size_t buff_size = MeasureCopiedStubs();
+ scratch_.resize(buff_size);
+ remaining_ = {scratch_.data(), buff_size};
+ Writer writer(remaining_);
+ writer.Write(
+ static_cast<uint32_t>((buff_size - sizeof(uint32_t)) / sizeof(uint32_t)));
+ uint32_t stub_id = 0;
+
+ for (auto pair : native_module_->stubs_) {
+ uint32_t key = pair.first;
+ writer.Write(key);
+ stub_lookup_.insert(
+ std::make_pair(pair.second->instructions().start(), stub_id));
+ ++stub_id;
+ }
+
+ for (auto pair : native_module_->trampolines_) {
+ v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
+ int builtin_index = code->builtin_index();
+ if (builtin_index < 0) {
+ stub_lookup_.insert(std::make_pair(pair.second, stub_id));
+ writer.Write(code->stub_key());
+ ++stub_id;
+ }
+ }
+}
+
+FixedArray* NativeModuleSerializer::GetHandlerTable(
+ const WasmCode* code) const {
+ if (code->kind() != WasmCode::Function) return nullptr;
+ uint32_t index = code->index();
+ // We write the address, the size, and then copy the code as-is, followed
+ // by reloc info, followed by handler table and source positions.
+ Object* handler_table_entry =
+ native_module_->compiled_module()->handler_table()->get(
+ static_cast<int>(index));
+ if (handler_table_entry->IsFixedArray()) {
+ return FixedArray::cast(handler_table_entry);
+ }
+ return nullptr;
+}
+
+ByteArray* NativeModuleSerializer::GetSourcePositions(
+ const WasmCode* code) const {
+ if (code->kind() != WasmCode::Function) return nullptr;
+ uint32_t index = code->index();
+ Object* source_positions_entry =
+ native_module_->compiled_module()->source_positions()->get(
+ static_cast<int>(index));
+ if (source_positions_entry->IsByteArray()) {
+ return ByteArray::cast(source_positions_entry);
+ }
+ return nullptr;
+}
+
+void NativeModuleSerializer::BufferCurrentWasmCode() {
+ const WasmCode* code = native_module_->GetCode(index_);
+ size_t size = MeasureCode(code);
+ scratch_.resize(size);
+ remaining_ = {scratch_.data(), size};
+ BufferCodeInAllocatedScratch(code);
+}
+
+void NativeModuleSerializer::BufferCodeInAllocatedScratch(
+ const WasmCode* code) {
+ // We write the address, the size, and then copy the code as-is, followed
+ // by reloc info, followed by handler table and source positions.
+ FixedArray* handler_table_entry = GetHandlerTable(code);
+ uint32_t handler_table_size = 0;
+ Address handler_table = nullptr;
+ if (handler_table_entry != nullptr) {
+ handler_table_size = static_cast<uint32_t>(handler_table_entry->length());
+ handler_table = reinterpret_cast<Address>(
+ handler_table_entry->GetFirstElementAddress());
+ }
+ ByteArray* source_positions_entry = GetSourcePositions(code);
+ Address source_positions = nullptr;
+ uint32_t source_positions_size = 0;
+ if (source_positions_entry != nullptr) {
+ source_positions = source_positions_entry->GetDataStartAddress();
+ source_positions_size =
+ static_cast<uint32_t>(source_positions_entry->length());
+ }
+ Writer writer(remaining_);
+ // write the header
+ writer.Write(MeasureCode(code));
+ writer.Write(code->constant_pool_offset());
+ writer.Write(code->safepoint_table_offset());
+ writer.Write(code->stack_slots());
+ writer.Write(code->instructions().size());
+ writer.Write(code->reloc_info().size());
+ writer.Write(handler_table_size);
+ writer.Write(source_positions_size);
+ writer.Write(code->protected_instructions().size());
+ writer.Write(code->is_liftoff());
+ // next is the code, which we have to reloc.
+ Address serialized_code_start = writer.current_buffer().start();
+ // write the code and everything else
+ writer.WriteVector(code->instructions());
+ writer.WriteVector(code->reloc_info());
+ writer.WriteVector({handler_table, handler_table_size});
+ writer.WriteVector({source_positions, source_positions_size});
+ writer.WriteVector(
+ {reinterpret_cast<const byte*>(code->protected_instructions().data()),
+ sizeof(trap_handler::ProtectedInstructionData) *
+ code->protected_instructions().size()});
+ // now relocate the code
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ RelocIterator orig_iter(code->instructions(), code->reloc_info(),
+ code->constant_pool(), mask);
+ for (RelocIterator
+ iter({serialized_code_start, code->instructions().size()},
+ code->reloc_info(),
+ serialized_code_start + code->constant_pool_offset(), mask);
+ !iter.done(); iter.next(), orig_iter.next()) {
+ RelocInfo::Mode mode = orig_iter.rinfo()->rmode();
+ switch (mode) {
+ case RelocInfo::CODE_TARGET: {
+ Address orig_target = orig_iter.rinfo()->target_address();
+ uint32_t tag = EncodeBuiltinOrStub(orig_target);
+ SetRawTargetData(iter.rinfo(), tag);
+ } break;
+ case RelocInfo::WASM_CALL: {
+ Address orig_target = orig_iter.rinfo()->wasm_call_address();
+ uint32_t tag = wasm_targets_lookup_[orig_target];
+ SetRawTargetData(iter.rinfo(), tag);
+ } break;
+ case RelocInfo::RUNTIME_ENTRY: {
+ Address orig_target = orig_iter.rinfo()->target_address();
+ uint32_t tag = reference_table_lookup_[orig_target];
+ SetRawTargetData(iter.rinfo(), tag);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+uint32_t NativeModuleSerializer::EncodeBuiltinOrStub(Address address) {
+ auto builtin_iter = builtin_lookup_.find(address);
+ uint32_t tag = 0;
+ if (builtin_iter != builtin_lookup_.end()) {
+ uint32_t id = builtin_iter->second;
+ DCHECK_LT(id, std::numeric_limits<uint16_t>::max());
+ tag = id << 16;
+ } else {
+ auto stub_iter = stub_lookup_.find(address);
+ DCHECK(stub_iter != stub_lookup_.end());
+ uint32_t id = stub_iter->second;
+ DCHECK_LT(id, std::numeric_limits<uint16_t>::max());
+ tag = id & 0x0000ffff;
+ }
+ return tag;
+}
+
+size_t NativeModuleSerializer::Write(Vector<byte> dest) {
+ Vector<byte> original = dest;
+ while (dest.size() > 0) {
+ switch (state_) {
+ case Metadata: {
+ dest = dest + DrainBuffer(dest);
+ if (remaining_.size() == 0) {
+ BufferCopiedStubs();
+ state_ = Stubs;
+ }
+ break;
+ }
+ case Stubs: {
+ dest = dest + DrainBuffer(dest);
+ if (remaining_.size() == 0) {
+ index_ = native_module_->num_imported_functions();
+ BufferCurrentWasmCode();
+ state_ = CodeSection;
+ }
+ break;
+ }
+ case CodeSection: {
+ dest = dest + DrainBuffer(dest);
+ if (remaining_.size() == 0) {
+ if (++index_ < native_module_->FunctionCount()) {
+ BufferCurrentWasmCode();
+ } else {
+ state_ = Done;
+ }
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ DCHECK_GE(original.size(), dest.size());
+ return original.size() - dest.size();
+}
+
+// static
+std::pair<std::unique_ptr<byte[]>, size_t>
+NativeModuleSerializer::SerializeWholeModule(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ NativeModule* native_module = compiled_module->GetNativeModule();
+ NativeModuleSerializer serializer(isolate, native_module);
+ size_t version_size = WasmSerializedFormatVersion::GetVersionSize();
+ size_t buff_size = serializer.Measure() + version_size;
+ std::unique_ptr<byte[]> ret(new byte[buff_size]);
+ if (!WasmSerializedFormatVersion::WriteVersion(isolate,
+ {ret.get(), buff_size})) {
+ return {};
+ }
+
+ size_t written =
+ serializer.Write({ret.get() + version_size, buff_size - version_size});
+ if (written != buff_size - version_size) return {};
+
+ return {std::move(ret), buff_size};
+}
+
+NativeModuleDeserializer::NativeModuleDeserializer(Isolate* isolate,
+ NativeModule* native_module)
+ : isolate_(isolate), native_module_(native_module) {}
+
+void NativeModuleDeserializer::Expect(size_t size) {
+ scratch_.resize(size);
+ current_expectation_ = size;
+ unread_ = {scratch_.data(), size};
+}
+
+bool NativeModuleDeserializer::Read(Vector<const byte> data) {
+ unread_ = data;
+ if (!ReadHeader()) return false;
+ if (!ReadStubs()) return false;
+ index_ = native_module_->num_imported_functions();
+ for (; index_ < native_module_->FunctionCount(); ++index_) {
+ if (!ReadCode()) return false;
+ }
+ native_module_->LinkAll();
+ return data.size() - unread_.size();
+}
+
+bool NativeModuleDeserializer::ReadHeader() {
+ size_t start_size = unread_.size();
+ Reader reader(unread_);
+ size_t functions = reader.Read<uint32_t>();
+ size_t imports = reader.Read<uint32_t>();
+ bool ok = functions == native_module_->FunctionCount() &&
+ imports == native_module_->num_imported_functions();
+ if (!ok) return false;
+ size_t table_count = reader.Read<uint32_t>();
+
+ std::vector<GlobalHandleAddress> sigs(table_count);
+ std::vector<GlobalHandleAddress> funcs(table_count);
+ for (size_t i = 0; i < table_count; ++i) {
+ funcs[i] = reader.Read<GlobalHandleAddress>();
+ sigs[i] = reader.Read<GlobalHandleAddress>();
+ }
+ native_module_->signature_tables() = sigs;
+ native_module_->function_tables() = funcs;
+ // resize, so that from here on the native module can be
+ // asked about num_function_tables().
+ native_module_->empty_function_tables().resize(table_count);
+ native_module_->empty_signature_tables().resize(table_count);
+
+ unread_ = unread_ + (start_size - reader.current_buffer().size());
+ return true;
+}
+
+bool NativeModuleDeserializer::ReadStubs() {
+ size_t start_size = unread_.size();
+ Reader reader(unread_);
+ size_t nr_stubs = reader.Read<uint32_t>();
+ stubs_.reserve(nr_stubs);
+ for (size_t i = 0; i < nr_stubs; ++i) {
+ uint32_t key = reader.Read<uint32_t>();
+ v8::internal::Code* stub =
+ *(v8::internal::CodeStub::GetCode(isolate_, key).ToHandleChecked());
+ stubs_.push_back(native_module_->GetLocalAddressFor(handle(stub)));
+ }
+ unread_ = unread_ + (start_size - reader.current_buffer().size());
+ return true;
+}
+
+bool NativeModuleDeserializer::ReadCode() {
+ size_t start_size = unread_.size();
+ Reader reader(unread_);
+ size_t code_section_size = reader.Read<size_t>();
+ USE(code_section_size);
+ size_t constant_pool_offset = reader.Read<size_t>();
+ size_t safepoint_table_offset = reader.Read<size_t>();
+ uint32_t stack_slot_count = reader.Read<uint32_t>();
+ size_t code_size = reader.Read<size_t>();
+ size_t reloc_size = reader.Read<size_t>();
+ uint32_t handler_size = reader.Read<uint32_t>();
+ uint32_t source_position_size = reader.Read<uint32_t>();
+ size_t protected_instructions_size = reader.Read<size_t>();
+ bool is_liftoff = reader.Read<bool>();
+ std::shared_ptr<ProtectedInstructions> protected_instructions(
+ new ProtectedInstructions(protected_instructions_size));
+ DCHECK_EQ(protected_instructions_size, protected_instructions->size());
+
+ Vector<const byte> code_buffer = reader.GetSubvector(code_size);
+ std::unique_ptr<byte[]> reloc_info;
+ if (reloc_size > 0) {
+ reloc_info.reset(new byte[reloc_size]);
+ reader.ReadIntoVector({reloc_info.get(), reloc_size});
+ }
+ WasmCode* ret = native_module_->AddOwnedCode(
+ code_buffer, std::move(reloc_info), reloc_size, Just(index_),
+ WasmCode::Function, constant_pool_offset, stack_slot_count,
+ safepoint_table_offset, protected_instructions, is_liftoff);
+ if (ret == nullptr) return false;
+ native_module_->SetCodeTable(index_, ret);
+
+ // now relocate the code
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator iter(ret->instructions(), ret->reloc_info(),
+ ret->constant_pool(), mask);
+ !iter.done(); iter.next()) {
+ RelocInfo::Mode mode = iter.rinfo()->rmode();
+ switch (mode) {
+ case RelocInfo::EMBEDDED_OBJECT: {
+ // We only expect {undefined}. We check for that when we add code.
+ iter.rinfo()->set_target_object(isolate_->heap()->undefined_value(),
+ SKIP_WRITE_BARRIER);
+ }
+ case RelocInfo::CODE_TARGET: {
+ uint32_t tag = *(reinterpret_cast<uint32_t*>(
+ iter.rinfo()->target_address_address()));
+ Address target = GetTrampolineOrStubFromTag(tag);
+ iter.rinfo()->set_target_address(nullptr, target, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ } break;
+ case RelocInfo::RUNTIME_ENTRY: {
+ uint32_t orig_target = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
+ Address address =
+ ExternalReferenceTable::instance(isolate_)->address(orig_target);
+ iter.rinfo()->set_target_runtime_entry(
+ nullptr, address, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } break;
+ default:
+ break;
+ }
+ }
+ if (handler_size > 0) {
+ Handle<FixedArray> handler_table = isolate_->factory()->NewFixedArray(
+ static_cast<int>(handler_size), TENURED);
+ reader.ReadIntoVector(
+ {reinterpret_cast<Address>(handler_table->GetFirstElementAddress()),
+ handler_size});
+ native_module_->compiled_module()->handler_table()->set(
+ static_cast<int>(index_), *handler_table);
+ }
+ if (source_position_size > 0) {
+ Handle<ByteArray> source_positions = isolate_->factory()->NewByteArray(
+ static_cast<int>(source_position_size), TENURED);
+ reader.ReadIntoVector(
+ {source_positions->GetDataStartAddress(), source_position_size});
+ native_module_->compiled_module()->source_positions()->set(
+ static_cast<int>(index_), *source_positions);
+ }
+ if (protected_instructions_size > 0) {
+ reader.ReadIntoVector(
+ {reinterpret_cast<byte*>(protected_instructions->data()),
+ sizeof(trap_handler::ProtectedInstructionData) *
+ protected_instructions->size()});
+ }
+ unread_ = unread_ + (start_size - reader.current_buffer().size());
+ return true;
+}
+
+Address NativeModuleDeserializer::GetTrampolineOrStubFromTag(uint32_t tag) {
+ if ((tag & 0x0000ffff) == 0) {
+ int builtin_id = static_cast<int>(tag >> 16);
+ v8::internal::Code* builtin = isolate_->builtins()->builtin(builtin_id);
+ return native_module_->GetLocalAddressFor(handle(builtin));
+ } else {
+ DCHECK_EQ(tag & 0xffff0000, 0);
+ return stubs_[tag];
+ }
+}
+
+MaybeHandle<WasmCompiledModule> NativeModuleDeserializer::DeserializeFullBuffer(
+ Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
+ if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
+ return {};
+ }
+ if (!WasmSerializedFormatVersion::IsSupportedVersion(isolate, data)) {
+ return {};
+ }
+ data = data + WasmSerializedFormatVersion::GetVersionSize();
+ ModuleResult decode_result =
+ SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(), false,
+ i::wasm::kWasmOrigin);
+ if (!decode_result.ok()) return {};
+ CHECK_NOT_NULL(decode_result.val);
+ Handle<String> module_bytes =
+ isolate->factory()
+ ->NewStringFromOneByte(
+ {wire_bytes.start(), static_cast<size_t>(wire_bytes.length())},
+ TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::From(isolate, decode_result.val.release());
+ Handle<Script> script = CreateWasmScript(isolate, wire_bytes);
+ Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+ isolate, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
+ script, Handle<ByteArray>::null());
+ int export_wrappers_size =
+ static_cast<int>(shared->module()->num_exported_functions);
+ Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(
+ static_cast<int>(export_wrappers_size), TENURED);
+
+ Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
+ isolate, shared->module(), isolate->factory()->NewFixedArray(0, TENURED),
+ export_wrappers, {}, {});
+ compiled_module->OnWasmModuleDecodingComplete(shared);
+ NativeModuleDeserializer deserializer(isolate,
+ compiled_module->GetNativeModule());
+ if (!deserializer.Read(data)) return {};
+
+ CompileJsToWasmWrappers(isolate, compiled_module, isolate->counters());
+ WasmCompiledModule::ReinitializeAfterDeserialization(isolate,
+ compiled_module);
+ return compiled_module;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
new file mode 100644
index 0000000000..40025c23cf
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -0,0 +1,96 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_SERIALIZATION_H_
+#define V8_WASM_SERIALIZATION_H_
+
+#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmSerializedFormatVersion {
+ public:
+ static size_t GetVersionSize();
+ static bool WriteVersion(Isolate* isolate, Vector<byte>);
+ static bool IsSupportedVersion(Isolate* isolate, const Vector<const byte>);
+
+ private:
+ static constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
+};
+
+enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
+
+class V8_EXPORT_PRIVATE NativeModuleSerializer {
+ public:
+ explicit NativeModuleSerializer(Isolate*, const NativeModule*);
+ size_t Measure() const;
+ size_t Write(Vector<byte>);
+ bool IsDone() const { return state_ == Done; }
+ static std::pair<std::unique_ptr<byte[]>, size_t> SerializeWholeModule(
+ Isolate*, Handle<WasmCompiledModule>);
+
+ private:
+ size_t MeasureHeader() const;
+ static size_t GetCodeHeaderSize();
+ size_t MeasureCode(const WasmCode*) const;
+ size_t MeasureCopiedStubs() const;
+ FixedArray* GetHandlerTable(const WasmCode*) const;
+ ByteArray* GetSourcePositions(const WasmCode*) const;
+
+ void BufferHeader();
+  // We buffer all of the stubs eagerly because they are small.
+ void BufferCopiedStubs();
+ void BufferCodeInAllocatedScratch(const WasmCode*);
+ void BufferCurrentWasmCode();
+ size_t DrainBuffer(Vector<byte> dest);
+ uint32_t EncodeBuiltinOrStub(Address);
+
+ Isolate* const isolate_ = nullptr;
+ const NativeModule* const native_module_ = nullptr;
+ SerializationSection state_ = Init;
+ uint32_t index_ = 0;
+ std::vector<byte> scratch_;
+ Vector<byte> remaining_;
+ // wasm and copied stubs reverse lookup
+ std::map<Address, uint32_t> wasm_targets_lookup_;
+ // immovable builtins and runtime entries lookup
+ std::map<Address, uint32_t> reference_table_lookup_;
+ std::map<Address, uint32_t> stub_lookup_;
+ std::map<Address, uint32_t> builtin_lookup_;
+};
+
+class V8_EXPORT_PRIVATE NativeModuleDeserializer {
+ public:
+ explicit NativeModuleDeserializer(Isolate*, NativeModule*);
+  // Currently we don't support streamed reading, even though the
+  // API suggests that.
+ bool Read(Vector<const byte>);
+ static MaybeHandle<WasmCompiledModule> DeserializeFullBuffer(
+ Isolate*, Vector<const byte> data, Vector<const byte> wire_bytes);
+
+ private:
+ void ExpectHeader();
+ void Expect(size_t size);
+ bool ReadHeader();
+ bool ReadCode();
+ bool ReadStubs();
+ Address GetTrampolineOrStubFromTag(uint32_t);
+
+ Isolate* const isolate_ = nullptr;
+ NativeModule* const native_module_ = nullptr;
+ std::vector<byte> scratch_;
+ std::vector<Address> stubs_;
+ Vector<const byte> unread_;
+ size_t current_expectation_ = 0;
+ uint32_t index_ = 0;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index e1fea08d31..81c8e41813 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -101,17 +101,19 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeOperand<false> operand(&i, i.pc());
+ BlockTypeOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode);
- for (unsigned i = 0; i < operand.arity; i++) {
- os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
+ if (operand.type == kWasmVar) {
+ os << " (type " << operand.sig_index << ")";
+ } else if (operand.out_arity() > 0) {
+ os << " " << WasmOpcodes::TypeName(operand.out_type(0));
}
control_depth++;
break;
}
case kExprBr:
case kExprBrIf: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.depth;
break;
}
@@ -123,45 +125,45 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << "end";
break;
case kExprBrTable: {
- BranchTableOperand<false> operand(&i, i.pc());
- BranchTableIterator<false> iterator(&i, operand);
+ BranchTableOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ BranchTableIterator<Decoder::kNoValidate> iterator(&i, operand);
os << "br_table";
while (iterator.has_next()) os << ' ' << iterator.next();
break;
}
case kExprCallIndirect: {
- CallIndirectOperand<false> operand(&i, i.pc());
+ CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
DCHECK_EQ(0, operand.table_index);
os << "call_indirect " << operand.index;
break;
}
case kExprCallFunction: {
- CallFunctionOperand<false> operand(&i, i.pc());
+ CallFunctionOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << "call " << operand.index;
break;
}
case kExprGetLocal:
case kExprSetLocal:
case kExprTeeLocal: {
- LocalIndexOperand<false> operand(&i, i.pc());
+ LocalIndexOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
case kExprThrow:
case kExprCatch: {
- ExceptionIndexOperand<false> operand(&i, i.pc());
+ ExceptionIndexOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
case kExprGetGlobal:
case kExprSetGlobal: {
- GlobalIndexOperand<false> operand(&i, i.pc());
+ GlobalIndexOperand<Decoder::kNoValidate> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
#define CASE_CONST(type, str, cast_type) \
case kExpr##type##Const: { \
- Imm##type##Operand<false> operand(&i, i.pc()); \
+ Imm##type##Operand<Decoder::kNoValidate> operand(&i, i.pc()); \
os << #str ".const " << static_cast<cast_type>(operand.value); \
break; \
}
@@ -174,7 +176,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
- MemoryAccessOperand<false> operand(&i, i.pc(), kMaxUInt32);
+ MemoryAccessOperand<Decoder::kNoValidate> operand(&i, i.pc(),
+ kMaxUInt32);
os << WasmOpcodes::OpcodeName(opcode) << " offset=" << operand.offset
<< " align=" << (1ULL << operand.alignment);
break;
@@ -194,7 +197,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
WasmOpcode atomic_opcode = i.prefixed_opcode();
switch (atomic_opcode) {
FOREACH_ATOMIC_OPCODE(CASE_OPCODE) {
- MemoryAccessOperand<false> operand(&i, i.pc(), kMaxUInt32);
+ MemoryAccessOperand<Decoder::kNoValidate> operand(&i, i.pc(),
+ kMaxUInt32);
os << WasmOpcodes::OpcodeName(atomic_opcode)
<< " offset=" << operand.offset
<< " align=" << (1ULL << operand.alignment);
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 8e86c4824a..a30657aee0 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_VALUE_H_
#define V8_WASM_VALUE_H_
+#include "src/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
@@ -12,71 +13,71 @@ namespace v8 {
namespace internal {
namespace wasm {
-// Macro for defining WasmValue union members.
-#define FOREACH_WASMVAL_UNION_MEMBER(V) \
- V(i32, kWasmI32, int32_t) \
- V(u32, kWasmI32, uint32_t) \
- V(i64, kWasmI64, int64_t) \
- V(u64, kWasmI64, uint64_t) \
- V(f32, kWasmF32, float) \
- V(f64, kWasmF64, double)
+// Macro for defining WasmValue methods for different types.
+// Elements:
+// - name (for to_<name>() method)
+// - wasm type
+// - c type
+// - how to get bit pattern from value {v} of type {c type}
+// - how to get value of type {c type} from bit pattern {p}
+#define FOREACH_WASMVAL_TYPE(V) \
+ V(i32, kWasmI32, int32_t, static_cast<uint32_t>(v), static_cast<int32_t>(p)) \
+ V(u32, kWasmI32, uint32_t, v, static_cast<uint32_t>(p)) \
+ V(i64, kWasmI64, int64_t, static_cast<uint64_t>(v), static_cast<int64_t>(p)) \
+ V(u64, kWasmI64, uint64_t, v, p) \
+ V(f32, kWasmF32, float, bit_cast<uint32_t>(v), \
+ bit_cast<float>(static_cast<uint32_t>(p))) \
+ V(f32_boxed, kWasmF32, Float32, v.get_bits(), \
+ Float32::FromBits(static_cast<uint32_t>(p))) \
+ V(f64, kWasmF64, double, bit_cast<uint64_t>(v), bit_cast<double>(p)) \
+ V(f64_boxed, kWasmF64, Float64, v.get_bits(), Float64::FromBits(p))
// A wasm value with type information.
class WasmValue {
public:
WasmValue() : type_(kWasmStmt) {}
-#define DEFINE_TYPE_SPECIFIC_METHODS(field, localtype, ctype) \
- explicit WasmValue(ctype v) : type_(localtype) { value_.field = v; } \
- ctype to_##field() const { \
- DCHECK_EQ(localtype, type_); \
- return value_.field; \
+#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype, v_to_p, p_to_v) \
+ explicit WasmValue(ctype v) : type_(localtype), bit_pattern_(v_to_p) {} \
+ ctype to_##name() const { \
+ DCHECK_EQ(localtype, type_); \
+ return to_##name##_unchecked(); \
+ } \
+ ctype to_##name##_unchecked() const { \
+ auto p = bit_pattern_; \
+ return p_to_v; \
}
- FOREACH_WASMVAL_UNION_MEMBER(DEFINE_TYPE_SPECIFIC_METHODS)
+ FOREACH_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
ValueType type() const { return type_; }
+ // Checks equality of type and bit pattern (also for float and double values).
bool operator==(const WasmValue& other) const {
- if (type_ != other.type_) return false;
-#define CHECK_VALUE_EQ(field, localtype, ctype) \
- if (type_ == localtype) { \
- return value_.field == other.value_.field; \
- }
- FOREACH_WASMVAL_UNION_MEMBER(CHECK_VALUE_EQ)
-#undef CHECK_VALUE_EQ
- UNREACHABLE();
+ return type_ == other.type_ && bit_pattern_ == other.bit_pattern_;
}
template <typename T>
- inline T to() const {
- static_assert(sizeof(T) == -1, "Do only use this method with valid types");
- }
+ inline T to() const;
template <typename T>
- inline T to_unchecked() const {
- static_assert(sizeof(T) == -1, "Do only use this method with valid types");
- }
+ inline T to_unchecked() const;
private:
ValueType type_;
- union {
-#define DECLARE_FIELD(field, localtype, ctype) ctype field;
- FOREACH_WASMVAL_UNION_MEMBER(DECLARE_FIELD)
-#undef DECLARE_FIELD
- } value_;
+ uint64_t bit_pattern_;
};
-#define DECLARE_CAST(field, localtype, ctype) \
- template <> \
- inline ctype WasmValue::to_unchecked() const { \
- return value_.field; \
- } \
- template <> \
- inline ctype WasmValue::to() const { \
- return to_##field(); \
+#define DECLARE_CAST(name, localtype, ctype, ...) \
+ template <> \
+ inline ctype WasmValue::to_unchecked() const { \
+ return to_##name##_unchecked(); \
+ } \
+ template <> \
+ inline ctype WasmValue::to() const { \
+ return to_##name(); \
}
-FOREACH_WASMVAL_UNION_MEMBER(DECLARE_CAST)
+FOREACH_WASMVAL_TYPE(DECLARE_CAST)
#undef DECLARE_CAST
} // namespace wasm
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 27ffff5375..8b12b0867e 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -280,14 +280,14 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -329,14 +329,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
@@ -394,7 +393,7 @@ void RelocInfo::set_target_object(HeapObject* target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -419,7 +418,7 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
void RelocInfo::WipeOut(Isolate* isolate) {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(isolate, pc_, host_,
@@ -458,7 +457,7 @@ void Operand::set_modrm(int mod, Register rm_reg) {
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK(len_ == 1);
+ DCHECK_EQ(len_, 1);
DCHECK(is_uint2(scale));
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index d246e65f62..5f62e2af66 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -143,6 +143,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
}
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -205,10 +216,10 @@ Operand::Operand(Label* label) : rex_(0), len_(1) {
Operand::Operand(const Operand& operand, int32_t offset) {
- DCHECK(operand.len_ >= 1);
+ DCHECK_GE(operand.len_, 1);
// Operand encodes REX ModR/M [SIB] [Disp].
byte modrm = operand.buf_[0];
- DCHECK(modrm < 0xC0); // Disallow mode 3 (register target).
+ DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
bool has_sib = ((modrm & 0x07) == 0x04);
byte mode = modrm & 0xC0;
int disp_offset = has_sib ? 2 : 1;
@@ -253,7 +264,7 @@ Operand::Operand(const Operand& operand, int32_t offset) {
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
- DCHECK((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
+ DCHECK_NE(buf_[0] & 0xC0, 0xC0); // Always a memory operand.
// Start with only low three bits of base register. Initial decoding doesn't
// distinguish on the REX.B bit.
int base_code = buf_[0] & 0x07;
@@ -325,7 +336,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
- DCHECK(desc->instr_size > 0); // Zero-size code objects upset the system.
+ DCHECK_GT(desc->instr_size, 0); // Zero-size code objects upset the system.
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
@@ -415,7 +426,7 @@ void Assembler::bind_to(Label* L, int pos) {
int fixup_pos = L->near_link_pos();
int offset_to_next =
static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- DCHECK(offset_to_next <= 0);
+ DCHECK_LE(offset_to_next, 0);
int disp = pos - (fixup_pos + sizeof(int8_t));
CHECK(is_int8(disp));
set_byte_at(fixup_pos, disp);
@@ -520,10 +531,10 @@ void Assembler::GrowBuffer() {
void Assembler::emit_operand(int code, const Operand& adr) {
DCHECK(is_uint3(code));
const unsigned length = adr.len_;
- DCHECK(length > 0);
+ DCHECK_GT(length, 0);
// Emit updated ModR/M byte containing the given register.
- DCHECK((adr.buf_[0] & 0x38) == 0);
+ DCHECK_EQ(adr.buf_[0] & 0x38, 0);
*pc_++ = adr.buf_[0] | code << 3;
// Recognize RIP relative addressing.
@@ -568,7 +579,7 @@ void Assembler::arithmetic_op(byte opcode,
Register rm_reg,
int size) {
EnsureSpace ensure_space(this);
- DCHECK((opcode & 0xC6) == 2);
+ DCHECK_EQ(opcode & 0xC6, 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
emit_rex(rm_reg, reg, size);
@@ -584,7 +595,7 @@ void Assembler::arithmetic_op(byte opcode,
void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- DCHECK((opcode & 0xC6) == 2);
+ DCHECK_EQ(opcode & 0xC6, 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
emit(0x66);
@@ -625,7 +636,7 @@ void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- DCHECK((opcode & 0xC6) == 2);
+ DCHECK_EQ(opcode & 0xC6, 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
if (!rm_reg.is_byte_register() || !reg.is_byte_register()) {
@@ -916,7 +927,7 @@ void Assembler::call(Label* L) {
emit(0xE8);
if (L->is_bound()) {
int offset = L->pos() - pc_offset() - sizeof(int32_t);
- DCHECK(offset <= 0);
+ DCHECK_LE(offset, 0);
emitl(offset);
} else if (L->is_linked()) {
emitl(L->pos());
@@ -953,6 +964,23 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
emit_code_target(target, rmode);
}
+void Assembler::near_call(Address addr, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ emit(0xE8);
+ intptr_t value = reinterpret_cast<intptr_t>(addr);
+ DCHECK(is_int32(value));
+ RecordRelocInfo(rmode);
+ emitl(static_cast<int32_t>(value));
+}
+
+void Assembler::near_jmp(Address addr, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ emit(0xE9);
+ intptr_t value = reinterpret_cast<intptr_t>(addr);
+ DCHECK(is_int32(value));
+ RecordRelocInfo(rmode);
+ emitl(static_cast<int32_t>(value));
+}
void Assembler::call(Register adr) {
EnsureSpace ensure_space(this);
@@ -1012,7 +1040,7 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
}
// No need to check CpuInfo for CMOV support, it's a required part of the
// 64-bit architecture.
- DCHECK(cc >= 0); // Use mov for unconditional moves.
+ DCHECK_GE(cc, 0); // Use mov for unconditional moves.
EnsureSpace ensure_space(this);
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
@@ -1028,7 +1056,7 @@ void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
} else if (cc == never) {
return;
}
- DCHECK(cc >= 0);
+ DCHECK_GE(cc, 0);
EnsureSpace ensure_space(this);
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
@@ -1044,7 +1072,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
} else if (cc == never) {
return;
}
- DCHECK(cc >= 0);
+ DCHECK_GE(cc, 0);
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
@@ -1060,7 +1088,7 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
} else if (cc == never) {
return;
}
- DCHECK(cc >= 0);
+ DCHECK_GE(cc, 0);
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
@@ -1291,7 +1319,7 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
const int short_size = 2;
const int long_size = 6;
int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
+ DCHECK_LE(offs, 0);
// Determine whether we can use 1-byte offsets for backwards branches,
// which have a max range of 128 bytes.
@@ -1382,7 +1410,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
const int long_size = sizeof(int32_t);
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
- DCHECK(offs <= 0);
+ DCHECK_LE(offs, 0);
if (is_int8(offs - short_size) && !predictable_code_size()) {
// 1110 1011 #8-bit disp.
emit(0xEB);
@@ -1473,7 +1501,7 @@ void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
emit(0xA1);
emitp(value, mode);
} else {
- DCHECK(kPointerSize == kInt32Size);
+ DCHECK_EQ(kPointerSize, kInt32Size);
emit(0xA1);
emitp(value, mode);
// In 64-bit mode, need to zero extend the operand to 8 bytes.
@@ -1607,7 +1635,7 @@ void Assembler::emit_mov(Register dst, Immediate value, int size) {
emit(0xC7);
emit_modrm(0x0, dst);
} else {
- DCHECK(size == kInt32Size);
+ DCHECK_EQ(size, kInt32Size);
emit(0xB8 + dst.low_bits());
}
emit(value);
@@ -1661,7 +1689,7 @@ void Assembler::movl(const Operand& dst, Label* src) {
emit_operand(0, dst);
if (src->is_bound()) {
int offset = src->pos() - pc_offset() - sizeof(int32_t);
- DCHECK(offset <= 0);
+ DCHECK_LE(offset, 0);
emitl(offset);
} else if (src->is_linked()) {
emitl(src->pos());
@@ -2143,7 +2171,7 @@ void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
emit(0xA3);
emitp(dst, mode);
} else {
- DCHECK(kPointerSize == kInt32Size);
+ DCHECK_EQ(kPointerSize, kInt32Size);
emit(0xA3);
emitp(dst, mode);
// In 64-bit mode, need to zero extend the operand to 8 bytes.
@@ -4804,14 +4832,6 @@ void Assembler::emit_sse_operand(XMMRegister dst) {
emit(0xD8 | dst.low_bits());
}
-void Assembler::RecordProtectedInstructionLanding(int pc_offset) {
- EnsureSpace ensure_space(this);
- RelocInfo rinfo(pc(), RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING,
- pc_offset, nullptr);
- reloc_info_writer.Write(&rinfo);
-}
-
-
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
emit(data);
@@ -4860,13 +4880,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
}
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::WASM_CALL;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index c3720784a0..e5711101bd 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -84,19 +84,6 @@ namespace internal {
// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
constexpr int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
-const int kNumRegs = 16;
-const RegList kJSCallerSaved =
- 1 << 0 | // rax
- 1 << 1 | // rcx
- 1 << 2 | // rdx
- 1 << 3 | // rbx - used as a caller-saved register in JavaScript code
- 1 << 7; // rdi - callee function
-
-const int kNumJSCallerSaved = 5;
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 16;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
@@ -129,6 +116,19 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();
+constexpr int kNumRegs = 16;
+
+constexpr RegList kJSCallerSaved =
+ Register::ListOf<rax, rcx, rdx,
+ rbx, // used as a caller-saved register in JavaScript code
+ rdi // callee function
+ >();
+
+constexpr int kNumJSCallerSaved = 5;
+
+// Number of registers for which space is reserved in safepoints.
+constexpr int kNumSafepointRegisters = 16;
+
#ifdef _WIN64
// Windows calling convention
constexpr Register arg_reg_1 = rcx;
@@ -446,14 +446,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -500,7 +501,7 @@ class Assembler : public AssemblerBase {
if (kPointerSize == kInt64Size) {
return RelocInfo::NONE64;
} else {
- DCHECK(kPointerSize == kInt32Size);
+ DCHECK_EQ(kPointerSize, kInt32Size);
return RelocInfo::NONE32;
}
}
@@ -884,6 +885,8 @@ class Assembler : public AssemblerBase {
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
void call(Address entry, RelocInfo::Mode rmode);
+ void near_call(Address entry, RelocInfo::Mode rmode);
+ void near_jmp(Address entry, RelocInfo::Mode rmode);
void call(CodeStub* stub);
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
@@ -1923,8 +1926,6 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
- void RecordProtectedInstructionLanding(int pc_offset);
-
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -2068,7 +2069,7 @@ class Assembler : public AssemblerBase {
if (size == kInt64Size) {
emit_rex_64();
} else {
- DCHECK(size == kInt32Size);
+ DCHECK_EQ(size, kInt32Size);
}
}
@@ -2077,7 +2078,7 @@ class Assembler : public AssemblerBase {
if (size == kInt64Size) {
emit_rex_64(p1);
} else {
- DCHECK(size == kInt32Size);
+ DCHECK_EQ(size, kInt32Size);
emit_optional_rex_32(p1);
}
}
@@ -2087,7 +2088,7 @@ class Assembler : public AssemblerBase {
if (size == kInt64Size) {
emit_rex_64(p1, p2);
} else {
- DCHECK(size == kInt32Size);
+ DCHECK_EQ(size, kInt32Size);
emit_optional_rex_32(p1, p2);
}
}
@@ -2411,7 +2412,6 @@ class Assembler : public AssemblerBase {
bool is_optimizable_farjmp(int idx);
- friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index c24f54a0db..60d04fcbe6 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -7,13 +7,11 @@
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/counters.h"
#include "src/double.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -23,8 +21,6 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/x64/code-stubs-x64.h" // Cannot be the first include.
-
namespace v8 {
namespace internal {
@@ -41,58 +37,23 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- __ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
- const int argument_count = 1;
- __ PrepareCallCFunction(argument_count);
- __ LoadAddress(arg_reg_1,
- ExternalReference::isolate_address(isolate()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- __ PopCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
- __ ret(0);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ConvertUndefined {
- CONVERT_UNDEFINED_TO_ZERO,
- BAILOUT_ON_UNDEFINED
- };
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-};
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
- Register input_reg = this->source();
Register final_result_reg = this->destination();
- DCHECK(is_truncating());
Label check_negative, process_64_bits, done;
- int double_offset = offset();
-
- // Account for return address and saved regs if input is rsp.
- if (input_reg == rsp) double_offset += 3 * kRegisterSize;
+ // Account for return address and saved regs.
+ const int kArgumentOffset = 3 * kRegisterSize;
- MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
- MemOperand exponent_operand(MemOperand(input_reg,
- double_offset + kDoubleSize / 2));
+ MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
+ MemOperand exponent_operand(
+ MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));
Register scratch1 = no_reg;
Register scratch_candidates[3] = { rbx, rdx, rdi };
for (int i = 0; i < 3; i++) {
scratch1 = scratch_candidates[i];
- if (final_result_reg != scratch1 && input_reg != scratch1) break;
+ if (final_result_reg != scratch1) break;
}
// Since we must use rcx for shifts below, use some other register (rax)
@@ -105,11 +66,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ pushq(scratch1);
__ pushq(save_reg);
- bool stash_exponent_copy = input_reg != rsp;
__ movl(scratch1, mantissa_operand);
__ Movsd(kScratchDoubleReg, mantissa_operand);
__ movl(rcx, exponent_operand);
- if (stash_exponent_copy) __ pushq(rcx);
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ shrl(rcx, Immediate(HeapNumber::kExponentShift));
@@ -134,18 +93,11 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ bind(&check_negative);
__ movl(result_reg, scratch1);
__ negl(result_reg);
- if (stash_exponent_copy) {
- __ cmpl(MemOperand(rsp, 0), Immediate(0));
- } else {
- __ cmpl(exponent_operand, Immediate(0));
- }
+ __ cmpl(exponent_operand, Immediate(0));
__ cmovl(greater, result_reg, scratch1);
// Restore registers
__ bind(&done);
- if (stash_exponent_copy) {
- __ addp(rsp, Immediate(kDoubleSize));
- }
if (final_result_reg != result_reg) {
DCHECK(final_result_reg == rcx);
__ movl(final_result_reg, result_reg);
@@ -155,37 +107,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ Cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ Cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == rdx);
@@ -330,15 +251,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
+Movability CEntryStub::NeedsImmovableCode() { return kMovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@@ -477,10 +393,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -498,7 +412,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ PrepareCallCFunction(3);
__ CallCFunction(find_handler, 3);
}
-
// Retrieve the handler context, SP and FP.
__ movp(rsi, masm->ExternalOperand(pending_handler_context_address));
__ movp(rsp, masm->ExternalOperand(pending_handler_sp_address));
@@ -513,9 +426,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&skip);
// Compute the handler entry address and jump to it.
- __ movp(rdi, masm->ExternalOperand(pending_handler_code_address));
- __ movp(rdx, masm->ExternalOperand(pending_handler_offset_address));
- __ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ __ movp(rdi, masm->ExternalOperand(pending_handler_entrypoint_address));
__ jmp(rdi);
}
@@ -669,508 +580,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label check_zero_length;
- __ movp(length, FieldOperand(left, String::kLengthOffset));
- __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ Move(rax, Smi::FromInt(NOT_EQUAL));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTest(length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- Label strings_not_equal;
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Characters are not equal.
- __ bind(&strings_not_equal);
- __ Move(rax, Smi::FromInt(NOT_EQUAL));
- __ ret(0);
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4) {
- // Ensure that you can always subtract a string length from a non-negative
- // number (e.g. another length).
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
-
- // Find minimum length and length difference.
- __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movp(scratch4, scratch1);
- __ SmiSub(scratch4,
- scratch4,
- FieldOperand(right, String::kLengthOffset));
- // Register scratch4 now holds left.length - right.length.
- const Register length_difference = scratch4;
- Label left_shorter;
- __ j(less, &left_shorter, Label::kNear);
- // The right string isn't longer that the left one.
- // Get the right string's length by subtracting the (non-negative) difference
- // from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference);
- __ bind(&left_shorter);
- // Register scratch1 now holds Min(left.length, right.length).
- const Register min_length = scratch1;
-
- Label compare_lengths;
- // If min-length is zero, go directly to comparing lengths.
- __ SmiTest(min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare loop.
- Label result_not_equal;
- GenerateOneByteCharsCompareLoop(
- masm, left, right, min_length, scratch2, &result_not_equal,
- // In debug-code mode, SmiTest below might push
- // the target label outside the near range.
- Label::kFar);
-
- // Completed loop without finding different characters.
- // Compare lengths (precomputed).
- __ bind(&compare_lengths);
- __ SmiTest(length_difference);
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-
- // Result is EQUAL.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- Label result_greater;
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
- __ bind(&result_not_equal);
- // Unequal comparison of left to right, either character or length.
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-
- // Result is LESS.
- __ Move(rax, Smi::FromInt(LESS));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(rax, Smi::FromInt(GREATER));
- __ ret(0);
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch, Label* chars_not_equal, Label::Distance near_jump) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiToInteger32(length, length);
- __ leap(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ leap(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ negq(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ movb(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, near_jump);
- __ incq(index);
- __ j(not_zero, &loop);
-}
-
-
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<Name> name,
- Register r0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ andp(index,
- Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ movp(entity_name, Operand(properties,
- index,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, name);
- __ j(equal, miss);
-
- Label good;
- // Check for the hole and skip.
- __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
- __ j(equal, &good, Label::kNear);
-
- // Check if the entry name is not a unique name.
- __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ JumpIfNotUniqueNameInstanceType(
- FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
- __ bind(&good);
- }
-
- NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
- NEGATIVE_LOOKUP);
- __ Push(name);
- __ Push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ testp(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // rsp[0 * kPointerSize] : return address.
- // rsp[1 * kPointerSize] : key's hash.
- // rsp[2 * kPointerSize] : key.
- // Registers:
- // dictionary_: NameDictionary to probe.
- // result_: used as scratch.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result();
-
- __ SmiToInteger32(scratch, FieldOperand(dictionary(), kCapacityOffset));
- __ decl(scratch);
- __ Push(scratch);
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
- kPointerSize);
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movp(scratch, args.GetArgumentOperand(1));
- if (i > 0) {
- __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
- }
- __ andp(scratch, Operand(rsp, 0));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ leap(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- __ movp(scratch, Operand(dictionary(), index(), times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
-
- __ Cmp(scratch, isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmpp(scratch, args.GetArgumentOperand(0));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // If we hit a key that is not a unique name during negative
- // lookup we have to bailout as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not a unique name.
- __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotUniqueNameInstanceType(
- FieldOperand(scratch, Map::kInstanceTypeOffset),
- &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ movp(scratch, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ movp(scratch, Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ movp(scratch, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- DCHECK(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(second_instruction == kTwoByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kTwoByteNopInstruction;
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kTwoByteJumpInstruction;
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental;
- Label second_instr;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ jmp(&skip_to_incremental, Label::kNear);
- __ bind(&second_instr);
- __ jmp(&skip_to_incremental, Label::kNear);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental);
-
- GenerateIncremental(masm, &second_instr);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kTwoByteNopInstruction);
-}
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm,
- Label* second_instr) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(),
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
- second_instr);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, second_instr);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- Register address =
- arg_reg_1 == regs_.address() ? kScratchRegister : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != arg_reg_1);
- __ Move(address, regs_.address());
- __ Move(arg_reg_1, regs_.object());
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg_reg_2, address);
- __ LoadAddress(arg_reg_3,
- ExternalReference::isolate_address(isolate()));
- int argument_count = 3;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Label* second_instr) {
- Label need_incremental;
- Label need_incremental_pop_object;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &on_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ ret(0);
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
-
- Label ensure_not_white;
- // If second instruction is TwoByteNopInstruction, we're in noncompacting
- // mode.
- __ cmpb(Operand(second_instr), Immediate(kTwoByteNopInstruction));
- __ j(equal, &ensure_not_white, Label::kNear);
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask, zero,
- &ensure_not_white, Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask, zero,
- &need_incremental);
-
- __ bind(&ensure_not_white);
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ Push(regs_.object());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object, Label::kNear);
- __ Pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ Pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->CallStub(&stub);
}
@@ -1382,7 +793,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
__ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
@@ -1481,7 +892,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
__ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
@@ -1524,11 +935,11 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
return static_cast<int>(offset);
}
-// Prepares stack to put arguments (aligns and so on). WIN64 calling
-// convention requires to put the pointer to the return value slot into
-// rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
-// context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
-// inside the exit frame (not GCed) accessible via StackSpaceOperand.
+// Prepares stack to put arguments (aligns and so on). WIN64 calling convention
+// requires to put the pointer to the return value slot into rcx (rcx must be
+// preserverd until CallApiFunctionAndReturn). Clobbers rax. Allocates
+// arg_stack_space * kPointerSize inside the exit frame (not GCed) accessible
+// via StackSpaceOperand.
static void PrepareCallApiFunction(MacroAssembler* masm, int arg_stack_space) {
__ EnterApiExitFrame(arg_stack_space);
}
@@ -1543,8 +954,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
Register thunk_last_arg, int stack_space,
Operand* stack_space_operand,
- Operand return_value_operand,
- Operand* context_restore_operand) {
+ Operand return_value_operand) {
Label prologue;
Label promote_scheduled_exception;
Label delete_allocated_handles;
@@ -1626,14 +1036,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ movp(rsi, *context_restore_operand);
- }
if (stack_space_operand != nullptr) {
__ movp(rbx, *stack_space_operand);
}
- __ LeaveApiExitFrame(!restore_context);
+ __ LeaveApiExitFrame();
// Check if the function scheduled an exception.
__ Move(rdi, scheduled_exception_address);
@@ -1702,7 +1108,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rdi : callee
// -- rbx : call_data
// -- rcx : holder
// -- rdx : api_function_address
@@ -1713,22 +1118,17 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- rsp[argc * 8] : first argument
// -- rsp[(argc + 1) * 8] : receiver
- // -- rsp[(argc + 2) * 8] : accessor_holder
// -----------------------------------
- Register callee = rdi;
Register call_data = rbx;
Register holder = rcx;
Register api_function_address = rdx;
- Register context = rsi;
Register return_address = r8;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1740,12 +1140,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // context save
- __ Push(context);
-
- // callee
- __ Push(callee);
-
// call data
__ Push(call_data);
@@ -1760,38 +1154,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ Push(holder);
- // enter a new context
int argc = this->argc();
- if (this->is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- rsp[0] : holder
- // -- ...
- // -- rsp[(FCA::kArgsLength - 1) * 8] : new_target
- // -- rsp[FCA::kArgsLength * 8] : last argument
- // -- ...
- // -- rsp[(FCA::kArgsLength + argc - 1) * 8] : first argument
- // -- rsp[(FCA::kArgsLength + argc) * 8] : receiver
- // -- rsp[(FCA::kArgsLength + argc + 1) * 8] : accessor_holder
- // -----------------------------------------------------------
-
- // load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ movp(accessor_holder,
- MemOperand(rsp, (argc + FCA::kArgsLength + 1) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ movp(scratch, FieldOperand(accessor_holder, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(not_zero, &skip_looking_for_constructor, Label::kNear);
- __ GetMapConstructor(context, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ movp(context, FieldOperand(context, JSFunction::kContextOffset));
- } else {
- // load context from callee
- __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
- }
__ movp(scratch, rsp);
// Push return address back on stack.
@@ -1832,15 +1195,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Accessor for FunctionCallbackInfo and first js arg.
StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - FCA::kContextSaveIndex);
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- this->is_store() ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
- const int stack_space = argc + FCA::kArgsLength + 2;
+ FCA::kArgsLength - FCA::kReturnValueOffset);
+ const int stack_space = argc + FCA::kArgsLength + 1;
Operand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
stack_space, stack_space_operand,
- return_value_operand, &context_restore_operand);
+ return_value_operand);
}
@@ -1920,8 +1281,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Operand return_value_operand(
rbp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
- kStackUnwindSpace, nullptr, return_value_operand,
- NULL);
+ kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
deleted file mode 100644
index bba64fcb4a..0000000000
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X64_CODE_STUBS_X64_H_
-#define V8_X64_CODE_STUBS_X64_H_
-
-namespace v8 {
-namespace internal {
-
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in rax.
- static void GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4);
-
- // Compares two flat one-byte strings for equality and returns result in rax.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch, Label* chars_not_equal,
- Label::Distance near_jump = Label::kFar);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
- Register result, Register index, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = DictionaryBits::encode(dictionary.code()) |
- ResultBits::encode(result.code()) |
- IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<Name> name,
- Register r0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- Register dictionary() const {
- return Register::from_code(DictionaryBits::decode(minor_key_));
- }
-
- Register result() const {
- return Register::from_code(ResultBits::decode(minor_key_));
- }
-
- Register index() const {
- return Register::from_code(IndexBits::decode(minor_key_));
- }
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class DictionaryBits: public BitField<int, 0, 4> {};
- class ResultBits: public BitField<int, 4, 4> {};
- class IndexBits: public BitField<int, 8, 4> {};
- class LookupModeBits: public BitField<LookupMode, 12, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate, Register object, Register value,
- Register address, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always rcx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
- if (scratch0 == rcx) {
- scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
- }
- if (object == rcx) {
- object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
- }
- if (address == rcx) {
- address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
- }
- DCHECK(!AreAliased(scratch0_, object_, address_, rcx));
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(address_orig_ != object_);
- DCHECK(object_ == object_orig_ || address_ == address_orig_);
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (scratch0_ != scratch0_orig_) masm->Push(scratch0_);
- if (rcx != scratch0_orig_ && rcx != object_orig_ &&
- rcx != address_orig_) {
- masm->Push(rcx);
- }
- masm->Push(scratch1_);
- if (address_ != address_orig_) {
- masm->Push(address_);
- masm->movp(address_, address_orig_);
- }
- if (object_ != object_orig_) {
- masm->Push(object_);
- masm->movp(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with rcx.
- if (object_ != object_orig_) {
- masm->movp(object_orig_, object_);
- masm->Pop(object_);
- }
- if (address_ != address_orig_) {
- masm->movp(address_orig_, address_);
- masm->Pop(address_);
- }
- masm->Pop(scratch1_);
- if (rcx != scratch0_orig_ && rcx != object_orig_ &&
- rcx != address_orig_) {
- masm->Pop(rcx);
- }
- if (scratch0_ != scratch0_orig_) masm->Pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved.
-
- // The three scratch registers (incl. rcx) will be restored by other means
- // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee
- // save and don't need to be preserved.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always rcx.
-
- Register GetRegThatIsNotRcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(i)) {
- Register candidate = Register::from_code(i);
- if (candidate != rcx && candidate != r1 && candidate != r2 &&
- candidate != r3) {
- return candidate;
- }
- }
- }
- UNREACHABLE();
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Label* second_instr);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Label* second_instr);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f9b0cfd1a8..8c22e07b12 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/x64/codegen-x64.h"
-
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
@@ -15,16 +13,15 @@ namespace internal {
#define __ masm.
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(base::OS::Allocate(
- 1 * KB, &actual_size, true, isolate->heap()->GetRandomMmapAddr()));
+ size_t allocated = 0;
+ byte* buffer =
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
+
// xmm0: raw double input.
// Move double input into registers.
__ Sqrtsd(xmm0, xmm0);
@@ -34,150 +31,14 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ andl(result, Immediate(kStringRepresentationMask));
- __ cmpl(result, Immediate(kConsStringTag));
- __ j(equal, &cons_string, Label::kNear);
- __ cmpl(result, Immediate(kThinStringTag));
- __ j(equal, &thin_string, Label::kNear);
-
- // Handle slices.
- __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addp(index, result);
- __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ movp(string, FieldOperand(string, ThinString::kActualOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, call_runtime);
- __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
- __ jmp(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label one_byte_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, kExternalStringExpectedButNotFound);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(result, Immediate(kShortExternalStringTag));
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &one_byte_external, Label::kNear);
- // Two-byte string.
- __ movzxwl(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&one_byte_external);
- // One-byte string.
- __ movzxbl(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: one-byte or two-byte.
- Label one_byte;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &one_byte, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzxwl(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // One-byte string.
- // Load the byte into the result register.
- __ bind(&one_byte);
- __ movzxbl(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-#undef __
-
-Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
- DCHECK(index >= 0);
- int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
- int displacement_to_last_argument =
- base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
- displacement_to_last_argument += extra_displacement_to_last_argument_;
- if (argument_count_reg_ == no_reg) {
- // argument[0] is at base_reg_ + displacement_to_last_argument +
- // (argument_count_immediate_ + receiver - 1) * kPointerSize.
- DCHECK(argument_count_immediate_ + receiver > 0);
- return Operand(base_reg_, displacement_to_last_argument +
- (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
- } else {
- // argument[0] is at base_reg_ + displacement_to_last_argument +
- // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
- return Operand(base_reg_, argument_count_reg_, times_pointer_size,
- displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
- }
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
deleted file mode 100644
index 799187869e..0000000000
--- a/deps/v8/src/x64/codegen-x64.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X64_CODEGEN_X64_H_
-#define V8_X64_CODEGEN_X64_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-enum StackArgumentsAccessorReceiverMode {
- ARGUMENTS_CONTAIN_RECEIVER,
- ARGUMENTS_DONT_CONTAIN_RECEIVER
-};
-
-
-class StackArgumentsAccessor BASE_EMBEDDED {
- public:
- StackArgumentsAccessor(
- Register base_reg,
- int argument_count_immediate,
- StackArgumentsAccessorReceiverMode receiver_mode =
- ARGUMENTS_CONTAIN_RECEIVER,
- int extra_displacement_to_last_argument = 0)
- : base_reg_(base_reg),
- argument_count_reg_(no_reg),
- argument_count_immediate_(argument_count_immediate),
- receiver_mode_(receiver_mode),
- extra_displacement_to_last_argument_(
- extra_displacement_to_last_argument) { }
-
- StackArgumentsAccessor(
- Register base_reg,
- Register argument_count_reg,
- StackArgumentsAccessorReceiverMode receiver_mode =
- ARGUMENTS_CONTAIN_RECEIVER,
- int extra_displacement_to_last_argument = 0)
- : base_reg_(base_reg),
- argument_count_reg_(argument_count_reg),
- argument_count_immediate_(0),
- receiver_mode_(receiver_mode),
- extra_displacement_to_last_argument_(
- extra_displacement_to_last_argument) { }
-
- StackArgumentsAccessor(
- Register base_reg,
- const ParameterCount& parameter_count,
- StackArgumentsAccessorReceiverMode receiver_mode =
- ARGUMENTS_CONTAIN_RECEIVER,
- int extra_displacement_to_last_argument = 0)
- : base_reg_(base_reg),
- argument_count_reg_(parameter_count.is_reg() ?
- parameter_count.reg() : no_reg),
- argument_count_immediate_(parameter_count.is_immediate() ?
- parameter_count.immediate() : 0),
- receiver_mode_(receiver_mode),
- extra_displacement_to_last_argument_(
- extra_displacement_to_last_argument) { }
-
- Operand GetArgumentOperand(int index);
- Operand GetReceiverOperand() {
- DCHECK(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
- return GetArgumentOperand(0);
- }
-
- private:
- const Register base_reg_;
- const Register argument_count_reg_;
- const int argument_count_immediate_;
- const StackArgumentsAccessorReceiverMode receiver_mode_;
- const int extra_displacement_to_last_argument_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 1214142e6f..adc1b7874e 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -4,7 +4,6 @@
#if V8_TARGET_ARCH_X64
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
@@ -212,7 +211,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
if (r == rsp) {
- DCHECK(i > 0);
+ DCHECK_GT(i, 0);
r = Register::from_code(i - 1);
}
__ popq(r);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 5458a86c3f..23f502ab47 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -243,7 +243,7 @@ void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
DCHECK_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = NULL; // Computed depending on condition code.
+ id->mnem = nullptr; // Computed depending on condition code.
id->type = JUMP_CONDITIONAL_SHORT_INSTR;
}
}
@@ -732,7 +732,7 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case 2:
mnem = "not";
@@ -786,7 +786,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
regop &= 0x7; // The REX.R bit does not affect the operation.
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case 0:
mnem = "rol";
@@ -2200,10 +2200,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
return static_cast<int>(current - data);
}
-
// Mnemonics for two-byte opcode instructions starting with 0x0F.
// The argument is the second byte of the two-byte opcode.
-// Returns NULL if the instruction is not handled here.
+// Returns nullptr if the instruction is not handled here.
const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
switch (opcode) {
case 0x1F:
@@ -2252,7 +2251,7 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
case 0xBF:
return "movsxw";
default:
- return NULL;
+ return nullptr;
}
}
@@ -2340,7 +2339,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
break;
case MOVE_REG_INSTR: {
- byte* addr = NULL;
+ byte* addr = nullptr;
switch (operand_size()) {
case OPERAND_WORD_SIZE:
addr =
@@ -2437,7 +2436,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
+ const char* mnem = nullptr;
switch (regop) {
case 0:
mnem = "inc";
@@ -2703,7 +2702,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
int instr_len = static_cast<int>(data - instr);
- DCHECK(instr_len > 0); // Ensure progress.
+ DCHECK_GT(instr_len, 0); // Ensure progress.
int outp = 0;
// Instruction bytes.
diff --git a/deps/v8/src/x64/frame-constants-x64.cc b/deps/v8/src/x64/frame-constants-x64.cc
index ab29aed277..553d3ef665 100644
--- a/deps/v8/src/x64/frame-constants-x64.cc
+++ b/deps/v8/src/x64/frame-constants-x64.cc
@@ -22,6 +22,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index a978a14aaa..59c9532cb3 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -57,9 +57,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return rdi; }
const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
const Register StoreTransitionDescriptor::MapRegister() { return r11; }
-const Register StringCompareDescriptor::LeftRegister() { return rdx; }
-const Register StringCompareDescriptor::RightRegister() { return rax; }
-
const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
@@ -218,7 +215,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -228,7 +225,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// rdi -- function
// rbx -- allocation site with elements kind
Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -238,7 +235,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// rdi -- function
// rbx -- allocation site with elements kind
Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@@ -248,7 +245,7 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
// rdi -- function
// rbx -- allocation site with elements kind
Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@@ -284,10 +281,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- rdi, // callee
- rbx, // call_data
- rcx, // holder
- rdx, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ rbx, // call_data
+ rcx, // holder
+ rdx, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -336,8 +333,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rax, // the value to pass to the generator
- rbx, // the JSGeneratorObject / JSAsyncGeneratorObject to resume
- rdx // the resume mode (tagged)
+ rdx // the JSGeneratorObject / JSAsyncGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 4255e583e3..e305aaa1a5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -9,7 +9,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
@@ -24,6 +24,42 @@
namespace v8 {
namespace internal {
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
+ DCHECK_GE(index, 0);
+ int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
+ int displacement_to_last_argument =
+ base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
+ displacement_to_last_argument += extra_displacement_to_last_argument_;
+ if (argument_count_reg_ == no_reg) {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // (argument_count_immediate_ + receiver - 1) * kPointerSize.
+ DCHECK_GT(argument_count_immediate_ + receiver, 0);
+ return Operand(
+ base_reg_,
+ displacement_to_last_argument +
+ (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
+ } else {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
+ return Operand(
+ base_reg_, argument_count_reg_, times_pointer_size,
+ displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
+ }
+}
+
+StackArgumentsAccessor::StackArgumentsAccessor(
+ Register base_reg, const ParameterCount& parameter_count,
+ StackArgumentsAccessorReceiverMode receiver_mode,
+ int extra_displacement_to_last_argument)
+ : base_reg_(base_reg),
+ argument_count_reg_(parameter_count.is_reg() ? parameter_count.reg()
+ : no_reg),
+ argument_count_immediate_(
+ parameter_count.is_immediate() ? parameter_count.immediate() : 0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) {}
+
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
@@ -184,50 +220,6 @@ void TurboAssembler::CompareRoot(const Operand& with,
cmpp(with, kScratchRegister);
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp) {
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- DCHECK(scratch != kScratchRegister);
- Move(kScratchRegister, store_buffer);
- movp(scratch, Operand(kScratchRegister, 0));
- // Store pointer to buffer.
- movp(Operand(scratch, 0), addr);
- // Increment buffer top.
- addp(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- movp(Operand(kScratchRegister, 0), scratch);
- // Call stub on end of buffer.
- Label done;
- // Check for end of buffer.
- testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
- Label buffer_overflowed;
- j(equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
- CallStub(&store_buffer_overflow);
- ret(0);
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance) {
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch,
- distance);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
SaveFPRegsMode save_fp,
@@ -269,7 +261,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
pushq(Register::from_code(i));
@@ -278,7 +270,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
if ((registers >> i) & 1u) {
popq(Register::from_code(i));
@@ -363,13 +355,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
&done,
Label::kNear);
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
bind(&done);
@@ -418,7 +404,7 @@ void TurboAssembler::CheckStackAlignment() {
void TurboAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -960,24 +946,6 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
}
}
-
-void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- if (SmiValuesAre32Bits()) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
- } else {
- DCHECK(SmiValuesAre31Bits());
- movp(dst, src);
- SmiToInteger64(dst, dst);
- }
-}
-
-
-void MacroAssembler::SmiTest(Register src) {
- AssertSmi(src);
- testp(src, src);
-}
-
-
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
@@ -992,7 +960,7 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
- DCHECK(dst != kScratchRegister);
+ DCHECK_NE(dst, kScratchRegister);
if (src->value() == 0) {
testp(dst, dst);
} else {
@@ -1035,25 +1003,6 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
}
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power) {
- DCHECK(power >= 0);
- DCHECK(power < 64);
- if (power == 0) {
- SmiToInteger64(dst, src);
- return;
- }
- if (dst != src) {
- movp(dst, src);
- }
- if (power < kSmiShift) {
- sarp(dst, Immediate(kSmiShift - power));
- } else if (power > kSmiShift) {
- shlp(dst, Immediate(power - kSmiShift));
- }
-}
-
Condition TurboAssembler::CheckSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
@@ -1086,23 +1035,6 @@ void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (dst != src) {
- movp(dst, src);
- }
- return;
- } else if (dst == src) {
- DCHECK(dst != kScratchRegister);
- Register constant_reg = GetSmiConstant(constant);
- addp(dst, constant_reg);
- } else {
- Move(dst, constant);
- addp(dst, src);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
if (SmiValuesAre32Bits()) {
@@ -1115,248 +1047,6 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
}
}
-void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
- SmiOperationConstraints constraints,
- Label* bailout_label,
- Label::Distance near_jump) {
- if (constant->value() == 0) {
- if (dst != src) {
- movp(dst, src);
- }
- } else if (dst == src) {
- DCHECK(dst != kScratchRegister);
- Move(kScratchRegister, constant);
- addp(dst, kScratchRegister);
- if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
- j(no_overflow, bailout_label, near_jump);
- DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
- subp(dst, kScratchRegister);
- } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
- if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
- Label done;
- j(no_overflow, &done, Label::kNear);
- subp(dst, kScratchRegister);
- jmp(bailout_label, near_jump);
- bind(&done);
- } else {
- // Bailout if overflow without reserving src.
- j(overflow, bailout_label, near_jump);
- }
- } else {
- UNREACHABLE();
- }
- } else {
- DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
- DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
- Move(dst, constant);
- addp(dst, src);
- j(overflow, bailout_label, near_jump);
- }
-}
-
-void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (dst != src) {
- movp(dst, src);
- }
- } else if (dst == src) {
- DCHECK(dst != kScratchRegister);
- Register constant_reg = GetSmiConstant(constant);
- subp(dst, constant_reg);
- } else {
- if (constant->value() == Smi::kMinValue) {
- Move(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addp(dst, src);
- } else {
- // Subtract by adding the negation.
- Move(dst, Smi::FromInt(-constant->value()));
- addp(dst, src);
- }
- }
-}
-
-void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
- SmiOperationConstraints constraints,
- Label* bailout_label,
- Label::Distance near_jump) {
- if (constant->value() == 0) {
- if (dst != src) {
- movp(dst, src);
- }
- } else if (dst == src) {
- DCHECK(dst != kScratchRegister);
- Move(kScratchRegister, constant);
- subp(dst, kScratchRegister);
- if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
- j(no_overflow, bailout_label, near_jump);
- DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
- addp(dst, kScratchRegister);
- } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
- if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
- Label done;
- j(no_overflow, &done, Label::kNear);
- addp(dst, kScratchRegister);
- jmp(bailout_label, near_jump);
- bind(&done);
- } else {
- // Bailout if overflow without reserving src.
- j(overflow, bailout_label, near_jump);
- }
- } else {
- UNREACHABLE();
- }
- } else {
- DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
- DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
- if (constant->value() == Smi::kMinValue) {
- DCHECK(dst != kScratchRegister);
- movp(dst, src);
- Move(kScratchRegister, constant);
- subp(dst, kScratchRegister);
- j(overflow, bailout_label, near_jump);
- } else {
- // Subtract by adding the negation.
- Move(dst, Smi::FromInt(-(constant->value())));
- addp(dst, src);
- j(overflow, bailout_label, near_jump);
- }
- }
-}
-
-template<class T>
-static void SmiAddHelper(MacroAssembler* masm,
- Register dst,
- Register src1,
- T src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- if (dst == src1) {
- Label done;
- masm->addp(dst, src2);
- masm->j(no_overflow, &done, Label::kNear);
- // Restore src1.
- masm->subp(dst, src2);
- masm->jmp(on_not_smi_result, near_jump);
- masm->bind(&done);
- } else {
- masm->movp(dst, src1);
- masm->addp(dst, src2);
- masm->j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- DCHECK_NOT_NULL(on_not_smi_result);
- DCHECK(dst != src2);
- SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- DCHECK_NOT_NULL(on_not_smi_result);
- DCHECK(!src2.AddressUsesRegister(dst));
- SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
-}
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible.
- if (dst != src1) {
- if (emit_debug_code()) {
- movp(kScratchRegister, src1);
- addp(kScratchRegister, src2);
- Check(no_overflow, kSmiAdditionOverflow);
- }
- leap(dst, Operand(src1, src2, times_1, 0));
- } else {
- addp(dst, src2);
- Assert(no_overflow, kSmiAdditionOverflow);
- }
-}
-
-
-template<class T>
-static void SmiSubHelper(MacroAssembler* masm,
- Register dst,
- Register src1,
- T src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- if (dst == src1) {
- Label done;
- masm->subp(dst, src2);
- masm->j(no_overflow, &done, Label::kNear);
- // Restore src1.
- masm->addp(dst, src2);
- masm->jmp(on_not_smi_result, near_jump);
- masm->bind(&done);
- } else {
- masm->movp(dst, src1);
- masm->subp(dst, src2);
- masm->j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- DCHECK_NOT_NULL(on_not_smi_result);
- DCHECK(dst != src2);
- SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
-}
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- DCHECK_NOT_NULL(on_not_smi_result);
- DCHECK(!src2.AddressUsesRegister(dst));
- SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
-}
-
-template<class T>
-static void SmiSubNoOverflowHelper(MacroAssembler* masm,
- Register dst,
- Register src1,
- T src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (dst != src1) {
- masm->movp(dst, src1);
- }
- masm->subp(dst, src2);
- masm->Assert(no_overflow, kSmiSubtractionOverflow);
-}
-
-
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- DCHECK(dst != src2);
- SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
- SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
-}
-
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1411,36 +1101,6 @@ void TurboAssembler::Push(Smi* source) {
// ----------------------------------------------------------------------------
-template<class T>
-static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
- T operand_or_register,
- Label* not_unique_name,
- Label::Distance distance) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- masm->testb(operand_or_register,
- Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- masm->j(zero, &succeed, Label::kNear);
- masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- masm->j(not_equal, not_unique_name, distance);
-
- masm->bind(&succeed);
-}
-
-
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
- Label* not_unique_name,
- Label::Distance distance) {
- JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
-}
-
-
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name,
- Label::Distance distance) {
- JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
-}
-
void TurboAssembler::Move(Register dst, Register src) {
if (dst != src) {
movp(dst, src);
@@ -1460,7 +1120,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
Xorpd(dst, dst);
} else {
- unsigned pop = base::bits::CountPopulation32(src);
+ unsigned pop = base::bits::CountPopulation(src);
DCHECK_NE(0u, pop);
if (pop == 32) {
Pcmpeqd(dst, dst);
@@ -1475,9 +1135,9 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
Xorpd(dst, dst);
} else {
- unsigned nlz = base::bits::CountLeadingZeros64(src);
- unsigned ntz = base::bits::CountTrailingZeros64(src);
- unsigned pop = base::bits::CountPopulation64(src);
+ unsigned nlz = base::bits::CountLeadingZeros(src);
+ unsigned ntz = base::bits::CountTrailingZeros(src);
+ unsigned pop = base::bits::CountPopulation(src);
DCHECK_NE(0u, pop);
if (pop == 64) {
Pcmpeqd(dst, dst);
@@ -1813,19 +1473,6 @@ void TurboAssembler::Move(const Operand& dst, Handle<HeapObject> object,
movp(dst, kScratchRegister);
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
- movp(value, FieldOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addp(rsp, Immediate(stack_elements * kPointerSize));
@@ -1835,7 +1482,7 @@ void MacroAssembler::Drop(int stack_elements) {
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
Register scratch) {
- DCHECK(stack_elements > 0);
+ DCHECK_GT(stack_elements, 0);
if (kPointerSize == kInt64Size && stack_elements == 1) {
popq(MemOperand(rsp, 0));
return;
@@ -2315,14 +1962,11 @@ void MacroAssembler::CmpObjectType(Register heap_object,
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(type)));
+ cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
- Register input_reg, int offset) {
- CallStubDelayed(
- new (zone) DoubleToIStub(nullptr, input_reg, result_reg, offset, true));
+void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg) {
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result_reg));
}
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
@@ -2352,23 +1996,6 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
}
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- movp(dst, FieldOperand(dst, offset));
-}
-
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
@@ -2405,7 +2032,7 @@ void MacroAssembler::AssertFixedArray(Register object) {
void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
- DCHECK(int32_register != kScratchRegister);
+ DCHECK_NE(int32_register, kScratchRegister);
movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
@@ -2472,21 +2099,8 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
}
}
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp) {
- Label done, loop;
- movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done, Label::kNear);
- CmpObjectType(result, MAP_TYPE, temp);
- j(not_equal, &done, Label::kNear);
- movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
- jmp(&loop);
- bind(&done);
-}
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
@@ -2499,7 +2113,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
@@ -2522,8 +2136,7 @@ void MacroAssembler::MaybeDropFrames() {
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
- Register scratch0, Register scratch1,
- ReturnAddressState ra_state) {
+ Register scratch0, Register scratch1) {
#if DEBUG
if (callee_args_count.is_reg()) {
DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
@@ -2555,13 +2168,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// to avoid its trashing and let the following loop copy it to the right
// place.
Register tmp_reg = scratch1;
- if (ra_state == ReturnAddressState::kOnStack) {
- movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- movp(Operand(rsp, 0), tmp_reg);
- } else {
- DCHECK(ReturnAddressState::kNotOnStack == ra_state);
- Push(Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- }
+ movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ movp(Operand(rsp, 0), tmp_reg);
// Restore caller's frame pointer now as it could be overwritten by
// the copying loop.
@@ -2923,26 +2531,22 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
leave();
}
- LeaveExitFrameEpilogue(true);
+ LeaveExitFrameEpilogue();
}
-
-void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
+void MacroAssembler::LeaveApiExitFrame() {
movp(rsp, rbp);
popq(rbp);
- LeaveExitFrameEpilogue(restore_context);
+ LeaveExitFrameEpilogue();
}
-
-void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
+void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(IsolateAddressId::kContextAddress,
isolate());
Operand context_operand = ExternalOperand(context_address);
- if (restore_context) {
- movp(rsi, context_operand);
- }
+ movp(rsi, context_operand);
#ifdef DEBUG
movp(context_operand, Immediate(0));
#endif
@@ -2975,7 +2579,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// arguments.
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
- DCHECK(num_arguments >= 0);
+ DCHECK_GE(num_arguments, 0);
#ifdef _WIN64
const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
@@ -2988,8 +2592,8 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
void TurboAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = base::OS::ActivationFrameAlignment();
- DCHECK(frame_alignment != 0);
- DCHECK(num_arguments >= 0);
+ DCHECK_NE(frame_alignment, 0);
+ DCHECK_GE(num_arguments, 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
movp(kScratchRegister, rsp);
@@ -3016,8 +2620,8 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
call(function);
- DCHECK(base::OS::ActivationFrameAlignment() != 0);
- DCHECK(num_arguments >= 0);
+ DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
+ DCHECK_GE(num_arguments, 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
@@ -3052,27 +2656,6 @@ bool AreAliased(Register reg1,
}
#endif
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
- : address_(address),
- size_(size),
- masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- Assembler::FlushICache(masm_.isolate(), address_, size_);
-
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
@@ -3091,69 +2674,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
}
j(cc, condition_met, condition_met_distance);
}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* on_black,
- Label::Distance on_black_distance) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- // The mask_scratch register contains a 1 at the position of the first bit
- // and a 1 at a position of the second bit. All other positions are zero.
- movp(rcx, mask_scratch);
- andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpp(mask_scratch, rcx);
- j(equal, on_black, on_black_distance);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movp(bitmap_reg, addr_reg);
- // Sign extended 32 bit immediate.
- andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movp(rcx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shrl(rcx, Immediate(shift));
- andp(rcx,
- Immediate((Page::kPageAlignmentMask >> shift) &
- ~(Bitmap::kBytesPerCell - 1)));
-
- addp(bitmap_reg, rcx);
- movp(rcx, addr_reg);
- shrl(rcx, Immediate(kPointerSizeLog2));
- andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(3));
- shlp_cl(mask_reg);
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Label* value_is_white,
- Label::Distance distance) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, value_is_white, distance);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index c3c92745fc..73650f36e5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -45,18 +45,6 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum class SmiOperationConstraint {
- kPreserveSourceRegister = 1 << 0,
- kBailoutOnNoOverflow = 1 << 1,
- kBailoutOnOverflow = 1 << 2
-};
-
-enum class ReturnAddressState { kOnStack, kNotOnStack };
-
-typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;
-
-DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
-
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -79,6 +67,57 @@ struct SmiIndex {
ScaleFactor scale;
};
+enum StackArgumentsAccessorReceiverMode {
+ ARGUMENTS_CONTAIN_RECEIVER,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER
+};
+
+class StackArgumentsAccessor BASE_EMBEDDED {
+ public:
+ StackArgumentsAccessor(Register base_reg, int argument_count_immediate,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(no_reg),
+ argument_count_immediate_(argument_count_immediate),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) {}
+
+ StackArgumentsAccessor(Register base_reg, Register argument_count_reg,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(argument_count_reg),
+ argument_count_immediate_(0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) {}
+
+ StackArgumentsAccessor(Register base_reg,
+ const ParameterCount& parameter_count,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0);
+
+ Operand GetArgumentOperand(int index);
+ Operand GetReceiverOperand() {
+ DCHECK(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
+ return GetArgumentOperand(0);
+ }
+
+ private:
+ const Register base_reg_;
+ const Register argument_count_reg_;
+ const int argument_count_immediate_;
+ const StackArgumentsAccessorReceiverMode receiver_mode_;
+ const int extra_displacement_to_last_argument_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
+};
+
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
@@ -371,15 +410,13 @@ class TurboAssembler : public Assembler {
}
void LeaveFrame(StackFrame::Type type);
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // |ra_state| defines whether return address is already pushed to stack or
- // not. Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
+ // Removes current frame and its arguments from the stack preserving the
+ // arguments and a return address pushed to the stack for the next call. Both
+ // |callee_args_count| and |caller_args_count_reg| do not include receiver.
+ // |callee_args_count| is not modified, |caller_args_count_reg| is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
- Register scratch1, ReturnAddressState ra_state);
+ Register scratch1);
inline bool AllowThisStubCall(CodeStub* stub);
@@ -388,10 +425,7 @@ class TurboAssembler : public Assembler {
// HeapObjectRequest that will be fulfilled after code assembly.
void CallStubDelayed(CodeStub* stub);
- void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
- Register input_reg,
- int offset = HeapNumber::kValueOffset -
- kHeapObjectTag);
+ void SlowTruncateToIDelayed(Zone* zone, Register result_reg);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -537,42 +571,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, zero, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_zero, branch, distance);
- }
-
- // Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object, Register bitmap_scratch,
- Register mask_scratch, Label* on_black,
- Label::Distance on_black_distance);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Label* value_is_white, Label::Distance distance);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -618,7 +616,7 @@ class MacroAssembler : public TurboAssembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
- void LeaveApiExitFrame(bool restore_context);
+ void LeaveApiExitFrame();
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
@@ -660,19 +658,6 @@ class MacroAssembler : public TurboAssembler {
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
- void SmiToInteger64(Register dst, const Operand& src);
-
- // Convert smi to double.
- void SmiToDouble(XMMRegister dst, Register src) {
- SmiToInteger32(kScratchRegister, src);
- Cvtlsi2sd(dst, kScratchRegister);
- }
-
- // Multiply a positive smi's integer value by a power of two.
- // Provides result as 64-bit integer value.
- void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power);
// Simple comparison of smis. Both sides must be known smis to use these,
// otherwise use Cmp.
@@ -681,8 +666,6 @@ class MacroAssembler : public TurboAssembler {
void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
void SmiCompare(const Operand& dst, Smi* src);
- // Compare the int32 in src register to the value of the smi stored at dst.
- void SmiTest(Register src);
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
@@ -706,69 +689,8 @@ class MacroAssembler : public TurboAssembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
- void SmiAddConstant(Register dst, Register src, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
void SmiAddConstant(const Operand& dst, Smi* constant);
- // Add an integer constant to a tagged smi, giving a tagged smi as result,
- // or jumping to a label if the result cannot be represented by a smi.
- void SmiAddConstant(Register dst, Register src, Smi* constant,
- SmiOperationConstraints constraints, Label* bailout_label,
- Label::Distance near_jump = Label::kFar);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result. No testing on the result is done. Sets the N and Z flags
- // based on the value of the resulting integer.
- void SmiSubConstant(Register dst, Register src, Smi* constant);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result, or jumping to a label if the result cannot be represented by a smi.
- void SmiSubConstant(Register dst, Register src, Smi* constant,
- SmiOperationConstraints constraints, Label* bailout_label,
- Label::Distance near_jump = Label::kFar);
-
- // Adds smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed if the operation is
- // successful, otherwise kept intact.
- void SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiAdd(Register dst,
- Register src1,
- Register src2);
-
- // Subtracts smi values and return the result as a smi.
- // If dst is src1, then src1 will be destroyed if the operation is
- // successful, otherwise kept intact.
- void SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2);
-
// Specialized operations
// Converts, if necessary, a smi to a combination of number and
@@ -782,15 +704,6 @@ class MacroAssembler : public TurboAssembler {
SmiIndex SmiToIndex(Register dst, Register src, int shift);
// ---------------------------------------------------------------------------
- // String macros.
-
- // Checks if the given register or operand is a unique name
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
- void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
-
- // ---------------------------------------------------------------------------
// Macro instructions.
// Load/store with specific representation.
@@ -802,24 +715,6 @@ class MacroAssembler : public TurboAssembler {
void Cmp(Register dst, Smi* src);
void Cmp(const Operand& dst, Smi* src);
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp (on x64 it's at least return address).
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 1) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
@@ -910,10 +805,6 @@ class MacroAssembler : public TurboAssembler {
Label* lost_precision, Label* is_nan, Label* minus_zero,
Label::Distance dst = Label::kFar);
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template<typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
@@ -961,10 +852,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done.
- void GetMapConstructor(Register result, Register map, Register temp);
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -1043,7 +930,7 @@ class MacroAssembler : public TurboAssembler {
// accessible via StackSpaceOperand.
void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
- void LeaveExitFrameEpilogue(bool restore_context);
+ void LeaveExitFrameEpilogue();
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
@@ -1052,14 +939,6 @@ class MacroAssembler : public TurboAssembler {
Label* branch,
Label::Distance distance = Label::kFar);
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses rcx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code) {
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
@@ -1070,27 +949,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. Is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(Isolate* isolate, byte* address, int size);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
index e2cc1c6dc3..9abc89a30e 100644
--- a/deps/v8/src/zone/zone-handle-set.h
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -6,6 +6,7 @@
#define V8_ZONE_ZONE_HANDLE_SET_H_
#include "src/handles.h"
+#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -25,7 +26,7 @@ class ZoneHandleSet final {
size_t size() const {
if ((data_ & kTagMask) == kEmptyTag) return 0;
if ((data_ & kTagMask) == kSingletonTag) return 1;
- return list()->length();
+ return list()->size();
}
Handle<T> at(size_t i) const {
@@ -46,34 +47,35 @@ class ZoneHandleSet final {
data_ = bit_cast<intptr_t>(value) | kSingletonTag;
} else if ((data_ & kTagMask) == kSingletonTag) {
if (singleton() == value) return;
- List* list = new (zone) List(2, zone);
+ List* list = new (zone->New(sizeof(List))) List(zone);
if (singleton() < value) {
- list->Add(singleton(), zone);
- list->Add(value, zone);
+ list->push_back(singleton());
+ list->push_back(value);
} else {
- list->Add(value, zone);
- list->Add(singleton(), zone);
+ list->push_back(value);
+ list->push_back(singleton());
}
DCHECK(IsAligned(bit_cast<intptr_t>(list), kPointerAlignment));
data_ = bit_cast<intptr_t>(list) | kListTag;
} else {
DCHECK_EQ(kListTag, data_ & kTagMask);
List const* const old_list = list();
- for (int i = 0; i < old_list->length(); ++i) {
+ for (size_t i = 0; i < old_list->size(); ++i) {
if (old_list->at(i) == value) return;
if (old_list->at(i) > value) break;
}
- List* new_list = new (zone) List(old_list->length() + 1, zone);
- int i = 0;
- for (; i < old_list->length(); ++i) {
+ List* new_list = new (zone->New(sizeof(List))) List(zone);
+ new_list->reserve(old_list->size() + 1);
+ size_t i = 0;
+ for (; i < old_list->size(); ++i) {
if (old_list->at(i) > value) break;
- new_list->Add(old_list->at(i), zone);
+ new_list->push_back(old_list->at(i));
}
- new_list->Add(value, zone);
- for (; i < old_list->length(); ++i) {
- new_list->Add(old_list->at(i), zone);
+ new_list->push_back(value);
+ for (; i < old_list->size(); ++i) {
+ new_list->push_back(old_list->at(i));
}
- DCHECK_EQ(old_list->length() + 1, new_list->length());
+ DCHECK_EQ(old_list->size() + 1, new_list->size());
DCHECK(IsAligned(bit_cast<intptr_t>(new_list), kPointerAlignment));
data_ = bit_cast<intptr_t>(new_list) | kListTag;
}
@@ -85,17 +87,32 @@ class ZoneHandleSet final {
if (other.data_ == kEmptyTag) return true;
if ((data_ & kTagMask) == kSingletonTag) return false;
DCHECK_EQ(kListTag, data_ & kTagMask);
+ List const* cached_list = list();
if ((other.data_ & kTagMask) == kSingletonTag) {
- return list()->Contains(other.singleton());
+ return std::find(cached_list->begin(), cached_list->end(),
+ other.singleton()) != cached_list->end();
}
DCHECK_EQ(kListTag, other.data_ & kTagMask);
// TODO(bmeurer): Optimize this case.
- for (int i = 0; i < other.list()->length(); ++i) {
- if (!list()->Contains(other.list()->at(i))) return false;
+ for (size_t i = 0; i < other.list()->size(); ++i) {
+ if (std::find(cached_list->begin(), cached_list->end(),
+ other.list()->at(i)) == cached_list->end()) {
+ return false;
+ }
}
return true;
}
+ bool contains(Handle<T> other) const {
+ if (data_ == kEmptyTag) return false;
+ if ((data_ & kTagMask) == kSingletonTag) {
+ return singleton() == bit_cast<T**>(other.address());
+ }
+ DCHECK_EQ(kListTag, data_ & kTagMask);
+ return std::find(list()->begin(), list()->end(),
+ bit_cast<T**>(other.address())) != list()->end();
+ }
+
void remove(Handle<T> handle, Zone* zone) {
// TODO(bmeurer): Optimize this case.
ZoneHandleSet<T> that;
@@ -115,8 +132,8 @@ class ZoneHandleSet final {
(rhs.data_ & kTagMask) == kListTag) {
List const* const lhs_list = lhs.list();
List const* const rhs_list = rhs.list();
- if (lhs_list->length() == rhs_list->length()) {
- for (int i = 0; i < lhs_list->length(); ++i) {
+ if (lhs_list->size() == rhs_list->size()) {
+ for (size_t i = 0; i < lhs_list->size(); ++i) {
if (lhs_list->at(i) != rhs_list->at(i)) return false;
}
return true;
@@ -139,7 +156,7 @@ class ZoneHandleSet final {
inline const_iterator end() const;
private:
- typedef ZoneList<T**> List;
+ typedef ZoneVector<T**> List;
List const* list() const {
DCHECK_EQ(kListTag, data_ & kTagMask);
diff --git a/deps/v8/src/zone/zone-list-inl.h b/deps/v8/src/zone/zone-list-inl.h
index efae3971a3..d90c9a28fe 100644
--- a/deps/v8/src/zone/zone-list-inl.h
+++ b/deps/v8/src/zone/zone-list-inl.h
@@ -111,7 +111,7 @@ void ZoneList<T>::Clear() {
DeleteData(data_);
// We don't call Initialize(0) since that requires passing a Zone,
// which we don't really need.
- data_ = NULL;
+ data_ = nullptr;
capacity_ = 0;
length_ = 0;
}
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index d9113a8f76..de8146de05 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -59,7 +59,7 @@ Zone::~Zone() {
DeleteAll();
- DCHECK(segment_bytes_allocated_ == 0);
+ DCHECK_EQ(segment_bytes_allocated_, 0);
}
void* Zone::New(size_t size) {
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index ba79cfa666..c8c1fe3515 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -204,8 +204,8 @@ class ZoneList final {
INLINE(void Initialize(int capacity, Zone* zone)) {
DCHECK_GE(capacity, 0);
- data_ =
- (capacity > 0) ? NewData(capacity, ZoneAllocationPolicy(zone)) : NULL;
+ data_ = (capacity > 0) ? NewData(capacity, ZoneAllocationPolicy(zone))
+ : nullptr;
capacity_ = capacity;
length_ = 0;
}
@@ -314,4 +314,17 @@ typedef base::CustomMatcherTemplateHashMapImpl<ZoneAllocationPolicy>
} // namespace internal
} // namespace v8
+// The accidential pattern
+// new (zone) SomeObject()
+// where SomeObject does not inherit from ZoneObject leads to nasty crashes.
+// This triggers a compile-time error instead.
+template <class T, typename = typename std::enable_if<std::is_convertible<
+ T, const v8::internal::Zone*>::value>::type>
+void* operator new(size_t size, T zone) {
+ static_assert(false && sizeof(T),
+ "Placement new with a zone is only permitted for classes "
+ "inheriting from ZoneObject");
+ UNREACHABLE();
+}
+
#endif // V8_ZONE_ZONE_H_
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index cdbb0adc8f..63c0f9f1f8 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -39,8 +39,8 @@ class BenchmarksVariantGenerator(testsuite.VariantGenerator):
# always opt to match the way the benchmarks are run for performance
# testing.
def FilterVariantsByTest(self, testcase):
- if testcase.outcomes and statusfile.OnlyStandardVariant(
- testcase.outcomes):
+ outcomes = self.suite.GetStatusFileOutcomes(testcase)
+ if statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant
return self.fast_variants
@@ -117,53 +117,33 @@ class BenchmarksTestSuite(testsuite.TestSuite):
tests.append(testcase.TestCase(self, test))
return tests
- def GetFlagsForTestCase(self, testcase, context):
- result = []
- result += context.mode_flags
+ def GetParametersForTestCase(self, testcase, context):
+ files = []
if testcase.path.startswith("kraken"):
- result.append(os.path.join(self.testroot, "%s-data.js" % testcase.path))
- result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
+ files.append(os.path.join(self.testroot, "%s-data.js" % testcase.path))
+ files.append(os.path.join(self.testroot, "%s.js" % testcase.path))
elif testcase.path.startswith("octane"):
- result.append(os.path.join(self.testroot, "octane/base.js"))
- result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
+ files.append(os.path.join(self.testroot, "octane/base.js"))
+ files.append(os.path.join(self.testroot, "%s.js" % testcase.path))
if testcase.path.startswith("octane/gbemu"):
- result.append(os.path.join(self.testroot, "octane/gbemu-part2.js"))
+ files.append(os.path.join(self.testroot, "octane/gbemu-part2.js"))
elif testcase.path.startswith("octane/typescript"):
- result.append(os.path.join(self.testroot,
- "octane/typescript-compiler.js"))
- result.append(os.path.join(self.testroot, "octane/typescript-input.js"))
+ files.append(os.path.join(self.testroot,
+ "octane/typescript-compiler.js"))
+ files.append(os.path.join(self.testroot, "octane/typescript-input.js"))
elif testcase.path.startswith("octane/zlib"):
- result.append(os.path.join(self.testroot, "octane/zlib-data.js"))
- result += ["-e", "BenchmarkSuite.RunSuites({});"]
+ files.append(os.path.join(self.testroot, "octane/zlib-data.js"))
+ files += ["-e", "BenchmarkSuite.RunSuites({});"]
elif testcase.path.startswith("sunspider"):
- result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
- return testcase.flags + result
+ files.append(os.path.join(self.testroot, "%s.js" % testcase.path))
+
+ return files, testcase.flags + context.mode_flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
- def DownloadData(self):
- print "Benchmarks download is deprecated. It's part of DEPS."
-
- def rm_dir(directory):
- directory_name = os.path.join(self.root, directory)
- if os.path.exists(directory_name):
- shutil.rmtree(directory_name)
-
- # Clean up old directories and archive files.
- rm_dir('kraken')
- rm_dir('octane')
- rm_dir('sunspider')
- archive_files = [f for f in os.listdir(self.root)
- if f.startswith("downloaded_") or
- f.startswith("CHECKED_OUT_")]
- if len(archive_files) > 0:
- print "Clobber outdated test archives ..."
- for f in archive_files:
- os.remove(os.path.join(self.root, f))
-
def _VariantGeneratorFactory(self):
return BenchmarksVariantGenerator
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 3dae59bac2..ca81ef1f0d 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -87,6 +87,7 @@ v8_source_set("cctest_sources") {
"compiler/test-run-unwinding-info.cc",
"compiler/test-run-variables.cc",
"compiler/test-run-wasm-machops.cc",
+ "compiler/value-helper.cc",
"compiler/value-helper.h",
"expression-type-collector-macros.h",
"gay-fixed.cc",
@@ -227,6 +228,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm.cc",
"wasm/test-streaming-compilation.cc",
"wasm/test-wasm-breakpoints.cc",
+ "wasm/test-wasm-codegen.cc",
"wasm/test-wasm-interpreter-entry.cc",
"wasm/test-wasm-stack.cc",
"wasm/test-wasm-trap-position.cc",
@@ -248,6 +250,8 @@ v8_source_set("cctest_sources") {
if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
+ "assembler-helper-arm.cc",
+ "assembler-helper-arm.h",
"test-assembler-arm.cc",
"test-code-stubs-arm.cc",
"test-code-stubs.cc",
@@ -255,7 +259,7 @@ v8_source_set("cctest_sources") {
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
"test-run-wasm-relocation-arm.cc",
- "test-simulator-arm.cc",
+ "test-sync-primitives-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
@@ -268,7 +272,7 @@ v8_source_set("cctest_sources") {
"test-javascript-arm64.cc",
"test-js-arm64-variables.cc",
"test-run-wasm-relocation-arm64.cc",
- "test-simulator-arm64.cc",
+ "test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
]
@@ -345,13 +349,6 @@ v8_source_set("cctest_sources") {
]
}
- if (is_linux) {
- # TODO(machenbach): Translate 'or OS=="qnx"' from gyp.
- sources += [ "test-platform-linux.cc" ]
- } else if (is_win) {
- sources += [ "test-platform-win32.cc" ]
- }
-
configs = [
"../..:external_config",
"../..:internal_config_base",
@@ -426,6 +423,7 @@ action("resources") {
"../../tools/consarray.js",
"../../tools/profile.js",
"../../tools/profile_view.js",
+ "../../tools/arguments.js",
"../../tools/logreader.js",
"log-eq-of-logging-and-traversal.js",
]
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index f8bf2773cf..a5bb7db64a 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,6 +1,5 @@
-per-file *-mips*=ivica.bogosavljevic@imgtec.com
-per-file *-mips*=Miran.Karic@imgtec.com
-per-file *-mips*=dusan.simicic@imgtec.com
+per-file *-mips*=ivica.bogosavljevic@mips.com
+per-file *-mips*=Miran.Karic@mips.com
per-file *-ppc*=dstence@us.ibm.com
per-file *-ppc*=joransiu@ca.ibm.com
per-file *-ppc*=jyan@ca.ibm.com
diff --git a/deps/v8/test/cctest/assembler-helper-arm.cc b/deps/v8/test/cctest/assembler-helper-arm.cc
new file mode 100644
index 0000000000..73079ed701
--- /dev/null
+++ b/deps/v8/test/cctest/assembler-helper-arm.cc
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/assembler-helper-arm.h"
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Address AssembleCode(std::function<void(Assembler&)> assemble) {
+ Isolate* isolate = CcTest::i_isolate();
+ Assembler assm(isolate, nullptr, 0);
+
+ assemble(assm);
+ assm.bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ if (FLAG_print_code) {
+ code->Print();
+ }
+ return code->entry();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/assembler-helper-arm.h b/deps/v8/test/cctest/assembler-helper-arm.h
new file mode 100644
index 0000000000..dd24087bda
--- /dev/null
+++ b/deps/v8/test/cctest/assembler-helper-arm.h
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_ASSEMBLER_HELPER_ARM_H_
+#define V8_CCTEST_ASSEMBLER_HELPER_ARM_H_
+
+#include <functional>
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// These function prototypes have 5 arguments since they are used with the
+// CALL_GENERATED_CODE macro.
+typedef Object* (*F_iiiii)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F_piiii)(void* p0, int p1, int p2, int p3, int p4);
+typedef Object* (*F_ppiii)(void* p0, void* p1, int p2, int p3, int p4);
+typedef Object* (*F_pppii)(void* p0, void* p1, void* p2, int p3, int p4);
+typedef Object* (*F_ippii)(int p0, void* p1, void* p2, int p3, int p4);
+
+Address AssembleCode(std::function<void(Assembler&)> assemble);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_ASSEMBLER_HELPER_ARM_H_
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 45015a2b90..8d33884b5b 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -47,11 +47,11 @@ enum InitializationState { kUnset, kUninitialized, kInitialized };
static InitializationState initialization_state_ = kUnset;
static bool disable_automatic_dispose_ = false;
-CcTest* CcTest::last_ = NULL;
+CcTest* CcTest::last_ = nullptr;
bool CcTest::initialize_called_ = false;
v8::base::Atomic32 CcTest::isolate_used_ = 0;
-v8::ArrayBuffer::Allocator* CcTest::allocator_ = NULL;
-v8::Isolate* CcTest::isolate_ = NULL;
+v8::ArrayBuffer::Allocator* CcTest::allocator_ = nullptr;
+v8::Isolate* CcTest::isolate_ = nullptr;
CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
bool enabled, bool initialize)
@@ -82,20 +82,30 @@ CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
void CcTest::Run() {
if (!initialize_) {
- CHECK(initialization_state_ != kInitialized);
+ CHECK_NE(initialization_state_, kInitialized);
initialization_state_ = kUninitialized;
- CHECK(CcTest::isolate_ == NULL);
+ CHECK_NULL(CcTest::isolate_);
} else {
- CHECK(initialization_state_ != kUninitialized);
+ CHECK_NE(initialization_state_, kUninitialized);
initialization_state_ = kInitialized;
- if (isolate_ == NULL) {
+ if (isolate_ == nullptr) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator_;
isolate_ = v8::Isolate::New(create_params);
}
isolate_->Enter();
}
+#ifdef DEBUG
+ const size_t active_isolates = i::Isolate::non_disposed_isolates();
+#endif // DEBUG
callback_();
+#ifdef DEBUG
+ // This DCHECK ensures that all Isolates are properly disposed after finishing
+ // the test. Stray Isolates lead to stray tasks in the platform which can
+ // interact weirdly when swapping in new platforms (for testing) or during
+ // shutdown.
+ DCHECK_EQ(active_isolates, i::Isolate::non_disposed_isolates());
+#endif // DEBUG
if (initialize_) {
if (v8::Locker::IsActive()) {
v8::Locker locker(isolate_);
@@ -142,7 +152,7 @@ void CcTest::InitializeVM() {
}
void CcTest::TearDown() {
- if (isolate_ != NULL) isolate_->Dispose();
+ if (isolate_ != nullptr) isolate_->Dispose();
}
v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extensions,
@@ -208,7 +218,7 @@ HandleAndZoneScope::HandleAndZoneScope()
HandleAndZoneScope::~HandleAndZoneScope() {}
static void PrintTestList(CcTest* current) {
- if (current == NULL) return;
+ if (current == nullptr) return;
PrintTestList(current->prev());
printf("%s/%s\n", current->file(), current->name());
}
@@ -253,8 +263,8 @@ int main(int argc, char* argv[]) {
}
v8::V8::InitializeICUDefaultLocation(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
+ v8::V8::InitializePlatform(platform.get());
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv[0]);
@@ -291,7 +301,7 @@ int main(int argc, char* argv[]) {
char* file = arg_copy;
char* name = testname + 1;
CcTest* test = CcTest::last();
- while (test != NULL) {
+ while (test != nullptr) {
if (test->enabled()
&& strcmp(test->file(), file) == 0
&& strcmp(test->name(), name) == 0) {
@@ -305,7 +315,7 @@ int main(int argc, char* argv[]) {
// Run all tests with the specified file or test name.
char* file_or_name = arg_copy;
CcTest* test = CcTest::last();
- while (test != NULL) {
+ while (test != nullptr) {
if (test->enabled()
&& (strcmp(test->file(), file_or_name) == 0
|| strcmp(test->name(), file_or_name) == 0)) {
@@ -324,9 +334,8 @@ int main(int argc, char* argv[]) {
// TODO(svenpanne) See comment above.
// if (!disable_automatic_dispose_) v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
return 0;
}
-RegisterThreadedTest *RegisterThreadedTest::first_ = NULL;
+RegisterThreadedTest* RegisterThreadedTest::first_ = nullptr;
int RegisterThreadedTest::count_ = 0;
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index ffcd865531..92c64b8c87 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -75,6 +75,7 @@
'compiler/test-run-stubs.cc',
'compiler/test-run-variables.cc',
'compiler/test-run-wasm-machops.cc',
+ 'compiler/value-helper.cc',
'compiler/value-helper.h',
'cctest.cc',
'cctest.h',
@@ -217,6 +218,7 @@
'wasm/test-run-wasm-relocation.cc',
'wasm/test-run-wasm-simd.cc',
'wasm/test-wasm-breakpoints.cc',
+ "wasm/test-wasm-codegen.cc",
'wasm/test-wasm-interpreter-entry.cc',
'wasm/test-wasm-stack.cc',
'wasm/test-wasm-trap-position.cc',
@@ -243,6 +245,8 @@
'test-run-wasm-relocation-x64.cc',
],
'cctest_sources_arm': [ ### gcmole(arch:arm) ###
+ 'assembler-helper-arm.cc',
+ 'assembler-helper-arm.h',
'test-assembler-arm.cc',
'test-code-stubs.cc',
'test-code-stubs.h',
@@ -250,7 +254,7 @@
'test-disasm-arm.cc',
'test-macro-assembler-arm.cc',
'test-run-wasm-relocation-arm.cc',
- 'test-simulator-arm.cc',
+ 'test-sync-primitives-arm.cc',
],
'cctest_sources_arm64': [ ### gcmole(arch:arm64) ###
'test-utils-arm64.cc',
@@ -264,7 +268,7 @@
'test-javascript-arm64.cc',
'test-js-arm64-variables.cc',
'test-run-wasm-relocation-arm64.cc',
- 'test-simulator-arm64.cc',
+ 'test-sync-primitives-arm64.cc',
],
'cctest_sources_s390': [ ### gcmole(arch:s390) ###
'test-assembler-s390.cc',
@@ -394,15 +398,7 @@
'<@(cctest_sources_mips64el)',
],
}],
- [ 'OS=="linux" or OS=="qnx"', {
- 'sources': [
- 'test-platform-linux.cc',
- ],
- }],
[ 'OS=="win"', {
- 'sources': [
- 'test-platform-win32.cc',
- ],
'msvs_settings': {
'VCCLCompilerTool': {
# MSVS wants this for gay-{precision,shortest}.cc.
@@ -446,6 +442,7 @@
'../../tools/consarray.js',
'../../tools/profile.js',
'../../tools/profile_view.js',
+ '../../tools/arguments.js',
'../../tools/logreader.js',
'log-eq-of-logging-and-traversal.js',
],
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 155d7393a0..8a7b6d1462 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -32,6 +32,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
+#include "src/assembler.h"
#include "src/debug/debug-interface.h"
#include "src/factory.h"
#include "src/flags.h"
@@ -114,7 +115,7 @@ class CcTest {
bool enabled() { return enabled_; }
static v8::Isolate* isolate() {
- CHECK(isolate_ != NULL);
+ CHECK_NOT_NULL(isolate_);
v8::base::Relaxed_Store(&isolate_used_, 1);
return isolate_;
}
@@ -246,7 +247,7 @@ class RegisterThreadedTest {
public:
explicit RegisterThreadedTest(CcTest::TestFunction* callback,
const char* name)
- : fuzzer_(NULL), callback_(callback), name_(name) {
+ : fuzzer_(nullptr), callback_(callback), name_(name) {
prev_ = first_;
first_ = this;
count_++;
@@ -567,6 +568,19 @@ static inline void CheckDoubleEquals(double expected, double actual) {
CHECK_GE(expected, actual - kEpsilon);
}
+static inline uint8_t* AllocateAssemblerBuffer(
+ size_t* allocated,
+ size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
+ size_t page_size = v8::base::OS::AllocatePageSize();
+ size_t alloc_size = RoundUp(requested, page_size);
+ void* result =
+ v8::base::OS::Allocate(nullptr, alloc_size, page_size,
+ v8::base::OS::MemoryPermission::kReadWriteExecute);
+ CHECK(result);
+ *allocated = alloc_size;
+ return static_cast<uint8_t*>(result);
+}
+
static v8::debug::DebugDelegate dummy_delegate;
static inline void EnableDebugger(v8::Isolate* isolate) {
@@ -632,21 +646,26 @@ class ManualGCScope {
ManualGCScope()
: flag_concurrent_marking_(i::FLAG_concurrent_marking),
flag_concurrent_sweeping_(i::FLAG_concurrent_sweeping),
- flag_stress_incremental_marking_(i::FLAG_stress_incremental_marking) {
+ flag_stress_incremental_marking_(i::FLAG_stress_incremental_marking),
+ flag_parallel_marking_(i::FLAG_parallel_marking) {
i::FLAG_concurrent_marking = false;
i::FLAG_concurrent_sweeping = false;
i::FLAG_stress_incremental_marking = false;
+ // Parallel marking has a dependency on concurrent marking.
+ i::FLAG_parallel_marking = false;
}
~ManualGCScope() {
i::FLAG_concurrent_marking = flag_concurrent_marking_;
i::FLAG_concurrent_sweeping = flag_concurrent_sweeping_;
i::FLAG_stress_incremental_marking = flag_stress_incremental_marking_;
+ i::FLAG_parallel_marking = flag_parallel_marking_;
}
private:
bool flag_concurrent_marking_;
bool flag_concurrent_sweeping_;
bool flag_stress_incremental_marking_;
+ bool flag_parallel_marking_;
};
// This is an abstract base class that can be overridden to implement a test
@@ -659,6 +678,16 @@ class TestPlatform : public v8::Platform {
old_platform_->OnCriticalMemoryPressure();
}
+ std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return old_platform_->GetForegroundTaskRunner(isolate);
+ }
+
+ std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return old_platform_->GetBackgroundTaskRunner(isolate);
+ }
+
void CallOnBackgroundThread(v8::Task* task,
ExpectedRuntime expected_runtime) override {
old_platform_->CallOnBackgroundThread(task, expected_runtime);
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index c82ca85ab8..d3c8a8d393 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -82,6 +82,9 @@
'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
+ # BUG(7054)
+ 'test-cpu-profiler/StaticCollectSampleAPI': [SKIP],
+
# BUG(2340). Preprocessing stack traces is disabled at the moment.
'test-heap/PreprocessStackTrace': [FAIL],
@@ -101,6 +104,8 @@
# Test that serialization with unknown external reference fails.
'test-serialize/SnapshotCreatorUnknownExternalReferences': [FAIL],
+ 'test-serialize/SnapshotCreatorNoExternalReferencesCustomFail1': [FAIL],
+ 'test-serialize/SnapshotCreatorNoExternalReferencesCustomFail2': [FAIL],
############################################################################
# Slow tests.
@@ -171,6 +176,8 @@
['tsan == True', {
# BUG(v8:6133).
'test-cpu-profiler/TickEvents': [SKIP],
+ # BUG(v8:6924). The test allocates a lot of memory.
+ 'test-api/NewStringRangeError': [PASS, NO_VARIANTS],
}], # 'tsan == True'
##############################################################################
@@ -378,4 +385,10 @@
'test-dtoa/*': [SKIP],
}], # variant == wasm_traps
+##############################################################################
+# BUG(v8:7138).
+['arch == arm and not simulator_run and variant == wasm_traps', {
+ '*': [SKIP],
+}], # arch == arm and not simulator_run and variant == wasm_traps
+
]
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 8bfdb72ea1..7a75441ad0 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -6,6 +6,7 @@
#define V8_TEST_CCTEST_COMPILER_CODE_ASSEMBLER_TESTER_H_
#include "src/compiler/code-assembler.h"
+#include "src/compiler/raw-machine-assembler.h"
#include "src/handles.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
@@ -30,15 +31,23 @@ class CodeAssemblerTester {
scope_(isolate),
state_(isolate, &zone_, parameter_count, kind, "test") {}
- // This constructor is intended to be used for creating code objects with
- // specific flags.
CodeAssemblerTester(Isolate* isolate, Code::Kind kind)
: zone_(isolate->allocator(), ZONE_NAME),
scope_(isolate),
state_(isolate, &zone_, 0, kind, "test") {}
+ CodeAssemblerTester(Isolate* isolate, CallDescriptor* call_descriptor)
+ : zone_(isolate->allocator(), ZONE_NAME),
+ scope_(isolate),
+ state_(isolate, &zone_, call_descriptor, Code::STUB, "test", 0, -1) {}
+
CodeAssemblerState* state() { return &state_; }
+ // Direct low-level access to the machine assembler, for testing only.
+ RawMachineAssembler* raw_assembler_for_testing() {
+ return state_.raw_assembler_.get();
+ }
+
Handle<Code> GenerateCode() { return CodeAssembler::GenerateCode(&state_); }
Handle<Code> GenerateCodeCloseAndEscape() {
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index 63b3a3dc64..a3548fe8d0 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -285,7 +285,7 @@ TEST(CompareWrapper) {
void Int32BinopInputShapeTester::TestAllInputShapes() {
- std::vector<int32_t> inputs = ValueHelper::int32_vector();
+ Vector<const int32_t> inputs = ValueHelper::int32_vector();
int num_int_inputs = static_cast<int>(inputs.size());
if (num_int_inputs > 16) num_int_inputs = 16; // limit to 16 inputs
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index ea1eee55f1..c33e7d1ca9 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -68,10 +68,9 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
Schedule* schedule = this->Export();
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone(),
- Code::STUB);
- code_ = Pipeline::GenerateCodeForTesting(&info, call_descriptor, graph,
- schedule);
+ CompilationInfo info(ArrayVector("testing"), main_zone(), Code::STUB);
+ code_ = Pipeline::GenerateCodeForTesting(
+ &info, main_isolate(), call_descriptor, graph, schedule);
}
return this->code_.ToHandleChecked()->entry();
}
@@ -475,7 +474,7 @@ class CompareWrapper {
default:
UNREACHABLE();
}
- return NULL;
+ return nullptr;
}
bool Int32Compare(int32_t a, int32_t b) {
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index facbd8eb55..2455d9f5b9 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -129,7 +129,7 @@ Handle<Object> FunctionTester::false_value() {
Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
int param_count) {
- JSFunction* p = NULL;
+ JSFunction* p = nullptr;
{ // because of the implicit handle scope of FunctionTester.
FunctionTester f(graph, param_count);
p = *f.function;
@@ -152,7 +152,8 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
CHECK(info.shared_info()->HasBytecodeArray());
JSFunction::EnsureLiterals(function);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, function->GetIsolate());
CHECK(!code.is_null());
info.dependencies()->Commit(code);
info.context()->native_context()->AddOptimizedCode(*code);
@@ -168,7 +169,8 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
CompilationInfo info(parse_info.zone(), function->GetIsolate(), shared,
function);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, function->GetIsolate(), graph);
CHECK(!code.is_null());
function->set_code(*code);
return function;
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 0de682e86e..1bf1d40587 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -60,8 +60,8 @@ class GraphBuilderTester : public HandleAndZoneScope,
main_isolate(),
CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1,
p2, p3, p4)),
- effect_(NULL),
- return_(NULL),
+ effect_(nullptr),
+ return_(nullptr),
parameters_(main_zone()->template NewArray<Node*>(parameter_count())) {
Begin(static_cast<int>(parameter_count()));
InitParameters();
@@ -89,7 +89,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
Node* zero = graph()->NewNode(common()->Int32Constant(0));
return_ = graph()->NewNode(common()->Return(), zero, value, effect_,
graph()->start());
- effect_ = NULL;
+ effect_ = nullptr;
}
// Close the graph.
@@ -193,7 +193,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
}
Node* NewNode(const Operator* op) {
- return MakeNode(op, 0, static_cast<Node**>(NULL));
+ return MakeNode(op, 0, static_cast<Node**>(nullptr));
}
Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
@@ -248,7 +248,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
CHECK_LT(op->ControlInputCount(), 2);
CHECK_LT(op->EffectInputCount(), 2);
- Node* result = NULL;
+ Node* result = nullptr;
if (!has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs);
} else {
@@ -280,9 +280,9 @@ class GraphBuilderTester : public HandleAndZoneScope,
Zone* zone = graph()->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
- CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone(),
- Code::STUB);
- code_ = Pipeline::GenerateCodeForTesting(&info, desc, graph());
+ CompilationInfo info(ArrayVector("testing"), main_zone(), Code::STUB);
+ code_ = Pipeline::GenerateCodeForTesting(&info, main_isolate(), desc,
+ graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
OFStream os(stdout);
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 3de36ac986..a131d861f7 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -4,6 +4,7 @@
#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/code-stub-assembler.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator.h"
@@ -13,12 +14,15 @@
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
namespace internal {
namespace compiler {
+#define __ assembler.
+
namespace {
int GetSlotSizeInBytes(MachineRepresentation rep) {
@@ -36,43 +40,313 @@ int GetSlotSizeInBytes(MachineRepresentation rep) {
UNREACHABLE();
}
+// Forward declaration.
+Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
+ std::vector<AllocatedOperand> parameters);
+
+// Build the `setup` function. It takes a code object and a FixedArray as
+// parameters and calls the former while passing it each element of the array as
+// arguments:
+// ~~~
+// FixedArray setup(CodeObject* test, FixedArray state_in) {
+// // `test` will tail-call to its first parameter which will be `teardown`.
+// return test(teardown, state_in[0], state_in[1], state_in[2], ...);
+// }
+// ~~~
+//
+// This function needs to convert each element of the FixedArray to raw unboxed
+// values to pass to the `test` function. The array will have been created using
+// `GenerateInitialState()` and needs to be converted in the following way:
+//
+// | Parameter type | FixedArray element | Conversion |
+// |----------------+--------------------+------------------------------------|
+// | kTagged | Smi | None. |
+// | kFloat32 | HeapNumber | Load value and convert to Float32. |
+// | kFloat64 | HeapNumber | Load value. |
+//
+Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
+ std::vector<AllocatedOperand> parameters) {
+ CodeAssemblerTester tester(isolate, 2);
+ CodeStubAssembler assembler(tester.state());
+ std::vector<Node*> params;
+ // The first parameter is always the callee.
+ params.push_back(__ Parameter(0));
+ params.push_back(
+ __ HeapConstant(BuildTeardownFunction(isolate, descriptor, parameters)));
+ Node* state_in = __ Parameter(1);
+ for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
+ Node* element = __ LoadFixedArrayElement(state_in, __ IntPtrConstant(i));
+ // Unbox all elements before passing them as arguments.
+ switch (parameters[i].representation()) {
+ // Tagged parameters are Smis, they do not need unboxing.
+ case MachineRepresentation::kTagged:
+ break;
+ case MachineRepresentation::kFloat32:
+ element = __ TruncateFloat64ToFloat32(__ LoadHeapNumberValue(element));
+ break;
+ case MachineRepresentation::kFloat64:
+ element = __ LoadHeapNumberValue(element);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ params.push_back(element);
+ }
+ __ Return(tester.raw_assembler_for_testing()->AddNode(
+ tester.raw_assembler_for_testing()->common()->Call(descriptor),
+ static_cast<int>(params.size()), params.data()));
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+// Build the `teardown` function. It allocates and fills a FixedArray with all
+// its parameters. The parameters need to be consistent with `parameters`.
+// ~~~
+// FixedArray teardown(CodeObject* /* unused */,
+// // Tagged registers.
+// Object* r0, Object* r1, ...,
+// // FP registers.
+// Float32 s0, Float64 d1, ...,
+// // Mixed stack slots.
+// Float64 mem0, Object* mem1, Float32 mem2, ...) {
+// return new FixedArray(r0, r1, ..., s0, d1, ..., mem0, mem1, mem2, ...);
+// }
+// ~~~
+//
+// This function needs to convert its parameters into values fit for a
+// FixedArray, essentially reverting what the `setup` function did:
+//
+// | Parameter type | Parameter value | Conversion |
+// |----------------+-------------------+----------------------------|
+// | kTagged | Smi or HeapNumber | None. |
+// | kFloat32 | Raw Float32 | Convert to Float64 and |
+// | | | allocate a new HeapNumber. |
+// | kFloat64 | Raw Float64 | Allocate a new HeapNumber. |
+//
+// Note that it is possible for a `kTagged` value to go from a Smi to a
+// HeapNumber. This is because `AssembleMove` will allocate a new HeapNumber if
+// it is asked to move a FP constant to a tagged register or slot.
+//
+Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
+ std::vector<AllocatedOperand> parameters) {
+ CodeAssemblerTester tester(isolate, descriptor);
+ CodeStubAssembler assembler(tester.state());
+ Node* result_array = __ AllocateFixedArray(
+ PACKED_ELEMENTS, __ IntPtrConstant(parameters.size()));
+ for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
+ // The first argument is not used.
+ Node* param = __ Parameter(i + 1);
+ switch (parameters[i].representation()) {
+ case MachineRepresentation::kTagged:
+ break;
+ // Box FP values into HeapNumbers.
+ case MachineRepresentation::kFloat32:
+ param =
+ tester.raw_assembler_for_testing()->ChangeFloat32ToFloat64(param);
+ // Fallthrough
+ case MachineRepresentation::kFloat64:
+ param = __ AllocateHeapNumberWithValue(param);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ StoreFixedArrayElement(result_array, i, param);
+ }
+ __ Return(result_array);
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+// Print the content of `value`, representing the register or stack slot
+// described by `operand`.
+void PrintStateValue(std::ostream& os, Handle<Object> value,
+ AllocatedOperand operand) {
+ switch (operand.representation()) {
+ case MachineRepresentation::kTagged:
+ if (value->IsSmi()) {
+ os << Smi::cast(*value)->value();
+ } else {
+ os << value->Number();
+ }
+ break;
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ os << value->Number();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ os << " (" << operand.representation() << " ";
+ if (operand.location_kind() == AllocatedOperand::REGISTER) {
+ os << "register";
+ } else {
+ DCHECK_EQ(operand.location_kind(), AllocatedOperand::STACK_SLOT);
+ os << "stack slot";
+ }
+ os << ")";
+}
+
} // namespace
-// Wrapper around the CodeGenerator with the ability to randomly generate moves
-// and swaps which can then be executed. The `slots` map represents how many
-// slots should be allocated per representation. Parallel moves will then be
-// generated by randomly picking slots. Constants can be provided so that
-// parallel moves may use them.
+#undef __
+
+// Representation of a test environment. It describes a set of registers, stack
+// slots and constants available to the CodeGeneratorTester to perform moves
+// with. It has the ability to randomly generate lists of moves and run the code
+// generated by the CodeGeneratorTester.
//
// At the moment, only the following representations are tested:
// - kTagged
// - kFloat32
// - kFloat64
-// - kSimd128
+// - TODO(planglois): Add support for kSimd128.
// There is no need to test using Word32 or Word64 as they are the same as
// Tagged as far as the code generator is concerned.
-class CodeGeneratorTester : public HandleAndZoneScope {
+//
+// Testing the generated code is achieved by wrapping it around `setup` and
+// `teardown` functions, written using the CodeStubAssembler. The key idea here
+// is that `teardown` and the generated code share the same custom
+// CallDescriptor. This descriptor assigns parameters to either registers or
+// stack slot of a given representation and therefore essentially describes the
+// environment.
+//
+// What happens is the following:
+//
+// - The `setup` function receives a FixedArray as the initial state. It
+// unpacks it and passes each element as arguments to the generated code
+// `test`. We also pass the `teardown` function as a first argument. Thanks
+// to the custom CallDescriptor, registers and stack slots get initialised
+// according to the content of the FixedArray.
+//
+// - The `test` function performs the list of moves on its parameters and
+// eventually tail-calls to its first parameter, which is the `teardown`
+// function.
+//
+// - The `teardown` function allocates a new FixedArray and fills it with all
+// its parameters. Thanks to the tail-call, this is as if the `setup`
+// function called `teardown` directly, except now moves were performed!
+//
+// .----------------setup--------------------------.
+// | Take a FixedArray as parameters with |
+// | all the initial values of registers |
+// | and stack slots. | <- CodeStubAssembler
+// | |
+// | Call test(teardown, state[0], state[1], ...); |
+// '-----------------------------------------------'
+// |
+// V
+// .----------------test-----------------------------.
+// | - Move(param3, param42); |
+// | - Swap(param64, param1); |
+// | - Move(param2, param6); | <- CodeGeneratorTester
+// | ... |
+// | |
+// | // "teardown" is the first parameter as well as |
+// | // the callee. |
+// | TailCall param0(param0, param1, param2, ...); |
+// '-------------------------------------------------'
+// |
+// V
+// .----------------teardown--------------.
+// | Create a FixedArray with all |
+// | parameters and return it. | <- CodeStubAssembler
+// '--------------------------------------'
+
+class TestEnvironment : public HandleAndZoneScope {
public:
- CodeGeneratorTester(std::map<MachineRepresentation, int> slots =
- std::map<MachineRepresentation, int>{},
- std::initializer_list<Constant> constants = {})
- : info_(ArrayVector("test"), main_isolate(), main_zone(), Code::STUB),
- descriptor_(Linkage::GetStubCallDescriptor(
- main_isolate(), main_zone(), VoidDescriptor(main_isolate()), 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), 0)),
- linkage_(descriptor_),
- blocks_(main_zone()),
- sequence_(main_isolate(), main_zone(), &blocks_),
+ // These constants may be tuned to experiment with different environments.
+
+ static constexpr int kGeneralRegisterCount = 4;
+ static constexpr int kDoubleRegisterCount = 6;
+
+ static constexpr int kTaggedSlotCount = 64;
+ static constexpr int kFloat32SlotCount = 64;
+ static constexpr int kFloat64SlotCount = 64;
+ static constexpr int kStackParameterCount =
+ kTaggedSlotCount + kFloat32SlotCount + kFloat64SlotCount;
+
+ // TODO(all): Test all types of constants (e.g. ExternalReference and
+ // HeapObject).
+ static constexpr int kSmiConstantCount = 4;
+ static constexpr int kFloatConstantCount = 4;
+ static constexpr int kDoubleConstantCount = 4;
+
+ TestEnvironment()
+ : blocks_(main_zone()),
+ code_(main_isolate(), main_zone(), &blocks_),
rng_(CcTest::random_number_generator()),
- frame_(descriptor_->CalculateFixedFrameSize()),
- generator_(main_zone(), &frame_, &linkage_, &sequence_, &info_,
- base::Optional<OsrHelper>(), kNoSourcePosition, nullptr) {
- // Keep track of all supported representations depending on what kind of
- // stack slots are supported.
- for (const auto& slot : slots) {
- supported_reps_.push_back(slot.first);
+ // TODO(planglois): Support kSimd128.
+ supported_reps_({MachineRepresentation::kTagged,
+ MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64}) {
+ // The "teardown" and "test" functions share the same descriptor with the
+ // following signature:
+ // ~~~
+ // FixedArray f(CodeObject* teardown,
+ // // Tagged registers.
+ // Object*, Object*, ...,
+ // // FP registers.
+ // Float32, Float64, ...,
+ // // Mixed stack slots.
+ // Float64, Object*, Float32, ...);
+ // ~~~
+ LocationSignature::Builder test_signature(main_zone(), 1,
+ 1 + kGeneralRegisterCount +
+ kDoubleRegisterCount +
+ kStackParameterCount);
+
+ // The first parameter will be the code object of the "teardown"
+ // function. This way, the "test" function can tail-call to it.
+ test_signature.AddParam(LinkageLocation::ForRegister(
+ kReturnRegister0.code(), MachineType::AnyTagged()));
+
+ // Initialise registers.
+
+ int32_t general_mask =
+ RegisterConfiguration::Default()->allocatable_general_codes_mask();
+ // kReturnRegister0 is used to hold the "teardown" code object, do not
+ // generate moves using it.
+ std::unique_ptr<const RegisterConfiguration> registers(
+ RegisterConfiguration::RestrictGeneralRegisters(
+ general_mask & ~(1 << kReturnRegister0.code())));
+
+ for (int i = 0; i < kGeneralRegisterCount; i++) {
+ int code = registers->GetAllocatableGeneralCode(i);
+ AddRegister(&test_signature, MachineRepresentation::kTagged, code);
+ }
+ // We assume that Double and Float registers alias, depending on
+ // kSimpleFPAliasing. For this reason, we allocate a Float and a Double in
+ // pairs.
+ static_assert((kDoubleRegisterCount % 2) == 0,
+ "kDoubleRegisterCount should be a multiple of two.");
+ for (int i = 0; i < kDoubleRegisterCount; i += 2) {
+ // Make sure we do not allocate FP registers which alias. We double the
+ // index for Float registers if the aliasing is not "Simple":
+ // Simple -> s0, d1, s2, d3, s4, d5, ...
+ // Arm32-style -> s0, d1, s4, d3, s8, d5, ...
+ // This isn't space-efficient at all but suits our need.
+ static_assert(kDoubleRegisterCount < 16,
+ "Arm has a d16 register but no overlapping s32 register.");
+ int float_code =
+ registers->GetAllocatableFloatCode(kSimpleFPAliasing ? i : i * 2);
+ int double_code = registers->GetAllocatableDoubleCode(i + 1);
+ AddRegister(&test_signature, MachineRepresentation::kFloat32, float_code);
+ AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ double_code);
}
+
+ // Initialise stack slots.
+
+ // Stack parameters start at -1.
+ int slot_parameter_n = -1;
+
+ // TODO(planglois): Support kSimd128 stack slots.
+ std::map<MachineRepresentation, int> slots = {
+ {MachineRepresentation::kTagged, kTaggedSlotCount},
+ {MachineRepresentation::kFloat32, kFloat32SlotCount},
+ {MachineRepresentation::kFloat64, kFloat64SlotCount}};
+
// Allocate new slots until we run out of them.
while (std::any_of(slots.cbegin(), slots.cend(),
[](const std::pair<MachineRepresentation, int>& entry) {
@@ -89,75 +363,264 @@ class CodeGeneratorTester : public HandleAndZoneScope {
if (entry->second > 0) {
// Keep a map of (MachineRepresentation . std::vector<int>) with
// allocated slots to pick from for each representation.
- RegisterSlot(rep, frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
+ int slot = slot_parameter_n;
+ slot_parameter_n -= (GetSlotSizeInBytes(rep) / kPointerSize);
+ AddStackSlot(&test_signature, rep, slot);
entry->second--;
}
}
- for (auto constant : constants) {
- int virtual_register = AllocateConstant(constant);
- // Associate constants with their compatible representations.
- // TODO(all): Test all types of constants.
- switch (constant.type()) {
- // Integer constants are always moved to a tagged location, whatever
- // their sizes.
- case Constant::kInt32:
- case Constant::kInt64:
- RegisterConstant(MachineRepresentation::kTagged, virtual_register);
- break;
- // FP constants may be moved to a tagged location using a heap number,
- // or directly to a location of the same size.
- case Constant::kFloat32:
- RegisterConstant(MachineRepresentation::kTagged, virtual_register);
- RegisterConstant(MachineRepresentation::kFloat32, virtual_register);
- break;
- case Constant::kFloat64:
- RegisterConstant(MachineRepresentation::kTagged, virtual_register);
- RegisterConstant(MachineRepresentation::kFloat64, virtual_register);
- break;
- default:
- break;
- }
+
+ // Initialise random constants.
+
+ // While constants do not know about Smis, we need to be able to
+ // differentiate between a pointer to a HeapNumber and a integer. For this
+ // reason, we make sure all integers are Smis, including constants.
+ for (int i = 0; i < kSmiConstantCount; i++) {
+ intptr_t smi_value = reinterpret_cast<intptr_t>(
+ Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
+ Constant constant = kPointerSize == 8
+ ? Constant(static_cast<int64_t>(smi_value))
+ : Constant(static_cast<int32_t>(smi_value));
+ AddConstant(MachineRepresentation::kTagged, AllocateConstant(constant));
}
- // Force a frame to be created.
- generator_.frame_access_state()->MarkHasFrame(true);
- generator_.AssembleConstructFrame();
- // TODO(all): Generate a stack check here so that we fail gracefully if the
- // frame is too big.
+ // Float and Double constants can be moved to both Tagged and FP registers
+ // or slots. Register them as compatible with both FP and Tagged
+ // destinations.
+ for (int i = 0; i < kFloatConstantCount; i++) {
+ int virtual_register =
+ AllocateConstant(Constant(DoubleToFloat32(rng_->NextDouble())));
+ AddConstant(MachineRepresentation::kTagged, virtual_register);
+ AddConstant(MachineRepresentation::kFloat32, virtual_register);
+ }
+ for (int i = 0; i < kDoubleConstantCount; i++) {
+ int virtual_register = AllocateConstant(Constant(rng_->NextDouble()));
+ AddConstant(MachineRepresentation::kTagged, virtual_register);
+ AddConstant(MachineRepresentation::kFloat64, virtual_register);
+ }
+
+ // The "teardown" function returns a FixedArray with the resulting state.
+ test_signature.AddReturn(LinkageLocation::ForRegister(
+ kReturnRegister0.code(), MachineType::AnyTagged()));
+
+ test_descriptor_ = new (main_zone())
+ CallDescriptor(CallDescriptor::kCallCodeObject, // kind
+ MachineType::AnyTagged(), // target MachineType
+ LinkageLocation::ForAnyRegister(
+ MachineType::AnyTagged()), // target location
+ test_signature.Build(), // location_sig
+ kStackParameterCount, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kNoFlags); // flags
}
int AllocateConstant(Constant constant) {
- int virtual_register = sequence_.NextVirtualRegister();
- sequence_.AddConstant(virtual_register, constant);
+ int virtual_register = code_.NextVirtualRegister();
+ code_.AddConstant(virtual_register, constant);
return virtual_register;
}
// Register a constant referenced by `virtual_register` as compatible with
// `rep`.
- void RegisterConstant(MachineRepresentation rep, int virtual_register) {
- auto entry = constants_.find(rep);
- if (entry == constants_.end()) {
- std::vector<int> vregs = {virtual_register};
- constants_.emplace(rep, vregs);
+ void AddConstant(MachineRepresentation rep, int virtual_register) {
+ auto entry = allocated_constants_.find(rep);
+ if (entry == allocated_constants_.end()) {
+ allocated_constants_.emplace(
+ rep, std::vector<ConstantOperand>{ConstantOperand(virtual_register)});
+ } else {
+ entry->second.emplace_back(virtual_register);
+ }
+ }
+
+ // Register a new register or stack slot as compatible with `rep`. As opposed
+ // to constants, registers and stack slots are written to on `setup` and read
+ // from on `teardown`. Therefore they are part of the environment's layout,
+ // and are parameters of the `test` function.
+
+ void AddRegister(LocationSignature::Builder* test_signature,
+ MachineRepresentation rep, int code) {
+ AllocatedOperand operand(AllocatedOperand::REGISTER, rep, code);
+ layout_.push_back(operand);
+ test_signature->AddParam(LinkageLocation::ForRegister(
+ code, MachineType::TypeForRepresentation(rep)));
+ auto entry = allocated_registers_.find(rep);
+ if (entry == allocated_registers_.end()) {
+ allocated_registers_.emplace(rep, std::vector<AllocatedOperand>{operand});
} else {
- entry->second.push_back(virtual_register);
+ entry->second.push_back(operand);
}
}
- void RegisterSlot(MachineRepresentation rep, int slot) {
+ void AddStackSlot(LocationSignature::Builder* test_signature,
+ MachineRepresentation rep, int slot) {
+ AllocatedOperand operand(AllocatedOperand::STACK_SLOT, rep, slot);
+ layout_.push_back(operand);
+ test_signature->AddParam(LinkageLocation::ForCallerFrameSlot(
+ slot, MachineType::TypeForRepresentation(rep)));
auto entry = allocated_slots_.find(rep);
if (entry == allocated_slots_.end()) {
- std::vector<int> slots = {slot};
- allocated_slots_.emplace(rep, slots);
+ allocated_slots_.emplace(rep, std::vector<AllocatedOperand>{operand});
} else {
- entry->second.push_back(slot);
+ entry->second.push_back(operand);
}
}
- enum PushTypeFlag {
- kRegisterPush = CodeGenerator::kRegisterPush,
- kStackSlotPush = CodeGenerator::kStackSlotPush,
- kScalarPush = CodeGenerator::kScalarPush
- };
+ // Generate a random initial state to test moves against. A "state" is a
+ // packed FixedArray with Smis and HeapNumbers, according to the layout of the
+ // environment.
+ Handle<FixedArray> GenerateInitialState() {
+ Handle<FixedArray> state = main_isolate()->factory()->NewFixedArray(
+ static_cast<int>(layout_.size()));
+ for (int i = 0; i < state->length(); i++) {
+ switch (layout_[i].representation()) {
+ case MachineRepresentation::kTagged:
+ state->set(i, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
+ break;
+ case MachineRepresentation::kFloat32:
+ // HeapNumbers are Float64 values. However, we will convert it to a
+ // Float32 and back inside `setup` and `teardown`. Make sure the value
+ // we pick fits in a Float32.
+ state->set(
+ i, *main_isolate()->factory()->NewHeapNumber(
+ static_cast<double>(DoubleToFloat32(rng_->NextDouble()))));
+ break;
+ case MachineRepresentation::kFloat64:
+ state->set(
+ i, *main_isolate()->factory()->NewHeapNumber(rng_->NextDouble()));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return state;
+ }
+
+ // Run the code generated by a CodeGeneratorTester against `state_in` and
+ // return a new resulting state.
+ Handle<FixedArray> Run(Handle<Code> test, Handle<FixedArray> state_in) {
+ Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
+ static_cast<int>(layout_.size()));
+ {
+ Handle<Code> setup =
+ BuildSetupFunction(main_isolate(), test_descriptor_, layout_);
+ // FunctionTester maintains its own HandleScope which means that its
+ // return value will be freed along with it. Copy the result into
+ // state_out.
+ FunctionTester ft(setup, 2);
+ Handle<FixedArray> result = ft.CallChecked<FixedArray>(test, state_in);
+ CHECK_EQ(result->length(), state_in->length());
+ result->CopyTo(0, *state_out, 0, result->length());
+ }
+ return state_out;
+ }
+
+ // For a given operand representing either a register or a stack slot, return
+ // what position it should live in inside a FixedArray state.
+ int OperandToStatePosition(const AllocatedOperand& operand) const {
+ // Search `layout_` for `operand`.
+ auto it = std::find_if(layout_.cbegin(), layout_.cend(),
+ [operand](const AllocatedOperand& this_operand) {
+ return this_operand.Equals(operand);
+ });
+ DCHECK_NE(it, layout_.cend());
+ return static_cast<int>(std::distance(layout_.cbegin(), it));
+ }
+
+ // Perform the given list of moves on `state_in` and return a newly allocated
+ // state with the results.
+ Handle<FixedArray> SimulateMoves(ParallelMove* moves,
+ Handle<FixedArray> state_in) {
+ Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
+ static_cast<int>(layout_.size()));
+ // We do not want to modify `state_in` in place so perform the moves on a
+ // copy.
+ state_in->CopyTo(0, *state_out, 0, state_in->length());
+ for (auto move : *moves) {
+ int to_index =
+ OperandToStatePosition(AllocatedOperand::cast(move->destination()));
+ InstructionOperand from = move->source();
+ if (from.IsConstant()) {
+ Constant constant =
+ code_.GetConstant(ConstantOperand::cast(from).virtual_register());
+ Handle<Object> constant_value;
+ switch (constant.type()) {
+ case Constant::kInt32:
+ constant_value =
+ Handle<Smi>(reinterpret_cast<Smi*>(
+ static_cast<intptr_t>(constant.ToInt32())),
+ main_isolate());
+ break;
+ case Constant::kInt64:
+ constant_value =
+ Handle<Smi>(reinterpret_cast<Smi*>(
+ static_cast<intptr_t>(constant.ToInt64())),
+ main_isolate());
+ break;
+ case Constant::kFloat32:
+ constant_value = main_isolate()->factory()->NewHeapNumber(
+ static_cast<double>(constant.ToFloat32()));
+ break;
+ case Constant::kFloat64:
+ constant_value = main_isolate()->factory()->NewHeapNumber(
+ constant.ToFloat64().value());
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ state_out->set(to_index, *constant_value);
+ } else {
+ int from_index = OperandToStatePosition(AllocatedOperand::cast(from));
+ state_out->set(to_index, *state_out->GetValueChecked<Object>(
+ main_isolate(), from_index));
+ }
+ }
+ return state_out;
+ }
+
+ // Perform the given list of swaps on `state_in` and return a newly allocated
+ // state with the results.
+ Handle<FixedArray> SimulateSwaps(ParallelMove* swaps,
+ Handle<FixedArray> state_in) {
+ Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
+ static_cast<int>(layout_.size()));
+ // We do not want to modify `state_in` in place so perform the swaps on a
+ // copy.
+ state_in->CopyTo(0, *state_out, 0, state_in->length());
+ for (auto swap : *swaps) {
+ int lhs_index =
+ OperandToStatePosition(AllocatedOperand::cast(swap->destination()));
+ int rhs_index =
+ OperandToStatePosition(AllocatedOperand::cast(swap->source()));
+ Handle<Object> lhs =
+ state_out->GetValueChecked<Object>(main_isolate(), lhs_index);
+ Handle<Object> rhs =
+ state_out->GetValueChecked<Object>(main_isolate(), rhs_index);
+ state_out->set(lhs_index, *rhs);
+ state_out->set(rhs_index, *lhs);
+ }
+ return state_out;
+ }
+
+ // Compare the given state with a reference.
+ void CheckState(Handle<FixedArray> actual, Handle<FixedArray> expected) {
+ for (int i = 0; i < static_cast<int>(layout_.size()); i++) {
+ Handle<Object> actual_value =
+ actual->GetValueChecked<Object>(main_isolate(), i);
+ Handle<Object> expected_value =
+ expected->GetValueChecked<Object>(main_isolate(), i);
+ if (!actual_value->StrictEquals(*expected_value)) {
+ std::ostringstream expected_str;
+ PrintStateValue(expected_str, expected_value, layout_[i]);
+ std::ostringstream actual_str;
+ PrintStateValue(actual_str, actual_value, layout_[i]);
+ V8_Fatal(__FILE__, __LINE__, "Expected: '%s' but got '%s'",
+ expected_str.str().c_str(), actual_str.str().c_str());
+ }
+ }
+ }
enum OperandConstraint {
kNone,
@@ -219,8 +682,9 @@ class CodeGeneratorTester : public HandleAndZoneScope {
MachineRepresentation rep) {
// Only generate a Constant if the operand is a source and we have a
// constant with a compatible representation in stock.
- bool generate_constant = (constraint != kCannotBeConstant) &&
- (constants_.find(rep) != constants_.end());
+ bool generate_constant =
+ (constraint != kCannotBeConstant) &&
+ (allocated_constants_.find(rep) != allocated_constants_.end());
switch (rng_->NextInt(generate_constant ? 3 : 2)) {
case 0:
return CreateRandomStackSlotOperand(rep);
@@ -232,51 +696,88 @@ class CodeGeneratorTester : public HandleAndZoneScope {
UNREACHABLE();
}
- InstructionOperand CreateRandomRegisterOperand(MachineRepresentation rep) {
- int code;
- const RegisterConfiguration* conf = RegisterConfiguration::Default();
- switch (rep) {
- case MachineRepresentation::kFloat32: {
- int index = rng_->NextInt(conf->num_allocatable_float_registers());
- code = conf->RegisterConfiguration::GetAllocatableFloatCode(index);
- break;
- }
- case MachineRepresentation::kFloat64: {
- int index = rng_->NextInt(conf->num_allocatable_double_registers());
- code = conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
- break;
- }
- case MachineRepresentation::kSimd128: {
- int index = rng_->NextInt(conf->num_allocatable_simd128_registers());
- code = conf->RegisterConfiguration::GetAllocatableSimd128Code(index);
- break;
- }
- case MachineRepresentation::kTagged: {
- // Pick an allocatable register that is not the return register.
- do {
- int index = rng_->NextInt(conf->num_allocatable_general_registers());
- code = conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
- } while (code == kReturnRegister0.code());
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return AllocatedOperand(LocationOperand::REGISTER, rep, code);
+ AllocatedOperand CreateRandomRegisterOperand(MachineRepresentation rep) {
+ int index =
+ rng_->NextInt(static_cast<int>(allocated_registers_[rep].size()));
+ return allocated_registers_[rep][index];
}
- InstructionOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
+ AllocatedOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
int index = rng_->NextInt(static_cast<int>(allocated_slots_[rep].size()));
- return AllocatedOperand(LocationOperand::STACK_SLOT, rep,
- allocated_slots_[rep][index]);
+ return allocated_slots_[rep][index];
}
- InstructionOperand CreateRandomConstant(MachineRepresentation rep) {
- int index = rng_->NextInt(static_cast<int>(constants_[rep].size()));
- return ConstantOperand(constants_[rep][index]);
+ ConstantOperand CreateRandomConstant(MachineRepresentation rep) {
+ int index =
+ rng_->NextInt(static_cast<int>(allocated_constants_[rep].size()));
+ return allocated_constants_[rep][index];
+ }
+
+ v8::base::RandomNumberGenerator* rng() const { return rng_; }
+ InstructionSequence* code() { return &code_; }
+ CallDescriptor* test_descriptor() { return test_descriptor_; }
+
+ private:
+ ZoneVector<InstructionBlock*> blocks_;
+ InstructionSequence code_;
+ v8::base::RandomNumberGenerator* rng_;
+ // The layout describes the type of each element in the environment, in order.
+ std::vector<AllocatedOperand> layout_;
+ CallDescriptor* test_descriptor_;
+ // Allocated constants, registers and stack slots that we can generate moves
+ // with. Each per compatible representation.
+ std::vector<MachineRepresentation> supported_reps_;
+ std::map<MachineRepresentation, std::vector<ConstantOperand>>
+ allocated_constants_;
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>
+ allocated_registers_;
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>
+ allocated_slots_;
+};
+
+// static
+constexpr int TestEnvironment::kGeneralRegisterCount;
+constexpr int TestEnvironment::kDoubleRegisterCount;
+constexpr int TestEnvironment::kTaggedSlotCount;
+constexpr int TestEnvironment::kFloat32SlotCount;
+constexpr int TestEnvironment::kFloat64SlotCount;
+constexpr int TestEnvironment::kStackParameterCount;
+constexpr int TestEnvironment::kSmiConstantCount;
+constexpr int TestEnvironment::kFloatConstantCount;
+constexpr int TestEnvironment::kDoubleConstantCount;
+
+// Wrapper around the CodeGenerator. Code generated by this can only be called
+// using the given `TestEnvironment`.
+//
+// TODO(planglois): We execute moves on stack parameters only which restricts
+// ourselves to small positive offsets relative to the frame pointer. We should
+// test large and negative offsets too. A way to do this would be to move some
+// stack parameters to local spill slots and create artificial stack space
+// between them.
+class CodeGeneratorTester {
+ public:
+ explicit CodeGeneratorTester(TestEnvironment* environment)
+ : zone_(environment->main_zone()),
+ info_(ArrayVector("test"), environment->main_zone(), Code::STUB),
+ linkage_(environment->test_descriptor()),
+ frame_(environment->test_descriptor()->CalculateFixedFrameSize()),
+ generator_(environment->main_zone(), &frame_, &linkage_,
+ environment->code(), &info_, environment->main_isolate(),
+ base::Optional<OsrHelper>(), kNoSourcePosition, nullptr,
+ nullptr) {
+ // Force a frame to be created.
+ generator_.frame_access_state()->MarkHasFrame(true);
+ generator_.AssembleConstructFrame();
+ // TODO(all): Generate a stack check here so that we fail gracefully if the
+ // frame is too big.
}
+ enum PushTypeFlag {
+ kRegisterPush = CodeGenerator::kRegisterPush,
+ kStackSlotPush = CodeGenerator::kStackSlotPush,
+ kScalarPush = CodeGenerator::kScalarPush
+ };
+
void CheckAssembleTailCallGaps(Instruction* instr,
int first_unused_stack_slot,
CodeGeneratorTester::PushTypeFlag push_type) {
@@ -318,8 +819,27 @@ class CodeGeneratorTester : public HandleAndZoneScope {
}
Handle<Code> Finalize() {
- InstructionOperand zero = ImmediateOperand(ImmediateOperand::INLINE, 0);
- generator_.AssembleReturn(&zero);
+ // The environment expects this code to tail-call to it's first parameter
+ // placed in `kReturnRegister0`.
+ generator_.AssembleArchInstruction(
+ Instruction::New(zone_, kArchPrepareTailCall));
+
+ InstructionOperand callee[] = {
+ AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ kReturnRegister0.code()),
+ ImmediateOperand(
+ ImmediateOperand::INLINE,
+ V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0)};
+ Instruction* tail_call = Instruction::New(zone_, kArchTailCallCodeObject, 0,
+ nullptr, 2, callee, 0, nullptr);
+ int first_unused_stack_slot;
+ if (generator_.GetSlotAboveSPBeforeTailCall(tail_call,
+ &first_unused_stack_slot)) {
+ generator_.AssembleTailCallBeforeGap(tail_call, first_unused_stack_slot);
+ generator_.AssembleTailCallAfterGap(tail_call, first_unused_stack_slot);
+ }
+ generator_.AssembleArchInstruction(tail_call);
generator_.FinishCode();
generator_.safepoints()->Emit(generator_.tasm(),
@@ -327,143 +847,114 @@ class CodeGeneratorTester : public HandleAndZoneScope {
return generator_.FinalizeCode();
}
- void Disassemble() {
- HandleScope scope(main_isolate());
- Handle<Code> code = Finalize();
- if (FLAG_print_code) {
- code->Print();
- }
- }
-
- void Run() {
- HandleScope scope(main_isolate());
- Handle<Code> code = Finalize();
- if (FLAG_print_code) {
- code->Print();
- }
- FunctionTester ft(code);
- ft.Call();
- }
-
- v8::base::RandomNumberGenerator* rng() const { return rng_; }
-
private:
+ Zone* zone_;
CompilationInfo info_;
- CallDescriptor* descriptor_;
Linkage linkage_;
- ZoneVector<InstructionBlock*> blocks_;
- InstructionSequence sequence_;
- std::vector<MachineRepresentation> supported_reps_;
- std::map<MachineRepresentation, std::vector<int>> allocated_slots_;
- std::map<MachineRepresentation, std::vector<int>> constants_;
- v8::base::RandomNumberGenerator* rng_;
Frame frame_;
CodeGenerator generator_;
};
// The following fuzz tests will assemble a lot of moves, wrap them in
-// executable native code and run them. At this time, we only check that
-// something is actually generated, and that it runs on hardware or the
-// simulator.
-
-// TODO(all): It would be great to record the data on the stack after all moves
-// are executed so that we could test the functionality in an architecture
-// independent way. We would also have to make sure we generate moves compatible
-// with each other as the gap-resolver tests do.
+// executable native code and run them. In order to check that moves were
+// performed correctly, we need to setup an environment with an initial state
+// and get it back after the list of moves were performed.
+//
+// We have two components to do this: TestEnvironment and CodeGeneratorTester.
+//
+// The TestEnvironment is in charge of bringing up an environment consisting of
+// a set of registers, stack slots and constants, with initial values in
+// them. The CodeGeneratorTester is a wrapper around the CodeGenerator and its
+// only purpose is to generate code for a list of moves. The TestEnvironment is
+// then able to run this code against the environment and return a resulting
+// state.
+//
+// A "state" here is a packed FixedArray with tagged values which can either be
+// Smis or HeapNumbers. When calling TestEnvironment::Run(...), registers and
+// stack slots will be initialised according to this FixedArray. A new
+// FixedArray is returned containing values that were moved by the generated
+// code.
+//
+// And finally, we are able to compare the resulting FixedArray against a
+// reference, computed with a simulation of AssembleMove and AssembleSwap. See
+// SimulateMoves and SimulateSwaps.
TEST(FuzzAssembleMove) {
- // Test small and potentially large ranges separately. Note that the number of
- // slots affects how much stack is allocated when running the generated code.
- // This means we have to be careful not to exceed the stack limit, which is
- // lower on Windows.
- for (auto n : {64, 500}) {
- std::map<MachineRepresentation, int> slots = {
- {MachineRepresentation::kTagged, n},
- {MachineRepresentation::kFloat32, n},
- {MachineRepresentation::kFloat64, n}};
- if (CpuFeatures::SupportsWasmSimd128()) {
- // Generate fewer 128-bit slots.
- slots.emplace(MachineRepresentation::kSimd128, n / 4);
- }
- CodeGeneratorTester c(
- slots,
- {Constant(0), Constant(1), Constant(2), Constant(3), Constant(4),
- Constant(5), Constant(6), Constant(7),
- Constant(static_cast<float>(0.1)), Constant(static_cast<float>(0.2)),
- Constant(static_cast<float>(0.3)), Constant(static_cast<float>(0.4)),
- Constant(static_cast<double>(0.5)), Constant(static_cast<double>(0.6)),
- Constant(static_cast<double>(0.7)),
- Constant(static_cast<double>(0.8))});
- ParallelMove* moves = c.GenerateRandomMoves(1000);
- for (const auto m : *moves) {
- c.CheckAssembleMove(&m->source(), &m->destination());
- }
- c.Run();
+ TestEnvironment env;
+ CodeGeneratorTester c(&env);
+
+ Handle<FixedArray> state_in = env.GenerateInitialState();
+ ParallelMove* moves = env.GenerateRandomMoves(1000);
+
+ for (auto m : *moves) {
+ c.CheckAssembleMove(&m->source(), &m->destination());
+ }
+
+ Handle<Code> test = c.Finalize();
+ if (FLAG_print_code) {
+ test->Print();
}
+
+ Handle<FixedArray> actual = env.Run(test, state_in);
+ Handle<FixedArray> expected = env.SimulateMoves(moves, state_in);
+ env.CheckState(actual, expected);
}
TEST(FuzzAssembleSwap) {
- // Test small and potentially large ranges separately. Note that the number of
- // slots affects how much stack is allocated when running the generated code.
- // This means we have to be careful not to exceed the stack limit, which is
- // lower on Windows.
- for (auto n : {64, 500}) {
- std::map<MachineRepresentation, int> slots = {
- {MachineRepresentation::kTagged, n},
- {MachineRepresentation::kFloat32, n},
- {MachineRepresentation::kFloat64, n}};
- if (CpuFeatures::SupportsWasmSimd128()) {
- // Generate fewer 128-bit slots.
- slots.emplace(MachineRepresentation::kSimd128, n / 4);
- }
- CodeGeneratorTester c(slots);
- ParallelMove* moves = c.GenerateRandomSwaps(1000);
- for (const auto m : *moves) {
- c.CheckAssembleSwap(&m->source(), &m->destination());
- }
- c.Run();
+ TestEnvironment env;
+ CodeGeneratorTester c(&env);
+
+ Handle<FixedArray> state_in = env.GenerateInitialState();
+ ParallelMove* swaps = env.GenerateRandomSwaps(1000);
+
+ for (auto s : *swaps) {
+ c.CheckAssembleSwap(&s->source(), &s->destination());
}
+
+ Handle<Code> test = c.Finalize();
+ if (FLAG_print_code) {
+ test->Print();
+ }
+
+ Handle<FixedArray> actual = env.Run(test, state_in);
+ Handle<FixedArray> expected = env.SimulateSwaps(swaps, state_in);
+ env.CheckState(actual, expected);
}
TEST(FuzzAssembleMoveAndSwap) {
- // Test small and potentially large ranges separately. Note that the number of
- // slots affects how much stack is allocated when running the generated code.
- // This means we have to be careful not to exceed the stack limit, which is
- // lower on Windows.
- for (auto n : {64, 500}) {
- std::map<MachineRepresentation, int> slots = {
- {MachineRepresentation::kTagged, n},
- {MachineRepresentation::kFloat32, n},
- {MachineRepresentation::kFloat64, n}};
- if (CpuFeatures::SupportsWasmSimd128()) {
- // Generate fewer 128-bit slots.
- slots.emplace(MachineRepresentation::kSimd128, n / 4);
- }
- CodeGeneratorTester c(
- slots,
- {Constant(0), Constant(1), Constant(2), Constant(3), Constant(4),
- Constant(5), Constant(6), Constant(7),
- Constant(static_cast<float>(0.1)), Constant(static_cast<float>(0.2)),
- Constant(static_cast<float>(0.3)), Constant(static_cast<float>(0.4)),
- Constant(static_cast<double>(0.5)), Constant(static_cast<double>(0.6)),
- Constant(static_cast<double>(0.7)),
- Constant(static_cast<double>(0.8))});
- for (int i = 0; i < 1000; i++) {
- // Randomly alternate between swaps and moves.
- if (c.rng()->NextInt(2) == 0) {
- MoveOperands* move = c.GenerateRandomMoves(1)->at(0);
- c.CheckAssembleMove(&move->source(), &move->destination());
- } else {
- MoveOperands* move = c.GenerateRandomSwaps(1)->at(0);
- c.CheckAssembleSwap(&move->source(), &move->destination());
- }
+ TestEnvironment env;
+ CodeGeneratorTester c(&env);
+
+ Handle<FixedArray> state_in = env.GenerateInitialState();
+ Handle<FixedArray> expected =
+ env.main_isolate()->factory()->NewFixedArray(state_in->length());
+ state_in->CopyTo(0, *expected, 0, state_in->length());
+
+ for (int i = 0; i < 1000; i++) {
+ // Randomly alternate between swaps and moves.
+ if (env.rng()->NextInt(2) == 0) {
+ ParallelMove* move = env.GenerateRandomMoves(1);
+ expected = env.SimulateMoves(move, expected);
+ c.CheckAssembleMove(&move->at(0)->source(), &move->at(0)->destination());
+ } else {
+ ParallelMove* swap = env.GenerateRandomSwaps(1);
+ expected = env.SimulateSwaps(swap, expected);
+ c.CheckAssembleSwap(&swap->at(0)->source(), &swap->at(0)->destination());
}
- c.Run();
}
+
+ Handle<Code> test = c.Finalize();
+ if (FLAG_print_code) {
+ test->Print();
+ }
+
+ Handle<FixedArray> actual = env.Run(test, state_in);
+ env.CheckState(actual, expected);
}
TEST(AssembleTailCallGap) {
const RegisterConfiguration* conf = RegisterConfiguration::Default();
+ TestEnvironment env;
// This test assumes at least 4 registers are allocatable.
CHECK_LE(4, conf->num_allocatable_general_registers());
@@ -511,80 +1002,89 @@ TEST(AssembleTailCallGap) {
{
// Generate a series of register pushes only.
- CodeGeneratorTester c;
- Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
+ CodeGeneratorTester c(&env);
+ Instruction* instr = Instruction::New(env.main_zone(), kArchNop);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(r3, slot_0);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(r2, slot_1);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(r1, slot_2);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(r0, slot_3);
c.CheckAssembleTailCallGaps(instr, first_slot + 4,
CodeGeneratorTester::kRegisterPush);
- c.Disassemble();
+ Handle<Code> code = c.Finalize();
+ if (FLAG_print_code) {
+ code->Print();
+ }
}
{
// Generate a series of stack pushes only.
- CodeGeneratorTester c;
- Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
+ CodeGeneratorTester c(&env);
+ Instruction* instr = Instruction::New(env.main_zone(), kArchNop);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(slot_minus_4, slot_0);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(slot_minus_3, slot_1);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(slot_minus_2, slot_2);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(slot_minus_1, slot_3);
c.CheckAssembleTailCallGaps(instr, first_slot + 4,
CodeGeneratorTester::kStackSlotPush);
- c.Disassemble();
+ Handle<Code> code = c.Finalize();
+ if (FLAG_print_code) {
+ code->Print();
+ }
}
{
// Generate a mix of stack and register pushes.
- CodeGeneratorTester c;
- Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
+ CodeGeneratorTester c(&env);
+ Instruction* instr = Instruction::New(env.main_zone(), kArchNop);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(slot_minus_2, slot_0);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(r1, slot_1);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(slot_minus_1, slot_2);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
- c.main_zone())
+ env.main_zone())
->AddMove(r0, slot_3);
c.CheckAssembleTailCallGaps(instr, first_slot + 4,
CodeGeneratorTester::kScalarPush);
- c.Disassemble();
+ Handle<Code> code = c.Finalize();
+ if (FLAG_print_code) {
+ code->Print();
+ }
}
}
diff --git a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
index 842a23bdbc..942a7e882e 100644
--- a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
+++ b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
@@ -35,7 +35,7 @@ TEST(NodeWithNullInputReachableFromEnd) {
Node* k = graph.NewNode(common.Int32Constant(0));
Node* phi =
graph.NewNode(common.Phi(MachineRepresentation::kTagged, 1), k, start);
- phi->ReplaceInput(0, NULL);
+ phi->ReplaceInput(0, nullptr);
graph.SetEnd(phi);
OFStream os(stdout);
@@ -54,7 +54,7 @@ TEST(NodeWithNullControlReachableFromEnd) {
Node* k = graph.NewNode(common.Int32Constant(0));
Node* phi =
graph.NewNode(common.Phi(MachineRepresentation::kTagged, 1), k, start);
- phi->ReplaceInput(1, NULL);
+ phi->ReplaceInput(1, nullptr);
graph.SetEnd(phi);
OFStream os(stdout);
@@ -73,7 +73,7 @@ TEST(NodeWithNullInputReachableFromStart) {
Node* k = graph.NewNode(common.Int32Constant(0));
Node* phi =
graph.NewNode(common.Phi(MachineRepresentation::kTagged, 1), k, start);
- phi->ReplaceInput(0, NULL);
+ phi->ReplaceInput(0, nullptr);
graph.SetEnd(start);
OFStream os(stdout);
@@ -90,7 +90,7 @@ TEST(NodeWithNullControlReachableFromStart) {
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
Node* merge = graph.NewNode(common.Merge(2), start, start);
- merge->ReplaceInput(1, NULL);
+ merge->ReplaceInput(1, nullptr);
graph.SetEnd(merge);
OFStream os(stdout);
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index a8d34434d3..d0addad660 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -30,7 +30,7 @@ class InstructionTester : public HandleAndZoneScope {
schedule(zone()),
common(zone()),
machine(zone()),
- code(NULL) {}
+ code(nullptr) {}
Graph graph;
Schedule schedule;
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 3944afee42..9d9d634e33 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -23,15 +23,15 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
public:
explicit JSTypedLoweringTester(int num_parameters = 0)
: isolate(main_isolate()),
- binop(NULL),
- unop(NULL),
+ binop(nullptr),
+ unop(nullptr),
javascript(main_zone()),
machine(main_zone()),
simplified(main_zone()),
common(main_zone()),
graph(main_zone()),
typer(main_isolate(), Typer::kNoFlags, &graph),
- context_node(NULL) {
+ context_node(nullptr) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
graph.SetEnd(graph.NewNode(common.End(1), graph.start()));
typer.Run();
@@ -95,7 +95,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* start() { return graph.start(); }
Node* context() {
- if (context_node == NULL) {
+ if (context_node == nullptr) {
context_node = graph.NewNode(common.Parameter(-1), graph.start());
}
return context_node;
@@ -679,13 +679,13 @@ TEST(MixedComparison1) {
TEST(RemoveToNumberEffects) {
JSTypedLoweringTester R;
- Node* effect_use = NULL;
+ Node* effect_use = nullptr;
Node* zero = R.graph.NewNode(R.common.NumberConstant(0));
for (int i = 0; i < 10; i++) {
Node* p0 = R.Parameter(Type::Number());
Node* ton = R.Unop(R.javascript.ToNumber(), p0);
Node* frame_state = R.EmptyFrameState(R.context());
- effect_use = NULL;
+ effect_use = nullptr;
switch (i) {
case 0:
@@ -720,13 +720,13 @@ TEST(RemoveToNumberEffects) {
}
R.CheckEffectInput(R.start(), ton);
- if (effect_use != NULL) R.CheckEffectInput(ton, effect_use);
+ if (effect_use != nullptr) R.CheckEffectInput(ton, effect_use);
Node* r = R.reduce(ton);
CHECK_EQ(p0, r);
CHECK_NE(R.start(), r);
- if (effect_use != NULL) {
+ if (effect_use != nullptr) {
R.CheckEffectInput(R.start(), effect_use);
// Check that value uses of ToNumber() do not go to start().
for (int i = 0; i < effect_use->op()->ValueInputCount(); i++) {
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index f115989f43..eace236e36 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -19,7 +19,7 @@ class TestCode : public HandleAndZoneScope {
blocks_(main_zone()),
sequence_(main_isolate(), main_zone(), &blocks_),
rpo_number_(RpoNumber::FromInt(0)),
- current_(NULL) {}
+ current_(nullptr) {}
ZoneVector<InstructionBlock*> blocks_;
InstructionSequence sequence_;
@@ -29,8 +29,8 @@ class TestCode : public HandleAndZoneScope {
int Jump(int target) {
Start();
InstructionOperand ops[] = {UseRpo(target)};
- sequence_.AddInstruction(
- Instruction::New(main_zone(), kArchJmp, 0, NULL, 1, ops, 0, NULL));
+ sequence_.AddInstruction(Instruction::New(main_zone(), kArchJmp, 0, nullptr,
+ 1, ops, 0, nullptr));
int pos = static_cast<int>(sequence_.instructions().size() - 1);
End();
return pos;
@@ -45,7 +45,7 @@ class TestCode : public HandleAndZoneScope {
InstructionCode code = 119 | FlagsModeField::encode(kFlags_branch) |
FlagsConditionField::encode(kEqual);
sequence_.AddInstruction(
- Instruction::New(main_zone(), code, 0, NULL, 2, ops, 0, NULL));
+ Instruction::New(main_zone(), code, 0, nullptr, 2, ops, 0, nullptr));
int pos = static_cast<int>(sequence_.instructions().size() - 1);
End();
return pos;
@@ -78,14 +78,14 @@ class TestCode : public HandleAndZoneScope {
void End() {
Start();
sequence_.EndBlock(current_->rpo_number());
- current_ = NULL;
+ current_ = nullptr;
rpo_number_ = RpoNumber::FromInt(rpo_number_.ToInt() + 1);
}
InstructionOperand UseRpo(int num) {
return sequence_.AddImmediate(Constant(RpoNumber::FromInt(num)));
}
void Start(bool deferred = false) {
- if (current_ == NULL) {
+ if (current_ == nullptr) {
current_ = new (main_zone())
InstructionBlock(main_zone(), rpo_number_, RpoNumber::Invalid(),
RpoNumber::Invalid(), deferred, false);
@@ -94,7 +94,7 @@ class TestCode : public HandleAndZoneScope {
}
}
void Defer() {
- CHECK(current_ == NULL);
+ CHECK_NULL(current_);
Start(true);
}
void AddGapMove(int index, const InstructionOperand& from,
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 57af474848..13f493e82d 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -37,7 +37,8 @@ static Handle<JSFunction> Compile(const char* source) {
Compiler::GetSharedFunctionInfoForScript(
source_code, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
- NULL, NULL, v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE,
+ nullptr, nullptr, v8::ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
MaybeHandle<FixedArray>())
.ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -107,7 +108,7 @@ TEST(TestLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = Builtins::CallableFor(isolate, Builtins::kToNumber);
- CompilationInfo info(ArrayVector("test"), isolate, &zone, Code::STUB);
+ CompilationInfo info(ArrayVector("test"), &zone, Code::STUB);
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate, &zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index ffb0872269..29e0c2f444 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -47,7 +47,7 @@ class LoopFinderTester : HandleAndZoneScope {
half(jsgraph.Constant(0.5)),
self(graph.NewNode(common.Int32Constant(0xaabbccdd))),
dead(graph.NewNode(common.Dead())),
- loop_tree(NULL) {
+ loop_tree(nullptr) {
graph.SetEnd(end);
graph.SetStart(start);
leaf[0] = zero;
@@ -123,7 +123,7 @@ class LoopFinderTester : HandleAndZoneScope {
}
LoopTree* GetLoopTree() {
- if (loop_tree == NULL) {
+ if (loop_tree == nullptr) {
if (FLAG_trace_turbo_graph) {
OFStream os(stdout);
os << AsRPO(graph);
@@ -168,7 +168,7 @@ class LoopFinderTester : HandleAndZoneScope {
CHECK(loop);
// Check parentage.
LoopTree::Loop* parent =
- i == 0 ? NULL : tree->ContainingLoop(chain[i - 1]);
+ i == 0 ? nullptr : tree->ContainingLoop(chain[i - 1]);
CHECK_EQ(parent, loop->parent());
for (int j = i - 1; j >= 0; j--) {
// This loop should be nested inside all the outer loops.
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index b146080618..b41fe5184b 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -78,8 +78,8 @@ class ReducerTester : public HandleAndZoneScope {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kAllOptionalOps)
: isolate(main_isolate()),
- binop(NULL),
- unop(NULL),
+ binop(nullptr),
+ unop(nullptr),
machine(main_zone(), MachineType::PointerRepresentation(), flags),
common(main_zone()),
graph(main_zone()),
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 25e5527f03..6be7814756 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -81,10 +81,9 @@ TEST(ReturnThreeValues) {
Node* mul = m.Int32Mul(p0, p1);
m.Return(add, sub, mul);
- CompilationInfo info(ArrayVector("testing"), handles.main_isolate(),
- handles.main_zone(), Code::STUB);
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, desc, m.graph(), m.Export());
+ CompilationInfo info(ArrayVector("testing"), handles.main_zone(), Code::STUB);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, handles.main_isolate(), desc, m.graph(), m.Export());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code) {
OFStream os(stdout);
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index 9d1ad5cd0c..a8927de33c 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -466,13 +466,13 @@ TEST(NullInputsSimple) {
n2->ReplaceInput(0, nullptr);
- CHECK_INPUTS(n2, NULL, n1);
+ CHECK_INPUTS(n2, nullptr, n1);
CHECK_USES(n0, n1);
n2->ReplaceInput(1, nullptr);
- CHECK_INPUTS(n2, NULL, NULL);
+ CHECK_INPUTS(n2, nullptr, nullptr);
CHECK_USES(n1, NONE);
}
@@ -495,10 +495,10 @@ TEST(NullInputsAppended) {
CHECK_USES(n1, n3);
CHECK_USES(n2, n3);
- n3->ReplaceInput(1, NULL);
+ n3->ReplaceInput(1, nullptr);
CHECK_USES(n1, NONE);
- CHECK_INPUTS(n3, n0, NULL, n2);
+ CHECK_INPUTS(n3, n0, nullptr, n2);
}
@@ -808,13 +808,13 @@ TEST(NullAllInputs) {
CHECK_USES(n0, n1, n2);
n1->NullAllInputs();
- CHECK_INPUTS(n1, NULL);
+ CHECK_INPUTS(n1, nullptr);
CHECK_INPUTS(n2, n0, n1);
CHECK_USES(n0, n2);
n2->NullAllInputs();
- CHECK_INPUTS(n1, NULL);
- CHECK_INPUTS(n2, NULL, NULL);
+ CHECK_INPUTS(n1, nullptr);
+ CHECK_INPUTS(n2, nullptr, nullptr);
CHECK_USES(n0, NONE);
}
@@ -830,7 +830,7 @@ TEST(NullAllInputs) {
n1->NullAllInputs();
CHECK_INPUTS(n0, NONE);
- CHECK_INPUTS(n1, NULL);
+ CHECK_INPUTS(n1, nullptr);
CHECK_USES(n0, NONE);
CHECK_USES(n1, NONE);
}
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index a1dde0dc41..ffcf0527a8 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -121,7 +121,14 @@ class BytecodeGraphTester {
Handle<SharedFunctionInfo> shared(function->shared());
CompilationInfo compilation_info(&zone, function->GetIsolate(), shared,
function);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&compilation_info);
+
+ // Compiler relies on canonicalized handles, let's create
+ // a canonicalized scope and migrate existing handles there.
+ CanonicalHandleScope canonical(isolate_);
+ compilation_info.ReopenHandlesInNewHandleScope();
+
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &compilation_info, function->GetIsolate());
function->set_code(*code);
return function;
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 3d6f1fbe0d..96ab8cb513 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -106,24 +106,6 @@ TEST(StringAdd) {
T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
}
-
-TEST(StringCompare) {
- FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })", flags);
-
- T.CheckCall(T.Val(-1), T.Val("aaa"), T.Val("bbb"));
- T.CheckCall(T.Val(0.0), T.Val("bbb"), T.Val("bbb"));
- T.CheckCall(T.Val(+1), T.Val("ccc"), T.Val("bbb"));
-}
-
-
-TEST(SubString) {
- FunctionTester T("(function(a,b) { return %_SubString(a,b,b+3); })", flags);
-
- T.CheckCall(T.Val("aaa"), T.Val("aaabbb"), T.Val(0.0));
- T.CheckCall(T.Val("abb"), T.Val("aaabbb"), T.Val(2));
- T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(0.0));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index a2cc262fba..ff8c83536b 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/boxed-float.h"
#include "src/codegen.h"
#include "src/objects-inl.h"
#include "src/utils.h"
@@ -424,9 +425,9 @@ static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
case 6:
return m->Int32Constant(0x01234567);
case 7:
- return m->Load(MachineType::Int32(), m->PointerConstant(NULL));
+ return m->Load(MachineType::Int32(), m->PointerConstant(nullptr));
default:
- return NULL;
+ return nullptr;
}
}
@@ -486,9 +487,9 @@ static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
case 6:
return m->Int64Constant(0x0123456789abcdefLL);
case 7:
- return m->Load(MachineType::Int64(), m->PointerConstant(NULL));
+ return m->Load(MachineType::Int64(), m->PointerConstant(nullptr));
default:
- return NULL;
+ return nullptr;
}
}
@@ -3690,7 +3691,7 @@ TEST(RunFloat64Mod) {
m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(modulo(*i, *j), m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(Modulo(*i, *j), m.Call(*i, *j)); }
}
}
@@ -3700,9 +3701,9 @@ TEST(RunDeadFloat32Binops) {
const Operator* ops[] = {m.machine()->Float32Add(), m.machine()->Float32Sub(),
m.machine()->Float32Mul(), m.machine()->Float32Div(),
- NULL};
+ nullptr};
- for (int i = 0; ops[i] != NULL; i++) {
+ for (int i = 0; ops[i] != nullptr; i++) {
RawMachineAssemblerTester<int32_t> m;
int constant = 0x53355 + i;
m.AddNode(ops[i], m.Float32Constant(0.1f), m.Float32Constant(1.11f));
@@ -3717,9 +3718,9 @@ TEST(RunDeadFloat64Binops) {
const Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
m.machine()->Float64Mul(), m.machine()->Float64Div(),
- m.machine()->Float64Mod(), NULL};
+ m.machine()->Float64Mod(), nullptr};
- for (int i = 0; ops[i] != NULL; i++) {
+ for (int i = 0; ops[i] != nullptr; i++) {
RawMachineAssemblerTester<int32_t> m;
int constant = 0x53355 + i;
m.AddNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
@@ -4034,7 +4035,7 @@ TEST(RunFloat64ModP) {
bt.AddReturn(m.Float64Mod(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(modulo(*i, *j), bt.call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(Modulo(*i, *j), bt.call(*i, *j)); }
}
}
@@ -4924,7 +4925,7 @@ static int Float64CompareHelper(RawMachineAssemblerTester<int32_t>* m,
Node* b =
load_b ? m->Load(MachineType::Float64(), m->PointerConstant(&buffer[1]))
: m->Float64Constant(y);
- Node* cmp = NULL;
+ Node* cmp = nullptr;
bool expected = false;
switch (test_case) {
// Equal tests.
@@ -5093,7 +5094,7 @@ static void IntPtrCompare(intptr_t left, intptr_t right) {
MachineType::Pointer());
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
- Node* res = NULL;
+ Node* res = nullptr;
bool expected = false;
switch (test) {
case 0:
@@ -6297,17 +6298,17 @@ TEST(RunCallCFunction9) {
TEST(RunBitcastInt64ToFloat64) {
int64_t input = 1;
- double output = 0.0;
+ Float64 output;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
- &output, MachineRepresentation::kFloat64,
+ output.get_bits_address(), MachineRepresentation::kFloat64,
m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, MachineType::Int64())));
m.Return(m.Int32Constant(11));
FOR_INT64_INPUTS(i) {
input = *i;
CHECK_EQ(11, m.Call());
- double expected = bit_cast<double>(input);
- CHECK_EQ(bit_cast<int64_t>(expected), bit_cast<int64_t>(output));
+ Float64 expected = Float64::FromBits(input);
+ CHECK_EQ(expected.get_bits(), output.get_bits());
}
}
@@ -6694,17 +6695,17 @@ TEST(RunRoundUint32ToFloat32) {
TEST(RunBitcastInt32ToFloat32) {
int32_t input = 1;
- float output = 0.0;
+ Float32 output;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
- &output, MachineRepresentation::kFloat32,
+ output.get_bits_address(), MachineRepresentation::kFloat32,
m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, MachineType::Int32())));
m.Return(m.Int32Constant(11));
FOR_INT32_INPUTS(i) {
input = *i;
CHECK_EQ(11, m.Call());
- float expected = bit_cast<float>(input);
- CHECK_EQ(bit_cast<int32_t>(expected), bit_cast<int32_t>(output));
+ Float32 expected = Float32::FromBits(input);
+ CHECK_EQ(expected.get_bits(), output.get_bits());
}
}
@@ -6911,6 +6912,8 @@ TEST(Regression6640) {
for (RelocIterator it(*code,
1 << RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
!it.done(); it.next()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(code->GetHeap());
it.rinfo()->update_wasm_function_table_size_reference(
code->GetIsolate(), old_value, new_value, FLUSH_ICACHE_IF_NEEDED);
}
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 70a0455f20..8472c1e70e 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -256,10 +256,9 @@ class Int32Signature : public MachineSignature {
Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
- CompilationInfo info(ArrayVector("testing"), isolate, graph->zone(),
- Code::STUB);
+ CompilationInfo info(ArrayVector("testing"), graph->zone(), Code::STUB);
Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, desc, graph, schedule);
+ Pipeline::GenerateCodeForTesting(&info, isolate, desc, graph, schedule);
CHECK(!code.is_null());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index d42a803c3e..139db1e207 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -24,7 +24,7 @@ class StubTester {
public:
StubTester(Isolate* isolate, Zone* zone, CodeStub* stub)
: zone_(zone),
- info_(ArrayVector("test"), isolate, zone, Code::STUB),
+ info_(ArrayVector("test"), zone, Code::STUB),
interface_descriptor_(stub->GetCallInterfaceDescriptor()),
descriptor_(Linkage::GetStubCallDescriptor(
isolate, zone, interface_descriptor_,
@@ -37,7 +37,7 @@ class StubTester {
StubTester(Isolate* isolate, Zone* zone, Builtins::Name name)
: zone_(zone),
- info_(ArrayVector("test"), isolate, zone, Code::STUB),
+ info_(ArrayVector("test"), zone, Code::STUB),
interface_descriptor_(
Builtins::CallableFor(isolate, name).descriptor()),
descriptor_(Linkage::GetStubCallDescriptor(
@@ -101,12 +101,12 @@ class StubTester {
FunctionTester tester_;
};
-TEST(RunStringLengthStub) {
+TEST(RunStringWrapperLengthStub) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
Zone* zone = scope.main_zone();
- StubTester tester(isolate, zone, Builtins::kLoadIC_StringLength);
+ StubTester tester(isolate, zone, Builtins::kLoadIC_StringWrapperLength);
// Actuall call through to the stub, verifying its result.
const char* testString = "Und das Lamm schrie HURZ!";
@@ -119,6 +119,119 @@ TEST(RunStringLengthStub) {
CHECK_EQ(static_cast<int>(strlen(testString)), Smi::ToInt(*result));
}
+TEST(RunArrayExtractStubSimple) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
+
+ // Actually call through to the stub, verifying its result.
+ Handle<JSArray> source_array = isolate->factory()->NewJSArray(
+ PACKED_ELEMENTS, 5, 10, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ static_cast<FixedArray*>(source_array->elements())->set(0, Smi::FromInt(5));
+ static_cast<FixedArray*>(source_array->elements())->set(1, Smi::FromInt(4));
+ static_cast<FixedArray*>(source_array->elements())->set(2, Smi::FromInt(3));
+ static_cast<FixedArray*>(source_array->elements())->set(3, Smi::FromInt(2));
+ static_cast<FixedArray*>(source_array->elements())->set(4, Smi::FromInt(1));
+ Handle<JSArray> result = Handle<JSArray>::cast(
+ tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
+ Handle<Smi>(Smi::FromInt(5), isolate)));
+ CHECK_NE(*source_array, *result);
+ CHECK_EQ(result->GetElementsKind(), PACKED_ELEMENTS);
+ CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(0),
+ Smi::FromInt(5));
+ CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(1),
+ Smi::FromInt(4));
+ CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(2),
+ Smi::FromInt(3));
+ CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(3),
+ Smi::FromInt(2));
+ CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(4),
+ Smi::FromInt(1));
+}
+
+TEST(RunArrayExtractDoubleStubSimple) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
+
+ // Actually call through to the stub, verifying its result.
+ Handle<JSArray> source_array = isolate->factory()->NewJSArray(
+ PACKED_DOUBLE_ELEMENTS, 5, 10, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ static_cast<FixedDoubleArray*>(source_array->elements())->set(0, 5);
+ static_cast<FixedDoubleArray*>(source_array->elements())->set(1, 4);
+ static_cast<FixedDoubleArray*>(source_array->elements())->set(2, 3);
+ static_cast<FixedDoubleArray*>(source_array->elements())->set(3, 2);
+ static_cast<FixedDoubleArray*>(source_array->elements())->set(4, 1);
+ Handle<JSArray> result = Handle<JSArray>::cast(
+ tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
+ Handle<Smi>(Smi::FromInt(5), isolate)));
+ CHECK_NE(*source_array, *result);
+ CHECK_EQ(result->GetElementsKind(), PACKED_DOUBLE_ELEMENTS);
+ CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(0),
+ 5);
+ CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(1),
+ 4);
+ CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(2),
+ 3);
+ CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(3),
+ 2);
+ CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(4),
+ 1);
+}
+
+TEST(RunArrayExtractStubTooBigForNewSpace) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
+
+ // Actually call through to the stub, verifying its result.
+ Handle<JSArray> source_array = isolate->factory()->NewJSArray(
+ PACKED_ELEMENTS, 500000, 500000, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ for (int i = 0; i < 500000; ++i) {
+ static_cast<FixedArray*>(source_array->elements())->set(i, Smi::FromInt(i));
+ }
+ Handle<JSArray> result = Handle<JSArray>::cast(
+ tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
+ Handle<Smi>(Smi::FromInt(500000), isolate)));
+ CHECK_NE(*source_array, *result);
+ CHECK_EQ(result->GetElementsKind(), PACKED_ELEMENTS);
+ for (int i = 0; i < 500000; ++i) {
+ CHECK_EQ(static_cast<FixedArray*>(source_array->elements())->get(i),
+ static_cast<FixedArray*>(result->elements())->get(i));
+ }
+}
+
+TEST(RunArrayExtractDoubleStubTooBigForNewSpace) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
+
+ // Actually call through to the stub, verifying its result.
+ Handle<JSArray> source_array = isolate->factory()->NewJSArray(
+ PACKED_DOUBLE_ELEMENTS, 500000, 500000,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, TENURED);
+ for (int i = 0; i < 500000; ++i) {
+ static_cast<FixedDoubleArray*>(source_array->elements())->set(i, i);
+ }
+ Handle<JSArray> result = Handle<JSArray>::cast(
+ tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
+ Handle<Smi>(Smi::FromInt(500000), isolate)));
+ CHECK_NE(*source_array, *result);
+ CHECK_EQ(result->GetElementsKind(), PACKED_DOUBLE_ELEMENTS);
+ for (int i = 0; i < 500000; ++i) {
+ CHECK_EQ(
+ static_cast<FixedDoubleArray*>(source_array->elements())->get_scalar(i),
+ static_cast<FixedDoubleArray*>(result->elements())->get_scalar(i));
+ }
+}
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index d1f564a51c..728d60f491 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -10,38 +10,56 @@ namespace v8 {
namespace internal {
namespace compiler {
-static const char* throws = NULL;
-
-static const char* load_tests[] = {
- "var x = a; r = x", "123", "0",
- "var x = (r = x)", "undefined", "undefined",
- "var x = (a?1:2); r = x", "1", "2",
- "const x = a; r = x", "123", "0",
- "const x = (a?3:4); r = x", "3", "4",
- "'use strict'; const x = a; r = x", "123", "0",
- "'use strict'; const x = (r = x)", throws, throws,
- "'use strict'; const x = (a?5:6); r = x", "5", "6",
- "'use strict'; let x = a; r = x", "123", "0",
- "'use strict'; let x = (r = x)", throws, throws,
- "'use strict'; let x = (a?7:8); r = x", "7", "8",
- NULL};
+static const char* throws = nullptr;
+
+static const char* load_tests[] = {"var x = a; r = x",
+ "123",
+ "0",
+ "var x = (r = x)",
+ "undefined",
+ "undefined",
+ "var x = (a?1:2); r = x",
+ "1",
+ "2",
+ "const x = a; r = x",
+ "123",
+ "0",
+ "const x = (a?3:4); r = x",
+ "3",
+ "4",
+ "'use strict'; const x = a; r = x",
+ "123",
+ "0",
+ "'use strict'; const x = (r = x)",
+ throws,
+ throws,
+ "'use strict'; const x = (a?5:6); r = x",
+ "5",
+ "6",
+ "'use strict'; let x = a; r = x",
+ "123",
+ "0",
+ "'use strict'; let x = (r = x)",
+ throws,
+ throws,
+ "'use strict'; let x = (a?7:8); r = x",
+ "7",
+ "8",
+ nullptr};
static const char* store_tests[] = {
- "var x = 1; x = a; r = x", "123", "0",
- "var x = (a?(x=4,2):3); r = x", "2", "3",
- "var x = (a?4:5); x = a; r = x", "123", "0",
+ "var x = 1; x = a; r = x", "123", "0", "var x = (a?(x=4,2):3); r = x", "2",
+ "3", "var x = (a?4:5); x = a; r = x", "123", "0",
// Assignments to 'const' are SyntaxErrors, handled by the parser,
// hence we cannot test them here because they are early errors.
- "'use strict'; let x = 1; x = a; r = x", "123", "0",
- "'use strict'; let x = (a?(x=4,2):3); r = x", throws, "3",
- "'use strict'; let x = (a?4:5); x = a; r = x", "123", "0",
- NULL};
-
+ "'use strict'; let x = 1; x = a; r = x", "123", "0",
+ "'use strict'; let x = (a?(x=4,2):3); r = x", throws, "3",
+ "'use strict'; let x = (a?4:5); x = a; r = x", "123", "0", nullptr};
static void RunVariableTests(const char* source, const char* tests[]) {
EmbeddedVector<char, 512> buffer;
- for (int i = 0; tests[i] != NULL; i += 3) {
+ for (int i = 0; tests[i] != nullptr; i += 3) {
SNPrintF(buffer, source, tests[i]);
PrintF("#%d: %s\n", i / 3, buffer.start());
FunctionTester T(buffer.start());
diff --git a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
index 7f63484ba9..b451b73e0d 100644
--- a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
@@ -24,6 +24,8 @@ static void UpdateFunctionTableSizeReferences(Handle<Code> code,
uint32_t old_size,
uint32_t new_size) {
Isolate* isolate = CcTest::i_isolate();
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
bool modified = false;
int mode_mask =
RelocInfo::ModeMask(RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
@@ -48,7 +50,8 @@ static void RunLoadStoreRelocation(MachineType rep) {
CType new_buffer[kNumElems];
byte* raw = reinterpret_cast<byte*>(buffer);
byte* new_raw = reinterpret_cast<byte*>(new_buffer);
- WasmContext wasm_context = {raw, sizeof(buffer)};
+ WasmContext wasm_context;
+ wasm_context.SetRawMemory(raw, sizeof(buffer));
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
new_raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
@@ -68,8 +71,7 @@ static void RunLoadStoreRelocation(MachineType rep) {
CHECK(buffer[0] != buffer[1]);
CHECK_EQ(OK, m.Call());
CHECK(buffer[0] == buffer[1]);
- wasm_context.mem_size = sizeof(new_buffer);
- wasm_context.mem_start = new_raw;
+ wasm_context.SetRawMemory(new_raw, sizeof(new_buffer));
CHECK(new_buffer[0] != new_buffer[1]);
CHECK_EQ(OK, m.Call());
CHECK(new_buffer[0] == new_buffer[1]);
@@ -99,7 +101,7 @@ static void RunLoadStoreRelocationOffset(MachineType rep) {
int32_t y = kNumElems - x - 1;
// initialize the buffer with raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
- wasm_context = {raw, sizeof(buffer)};
+ wasm_context.SetRawMemory(raw, sizeof(buffer));
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
}
@@ -128,8 +130,7 @@ static void RunLoadStoreRelocationOffset(MachineType rep) {
new_raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
}
- wasm_context.mem_size = sizeof(new_buffer);
- wasm_context.mem_start = new_raw;
+ wasm_context.SetRawMemory(new_raw, sizeof(new_buffer));
CHECK(new_buffer[x] != new_buffer[y]);
CHECK_EQ(OK, m.Call());
@@ -152,7 +153,8 @@ TEST(RunLoadStoreRelocationOffset) {
TEST(Uint32LessThanMemoryRelocation) {
RawMachineAssemblerTester<uint32_t> m;
RawMachineLabel within_bounds, out_of_bounds;
- WasmContext wasm_context = {reinterpret_cast<Address>(1234), 0x200};
+ WasmContext wasm_context;
+ wasm_context.SetRawMemory(reinterpret_cast<void*>(1234), 0x200);
Node* index = m.Int32Constant(0x200);
Node* wasm_context_node =
m.RelocatableIntPtrConstant(reinterpret_cast<uintptr_t>(&wasm_context),
@@ -167,7 +169,7 @@ TEST(Uint32LessThanMemoryRelocation) {
m.Return(m.Int32Constant(0xdeadbeef));
// Check that index is out of bounds with current size
CHECK_EQ(0xdeadbeef, m.Call());
- wasm_context.mem_size = 0x400;
+ wasm_context.SetRawMemory(wasm_context.mem_start, 0x400);
// Check that after limit is increased, index is within bounds.
CHECK_EQ(0xacedu, m.Call());
}
diff --git a/deps/v8/test/cctest/compiler/value-helper.cc b/deps/v8/test/cctest/compiler/value-helper.cc
new file mode 100644
index 0000000000..abafa40039
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/value-helper.cc
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/value-helper.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Define constexpr arrays of ValueHelper for external references.
+constexpr int8_t ValueHelper::int8_array[];
+constexpr int16_t ValueHelper::int16_array[];
+constexpr uint32_t ValueHelper::uint32_array[];
+constexpr uint64_t ValueHelper::uint64_array[];
+constexpr float ValueHelper::float32_array[];
+constexpr double ValueHelper::float64_array[];
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 87cdb585da..28e64c703e 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -7,9 +7,11 @@
#include <stdint.h>
+#include "src/base/template-utils.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "test/cctest/cctest.h"
@@ -60,283 +62,278 @@ class ValueHelper {
CheckHeapConstant(isolate_->heap()->false_value(), node);
}
- static std::vector<float> float32_vector() {
- static const float nan = std::numeric_limits<float>::quiet_NaN();
- static const float kValues[] = {
- -std::numeric_limits<float>::infinity(),
- -2.70497e+38f,
- -1.4698e+37f,
- -1.22813e+35f,
- -1.20555e+35f,
- -1.34584e+34f,
- -1.0079e+32f,
- -6.49364e+26f,
- -3.06077e+25f,
- -1.46821e+25f,
- -1.17658e+23f,
- -1.9617e+22f,
- -2.7357e+20f,
- -9223372036854775808.0f, // INT64_MIN
- -1.48708e+13f,
- -1.89633e+12f,
- -4.66622e+11f,
- -2.22581e+11f,
- -1.45381e+10f,
- -2147483904.0f, // First float32 after INT32_MIN
- -2147483648.0f, // INT32_MIN
- -2147483520.0f, // Last float32 before INT32_MIN
- -1.3956e+09f,
- -1.32951e+09f,
- -1.30721e+09f,
- -1.19756e+09f,
- -9.26822e+08f,
- -6.35647e+08f,
- -4.00037e+08f,
- -1.81227e+08f,
- -5.09256e+07f,
- -964300.0f,
- -192446.0f,
- -28455.0f,
- -27194.0f,
- -26401.0f,
- -20575.0f,
- -17069.0f,
- -9167.0f,
- -960.178f,
- -113.0f,
- -62.0f,
- -15.0f,
- -7.0f,
- -1.0f,
- -0.0256635f,
- -4.60374e-07f,
- -3.63759e-10f,
- -4.30175e-14f,
- -5.27385e-15f,
- -1.5707963267948966f,
- -1.48084e-15f,
- -2.220446049250313e-16f,
- -1.05755e-19f,
- -3.2995e-21f,
- -1.67354e-23f,
- -1.11885e-23f,
- -1.78506e-30f,
- -5.07594e-31f,
- -3.65799e-31f,
- -1.43718e-34f,
- -1.27126e-38f,
- -0.0f,
- 0.0f,
- 1.17549e-38f,
- 1.56657e-37f,
- 4.08512e-29f,
- 3.31357e-28f,
- 6.25073e-22f,
- 4.1723e-13f,
- 1.44343e-09f,
- 1.5707963267948966f,
- 5.27004e-08f,
- 9.48298e-08f,
- 5.57888e-07f,
- 4.89988e-05f,
- 0.244326f,
- 1.0f,
- 12.4895f,
- 19.0f,
- 47.0f,
- 106.0f,
- 538.324f,
- 564.536f,
- 819.124f,
- 7048.0f,
- 12611.0f,
- 19878.0f,
- 20309.0f,
- 797056.0f,
- 1.77219e+09f,
- 2147483648.0f, // INT32_MAX + 1
- 4294967296.0f, // UINT32_MAX + 1
- 1.51116e+11f,
- 4.18193e+13f,
- 3.59167e+16f,
- 9223372036854775808.0f, // INT64_MAX + 1
- 18446744073709551616.0f, // UINT64_MAX + 1
- 3.38211e+19f,
- 2.67488e+20f,
- 1.78831e+21f,
- 9.20914e+21f,
- 8.35654e+23f,
- 1.4495e+24f,
- 5.94015e+25f,
- 4.43608e+30f,
- 2.44502e+33f,
- 2.61152e+33f,
- 1.38178e+37f,
- 1.71306e+37f,
- 3.31899e+38f,
- 3.40282e+38f,
- std::numeric_limits<float>::infinity(),
- nan,
- -nan,
- };
- return std::vector<float>(&kValues[0], &kValues[arraysize(kValues)]);
+ static constexpr float float32_array[] = {
+ -std::numeric_limits<float>::infinity(),
+ -2.70497e+38f,
+ -1.4698e+37f,
+ -1.22813e+35f,
+ -1.20555e+35f,
+ -1.34584e+34f,
+ -1.0079e+32f,
+ -6.49364e+26f,
+ -3.06077e+25f,
+ -1.46821e+25f,
+ -1.17658e+23f,
+ -1.9617e+22f,
+ -2.7357e+20f,
+ -9223372036854775808.0f, // INT64_MIN
+ -1.48708e+13f,
+ -1.89633e+12f,
+ -4.66622e+11f,
+ -2.22581e+11f,
+ -1.45381e+10f,
+ -2147483904.0f, // First float32 after INT32_MIN
+ -2147483648.0f, // INT32_MIN
+ -2147483520.0f, // Last float32 before INT32_MIN
+ -1.3956e+09f,
+ -1.32951e+09f,
+ -1.30721e+09f,
+ -1.19756e+09f,
+ -9.26822e+08f,
+ -6.35647e+08f,
+ -4.00037e+08f,
+ -1.81227e+08f,
+ -5.09256e+07f,
+ -964300.0f,
+ -192446.0f,
+ -28455.0f,
+ -27194.0f,
+ -26401.0f,
+ -20575.0f,
+ -17069.0f,
+ -9167.0f,
+ -960.178f,
+ -113.0f,
+ -62.0f,
+ -15.0f,
+ -7.0f,
+ -1.0f,
+ -0.0256635f,
+ -4.60374e-07f,
+ -3.63759e-10f,
+ -4.30175e-14f,
+ -5.27385e-15f,
+ -1.5707963267948966f,
+ -1.48084e-15f,
+ -2.220446049250313e-16f,
+ -1.05755e-19f,
+ -3.2995e-21f,
+ -1.67354e-23f,
+ -1.11885e-23f,
+ -1.78506e-30f,
+ -5.07594e-31f,
+ -3.65799e-31f,
+ -1.43718e-34f,
+ -1.27126e-38f,
+ -0.0f,
+ 0.0f,
+ 1.17549e-38f,
+ 1.56657e-37f,
+ 4.08512e-29f,
+ 3.31357e-28f,
+ 6.25073e-22f,
+ 4.1723e-13f,
+ 1.44343e-09f,
+ 1.5707963267948966f,
+ 5.27004e-08f,
+ 9.48298e-08f,
+ 5.57888e-07f,
+ 4.89988e-05f,
+ 0.244326f,
+ 1.0f,
+ 12.4895f,
+ 19.0f,
+ 47.0f,
+ 106.0f,
+ 538.324f,
+ 564.536f,
+ 819.124f,
+ 7048.0f,
+ 12611.0f,
+ 19878.0f,
+ 20309.0f,
+ 797056.0f,
+ 1.77219e+09f,
+ 2147483648.0f, // INT32_MAX + 1
+ 4294967296.0f, // UINT32_MAX + 1
+ 1.51116e+11f,
+ 4.18193e+13f,
+ 3.59167e+16f,
+ 9223372036854775808.0f, // INT64_MAX + 1
+ 18446744073709551616.0f, // UINT64_MAX + 1
+ 3.38211e+19f,
+ 2.67488e+20f,
+ 1.78831e+21f,
+ 9.20914e+21f,
+ 8.35654e+23f,
+ 1.4495e+24f,
+ 5.94015e+25f,
+ 4.43608e+30f,
+ 2.44502e+33f,
+ 2.61152e+33f,
+ 1.38178e+37f,
+ 1.71306e+37f,
+ 3.31899e+38f,
+ 3.40282e+38f,
+ std::numeric_limits<float>::infinity(),
+ std::numeric_limits<float>::quiet_NaN(),
+ -std::numeric_limits<float>::quiet_NaN()};
+
+ static constexpr Vector<const float> float32_vector() {
+ return ArrayVector(float32_array);
}
- static std::vector<double> float64_vector() {
- static const double nan = std::numeric_limits<double>::quiet_NaN();
- static const double values[] = {-2e66,
- -2.220446049250313e-16,
- -9223373136366403584.0,
- -9223372036854775808.0, // INT64_MIN
- -2147483649.5,
- -2147483648.25,
- -2147483648.0,
- -2147483647.875,
- -2147483647.125,
- -2147483647.0,
- -999.75,
- -2e66,
- -1.75,
- -1.5707963267948966,
- -1.0,
- -0.5,
- -0.0,
- 0.0,
- 3e-88,
- 0.125,
- 0.25,
- 0.375,
- 0.5,
- 1.0,
- 1.17549e-38,
- 1.56657e-37,
- 1.0000001,
- 1.25,
- 1.5707963267948966,
- 2,
- 3.1e7,
- 5.125,
- 6.25,
- 888,
- 982983.25,
- 2147483647.0,
- 2147483647.375,
- 2147483647.75,
- 2147483648.0,
- 2147483648.25,
- 2147483649.25,
- 9223372036854775808.0, // INT64_MAX + 1
- 9223373136366403584.0,
- 18446744073709551616.0, // UINT64_MAX + 1
- 2e66,
- V8_INFINITY,
- -V8_INFINITY,
- -nan,
- nan};
- return std::vector<double>(&values[0], &values[arraysize(values)]);
+ static constexpr double float64_array[] = {
+ -2e66,
+ -2.220446049250313e-16,
+ -9223373136366403584.0,
+ -9223372036854775808.0, // INT64_MIN
+ -2147483649.5,
+ -2147483648.25,
+ -2147483648.0,
+ -2147483647.875,
+ -2147483647.125,
+ -2147483647.0,
+ -999.75,
+ -2e66,
+ -1.75,
+ -1.5707963267948966,
+ -1.0,
+ -0.5,
+ -0.0,
+ 0.0,
+ 3e-88,
+ 0.125,
+ 0.25,
+ 0.375,
+ 0.5,
+ 1.0,
+ 1.17549e-38,
+ 1.56657e-37,
+ 1.0000001,
+ 1.25,
+ 1.5707963267948966,
+ 2,
+ 3.1e7,
+ 5.125,
+ 6.25,
+ 888,
+ 982983.25,
+ 2147483647.0,
+ 2147483647.375,
+ 2147483647.75,
+ 2147483648.0,
+ 2147483648.25,
+ 2147483649.25,
+ 9223372036854775808.0, // INT64_MAX + 1
+ 9223373136366403584.0,
+ 18446744073709551616.0, // UINT64_MAX + 1
+ 2e66,
+ V8_INFINITY,
+ -V8_INFINITY,
+ std::numeric_limits<double>::quiet_NaN(),
+ -std::numeric_limits<double>::quiet_NaN()};
+
+ static constexpr Vector<const double> float64_vector() {
+ return ArrayVector(float64_array);
}
- static const std::vector<int32_t> int32_vector() {
- std::vector<uint32_t> values = uint32_vector();
- return std::vector<int32_t>(values.begin(), values.end());
+ static constexpr uint32_t uint32_array[] = {
+ 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ // This row is useful for testing lea optimizations on intel.
+ 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000008, 0x00000009,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ // Bit pattern of a quiet NaN and signaling NaN, with or without
+ // additional payload.
+ 0x7fc00000, 0x7f800000, 0x7fffffff, 0x7f876543};
+
+ static constexpr Vector<const uint32_t> uint32_vector() {
+ return ArrayVector(uint32_array);
}
- static const std::vector<uint32_t> uint32_vector() {
- static const uint32_t kValues[] = {
- 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
- // This row is useful for testing lea optimizations on intel.
- 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000008, 0x00000009,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
- 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
- 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
- return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+ static constexpr Vector<const int32_t> int32_vector() {
+ return Vector<const int32_t>::cast(uint32_vector());
}
- static const std::vector<int64_t> int64_vector() {
- std::vector<uint64_t> values = uint64_vector();
- return std::vector<int64_t>(values.begin(), values.end());
+ static constexpr uint64_t uint64_array[] = {
+ 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000008, 0x00000009,
+ 0xffffffffffffffff, 0xfffffffffffffffe, 0xfffffffffffffffd,
+ 0x0000000000000000, 0x0000000100000000, 0xffffffff00000000,
+ 0x1b09788b00000000, 0x04c5fce800000000, 0xcc0de5bf00000000,
+ 0x0000000200000000, 0x0000000300000000, 0x0000000400000000,
+ 0x0000000500000000, 0x0000000800000000, 0x0000000900000000,
+ 0x273a798e187937a3, 0xece3af835495a16b, 0x0b668ecc11223344, 0x0000009e,
+ 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c, 0x88776655,
+ 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761eeeeeeee, 0x80000000eeeeeeee, 0x88888888dddddddd,
+ 0xa0000000dddddddd, 0xddddddddaaaaaaaa, 0xe0000000aaaaaaaa,
+ 0xeeeeeeeeeeeeeeee, 0xfffffffdeeeeeeee, 0xf0000000dddddddd,
+ 0x007fffffdddddddd, 0x003fffffaaaaaaaa, 0x001fffffaaaaaaaa, 0x000fffff,
+ 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff, 0x00003fff,
+ 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ 0x00003fffffffffff, 0x00001fffffffffff, 0x00000fffffffffff,
+ 0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff,
+ 0x8000008000000000, 0x8000008000000001, 0x8000000000000400,
+ 0x8000000000000401, 0x0000000000000020,
+ // Bit pattern of a quiet NaN and signaling NaN, with or without
+ // additional payload.
+ 0x7ff8000000000000, 0x7ff0000000000000, 0x7ff8123456789abc,
+ 0x7ff7654321fedcba};
+
+ static constexpr Vector<const uint64_t> uint64_vector() {
+ return ArrayVector(uint64_array);
}
- static const std::vector<uint64_t> uint64_vector() {
- static const uint64_t kValues[] = {
- 0x00000000, 0x00000001, 0xffffffff,
- 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
- 0x00000002, 0x00000003, 0x00000004,
- 0x00000005, 0x00000008, 0x00000009,
- 0xffffffffffffffff, 0xfffffffffffffffe, 0xfffffffffffffffd,
- 0x0000000000000000, 0x0000000100000000, 0xffffffff00000000,
- 0x1b09788b00000000, 0x04c5fce800000000, 0xcc0de5bf00000000,
- 0x0000000200000000, 0x0000000300000000, 0x0000000400000000,
- 0x0000000500000000, 0x0000000800000000, 0x0000000900000000,
- 0x273a798e187937a3, 0xece3af835495a16b, 0x0b668ecc11223344,
- 0x0000009e, 0x00000043, 0x0000af73,
- 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000,
- 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761eeeeeeee, 0x80000000eeeeeeee, 0x88888888dddddddd,
- 0xa0000000dddddddd, 0xddddddddaaaaaaaa, 0xe0000000aaaaaaaa,
- 0xeeeeeeeeeeeeeeee, 0xfffffffdeeeeeeee, 0xf0000000dddddddd,
- 0x007fffffdddddddd, 0x003fffffaaaaaaaa, 0x001fffffaaaaaaaa,
- 0x000fffff, 0x0007ffff, 0x0003ffff,
- 0x0001ffff, 0x0000ffff, 0x00007fff,
- 0x00003fff, 0x00001fff, 0x00000fff,
- 0x000007ff, 0x000003ff, 0x000001ff,
- 0x00003fffffffffff, 0x00001fffffffffff, 0x00000fffffffffff,
- 0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff,
- 0x8000008000000000, 0x8000008000000001, 0x8000000000000400,
- 0x8000000000000401, 0x0000000000000020};
- return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+ static constexpr Vector<const int64_t> int64_vector() {
+ return Vector<const int64_t>::cast(uint64_vector());
}
- static const std::vector<double> nan_vector(size_t limit = 0) {
- static const double nan = std::numeric_limits<double>::quiet_NaN();
- static const double values[] = {-nan, -V8_INFINITY * -0.0,
- -V8_INFINITY * 0.0, V8_INFINITY * -0.0,
- V8_INFINITY * 0.0, nan};
- return std::vector<double>(&values[0], &values[arraysize(values)]);
- }
+ static constexpr int16_t int16_array[] = {
+ 0, 1, 2, INT16_MAX - 1, INT16_MAX, INT16_MIN, INT16_MIN + 1, -2, -1};
- static const std::vector<int16_t> int16_vector() {
- static const int16_t kValues[] = {
- 0, 1, 2, INT16_MAX - 1, INT16_MAX, INT16_MIN, INT16_MIN + 1, -2, -1};
- return std::vector<int16_t>(&kValues[0], &kValues[arraysize(kValues)]);
+ static constexpr Vector<const int16_t> int16_vector() {
+ return ArrayVector(int16_array);
}
- static const std::vector<uint16_t> uint16_vector() {
- std::vector<int16_t> values = int16_vector();
- return std::vector<uint16_t>(values.begin(), values.end());
+ static constexpr Vector<const uint16_t> uint16_vector() {
+ return Vector<const uint16_t>::cast(int16_vector());
}
- static const std::vector<int8_t> int8_vector() {
- static const int8_t kValues[] = {
- 0, 1, 2, INT8_MAX - 1, INT8_MAX, INT8_MIN, INT8_MIN + 1, -2, -1};
- return std::vector<int8_t>(&kValues[0], &kValues[arraysize(kValues)]);
+ static constexpr int8_t int8_array[] = {
+ 0, 1, 2, INT8_MAX - 1, INT8_MAX, INT8_MIN, INT8_MIN + 1, -2, -1};
+
+ static constexpr Vector<const int8_t> int8_vector() {
+ return ArrayVector(int8_array);
}
- static const std::vector<uint8_t> uint8_vector() {
- std::vector<int8_t> values = int8_vector();
- return std::vector<uint8_t>(values.begin(), values.end());
+ static constexpr Vector<const uint8_t> uint8_vector() {
+ return Vector<const uint8_t>::cast(ArrayVector(int8_array));
}
- static const std::vector<uint32_t> ror_vector() {
- static const uint32_t kValues[31] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
- return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+ static constexpr uint32_t ror_array[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
+
+ static constexpr Vector<const uint32_t> ror_vector() {
+ return ArrayVector(ror_array);
}
};
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
// Watch out, these macros aren't hygenic; they pollute your scope. Thanks STL.
-#define FOR_INPUTS(ctype, itype, var) \
- std::vector<ctype> var##_vec = \
- ::v8::internal::compiler::ValueHelper::itype##_vector(); \
- for (std::vector<ctype>::iterator var = var##_vec.begin(); \
- var != var##_vec.end(); ++var)
+#define FOR_INPUTS(ctype, itype, var) \
+ Vector<const ctype> var##_vec = \
+ ::v8::internal::compiler::ValueHelper::itype##_vector(); \
+ for (Vector<const ctype>::iterator var = var##_vec.begin(), \
+ var##_end = var##_vec.end(); \
+ var != var##_end; ++var)
#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
diff --git a/deps/v8/test/cctest/expression-type-collector-macros.h b/deps/v8/test/cctest/expression-type-collector-macros.h
index 68d69481d9..39774f0a71 100644
--- a/deps/v8/test/cctest/expression-type-collector-macros.h
+++ b/deps/v8/test/cctest/expression-type-collector-macros.h
@@ -33,11 +33,11 @@
#define CHECK_TYPE(type) CHECK(types[index].bounds.Narrows(type));
#endif
-#define CHECK_EXPR(ekind, type) \
- CHECK_LT(index, types.size()); \
- CHECK(strcmp(#ekind, types[index].kind) == 0); \
- CHECK_EQ(depth, types[index].depth); \
- CHECK_TYPE(type); \
+#define CHECK_EXPR(ekind, type) \
+ CHECK_LT(index, types.size()); \
+ CHECK_EQ(strcmp(#ekind, types[index].kind), 0); \
+ CHECK_EQ(depth, types[index].depth); \
+ CHECK_TYPE(type); \
for (int j = (++depth, ++index, 0); j < 1 ? 1 : (--depth, 0); ++j)
#define CHECK_VAR(vname, type) \
diff --git a/deps/v8/test/cctest/gay-fixed.cc b/deps/v8/test/cctest/gay-fixed.cc
index 86ebb24cd8..75c872fd81 100644
--- a/deps/v8/test/cctest/gay-fixed.cc
+++ b/deps/v8/test/cctest/gay-fixed.cc
@@ -27,7 +27,7 @@
// This file contains 100.000 decimal representations of random doubles. They
// have been generated using Gay's dtoa to produce the fixed representation:
-// dtoa(v, 3, number_digits, &decimal_point, &sign, NULL);
+// dtoa(v, 3, number_digits, &decimal_point, &sign, nullptr);
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/gay-precision.cc b/deps/v8/test/cctest/gay-precision.cc
index 68d29f8cd5..0661e92897 100644
--- a/deps/v8/test/cctest/gay-precision.cc
+++ b/deps/v8/test/cctest/gay-precision.cc
@@ -27,7 +27,7 @@
// This file contains 100.000 decimal representations of random doubles. They
// have been generated using Gay's dtoa to produce the precision representation:
-// dtoa(v, 2, number_digits, &decimal_point, &sign, NULL);
+// dtoa(v, 2, number_digits, &decimal_point, &sign, nullptr);
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/gay-shortest.cc b/deps/v8/test/cctest/gay-shortest.cc
index 456055392c..b810fd2468 100644
--- a/deps/v8/test/cctest/gay-shortest.cc
+++ b/deps/v8/test/cctest/gay-shortest.cc
@@ -27,7 +27,7 @@
// This file contains 100.000 decimal representations of random doubles. They
// have been generated using Gay's dtoa to produce the shortest representation:
-// decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
+// decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, nullptr);
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 926a750927..692514d854 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -40,6 +40,8 @@
V(Regress658718) \
V(Regress670675) \
V(Regress5831) \
+ V(Regress777177) \
+ V(Regress779503) \
V(RegressMissingWriteBarrierInAllocate) \
V(WriteBarriersInCopyJSObject)
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 6e92b96da1..30bbde2c76 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -162,7 +162,7 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_COMPLETION, i::StepOrigin::kV8);
+ i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -171,6 +171,7 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
}
void SimulateFullSpace(v8::internal::PagedSpace* space) {
+ CodeSpaceMemoryModificationScope modification_scope(space->heap());
i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 06aec9ac6e..4cf4a44afd 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -84,8 +84,8 @@ AllocationResult HeapTester::AllocateAfterFailures() {
// Test that we can allocate in old pointer space and code space.
heap::SimulateFullSpace(heap->code_space());
heap->AllocateFixedArray(100, TENURED).ToObjectChecked();
- heap->CopyCode(CcTest::i_isolate()->builtins()->builtin(
- Builtins::kIllegal)).ToObjectChecked();
+ Code* illegal = CcTest::i_isolate()->builtins()->builtin(Builtins::kIllegal);
+ heap->CopyCode(illegal, illegal->code_data_container()).ToObjectChecked();
// Return success.
return heap->true_value();
@@ -129,8 +129,7 @@ void TestSetter(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
Handle<AccessorInfo> TestAccessorInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name = isolate->factory()->NewStringFromStaticChars("get");
- return Accessors::MakeAccessor(isolate, name, &TestGetter, &TestSetter,
- attributes);
+ return Accessors::MakeAccessor(isolate, name, &TestGetter, &TestSetter);
}
@@ -140,8 +139,8 @@ TEST(StressJS) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
- Handle<JSFunction> function = factory->NewFunction(
- factory->function_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->function_string());
// Force the creation of an initial map and set the code to
// something empty.
factory->NewJSObject(function);
@@ -229,6 +228,7 @@ TEST(CodeRange) {
// kMaxRegularHeapObjectSize.
size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
+ requested = RoundUp(requested, MemoryAllocator::GetCommitPageSize());
size_t allocated = 0;
// The request size has to be at least 2 code guard pages larger than the
@@ -236,7 +236,7 @@ TEST(CodeRange) {
Address base = code_range.AllocateRawMemory(
requested, requested - (2 * MemoryAllocator::CodePageGuardSize()),
&allocated);
- CHECK(base != NULL);
+ CHECK_NOT_NULL(base);
blocks.emplace_back(base, static_cast<int>(allocated));
current_allocated += static_cast<int>(allocated);
total_allocated += static_cast<int>(allocated);
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index e8c65d1110..06ac948cb9 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -46,9 +46,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
- FLAG_concurrent_sweeping = false;
- FLAG_concurrent_marking = false;
- FLAG_stress_incremental_marking = false;
+ ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -91,9 +89,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
- FLAG_concurrent_sweeping = false;
- FLAG_concurrent_marking = false;
- FLAG_stress_incremental_marking = false;
+ ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
@@ -168,9 +164,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
- FLAG_concurrent_sweeping = false;
- FLAG_concurrent_marking = false;
- FLAG_stress_incremental_marking = false;
+ ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
@@ -258,9 +252,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
- FLAG_concurrent_sweeping = false;
- FLAG_concurrent_marking = false;
- FLAG_stress_incremental_marking = false;
+ ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
@@ -347,7 +339,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
Address broken_address = holder->address() + 2 * kPointerSize + 1;
// Convert it to a vector to create a string from it.
Vector<const uint8_t> string_to_broken_addresss(
- reinterpret_cast<const uint8_t*>(&broken_address), 8);
+ reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);
Handle<String> string;
do {
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index ab2ba1a53b..d70c1e502c 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -200,9 +200,11 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- Code* tmp = nullptr;
- heap->CopyCode(*code).To(&tmp);
- Handle<Code> copy(tmp);
+ Handle<Code> copy;
+ {
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ copy = factory->CopyCode(code);
+ }
CheckEmbeddedObjectsAreEqual(code, copy);
CcTest::CollectAllAvailableGarbage();
@@ -213,7 +215,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
// Test FindCodeObject
#define __ assm.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ nop(); // supported on all architectures
@@ -363,12 +365,15 @@ TEST(GarbageCollection) {
{
HandleScope inner_scope(isolate);
// Allocate a function and keep it in global object's property.
- Handle<JSFunction> function = factory->NewFunction(name);
- JSReceiver::SetProperty(global, name, function, SLOPPY).Check();
+ Handle<JSFunction> function = factory->NewFunctionForTest(name);
+ JSReceiver::SetProperty(global, name, function, LanguageMode::kSloppy)
+ .Check();
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
- JSReceiver::SetProperty(obj, prop_namex, twenty_four, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetProperty(obj, prop_namex, twenty_four, LanguageMode::kSloppy)
+ .Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(obj, prop_name).ToHandleChecked());
@@ -390,8 +395,10 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check();
- JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
+ JSReceiver::SetProperty(global, obj_name, obj, LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, LanguageMode::kSloppy)
+ .Check();
}
// After gc, it should survive.
@@ -749,9 +756,8 @@ TEST(BytecodeArray) {
static const int kFrameSize = 32;
static const int kParameterCount = 2;
- FLAG_concurrent_marking = false;
+ ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
- FLAG_stress_incremental_marking = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
@@ -924,18 +930,21 @@ TEST(FunctionAllocation) {
v8::HandleScope sc(CcTest::isolate());
Handle<String> name = factory->InternalizeUtf8String("theFunction");
- Handle<JSFunction> function = factory->NewFunction(name);
+ Handle<JSFunction> function = factory->NewFunctionForTest(name);
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
Handle<Smi> twenty_four(Smi::FromInt(24), isolate);
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, LanguageMode::kSloppy)
+ .Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(obj, prop_name).ToHandleChecked());
// Check that we can add properties to function objects.
- JSReceiver::SetProperty(function, prop_name, twenty_four, SLOPPY).Check();
+ JSReceiver::SetProperty(function, prop_name, twenty_four,
+ LanguageMode::kSloppy)
+ .Check();
CHECK_EQ(Smi::FromInt(24),
*Object::GetProperty(function, prop_name).ToHandleChecked());
}
@@ -962,50 +971,55 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first
- JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, first, one, LanguageMode::kSloppy).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
// delete first
- CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
+ CHECK(Just(true) ==
+ JSReceiver::DeleteProperty(obj, first, LanguageMode::kSloppy));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first and then second
- JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
- JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, first, one, LanguageMode::kSloppy).Check();
+ JSReceiver::SetProperty(obj, second, two, LanguageMode::kSloppy).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
// delete first and then second
- CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
+ CHECK(Just(true) ==
+ JSReceiver::DeleteProperty(obj, first, LanguageMode::kSloppy));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
- CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY));
+ CHECK(Just(true) ==
+ JSReceiver::DeleteProperty(obj, second, LanguageMode::kSloppy));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
// add first and then second
- JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
- JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, first, one, LanguageMode::kSloppy).Check();
+ JSReceiver::SetProperty(obj, second, two, LanguageMode::kSloppy).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
// delete second and then first
- CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY));
+ CHECK(Just(true) ==
+ JSReceiver::DeleteProperty(obj, second, LanguageMode::kSloppy));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
- CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
+ CHECK(Just(true) ==
+ JSReceiver::DeleteProperty(obj, first, LanguageMode::kSloppy));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
- JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, s1, one, LanguageMode::kSloppy).Check();
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
- JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, s2_string, one, LanguageMode::kSloppy).Check();
Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2));
}
@@ -1018,7 +1032,7 @@ TEST(JSObjectMaps) {
v8::HandleScope sc(CcTest::isolate());
Handle<String> name = factory->InternalizeUtf8String("theFunction");
- Handle<JSFunction> function = factory->NewFunction(name);
+ Handle<JSFunction> function = factory->NewFunctionForTest(name);
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
@@ -1026,7 +1040,8 @@ TEST(JSObjectMaps) {
// Set a propery
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, LanguageMode::kSloppy)
+ .Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(obj, prop_name).ToHandleChecked());
@@ -1060,7 +1075,8 @@ TEST(JSArray) {
CHECK(array->HasSmiOrObjectElements());
// array[length] = name.
- JSReceiver::SetElement(isolate, array, 0, name, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, array, 0, name, LanguageMode::kSloppy)
+ .Check();
CHECK_EQ(Smi::FromInt(1), array->length());
element = i::Object::GetElement(isolate, array, 0).ToHandleChecked();
CHECK_EQ(*element, *name);
@@ -1074,7 +1090,9 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- JSReceiver::SetElement(isolate, array, int_length, name, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, array, int_length, name,
+ LanguageMode::kSloppy)
+ .Check();
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -1102,11 +1120,12 @@ TEST(JSObjectCopy) {
Handle<Smi> one(Smi::FromInt(1), isolate);
Handle<Smi> two(Smi::FromInt(2), isolate);
- JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
- JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, first, one, LanguageMode::kSloppy).Check();
+ JSReceiver::SetProperty(obj, second, two, LanguageMode::kSloppy).Check();
- JSReceiver::SetElement(isolate, obj, 0, first, SLOPPY).Check();
- JSReceiver::SetElement(isolate, obj, 1, second, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, obj, 0, first, LanguageMode::kSloppy).Check();
+ JSReceiver::SetElement(isolate, obj, 1, second, LanguageMode::kSloppy)
+ .Check();
// Make the clone.
Handle<Object> value1, value2;
@@ -1128,11 +1147,13 @@ TEST(JSObjectCopy) {
CHECK_EQ(*value1, *value2);
// Flip the values.
- JSReceiver::SetProperty(clone, first, two, SLOPPY).Check();
- JSReceiver::SetProperty(clone, second, one, SLOPPY).Check();
+ JSReceiver::SetProperty(clone, first, two, LanguageMode::kSloppy).Check();
+ JSReceiver::SetProperty(clone, second, one, LanguageMode::kSloppy).Check();
- JSReceiver::SetElement(isolate, clone, 0, second, SLOPPY).Check();
- JSReceiver::SetElement(isolate, clone, 1, first, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, clone, 0, second, LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetElement(isolate, clone, 1, first, LanguageMode::kSloppy)
+ .Check();
value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked();
value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked();
@@ -1194,7 +1215,8 @@ static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
// Count the number of objects found in the heap.
int found_count = 0;
HeapIterator iterator(heap);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
for (int i = 0; i < size; i++) {
if (*objs[i] == obj) {
found_count++;
@@ -1570,7 +1592,7 @@ TEST(TestAlignmentCalculations) {
int max_double_unaligned_fill = Heap::GetMaximumFillToAlign(kDoubleUnaligned);
CHECK_EQ(maximum_double_misalignment, max_double_unaligned_fill);
- Address base = static_cast<Address>(NULL);
+ Address base = static_cast<Address>(nullptr);
int fill = 0;
// Word alignment never requires fill.
@@ -1598,7 +1620,7 @@ static HeapObject* NewSpaceAllocateAligned(int size,
Heap* heap = CcTest::heap();
AllocationResult allocation =
heap->new_space()->AllocateRawAligned(size, alignment);
- HeapObject* obj = NULL;
+ HeapObject* obj = nullptr;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
@@ -1665,7 +1687,7 @@ static HeapObject* OldSpaceAllocateAligned(int size,
Heap* heap = CcTest::heap();
AllocationResult allocation =
heap->old_space()->AllocateRawAligned(size, alignment);
- HeapObject* obj = NULL;
+ HeapObject* obj = nullptr;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
@@ -1743,8 +1765,7 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
HeapIterator iterator(CcTest::heap());
intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects();
intptr_t size_of_objects_2 = 0;
- for (HeapObject* obj = iterator.next();
- obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
if (!obj->IsFreeSpace()) {
size_of_objects_2 += obj->Size();
@@ -1857,7 +1878,8 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
static int NumberOfGlobalObjects() {
int count = 0;
HeapIterator iterator(CcTest::heap());
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
if (obj->IsJSGlobalObject()) count++;
}
return count;
@@ -2103,7 +2125,7 @@ TEST(InstanceOfStubWriteBarrier) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
}
CHECK(marking->IsMarking());
@@ -2167,28 +2189,12 @@ TEST(IdleNotificationFinishMarking) {
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
- // TODO(hpayer): We cannot write proper unit test right now for heap.
- // The ideal test would call kMaxIdleMarkingDelayCounter to test the
- // marking delay counter.
-
- // Perform a huge incremental marking step but don't complete marking.
do {
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kV8);
- CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
+ StepOrigin::kV8);
} while (
!CcTest::heap()->mark_compact_collector()->marking_worklist()->IsEmpty());
- // The next invocations of incremental marking are not going to complete
- // marking
- // since the completion threshold is not reached
- for (size_t i = 0; i < IncrementalMarking::kMaxIdleMarkingDelayCounter - 2;
- i++) {
- marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kV8);
- CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
- }
-
marking->SetWeakClosureWasOverApproximatedForTesting(true);
// The next idle notification has to finish incremental marking.
@@ -2742,7 +2748,9 @@ static void AddPropertyTo(
FLAG_gc_global = true;
FLAG_retain_maps_for_n_gc = 0;
CcTest::heap()->set_allocation_timeout(gc_count);
- JSReceiver::SetProperty(object, prop_name, twenty_three, SLOPPY).Check();
+ JSReceiver::SetProperty(object, prop_name, twenty_three,
+ LanguageMode::kSloppy)
+ .Check();
}
@@ -2873,16 +2881,16 @@ TEST(ReleaseOverReservedPages) {
// The optimizer can allocate stuff, messing up the test.
FLAG_opt = false;
FLAG_always_opt = false;
- // Parallel compaction increases fragmentation, depending on how existing
- // memory is distributed. Since this is non-deterministic because of
- // concurrent sweeping, we disable it for this test.
- FLAG_parallel_compaction = false;
- FLAG_concurrent_marking = false;
- // Concurrent sweeping adds non determinism, depending on when memory is
- // available for further reuse.
- FLAG_concurrent_sweeping = false;
- // Fast evacuation of pages may result in a different page count in old space.
+ // - Parallel compaction increases fragmentation, depending on how existing
+ // memory is distributed. Since this is non-deterministic because of
+ // concurrent sweeping, we disable it for this test.
+ // - Concurrent sweeping adds non determinism, depending on when memory is
+ // available for further reuse.
+ // - Fast evacuation of pages may result in a different page count in old
+ // space.
+ ManualGCScope manual_gc_scope;
FLAG_page_promotion = false;
+ FLAG_parallel_compaction = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
// If there's snapshot available, we don't know whether 20 small arrays will
@@ -3164,14 +3172,14 @@ class SourceResource : public v8::String::ExternalOneByteStringResource {
virtual void Dispose() {
i::DeleteArray(data_);
- data_ = NULL;
+ data_ = nullptr;
}
const char* data() const { return data_; }
size_t length() const { return length_; }
- bool IsDisposed() { return data_ == NULL; }
+ bool IsDisposed() { return data_ == nullptr; }
private:
const char* data_;
@@ -3329,7 +3337,7 @@ TEST(Regress169928) {
// We need filler the size of AllocationMemento object, plus an extra
// fill pointer value.
- HeapObject* obj = NULL;
+ HeapObject* obj = nullptr;
AllocationResult allocation =
CcTest::heap()->new_space()->AllocateRawUnaligned(
AllocationMemento::kSize + kPointerSize);
@@ -3378,8 +3386,7 @@ TEST(LargeObjectSlotRecording) {
// Start incremental marking to active write barrier.
heap::SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
- 10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ 10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD, StepOrigin::kV8);
// Create references from the large object to the object on the evacuation
// candidate.
@@ -3443,7 +3450,7 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
}
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
CHECK(marking->IsComplete() ||
marking->IsReadyToOverApproximateWeakClosure());
}
@@ -3905,6 +3912,30 @@ TEST(NextCodeLinkIsWeak) {
CHECK_EQ(code_chain_length_before - 1, code_chain_length_after);
}
+TEST(NextCodeLinkInCodeDataContainerIsCleared) {
+ FLAG_always_opt = false;
+ FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_optimizer()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<CodeDataContainer> code_data_container;
+ {
+ HandleScope scope(heap->isolate());
+ Handle<JSFunction> mortal1 =
+ OptimizeDummyFunction(CcTest::isolate(), "mortal1");
+ Handle<JSFunction> mortal2 =
+ OptimizeDummyFunction(CcTest::isolate(), "mortal2");
+ CHECK_EQ(mortal2->code()->next_code_link(), mortal1->code());
+ code_data_container = scope.CloseAndEscape(
+ Handle<CodeDataContainer>(mortal2->code()->code_data_container()));
+ CompileRun("mortal1 = null; mortal2 = null;");
+ }
+ CcTest::CollectAllAvailableGarbage();
+ CHECK(code_data_container->next_code_link()->IsUndefined(isolate));
+}
static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
i::byte buffer[i::Assembler::kMinimalBufferSize];
@@ -4338,7 +4369,7 @@ TEST(WeakCellsWithIncrementalMarking) {
i::GarbageCollectionReason::kTesting);
}
marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
CcTest::CollectGarbage(NEW_SPACE);
CHECK(weak_cell->value()->IsFixedArray());
weak_cells[i] = inner_scope.CloseAndEscape(weak_cell);
@@ -4425,16 +4456,20 @@ static void InterruptCallback357137(v8::Isolate* isolate, void* data) { }
static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, NULL);
+ CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, nullptr);
}
HEAP_TEST(Regress538257) {
- FLAG_concurrent_marking = false;
+ ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
v8::Isolate::CreateParams create_params;
// Set heap limits.
create_params.constraints.set_max_semi_space_size_in_kb(1024);
+#ifdef DEBUG
+ create_params.constraints.set_max_old_space_size(20);
+#else
create_params.constraints.set_max_old_space_size(6);
+#endif
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
@@ -4472,7 +4507,7 @@ TEST(Regress357137) {
v8::String::NewFromUtf8(isolate, "interrupt", v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::FunctionTemplate::New(isolate, RequestInterrupt));
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
CHECK(!context.IsEmpty());
v8::Context::Scope cscope(context);
@@ -4504,7 +4539,7 @@ TEST(Regress507979) {
// way the filler object shares the mark bits with the following live object.
o1->Shrink(kFixedArrayLen - 1);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
// Let's not optimize the loop away.
CHECK_NOT_NULL(obj->address());
}
@@ -4588,7 +4623,7 @@ TEST(Regress3631) {
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
}
// Stash the backing store in a handle.
Handle<Object> save(weak_map->table(), isolate);
@@ -4613,7 +4648,7 @@ TEST(Regress442710) {
Handle<JSArray> array = factory->NewJSArray(2);
Handle<String> name = factory->InternalizeUtf8String("testArray");
- JSReceiver::SetProperty(global, name, array, SLOPPY).Check();
+ JSReceiver::SetProperty(global, name, array, LanguageMode::kSloppy).Check();
CompileRun("testArray[0] = 1; testArray[1] = 2; testArray.shift();");
CcTest::CollectGarbage(OLD_SPACE);
}
@@ -4736,58 +4771,6 @@ TEST(WritableVsImmortalRoots) {
}
}
-
-static void TestRightTrimFixedTypedArray(i::ExternalArrayType type,
- int initial_length,
- int elements_to_trim) {
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- Handle<FixedTypedArrayBase> array =
- factory->NewFixedTypedArray(initial_length, type, true);
- int old_size = array->size();
- heap->RightTrimFixedArray(*array, elements_to_trim);
-
- // Check that free space filler is at the right place and did not smash the
- // array header.
- CHECK(array->IsFixedArrayBase());
- CHECK_EQ(initial_length - elements_to_trim, array->length());
- int new_size = array->size();
- if (new_size != old_size) {
- // Free space filler should be created in this case.
- Address next_obj_address = array->address() + array->size();
- CHECK(HeapObject::FromAddress(next_obj_address)->IsFiller());
- }
- CcTest::CollectAllAvailableGarbage();
-}
-
-
-TEST(Regress472513) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- // The combination of type/initial_length/elements_to_trim triggered
- // typed array header smashing with free space filler (crbug/472513).
-
- // 64-bit cases.
- TestRightTrimFixedTypedArray(i::kExternalUint8Array, 32, 6);
- TestRightTrimFixedTypedArray(i::kExternalUint8Array, 32 - 7, 6);
- TestRightTrimFixedTypedArray(i::kExternalUint16Array, 16, 6);
- TestRightTrimFixedTypedArray(i::kExternalUint16Array, 16 - 3, 6);
- TestRightTrimFixedTypedArray(i::kExternalUint32Array, 8, 6);
- TestRightTrimFixedTypedArray(i::kExternalUint32Array, 8 - 1, 6);
-
- // 32-bit cases.
- TestRightTrimFixedTypedArray(i::kExternalUint8Array, 16, 3);
- TestRightTrimFixedTypedArray(i::kExternalUint8Array, 16 - 3, 3);
- TestRightTrimFixedTypedArray(i::kExternalUint16Array, 8, 3);
- TestRightTrimFixedTypedArray(i::kExternalUint16Array, 8 - 1, 3);
- TestRightTrimFixedTypedArray(i::kExternalUint32Array, 4, 3);
-}
-
-
TEST(WeakFixedArray) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -5001,7 +4984,7 @@ TEST(MessageObjectLeak) {
v8::String::NewFromUtf8(isolate, "check", v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::FunctionTemplate::New(isolate, CheckLeak));
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope cscope(context);
const char* test =
@@ -5056,7 +5039,7 @@ TEST(CanonicalSharedFunctionInfo) {
isolate, CheckEqualSharedFunctionInfos));
global->Set(isolate, "remove",
v8::FunctionTemplate::New(isolate, RemoveCodeAndGC));
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope cscope(context);
CompileRun(
"function f() { return function g() {}; }"
@@ -5086,7 +5069,7 @@ TEST(ScriptIterator) {
int script_count = 0;
{
HeapIterator it(heap);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
if (obj->IsScript()) script_count++;
}
}
@@ -5113,7 +5096,7 @@ TEST(SharedFunctionInfoIterator) {
int sfi_count = 0;
{
HeapIterator it(heap);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
if (!obj->IsSharedFunctionInfo()) continue;
sfi_count++;
}
@@ -5128,8 +5111,7 @@ TEST(SharedFunctionInfoIterator) {
}
HEAP_TEST(Regress587004) {
- FLAG_concurrent_marking = false;
- FLAG_concurrent_sweeping = false;
+ ManualGCScope manual_gc_scope;
#ifdef VERIFY_HEAP
FLAG_verify_heap = false;
#endif
@@ -5171,8 +5153,7 @@ HEAP_TEST(Regress589413) {
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
- FLAG_concurrent_marking = false;
- FLAG_concurrent_sweeping = false;
+ ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5316,7 +5297,7 @@ TEST(Regress598319) {
// only partially marked the large object.
while (!marking->IsComplete()) {
marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
CHECK_NE(page->progress_bar(), arr.get()->Size());
{
@@ -5334,7 +5315,7 @@ TEST(Regress598319) {
// Finish marking with bigger steps to speed up test.
while (!marking->IsComplete()) {
marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5417,7 +5398,7 @@ TEST(Regress615489) {
}
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5448,8 +5429,7 @@ TEST(Regress631969) {
if (!FLAG_incremental_marking) return;
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
- FLAG_concurrent_marking = false;
- FLAG_concurrent_sweeping = false;
+ ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5478,7 +5458,7 @@ TEST(Regress631969) {
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5805,15 +5785,14 @@ HEAP_TEST(Regress670675) {
if (marking->IsStopped()) break;
double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
marking->AdvanceIncrementalMarking(
- deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
DCHECK(marking->IsStopped());
}
namespace {
Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
- Assembler assm(isolate, NULL, 256);
+ Assembler assm(isolate, nullptr, 256);
const int kNumberOfNops = 1 << 10;
for (int i = 0; i < kNumberOfNops; i++) {
@@ -5822,9 +5801,10 @@ Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- const bool kImmovable = true;
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>(), kImmovable);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::STUB, Handle<Code>(), Builtins::kNoBuiltinId,
+ HandlerTable::Empty(isolate), MaybeHandle<ByteArray>(),
+ DeoptimizationData::Empty(isolate), kImmovable);
CHECK(code->IsCode());
return code;
@@ -5974,6 +5954,50 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
}
}
+HEAP_TEST(Regress779503) {
+ // The following regression test ensures that the Scavenger does not allocate
+ // over invalid slots. More specific, the Scavenger should not sweep a page
+ // that it currently processes because it might allocate over the currently
+ // processed slot.
+ const int kArraySize = 2048;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+ heap::SealCurrentObjects(heap);
+ {
+ HandleScope handle_scope(isolate);
+ // The byte array filled with kHeapObjectTag ensures that we cannot read
+ // from the slot again and interpret it as heap value. Doing so will crash.
+ Handle<ByteArray> byte_array = isolate->factory()->NewByteArray(kArraySize);
+ CHECK(heap->InNewSpace(*byte_array));
+ for (int i = 0; i < kArraySize; i++) {
+ byte_array->set(i, kHeapObjectTag);
+ }
+
+ {
+ HandleScope handle_scope(isolate);
+ // The FixedArray in old space serves as space for slots.
+ Handle<FixedArray> fixed_array =
+ isolate->factory()->NewFixedArray(kArraySize, TENURED);
+ CHECK(!heap->InNewSpace(*fixed_array));
+ for (int i = 0; i < kArraySize; i++) {
+ fixed_array->set(i, *byte_array);
+ }
+ }
+ // Delay sweeper tasks to allow the scavenger to sweep the page it is
+ // currently scavenging.
+ CcTest::heap()->delay_sweeper_tasks_for_testing_ = true;
+ CcTest::CollectGarbage(OLD_SPACE);
+ CHECK(heap->InNewSpace(*byte_array));
+ }
+ // Scavenging and sweeping the same page will crash as slots will be
+ // overridden.
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::heap()->delay_sweeper_tasks_for_testing_ = false;
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
+
+#undef __
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 8f704b1a97..e7f3e93160 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -129,8 +129,9 @@ HEAP_TEST(MarkCompactCollector) {
{ HandleScope scope(isolate);
// allocate a garbage
Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
- Handle<JSFunction> function = factory->NewFunction(func_name);
- JSReceiver::SetProperty(global, func_name, function, SLOPPY).Check();
+ Handle<JSFunction> function = factory->NewFunctionForTest(func_name);
+ JSReceiver::SetProperty(global, func_name, function, LanguageMode::kSloppy)
+ .Check();
factory->NewJSObject(function);
}
@@ -147,10 +148,12 @@ HEAP_TEST(MarkCompactCollector) {
Handle<JSObject> obj = factory->NewJSObject(function);
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
- JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check();
+ JSReceiver::SetProperty(global, obj_name, obj, LanguageMode::kSloppy)
+ .Check();
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, LanguageMode::kSloppy)
+ .Check();
}
CcTest::CollectGarbage(OLD_SPACE);
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index c9ad761b35..1464bc4e56 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -191,7 +191,8 @@ UNINITIALIZED_HEAP_TEST(Regress658718) {
heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
heap->new_space()->Shrink();
heap->memory_allocator()->unmapper()->WaitUntilCompleted();
- heap->mark_compact_collector()->sweeper().StartSweeperTasks();
+ heap->delay_sweeper_tasks_for_testing_ = false;
+ heap->mark_compact_collector()->sweeper()->StartSweeperTasks();
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
isolate->Dispose();
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index f5a0083771..d9deb10475 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -80,14 +80,11 @@ class TestCodeRangeScope {
DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};
-namespace test_spaces {
-
static void VerifyMemoryChunk(Isolate* isolate,
Heap* heap,
CodeRange* code_range,
size_t reserve_area_size,
size_t commit_area_size,
- size_t second_commit_area_size,
Executability executable) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
@@ -102,8 +99,8 @@ static void VerifyMemoryChunk(Isolate* isolate,
(executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
- reserve_area_size, commit_area_size, executable, NULL);
- size_t alignment = code_range != NULL && code_range->valid()
+ reserve_area_size, commit_area_size, executable, nullptr);
+ size_t alignment = code_range != nullptr && code_range->valid()
? MemoryChunk::kAlignment
: base::OS::CommitPageSize();
size_t reserved_size =
@@ -119,24 +116,12 @@ static void VerifyMemoryChunk(Isolate* isolate,
memory_chunk->address() + memory_chunk->size());
CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
- Address area_start = memory_chunk->area_start();
-
- memory_chunk->CommitArea(second_commit_area_size);
- CHECK(area_start == memory_chunk->area_start());
- CHECK(memory_chunk->area_start() <
- memory_chunk->address() + memory_chunk->size());
- CHECK(memory_chunk->area_end() <=
- memory_chunk->address() + memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
- second_commit_area_size);
-
memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
}
memory_allocator->TearDown();
delete memory_allocator;
}
-
TEST(Regress3540) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
@@ -172,8 +157,7 @@ TEST(Regress3540) {
delete memory_allocator;
}
-
-static unsigned int Pseudorandom() {
+static unsigned int PseudorandomAreaSize() {
static uint32_t lo = 2345;
lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
return lo & 0xFFFFF;
@@ -185,11 +169,10 @@ TEST(MemoryChunk) {
Heap* heap = isolate->heap();
size_t reserve_area_size = 1 * MB;
- size_t initial_commit_area_size, second_commit_area_size;
+ size_t initial_commit_area_size;
for (int i = 0; i < 100; i++) {
- initial_commit_area_size = Pseudorandom();
- second_commit_area_size = Pseudorandom();
+ initial_commit_area_size = PseudorandomAreaSize();
// With CodeRange.
CodeRange* code_range = new CodeRange(isolate);
@@ -201,7 +184,6 @@ TEST(MemoryChunk) {
code_range,
reserve_area_size,
initial_commit_area_size,
- second_commit_area_size,
EXECUTABLE);
VerifyMemoryChunk(isolate,
@@ -209,7 +191,6 @@ TEST(MemoryChunk) {
code_range,
reserve_area_size,
initial_commit_area_size,
- second_commit_area_size,
NOT_EXECUTABLE);
delete code_range;
@@ -220,7 +201,6 @@ TEST(MemoryChunk) {
code_range,
reserve_area_size,
initial_commit_area_size,
- second_commit_area_size,
EXECUTABLE);
VerifyMemoryChunk(isolate,
@@ -228,7 +208,6 @@ TEST(MemoryChunk) {
code_range,
reserve_area_size,
initial_commit_area_size,
- second_commit_area_size,
NOT_EXECUTABLE);
delete code_range;
}
@@ -318,7 +297,7 @@ TEST(OldSpace) {
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(s != NULL);
+ CHECK_NOT_NULL(s);
CHECK(s->SetUp());
@@ -338,7 +317,7 @@ TEST(LargeObjectSpace) {
v8::V8::Initialize();
LargeObjectSpace* lo = CcTest::heap()->lo_space();
- CHECK(lo != NULL);
+ CHECK_NOT_NULL(lo);
int lo_size = Page::kPageSize;
@@ -433,7 +412,7 @@ TEST(SizeOfInitialHeap) {
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRawUnaligned(size);
CHECK(!allocation.IsRetry());
- HeapObject* filler = NULL;
+ HeapObject* filler = nullptr;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler->address(), size,
ClearRecordedSlots::kNo);
@@ -443,7 +422,7 @@ static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
CHECK(!allocation.IsRetry());
- HeapObject* filler = NULL;
+ HeapObject* filler = nullptr;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler->address(), size,
ClearRecordedSlots::kNo);
@@ -453,7 +432,7 @@ static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
CHECK(!allocation.IsRetry());
- HeapObject* filler = NULL;
+ HeapObject* filler = nullptr;
CHECK(allocation.To(&filler));
return filler;
}
@@ -562,7 +541,6 @@ UNINITIALIZED_TEST(AllocationObserver) {
isolate->Dispose();
}
-
UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -600,6 +578,49 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
isolate->Dispose();
}
+HEAP_TEST(Regress777177) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ PagedSpace* old_space = heap->old_space();
+ Observer observer(128);
+ old_space->AddAllocationObserver(&observer);
+
+ int area_size = old_space->AreaSize();
+ int max_object_size = kMaxRegularHeapObjectSize;
+ int filler_size = area_size - max_object_size;
+
+ {
+ // Ensure a new linear allocation area on a fresh page.
+ AlwaysAllocateScope always_allocate(isolate);
+ heap::SimulateFullSpace(old_space);
+ AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
+ HeapObject* obj = result.ToObjectChecked();
+ heap->CreateFillerObjectAt(obj->address(), filler_size,
+ ClearRecordedSlots::kNo);
+ }
+
+ {
+ // Allocate all bytes of the linear allocation area. This moves top_ and
+ // top_on_previous_step_ to the next page.
+ AllocationResult result =
+ old_space->AllocateRaw(max_object_size, kWordAligned);
+ HeapObject* obj = result.ToObjectChecked();
+ // Simulate allocation folding moving the top pointer back.
+ old_space->SetTopAndLimit(obj->address(), old_space->limit());
+ }
+
+ {
+ // This triggers assert in crbug.com/777177.
+ AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
+ HeapObject* obj = result.ToObjectChecked();
+ heap->CreateFillerObjectAt(obj->address(), filler_size,
+ ClearRecordedSlots::kNo);
+ }
+ old_space->RemoveAllocationObserver(&observer);
+}
+
TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
FLAG_stress_incremental_marking = false;
CcTest::InitializeVM();
@@ -704,7 +725,6 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
CHECK_EQ(0u, shrunk);
}
-} // namespace test_spaces
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index 89f79d3b40..be5b143b5d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -18,6 +18,7 @@ class Isolate;
namespace internal {
+class BytecodeArray;
class SourcePositionTableIterator;
namespace interpreter {
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index e5de344960..56878047c8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -34,17 +34,17 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(1), U8(37),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(2),
B(LdaZero),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(StaKeyedProperty), R(2), R(1), U8(2),
+ /* 54 E> */ B(StaKeyedProperty), R(2), R(1), U8(1),
B(LdaSmi), I8(1),
B(Star), R(1),
B(Ldar), R(0),
- /* 59 E> */ B(AddSmi), I8(1), U8(0),
- B(StaKeyedProperty), R(2), R(1), U8(2),
+ /* 59 E> */ B(AddSmi), I8(1), U8(3),
+ B(StaKeyedProperty), R(2), R(1), U8(1),
B(Ldar), R(2),
/* 65 S> */ B(Return),
]
@@ -63,7 +63,7 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(4),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(4),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -83,29 +83,29 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(7), U8(4),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(4),
B(Star), R(2),
B(LdaZero),
B(Star), R(1),
- B(CreateArrayLiteral), U8(1), U8(0), U8(37),
+ B(CreateArrayLiteral), U8(1), U8(3), U8(37),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
B(Ldar), R(0),
- /* 56 E> */ B(StaKeyedProperty), R(4), R(3), U8(1),
+ /* 56 E> */ B(StaKeyedProperty), R(4), R(3), U8(4),
B(Ldar), R(4),
- B(StaKeyedProperty), R(2), R(1), U8(8),
+ B(StaKeyedProperty), R(2), R(1), U8(1),
B(LdaSmi), I8(1),
B(Star), R(1),
- B(CreateArrayLiteral), U8(2), U8(4), U8(37),
+ B(CreateArrayLiteral), U8(2), U8(6), U8(37),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
B(Ldar), R(0),
- /* 68 E> */ B(AddSmi), I8(2), U8(3),
- B(StaKeyedProperty), R(4), R(3), U8(5),
+ /* 68 E> */ B(AddSmi), I8(2), U8(9),
+ B(StaKeyedProperty), R(4), R(3), U8(7),
B(Ldar), R(4),
- B(StaKeyedProperty), R(2), R(1), U8(8),
+ B(StaKeyedProperty), R(2), R(1), U8(1),
B(Ldar), R(2),
/* 76 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
deleted file mode 100644
index f29f5b36fb..0000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
+++ /dev/null
@@ -1,1050 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: yes
-
----
-snippet: "
- var a;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- return [ 1 , 2 ];
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1033
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 41 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 51 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 61 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 71 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 81 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 91 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 101 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 111 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 121 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 131 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 141 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 151 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 161 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 171 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 181 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 191 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 201 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 211 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 221 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 231 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 241 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 251 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 261 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 271 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 281 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 291 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 301 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 311 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 321 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 331 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 341 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 351 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 361 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 371 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 381 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 391 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 401 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 411 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 421 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 431 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 441 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 451 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 461 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 471 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 481 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 491 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 501 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 511 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 521 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 531 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 541 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 551 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 561 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 571 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 581 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 591 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 601 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 611 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 621 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 631 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 641 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 651 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 661 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 671 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 681 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 691 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 701 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 711 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 721 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 731 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 741 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 751 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 761 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 771 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 781 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 791 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 801 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 811 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 821 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 831 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 841 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 851 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 861 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 871 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 881 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 891 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 901 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 911 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 921 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 931 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 941 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 951 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 961 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 971 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 981 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 991 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1001 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1011 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1021 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1031 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1041 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1051 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1061 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1071 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1081 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1091 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1101 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1111 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1121 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1131 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1141 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1151 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1161 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1171 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1181 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1191 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1201 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1211 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1221 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1231 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1241 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1251 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1261 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1271 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1281 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1291 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1301 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1311 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1321 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1331 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1341 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1351 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1361 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1371 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1381 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1391 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1401 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1411 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1421 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1431 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1441 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1451 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1461 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1471 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1481 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1491 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1501 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1511 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1521 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1531 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1541 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1551 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1561 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1571 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1581 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1591 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1601 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1611 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1621 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1631 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1641 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1651 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1661 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1671 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1681 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 1691 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 1701 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 1711 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 1721 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 1731 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 1741 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 1751 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 1761 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 1771 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 1781 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 1791 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 1801 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 1811 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 1821 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 1831 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 1841 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 1851 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 1861 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 1871 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 1881 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 1891 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 1901 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 1911 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 1921 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 1931 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 1941 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 1951 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 1961 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 1971 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 1981 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 1991 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2001 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2011 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2021 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2031 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2041 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2051 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2061 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2071 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2081 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2091 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2101 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2111 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2121 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2131 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2141 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2151 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2161 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2171 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2181 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2191 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2201 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2211 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2221 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2231 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2241 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2251 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2261 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2271 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2281 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2291 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2301 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2311 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2321 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2331 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2341 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2351 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2361 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2371 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2381 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2391 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2401 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2411 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2421 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2431 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2441 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2451 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2461 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2471 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2481 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2491 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2501 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2511 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2521 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 2531 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 2541 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 2551 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 2561 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 2571 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 2581 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 2591 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateArrayLiteral), U16(256), U16(0), U8(37),
- /* 2618 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- TUPLE2_TYPE,
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index 925795a673..07d2ea75ef 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -102,11 +102,11 @@ bytecodes: [
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(56),
B(Star), R(0),
- /* 59 E> */ B(Sub), R(0), U8(0),
+ /* 59 E> */ B(Sub), R(0), U8(1),
B(Star), R(1),
B(LdaSmi), I8(57),
B(Star), R(0),
- /* 63 E> */ B(Add), R(1), U8(1),
+ /* 63 E> */ B(Add), R(1), U8(0),
B(Star), R(0),
/* 75 S> */ B(Inc), U8(2),
B(Star), R(0),
@@ -203,11 +203,11 @@ bytecodes: [
/* 63 E> */ B(Add), R(2), U8(0),
B(Star), R(2),
B(Ldar), R(0),
- /* 78 E> */ B(AddSmi), I8(1), U8(1),
+ /* 78 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(3),
B(LdaSmi), I8(2),
B(Star), R(1),
- /* 83 E> */ B(Mul), R(3), U8(2),
+ /* 83 E> */ B(Mul), R(3), U8(1),
/* 73 E> */ B(Add), R(2), U8(3),
B(Star), R(2),
B(LdaSmi), I8(3),
@@ -249,7 +249,7 @@ bytecodes: [
/* 55 E> */ B(Add), R(1), U8(0),
B(Star), R(1),
B(Ldar), R(0),
- B(ToNumber), U8(1),
+ B(ToNumeric), U8(1),
B(Star), R(2),
B(Inc), U8(1),
B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 84fdd0f715..312316c9b7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -23,7 +23,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -150,7 +150,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -301,7 +301,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(10),
B(Mov), R(closure), R(11),
@@ -344,10 +344,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 36 E> */ B(TestEqualStrictNoFeedback), R(10),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 31 S> */ B(LdaNamedProperty), R(4), U8(8), U8(7),
+ B(Abort), U8(42),
+ /* 31 S> */ B(LdaNamedProperty), R(4), U8(8), U8(5),
B(Star), R(19),
- B(CallProperty0), R(19), R(4), U8(5),
+ B(CallProperty0), R(19), R(4), U8(7),
B(Star), R(5),
/* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
@@ -422,13 +422,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(18),
+ B(TestEqualStrict), R(6), U8(17),
B(JumpIfFalse), U8(61),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(18),
B(LdaConstant), U8(16),
B(Star), R(19),
@@ -589,7 +589,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(5), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -615,20 +615,20 @@ bytecodes: [
B(Star), R(2),
B(Mov), R(6), R(3),
B(JumpConstant), U8(22),
- /* 49 S> */ B(LdaGlobal), U8(7), U8(2),
+ /* 49 S> */ B(LdaGlobal), U8(7), U8(0),
B(Star), R(12),
- /* 56 E> */ B(CallUndefinedReceiver0), R(12), U8(0),
+ /* 56 E> */ B(CallUndefinedReceiver0), R(12), U8(2),
B(Star), R(10),
- B(LdaNamedProperty), R(10), U8(8), U8(26),
+ B(LdaNamedProperty), R(10), U8(8), U8(4),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
B(Star), R(11),
- B(CallProperty0), R(11), R(10), U8(28),
+ B(CallProperty0), R(11), R(10), U8(6),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(10), U8(9), U8(4),
+ B(LdaNamedProperty), R(10), U8(9), U8(8),
B(Star), R(11),
- B(CallProperty0), R(11), R(10), U8(16),
+ B(CallProperty0), R(11), R(10), U8(10),
B(Star), R(11),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
B(Star), R(8),
@@ -641,14 +641,14 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(1),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(Ldar), R(7),
B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(1),
- B(LdaNamedProperty), R(8), U8(15), U8(8),
+ B(LdaNamedProperty), R(8), U8(15), U8(12),
B(Star), R(12),
- B(CallProperty1), R(12), R(8), R(9), U8(22),
+ B(CallProperty1), R(12), R(8), R(9), U8(14),
B(Jump), U8(118),
- B(LdaNamedProperty), R(8), U8(16), U8(6),
+ B(LdaNamedProperty), R(8), U8(16), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
B(Star), R(12),
@@ -658,17 +658,17 @@ bytecodes: [
B(Star), R(2),
B(Mov), R(9), R(3),
B(JumpConstant), U8(23),
- B(LdaNamedProperty), R(8), U8(17), U8(10),
+ B(LdaNamedProperty), R(8), U8(17), U8(20),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
B(Star), R(12),
- B(CallProperty1), R(12), R(8), R(9), U8(24),
+ B(CallProperty1), R(12), R(8), R(9), U8(22),
B(Jump), U8(76),
- B(LdaNamedProperty), R(8), U8(16), U8(6),
+ B(LdaNamedProperty), R(8), U8(16), U8(24),
B(Star), R(12),
B(JumpIfUndefined), U8(63),
B(JumpIfNull), U8(61),
- B(CallProperty0), R(12), R(8), U8(20),
+ B(CallProperty0), R(12), R(8), U8(26),
B(Star), R(14),
B(Mov), R(0), R(13),
B(CallJSRuntime), U8(%async_generator_await_uncaught), R(13), U8(2),
@@ -712,9 +712,9 @@ bytecodes: [
B(Mov), R(12), R(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(18), U8(12),
+ B(LdaNamedProperty), R(6), U8(18), U8(28),
B(JumpIfToBooleanTrue), U8(47),
- B(LdaNamedProperty), R(6), U8(19), U8(14),
+ B(LdaNamedProperty), R(6), U8(19), U8(30),
B(Star), R(15),
B(LdaFalse),
B(Star), R(16),
@@ -730,7 +730,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(7),
B(JumpLoop), U8(252), I8(0),
- B(LdaNamedProperty), R(6), U8(19), U8(14),
+ B(LdaNamedProperty), R(6), U8(19), U8(32),
B(Star), R(8),
B(LdaSmi), I8(1),
B(TestEqualStrictNoFeedback), R(7),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index dc397b888c..951e4b5408 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -489,15 +489,15 @@ bytecodes: [
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 68 S> */ B(LdaSmi), I8(1),
- /* 74 E> */ B(TestEqual), R(0), U8(1),
+ /* 74 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
/* 80 S> */ B(Jump), U8(21),
/* 89 S> */ B(LdaSmi), I8(2),
- /* 95 E> */ B(TestEqual), R(0), U8(2),
+ /* 95 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(2),
/* 55 S> */ B(Ldar), R(0),
- /* 59 E> */ B(AddSmi), I8(1), U8(0),
+ /* 59 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
@@ -524,15 +524,15 @@ bytecodes: [
B(Star), R(0),
/* 34 E> */ B(StackCheck),
/* 66 S> */ B(LdaSmi), I8(1),
- /* 72 E> */ B(TestEqual), R(0), U8(1),
+ /* 72 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
/* 78 S> */ B(Jump), U8(21),
/* 87 S> */ B(LdaSmi), I8(2),
- /* 93 E> */ B(TestEqual), R(0), U8(2),
+ /* 93 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 99 S> */ B(Jump), U8(2),
/* 53 S> */ B(Ldar), R(0),
- /* 57 E> */ B(AddSmi), I8(1), U8(0),
+ /* 57 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
@@ -565,11 +565,11 @@ bytecodes: [
B(JumpIfFalse), U8(22),
/* 45 E> */ B(StackCheck),
/* 85 S> */ B(Ldar), R(0),
- /* 91 E> */ B(AddSmi), I8(1), U8(2),
+ /* 91 E> */ B(AddSmi), I8(1), U8(1),
B(Star), R(0),
/* 98 S> */ B(Jump), U8(2),
/* 72 S> */ B(Ldar), R(1),
- /* 76 E> */ B(AddSmi), I8(1), U8(1),
+ /* 76 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(1),
B(JumpLoop), U8(24), I8(0),
B(LdaUndefined),
@@ -601,10 +601,10 @@ bytecodes: [
B(JumpIfToBooleanFalse), U8(19),
/* 45 E> */ B(StackCheck),
/* 74 S> */ B(Ldar), R(0),
- /* 80 E> */ B(MulSmi), I8(12), U8(1),
+ /* 80 E> */ B(MulSmi), I8(12), U8(0),
B(Star), R(0),
/* 67 S> */ B(Ldar), R(1),
- B(Dec), U8(0),
+ B(Dec), U8(1),
B(Star), R(1),
B(JumpLoop), U8(18), I8(0),
/* 88 S> */ B(Ldar), R(0),
@@ -660,14 +660,14 @@ bytecodes: [
B(Star), R(1),
/* 45 E> */ B(StackCheck),
/* 76 S> */ B(Ldar), R(0),
- /* 82 E> */ B(AddSmi), I8(1), U8(1),
+ /* 82 E> */ B(AddSmi), I8(1), U8(0),
B(Star), R(0),
/* 89 S> */ B(LdaSmi), I8(20),
- /* 95 E> */ B(TestEqual), R(0), U8(2),
+ /* 95 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 102 S> */ B(Jump), U8(11),
/* 69 S> */ B(Ldar), R(1),
- B(Inc), U8(0),
+ B(Inc), U8(2),
B(Star), R(1),
B(JumpLoop), U8(23), I8(0),
/* 112 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index ee8cace0ee..2ddfd5512d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -62,25 +62,25 @@ bytecodes: [
/* 106 S> */ B(LdaZero),
B(Star), R(2),
/* 111 S> */ B(LdaSmi), I8(3),
- /* 111 E> */ B(TestLessThan), R(2), U8(2),
+ /* 111 E> */ B(TestLessThan), R(2), U8(1),
B(JumpIfFalse), U8(34),
/* 93 E> */ B(StackCheck),
/* 129 S> */ B(Ldar), R(0),
- B(Inc), U8(4),
+ B(Inc), U8(2),
B(Star), R(0),
/* 142 S> */ B(Ldar), R(2),
- /* 148 E> */ B(Add), R(1), U8(5),
+ /* 148 E> */ B(Add), R(1), U8(3),
B(Star), R(3),
B(LdaSmi), I8(12),
- /* 152 E> */ B(TestEqual), R(3), U8(6),
+ /* 152 E> */ B(TestEqual), R(3), U8(4),
B(JumpIfFalse), U8(4),
/* 161 S> */ B(Jump), U8(20),
/* 118 S> */ B(Ldar), R(2),
- B(Inc), U8(3),
+ B(Inc), U8(5),
B(Star), R(2),
B(JumpLoop), U8(36), I8(1),
/* 84 S> */ B(Ldar), R(1),
- B(Inc), U8(1),
+ B(Inc), U8(6),
B(Star), R(1),
B(JumpLoop), U8(56), I8(0),
/* 188 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 1315be378b..9c1b26da83 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -14,13 +14,13 @@ parameter count: 1
bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(1),
- /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(4),
+ /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
B(Star), R(0),
- B(CreateArrayLiteral), U8(2), U8(6), U8(37),
+ B(CreateArrayLiteral), U8(2), U8(4), U8(37),
B(Star), R(2),
- /* 39 E> */ B(CallWithSpread), R(0), R(1), U8(2), U8(0),
+ /* 39 E> */ B(CallWithSpread), R(0), R(1), U8(2), U8(5),
B(LdaUndefined),
/* 58 S> */ B(Return),
]
@@ -41,15 +41,15 @@ parameter count: 1
bytecode array length: 28
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(1),
- /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(4),
+ /* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
B(Star), R(0),
B(LdaZero),
B(Star), R(2),
- B(CreateArrayLiteral), U8(2), U8(6), U8(37),
+ B(CreateArrayLiteral), U8(2), U8(4), U8(37),
B(Star), R(3),
- /* 39 E> */ B(CallWithSpread), R(0), R(1), U8(3), U8(0),
+ /* 39 E> */ B(CallWithSpread), R(0), R(1), U8(3), U8(5),
B(LdaUndefined),
/* 61 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index b313251990..5c5c0ac00c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -17,9 +17,9 @@ parameter count: 1
bytecode array length: 10
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
- /* 39 E> */ B(CallUndefinedReceiver0), R(0), U8(0),
+ /* 39 E> */ B(CallUndefinedReceiver0), R(0), U8(2),
/* 43 S> */ B(Return),
]
constant pool: [
@@ -39,7 +39,7 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 34 E> */ B(StackCheck),
- /* 39 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 39 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(1),
B(Star), R(1),
@@ -47,7 +47,7 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(3),
B(Star), R(3),
- /* 46 E> */ B(CallUndefinedReceiver), R(0), R(1), U8(3), U8(0),
+ /* 46 E> */ B(CallUndefinedReceiver), R(0), R(1), U8(3), U8(2),
/* 57 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index 9bc9e20078..f4a4bc4796 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -24,7 +24,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
/* 36 E> */ B(StaLookupSlot), U8(1), U8(0),
- /* 52 S> */ B(LdaLookupGlobalSlot), U8(2), U8(3), U8(1),
+ /* 52 S> */ B(LdaLookupGlobalSlot), U8(2), U8(1), U8(1),
B(Star), R(2),
B(LdaConstant), U8(3),
B(Star), R(3),
@@ -39,10 +39,10 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 52 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(1),
- /* 62 S> */ B(LdaLookupGlobalSlot), U8(1), U8(7), U8(1),
+ /* 52 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(3),
+ /* 62 S> */ B(LdaLookupGlobalSlot), U8(1), U8(5), U8(1),
B(Star), R(2),
- /* 69 E> */ B(CallUndefinedReceiver0), R(2), U8(5),
+ /* 69 E> */ B(CallUndefinedReceiver0), R(2), U8(7),
/* 73 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index db1c169936..4e7e6d3190 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -17,9 +17,9 @@ parameter count: 1
bytecode array length: 12
bytecodes: [
/* 45 E> */ B(StackCheck),
- /* 50 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 50 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
- /* 57 E> */ B(Construct), R(0), R(0), U8(0), U8(0),
+ /* 57 E> */ B(Construct), R(0), R(0), U8(0), U8(2),
/* 67 S> */ B(Return),
]
constant pool: [
@@ -39,12 +39,12 @@ parameter count: 1
bytecode array length: 18
bytecodes: [
/* 58 E> */ B(StackCheck),
- /* 63 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 63 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 70 E> */ B(Construct), R(0), R(1), U8(1), U8(0),
+ /* 70 E> */ B(Construct), R(0), R(1), U8(1), U8(2),
/* 81 S> */ B(Return),
]
constant pool: [
@@ -69,7 +69,7 @@ parameter count: 1
bytecode array length: 26
bytecodes: [
/* 100 E> */ B(StackCheck),
- /* 105 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 105 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(3),
B(Star), R(1),
@@ -78,7 +78,7 @@ bytecodes: [
B(LdaSmi), I8(5),
B(Star), R(3),
B(Ldar), R(0),
- /* 112 E> */ B(Construct), R(0), R(1), U8(3), U8(0),
+ /* 112 E> */ B(Construct), R(0), R(1), U8(3), U8(2),
/* 129 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index ea44a8a040..f1149ac258 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -27,15 +27,15 @@ bytecodes: [
B(Mov), R(closure), R(0),
/* 99 E> */ B(StackCheck),
/* 104 S> */ B(LdaConstant), U8(0),
- /* 111 E> */ B(LdaKeyedProperty), R(closure), U8(2),
+ /* 111 E> */ B(LdaKeyedProperty), R(closure), U8(1),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
B(Mov), R(this), R(3),
/* 117 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
B(Star), R(1),
- /* 117 E> */ B(CallAnyReceiver), R(1), R(this), U8(1), U8(0),
- /* 126 E> */ B(AddSmi), I8(1), U8(6),
+ /* 117 E> */ B(CallAnyReceiver), R(1), R(this), U8(1), U8(3),
+ /* 126 E> */ B(AddSmi), I8(1), U8(0),
/* 130 S> */ B(Return),
]
constant pool: [
@@ -104,18 +104,18 @@ snippet: "
test = new B().constructor;
})();
"
-frame size: 4
+frame size: 5
parameter count: 1
bytecode array length: 40
bytecodes: [
B(Mov), R(closure), R(1),
/* 113 E> */ B(StackCheck),
/* 118 S> */ B(Ldar), R(1),
- B(GetSuperConstructor), R(2),
+ B(GetSuperConstructor), R(3),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star), R(4),
B(Ldar), R(0),
- /* 118 E> */ B(Construct), R(2), R(3), U8(1), U8(0),
+ /* 118 E> */ B(Construct), R(3), R(4), U8(1), U8(0),
B(Star), R(2),
B(Ldar), R(this),
/* 118 E> */ B(ThrowSuperAlreadyCalledIfNotHole),
@@ -147,16 +147,16 @@ snippet: "
test = new B().constructor;
})();
"
-frame size: 3
+frame size: 4
parameter count: 1
bytecode array length: 36
bytecodes: [
B(Mov), R(closure), R(1),
/* 112 E> */ B(StackCheck),
/* 117 S> */ B(Ldar), R(1),
- B(GetSuperConstructor), R(2),
+ B(GetSuperConstructor), R(3),
B(Ldar), R(0),
- /* 117 E> */ B(Construct), R(2), R(0), U8(0), U8(0),
+ /* 117 E> */ B(Construct), R(3), R(0), U8(0), U8(0),
B(Star), R(2),
B(Ldar), R(this),
/* 117 E> */ B(ThrowSuperAlreadyCalledIfNotHole),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index 4efdd0a1ad..ab03cea491 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -12,40 +12,30 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 67
+bytecode array length: 38
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaSmi), I8(34),
B(Star), R(5),
- B(Wide), B(LdaSmi), I16(148),
+ B(CreateClosure), U8(1), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(6),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(5),
- B(CreateClosure), U8(2), U8(1), U8(2),
- B(Star), R(6),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Ldar), R(6),
- B(StaDataPropertyInLiteral), R(3), R(5), U8(1), U8(2),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(Star), R(0),
- B(Star), R(1),
+ B(Mov), R(4), R(0),
+ B(Mov), R(0), R(1),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["speak"],
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -58,40 +48,30 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 67
+bytecode array length: 38
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaSmi), I8(34),
B(Star), R(5),
- B(Wide), B(LdaSmi), I16(148),
+ B(CreateClosure), U8(1), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(6),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(5),
- B(CreateClosure), U8(2), U8(1), U8(2),
- B(Star), R(6),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Ldar), R(6),
- B(StaDataPropertyInLiteral), R(3), R(5), U8(1), U8(2),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(Star), R(0),
- B(Star), R(1),
+ B(Mov), R(4), R(0),
+ B(Mov), R(0), R(1),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["speak"],
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -106,9 +86,9 @@ snippet: "
static [n1]() { return n1; }
}
"
-frame size: 9
+frame size: 11
parameter count: 1
-bytecode array length: 106
+bytecode array length: 75
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(2),
@@ -117,44 +97,36 @@ bytecodes: [
/* 43 E> */ B(StaCurrentContextSlot), U8(4),
/* 57 S> */ B(LdaConstant), U8(1),
/* 57 E> */ B(StaCurrentContextSlot), U8(5),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
B(LdaTheHole),
- B(Star), R(4),
- B(LdaSmi), I8(62),
B(Star), R(6),
- B(Wide), B(LdaSmi), I16(128),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(2),
B(Star), R(4),
B(LdaImmutableCurrentContextSlot), U8(4),
- /* 75 E> */ B(ToName), R(6),
- B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(7),
- B(LdaSmi), I8(2),
+ /* 75 E> */ B(ToName), R(7),
+ B(CreateClosure), U8(4), U8(1), U8(2),
B(Star), R(8),
- B(Ldar), R(7),
- B(StaDataPropertyInLiteral), R(4), R(6), U8(3), U8(3),
B(LdaImmutableCurrentContextSlot), U8(5),
- /* 106 E> */ B(ToName), R(6),
- B(LdaConstant), U8(4),
- B(TestEqualStrictNoFeedback), R(6),
+ /* 106 E> */ B(ToName), R(9),
+ B(LdaConstant), U8(5),
+ B(TestEqualStrictNoFeedback), R(9),
B(Mov), R(3), R(5),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(CreateClosure), U8(5), U8(2), U8(2),
- B(StaDataPropertyInLiteral), R(5), R(6), U8(3), U8(5),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessorWithCheck), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
- B(Star), R(0),
- B(Star), R(1),
+ B(CreateClosure), U8(6), U8(2), U8(2),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(7),
+ B(Star), R(4),
+ B(Mov), R(3), R(0),
+ B(Mov), R(0), R(1),
B(LdaUndefined),
/* 129 S> */ B(Return),
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
@@ -169,34 +141,32 @@ snippet: "
class C { constructor() { count++; }}
return new C();
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 55
+bytecode array length: 45
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(2),
/* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaZero),
/* 46 E> */ B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(3),
B(LdaTheHole),
- B(Star), R(4),
- B(LdaSmi), I8(49),
B(Star), R(6),
- B(LdaSmi), I8(86),
- B(Star), R(7),
+ B(CreateClosure), U8(1), U8(0), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- /* 94 S> */ B(Construct), R(1), R(0), U8(0), U8(1),
+ B(Mov), R(5), R(0),
+ B(Mov), R(0), R(1),
+ /* 87 S> */ B(Ldar), R(1),
+ /* 94 E> */ B(Construct), R(3), R(0), U8(0), U8(1),
/* 102 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -207,52 +177,41 @@ snippet: "
(class {})
class E { static name () {}}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 92
+bytecode array length: 61
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
+ /* 34 S> */ B(LdaTheHole),
+ B(Star), R(5),
+ B(CreateClosure), U8(1), U8(0), U8(2),
B(Star), R(2),
- B(LdaTheHole),
+ B(LdaConstant), U8(0),
B(Star), R(3),
- B(LdaSmi), I8(35),
- B(Star), R(5),
- B(LdaSmi), I8(43),
- B(Star), R(6),
B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
B(Star), R(3),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(CreateClosure), U8(1), U8(1), U8(2),
- B(Star), R(2),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaSmi), I8(45),
B(Star), R(5),
- B(LdaSmi), I8(73),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CreateClosure), U8(4), U8(2), U8(2),
B(Star), R(6),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CreateClosure), U8(3), U8(2), U8(2),
- B(Star), R(6),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Ldar), R(6),
- B(StaDataPropertyInLiteral), R(4), R(5), U8(1), U8(3),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(Star), R(0),
- B(Star), R(1),
+ B(Mov), R(4), R(0),
+ B(Mov), R(0), R(1),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden
new file mode 100644
index 0000000000..e3a828e1cc
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden
@@ -0,0 +1,337 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+public fields: yes
+
+---
+snippet: "
+ {
+ class A {
+ a;
+ ['b'];
+ static c;
+ static ['d'];
+ }
+
+ class B {
+ a = 1;
+ ['b'] = this.a;
+ static c = 3;
+ static ['d'] = this.c;
+ }
+ new A;
+ new B;
+ }
+"
+frame size: 11
+parameter count: 1
+bytecode array length: 193
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(1),
+ B(Star), R(6),
+ B(LdaConstant), U8(3),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(9),
+ B(LdaConstant), U8(4),
+ B(Star), R(10),
+ B(LdaConstant), U8(5),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(Mov), R(5), R(7),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(10),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
+ B(Star), R(6),
+ B(Mov), R(5), R(1),
+ B(CreateClosure), U8(6), U8(1), U8(2),
+ B(Star), R(7),
+ B(StaNamedProperty), R(5), U8(7), U8(2),
+ B(CreateClosure), U8(8), U8(4), U8(2),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(1), U8(5),
+ B(PopContext), R(4),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(closure),
+ /* 38 E> */ B(CreateBlockContext), U8(9),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(11), U8(7), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(10),
+ B(Star), R(6),
+ B(LdaConstant), U8(3),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(9),
+ B(LdaConstant), U8(4),
+ B(Star), R(10),
+ B(LdaConstant), U8(5),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(Mov), R(5), R(7),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(10),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
+ B(Star), R(6),
+ B(Mov), R(5), R(0),
+ B(CreateClosure), U8(12), U8(8), U8(2),
+ B(Star), R(7),
+ B(StaNamedProperty), R(5), U8(7), U8(9),
+ B(CreateClosure), U8(13), U8(11), U8(2),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(0), U8(12),
+ B(PopContext), R(4),
+ B(Mov), R(0), R(3),
+ /* 197 S> */ B(Ldar), R(2),
+ /* 197 E> */ B(Construct), R(2), R(0), U8(0), U8(14),
+ /* 206 S> */ B(Ldar), R(0),
+ /* 206 E> */ B(Construct), R(0), R(0), U8(0), U8(16),
+ B(LdaUndefined),
+ /* 215 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A extends class {} {
+ a;
+ ['b'];
+ static c;
+ static ['d'];
+ }
+
+ class B extends class {} {
+ a = 1;
+ ['b'] = this.a;
+ static c = 3;
+ static ['d'] = this.c;
+ foo() { return 1; }
+ constructor() {
+ super();
+ }
+ }
+
+ class C extends B {
+ a = 1;
+ ['b'] = this.a;
+ static c = 3;
+ static ['d'] = super.foo();
+ constructor() {
+ (() => super())();
+ }
+ }
+
+ new A;
+ new B;
+ new C;
+ }
+"
+frame size: 15
+parameter count: 1
+bytecode array length: 346
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(14),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
+ B(Star), R(12),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(1),
+ B(Star), R(8),
+ B(LdaConstant), U8(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(LdaConstant), U8(6),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrictNoFeedback), R(12),
+ B(Mov), R(13), R(10),
+ B(Mov), R(7), R(9),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(12),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Star), R(8),
+ B(Mov), R(7), R(2),
+ B(CreateClosure), U8(8), U8(2), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(9), U8(3),
+ B(CreateClosure), U8(10), U8(5), U8(2),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(2), U8(6),
+ B(PopContext), R(6),
+ B(Mov), R(2), R(3),
+ B(Ldar), R(closure),
+ /* 38 E> */ B(CreateBlockContext), U8(11),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(14),
+ B(CreateClosure), U8(14), U8(8), U8(2),
+ B(Star), R(11),
+ B(LdaConstant), U8(13),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
+ B(Star), R(12),
+ B(CreateClosure), U8(15), U8(9), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(12),
+ B(Star), R(8),
+ B(LdaConstant), U8(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(LdaConstant), U8(6),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrictNoFeedback), R(12),
+ B(Mov), R(7), R(9),
+ B(Mov), R(13), R(10),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(12),
+ B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(16), U8(10), U8(2),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(6),
+ B(Star), R(8),
+ B(Mov), R(7), R(1),
+ B(CreateClosure), U8(17), U8(11), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(9), U8(12),
+ B(CreateClosure), U8(18), U8(14), U8(2),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(1), U8(15),
+ B(PopContext), R(6),
+ B(Mov), R(1), R(4),
+ B(Ldar), R(closure),
+ /* 122 E> */ B(CreateBlockContext), U8(19),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ /* 313 E> */ B(CreateClosure), U8(21), U8(17), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(20),
+ B(Star), R(8),
+ B(LdaConstant), U8(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(LdaConstant), U8(6),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrictNoFeedback), R(12),
+ B(Mov), R(1), R(10),
+ B(Mov), R(7), R(9),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(12),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Star), R(8),
+ B(Mov), R(7), R(0),
+ B(CreateClosure), U8(22), U8(18), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(9), U8(19),
+ B(CreateClosure), U8(23), U8(21), U8(2),
+ B(Star), R(11),
+ B(Ldar), R(0),
+ B(StaNamedProperty), R(11), U8(24), U8(22),
+ B(CallProperty0), R(11), R(0), U8(24),
+ B(PopContext), R(6),
+ B(Mov), R(0), R(5),
+ /* 456 S> */ B(Ldar), R(3),
+ /* 456 E> */ B(Construct), R(3), R(0), U8(0), U8(26),
+ /* 465 S> */ B(Ldar), R(4),
+ /* 465 E> */ B(Construct), R(4), R(0), U8(0), U8(28),
+ /* 474 S> */ B(Ldar), R(0),
+ /* 474 E> */ B(Construct), R(0), R(0), U8(0), U8(30),
+ B(LdaUndefined),
+ /* 483 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
index 840735d286..83a267c29f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
@@ -276,7 +276,7 @@ bytecodes: [
B(JumpIfUndefined), U8(12),
/* 64 E> */ B(StackCheck),
/* 92 S> */ B(Ldar), R(1),
- B(Inc), U8(3),
+ B(Inc), U8(0),
B(Star), R(1),
B(JumpLoop), U8(11), I8(0),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index 2cd01010e4..3d262e4b83 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -82,9 +82,9 @@ bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
- /* 41 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 41 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(1),
- /* 64 E> */ B(CallUndefinedReceiver0), R(1), U8(0),
+ /* 64 E> */ B(CallUndefinedReceiver0), R(1), U8(1),
/* 68 S> */ B(LdaCurrentContextSlot), U8(4),
/* 77 S> */ B(Return),
]
@@ -898,9 +898,9 @@ bytecodes: [
/* 3421 E> */ B(StaCurrentContextSlot), U8(254),
/* 3435 S> */ B(LdaZero),
/* 3435 E> */ B(StaCurrentContextSlot), U8(255),
- /* 3438 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 3438 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(2),
- /* 3438 E> */ B(CallUndefinedReceiver0), R(2), U8(0),
+ /* 3438 E> */ B(CallUndefinedReceiver0), R(2), U8(2),
/* 3454 S> */ B(LdaSmi), I8(100),
/* 3454 E> */ B(Wide), B(StaCurrentContextSlot), U16(256),
/* 3459 S> */ B(Wide), B(LdaCurrentContextSlot), U16(256),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 2aefc7a142..50a25c63a5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -36,7 +36,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(ToNumber), U8(0),
+ /* 45 S> */ B(ToNumeric), U8(0),
B(Star), R(1),
B(Inc), U8(0),
B(Star), R(0),
@@ -79,7 +79,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(ToNumber), U8(0),
+ /* 45 S> */ B(ToNumeric), U8(0),
B(Star), R(1),
B(Dec), U8(0),
B(Star), R(0),
@@ -95,18 +95,19 @@ handlers: [
snippet: "
var a = { val: 1 }; return a.val++;
"
-frame size: 3
+frame size: 4
parameter count: 1
-bytecode array length: 26
+bytecode array length: 28
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
B(Mov), R(1), R(0),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
- B(ToNumber), U8(5),
+ B(ToNumeric), U8(3),
B(Star), R(2),
- B(Inc), U8(5),
- /* 66 E> */ B(StaNamedProperty), R(1), U8(1), U8(3),
+ B(Inc), U8(3),
+ B(Star), R(3),
+ /* 66 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
]
@@ -121,16 +122,18 @@ handlers: [
snippet: "
var a = { val: 1 }; return --a.val;
"
-frame size: 2
+frame size: 3
parameter count: 1
-bytecode array length: 20
+bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
B(Mov), R(1), R(0),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
- B(Dec), U8(5),
- /* 65 E> */ B(StaNamedProperty), R(1), U8(1), U8(3),
+ B(Dec), U8(3),
+ B(Star), R(2),
+ /* 65 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ B(Ldar), R(2),
/* 69 S> */ B(Return),
]
constant pool: [
@@ -144,9 +147,9 @@ handlers: [
snippet: "
var name = 'var'; var a = { val: 1 }; return a[name]--;
"
-frame size: 5
+frame size: 6
parameter count: 1
-bytecode array length: 31
+bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
@@ -155,10 +158,11 @@ bytecodes: [
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
/* 81 E> */ B(LdaKeyedProperty), R(2), U8(1),
- B(ToNumber), U8(5),
+ B(ToNumeric), U8(3),
B(Star), R(4),
- B(Dec), U8(5),
- /* 86 E> */ B(StaKeyedProperty), R(2), R(0), U8(3),
+ B(Dec), U8(3),
+ B(Star), R(5),
+ /* 86 E> */ B(StaKeyedProperty), R(2), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
]
@@ -173,9 +177,9 @@ handlers: [
snippet: "
var name = 'var'; var a = { val: 1 }; return ++a[name];
"
-frame size: 3
+frame size: 5
parameter count: 1
-bytecode array length: 25
+bytecode array length: 29
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
@@ -184,8 +188,10 @@ bytecodes: [
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
/* 83 E> */ B(LdaKeyedProperty), R(2), U8(1),
- B(Inc), U8(5),
- /* 87 E> */ B(StaKeyedProperty), R(2), R(0), U8(3),
+ B(Inc), U8(3),
+ B(Star), R(4),
+ /* 87 E> */ B(StaKeyedProperty), R(2), R(0), U8(4),
+ B(Ldar), R(4),
/* 89 S> */ B(Return),
]
constant pool: [
@@ -237,7 +243,7 @@ bytecodes: [
/* 53 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(0),
/* 78 S> */ B(LdaCurrentContextSlot), U8(4),
- B(ToNumber), U8(1),
+ B(ToNumeric), U8(1),
B(Star), R(2),
B(Dec), U8(1),
/* 86 E> */ B(StaCurrentContextSlot), U8(4),
@@ -254,9 +260,9 @@ handlers: [
snippet: "
var idx = 1; var a = [1, 2]; return a[idx++] = 2;
"
-frame size: 4
+frame size: 5
parameter count: 1
-bytecode array length: 28
+bytecode array length: 32
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(1),
@@ -264,12 +270,14 @@ bytecodes: [
/* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(1),
/* 63 S> */ B(Ldar), R(0),
- B(ToNumber), U8(1),
+ B(ToNumeric), U8(1),
B(Star), R(3),
B(Inc), U8(1),
B(Star), R(0),
B(LdaSmi), I8(2),
+ B(Star), R(4),
/* 79 E> */ B(StaKeyedProperty), R(1), R(3), U8(2),
+ B(Ldar), R(4),
/* 83 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index 9f8ca4b36e..1d79f8e7e2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -88,11 +88,11 @@ bytecodes: [
B(Mov), R(arg0), R(1),
B(Mov), R(0), R(2),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(2), U8(0),
+ /* 44 E> */ B(LdaKeyedProperty), R(2), U8(1),
B(Star), R(4),
B(LdaZero),
- /* 59 E> */ B(LdaKeyedProperty), R(3), U8(2),
- /* 48 E> */ B(Add), R(4), U8(4),
+ /* 59 E> */ B(LdaKeyedProperty), R(3), U8(3),
+ /* 48 E> */ B(Add), R(4), U8(0),
/* 63 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index 17952e79e2..eb7b9d1f3c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -22,7 +22,7 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(5),
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
+ /* 34 S> */ B(LdaLookupGlobalSlot), U8(0), U8(0), U8(1),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -37,7 +37,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 41 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(0),
+ /* 41 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 52 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index f02cb544d0..16e36dd707 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -25,7 +25,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(Mov), R(closure), R(12),
@@ -43,16 +43,16 @@ bytecodes: [
B(Mov), R(context), R(19),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(5),
+ B(LdaNamedProperty), R(20), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(1),
+ B(LdaNamedProperty), R(20), U8(5), U8(5),
B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(CallProperty0), R(21), R(20), U8(7),
B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
B(Star), R(4),
@@ -61,10 +61,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(11),
+ B(Abort), U8(42),
+ /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(9),
B(Star), R(20),
- B(CallProperty0), R(20), R(4), U8(9),
+ B(CallProperty0), R(20), R(4), U8(11),
B(Star), R(21),
B(Mov), R(2), R(20),
B(Mov), R(10), R(22),
@@ -137,13 +137,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(188),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(22),
+ B(TestEqualStrict), R(6), U8(21),
B(JumpIfFalse), U8(109),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(19),
B(LdaConstant), U8(13),
B(Star), R(20),
@@ -313,7 +313,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(Mov), R(closure), R(12),
@@ -331,16 +331,16 @@ bytecodes: [
B(Mov), R(context), R(19),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(5),
+ B(LdaNamedProperty), R(20), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(1),
+ B(LdaNamedProperty), R(20), U8(5), U8(5),
B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(CallProperty0), R(21), R(20), U8(7),
B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
B(Star), R(4),
@@ -349,10 +349,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(11),
+ B(Abort), U8(42),
+ /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(9),
B(Star), R(20),
- B(CallProperty0), R(20), R(4), U8(9),
+ B(CallProperty0), R(20), R(4), U8(11),
B(Star), R(21),
B(Mov), R(2), R(20),
B(Mov), R(10), R(22),
@@ -426,13 +426,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(188),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(22),
+ B(TestEqualStrict), R(6), U8(21),
B(JumpIfFalse), U8(109),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(19),
B(LdaConstant), U8(13),
B(Star), R(20),
@@ -617,7 +617,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(Mov), R(closure), R(12),
@@ -635,16 +635,16 @@ bytecodes: [
B(Mov), R(context), R(19),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(5),
+ B(LdaNamedProperty), R(20), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(1),
+ B(LdaNamedProperty), R(20), U8(5), U8(5),
B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(CallProperty0), R(21), R(20), U8(7),
B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
B(Star), R(4),
@@ -653,10 +653,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(11),
+ B(Abort), U8(42),
+ /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(9),
B(Star), R(20),
- B(CallProperty0), R(20), R(4), U8(9),
+ B(CallProperty0), R(20), R(4), U8(11),
B(Star), R(21),
B(Mov), R(2), R(20),
B(Mov), R(10), R(22),
@@ -737,13 +737,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(188),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(24),
+ B(TestEqualStrict), R(6), U8(23),
B(JumpIfFalse), U8(109),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(19),
B(LdaConstant), U8(13),
B(Star), R(20),
@@ -926,9 +926,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 59 S> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
+ /* 59 S> */ B(LdaNamedProperty), R(2), U8(3), U8(6),
B(Star), R(17),
- B(CallProperty0), R(17), R(2), U8(6),
+ B(CallProperty0), R(17), R(2), U8(8),
B(Star), R(3),
/* 59 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
@@ -985,13 +985,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(23),
+ B(TestEqualStrict), R(4), U8(22),
B(JumpIfFalse), U8(61),
B(Ldar), R(6),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(16),
B(LdaConstant), U8(10),
B(Star), R(17),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 55f94ab321..579cf70e3d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -107,23 +107,23 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(1), U8(37),
B(JumpIfUndefined), U8(48),
B(JumpIfNull), U8(46),
B(ToObject), R(3),
B(ForInEnumerate), R(3),
- B(ForInPrepare), R(4), U8(2),
+ B(ForInPrepare), R(4), U8(0),
B(LdaZero),
B(Star), R(7),
/* 54 S> */ B(ForInContinue), R(7), R(6),
B(JumpIfFalse), U8(31),
- B(ForInNext), R(3), R(7), R(4), U8(2),
+ B(ForInNext), R(3), R(7), R(4), U8(0),
B(JumpIfUndefined), U8(17),
B(Star), R(1),
/* 45 E> */ B(StackCheck),
B(Star), R(2),
/* 70 S> */ B(Ldar), R(1),
- /* 75 E> */ B(Add), R(0), U8(1),
+ /* 75 E> */ B(Add), R(0), U8(2),
B(Mov), R(0), R(8),
B(Star), R(0),
/* 72 E> */ B(ForInStep), R(7),
@@ -153,32 +153,32 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
B(Mov), R(1), R(0),
- /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
B(JumpIfUndefined), U8(72),
B(JumpIfNull), U8(70),
B(ToObject), R(1),
B(ForInEnumerate), R(1),
- B(ForInPrepare), R(2), U8(12),
+ B(ForInPrepare), R(2), U8(1),
B(LdaZero),
B(Star), R(5),
/* 68 S> */ B(ForInContinue), R(5), R(4),
B(JumpIfFalse), U8(55),
- B(ForInNext), R(1), R(5), R(2), U8(12),
+ B(ForInNext), R(1), R(5), R(2), U8(1),
B(JumpIfUndefined), U8(41),
B(Star), R(6),
B(Ldar), R(6),
- /* 67 E> */ B(StaNamedProperty), R(0), U8(2), U8(10),
+ /* 67 E> */ B(StaNamedProperty), R(0), U8(2), U8(3),
/* 62 E> */ B(StackCheck),
- /* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
+ /* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
B(Star), R(6),
B(LdaSmi), I8(10),
- /* 106 E> */ B(TestEqual), R(6), U8(6),
+ /* 106 E> */ B(TestEqual), R(6), U8(7),
B(JumpIfFalse), U8(4),
/* 113 S> */ B(Jump), U8(17),
- /* 130 S> */ B(LdaNamedProperty), R(0), U8(2), U8(7),
+ /* 130 S> */ B(LdaNamedProperty), R(0), U8(2), U8(8),
B(Star), R(6),
B(LdaSmi), I8(20),
- /* 136 E> */ B(TestEqual), R(6), U8(9),
+ /* 136 E> */ B(TestEqual), R(6), U8(10),
B(JumpIfFalse), U8(4),
/* 143 S> */ B(Jump), U8(9),
B(ForInStep), R(5),
@@ -207,26 +207,26 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
- /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
B(JumpIfUndefined), U8(51),
B(JumpIfNull), U8(49),
B(ToObject), R(1),
B(ForInEnumerate), R(1),
- B(ForInPrepare), R(2), U8(8),
+ B(ForInPrepare), R(2), U8(1),
B(LdaZero),
B(Star), R(5),
/* 65 S> */ B(ForInContinue), R(5), R(4),
B(JumpIfFalse), U8(34),
- B(ForInNext), R(1), R(5), R(2), U8(8),
+ B(ForInNext), R(1), R(5), R(2), U8(1),
B(JumpIfUndefined), U8(20),
B(Star), R(6),
B(LdaZero),
B(Star), R(8),
B(Ldar), R(6),
- /* 64 E> */ B(StaKeyedProperty), R(0), R(8), U8(6),
+ /* 64 E> */ B(StaKeyedProperty), R(0), R(8), U8(3),
/* 59 E> */ B(StackCheck),
/* 83 S> */ B(LdaSmi), I8(3),
- /* 91 E> */ B(LdaKeyedProperty), R(0), U8(4),
+ /* 91 E> */ B(LdaKeyedProperty), R(0), U8(5),
/* 95 S> */ B(Return),
B(ForInStep), R(5),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 9ef001a264..caf3e26cf7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -26,9 +26,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 43 S> */ B(LdaNamedProperty), R(2), U8(2), U8(7),
+ /* 43 S> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(5),
+ B(CallProperty0), R(12), R(2), U8(7),
B(Star), R(3),
/* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
@@ -80,13 +80,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(18),
+ B(TestEqualStrict), R(4), U8(17),
B(JumpIfFalse), U8(61),
B(Ldar), R(6),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(11),
B(LdaConstant), U8(8),
B(Star), R(12),
@@ -166,9 +166,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(3),
- /* 63 S> */ B(LdaNamedProperty), R(3), U8(2), U8(6),
+ /* 63 S> */ B(LdaNamedProperty), R(3), U8(2), U8(4),
B(Star), R(13),
- B(CallProperty0), R(13), R(3), U8(4),
+ B(CallProperty0), R(13), R(3), U8(6),
B(Star), R(4),
/* 63 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
@@ -221,13 +221,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(17),
+ B(TestEqualStrict), R(5), U8(16),
B(JumpIfFalse), U8(61),
B(Ldar), R(7),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
@@ -312,9 +312,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 43 S> */ B(LdaNamedProperty), R(2), U8(2), U8(7),
+ /* 43 S> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(5),
+ B(CallProperty0), R(12), R(2), U8(7),
B(Star), R(3),
/* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
@@ -374,13 +374,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(20),
+ B(TestEqualStrict), R(4), U8(19),
B(JumpIfFalse), U8(61),
B(Ldar), R(6),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(11),
B(LdaConstant), U8(8),
B(Star), R(12),
@@ -461,9 +461,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(1),
- /* 68 S> */ B(LdaNamedProperty), R(1), U8(3), U8(8),
+ /* 68 S> */ B(LdaNamedProperty), R(1), U8(3), U8(6),
B(Star), R(11),
- B(CallProperty0), R(11), R(1), U8(6),
+ B(CallProperty0), R(11), R(1), U8(8),
B(Star), R(2),
/* 68 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
B(ToBooleanLogicalNot),
@@ -517,13 +517,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(3), U8(23),
+ B(TestEqualStrict), R(3), U8(22),
B(JumpIfFalse), U8(61),
B(Ldar), R(5),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(10),
B(LdaConstant), U8(10),
B(Star), R(11),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index af992d39ec..57b2b27ea1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -29,9 +29,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
- /* 29 S> */ B(LdaNamedProperty), R(4), U8(1), U8(6),
+ /* 29 S> */ B(LdaNamedProperty), R(4), U8(1), U8(4),
B(Star), R(14),
- B(CallProperty0), R(14), R(4), U8(4),
+ B(CallProperty0), R(14), R(4), U8(6),
B(Star), R(5),
/* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
@@ -84,13 +84,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(17),
+ B(TestEqualStrict), R(6), U8(16),
B(JumpIfFalse), U8(61),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(13),
B(LdaConstant), U8(7),
B(Star), R(14),
@@ -185,9 +185,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(1),
- /* 29 S> */ B(LdaNamedProperty), R(1), U8(2), U8(6),
+ /* 29 S> */ B(LdaNamedProperty), R(1), U8(2), U8(4),
B(Star), R(14),
- B(CallProperty0), R(14), R(1), U8(4),
+ B(CallProperty0), R(14), R(1), U8(6),
B(Star), R(2),
/* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
B(ToBooleanLogicalNot),
@@ -208,7 +208,7 @@ bytecodes: [
B(StaCurrentContextSlot), U8(4),
B(Ldar), R(4),
B(StaCurrentContextSlot), U8(4),
- /* 41 S> */ B(LdaLookupGlobalSlot), U8(6), U8(14), U8(1),
+ /* 41 S> */ B(LdaLookupGlobalSlot), U8(6), U8(12), U8(3),
B(Star), R(15),
B(LdaConstant), U8(7),
B(Star), R(16),
@@ -223,7 +223,7 @@ bytecodes: [
B(Mov), R(closure), R(19),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(17), U8(6),
B(Star), R(15),
- /* 41 E> */ B(CallUndefinedReceiver1), R(15), R(16), U8(12),
+ /* 41 E> */ B(CallUndefinedReceiver1), R(15), R(16), U8(14),
B(PopContext), R(14),
B(LdaZero),
B(Star), R(3),
@@ -262,13 +262,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(3), U8(21),
+ B(TestEqualStrict), R(3), U8(20),
B(JumpIfFalse), U8(61),
B(Ldar), R(5),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(13),
B(LdaConstant), U8(11),
B(Star), R(14),
@@ -352,9 +352,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 29 S> */ B(LdaNamedProperty), R(2), U8(1), U8(6),
+ /* 29 S> */ B(LdaNamedProperty), R(2), U8(1), U8(4),
B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(4),
+ B(CallProperty0), R(12), R(2), U8(6),
B(Star), R(3),
/* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
@@ -375,9 +375,9 @@ bytecodes: [
B(StaCurrentContextSlot), U8(4),
B(Ldar), R(5),
B(StaCurrentContextSlot), U8(4),
- /* 41 S> */ B(CreateClosure), U8(5), U8(14), U8(2),
+ /* 41 S> */ B(CreateClosure), U8(5), U8(12), U8(2),
B(Star), R(13),
- /* 67 E> */ B(CallUndefinedReceiver0), R(13), U8(12),
+ /* 67 E> */ B(CallUndefinedReceiver0), R(13), U8(13),
B(PopContext), R(12),
B(LdaZero),
B(Star), R(4),
@@ -416,13 +416,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(20),
+ B(TestEqualStrict), R(4), U8(19),
B(JumpIfFalse), U8(61),
B(Ldar), R(6),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(11),
B(LdaConstant), U8(9),
B(Star), R(12),
@@ -503,9 +503,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(7),
- /* 36 S> */ B(LdaNamedProperty), R(7), U8(1), U8(6),
+ /* 36 S> */ B(LdaNamedProperty), R(7), U8(1), U8(4),
B(Star), R(17),
- B(CallProperty0), R(17), R(7), U8(4),
+ B(CallProperty0), R(17), R(7), U8(6),
B(Star), R(8),
/* 36 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(ToBooleanLogicalNot),
@@ -524,18 +524,18 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(6),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(67),
+ B(LdaSmi), I8(73),
B(Star), R(17),
B(LdaConstant), U8(4),
B(Star), R(18),
B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
/* 31 E> */ B(Throw),
- /* 31 S> */ B(LdaNamedProperty), R(6), U8(4), U8(14),
+ /* 31 S> */ B(LdaNamedProperty), R(6), U8(4), U8(12),
B(Star), R(1),
- /* 34 S> */ B(LdaNamedProperty), R(6), U8(5), U8(16),
+ /* 34 S> */ B(LdaNamedProperty), R(6), U8(5), U8(14),
B(Star), R(2),
/* 56 S> */ B(Ldar), R(2),
- /* 58 E> */ B(Add), R(1), U8(18),
+ /* 58 E> */ B(Add), R(1), U8(16),
B(Star), R(0),
B(LdaZero),
B(Star), R(9),
@@ -547,7 +547,7 @@ bytecodes: [
B(PushContext), R(17),
B(Star), R(16),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(9), U8(19),
+ B(TestEqualStrict), R(9), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(9),
@@ -566,21 +566,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(15),
B(LdaZero),
- B(TestEqualStrict), R(9), U8(20),
+ B(TestEqualStrict), R(9), U8(18),
B(JumpIfTrue), U8(104),
- B(LdaNamedProperty), R(7), U8(8), U8(21),
+ B(LdaNamedProperty), R(7), U8(8), U8(19),
B(Star), R(11),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(9), U8(24),
+ B(TestEqualStrict), R(9), U8(21),
B(JumpIfFalse), U8(61),
B(Ldar), R(11),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(16),
B(LdaConstant), U8(9),
B(Star), R(17),
@@ -656,7 +656,7 @@ bytecodes: [
B(RestoreGeneratorState), R(3),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(CreateFunctionContext), U8(1),
@@ -693,9 +693,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(5),
- /* 30 S> */ B(LdaNamedProperty), R(5), U8(4), U8(6),
+ /* 30 S> */ B(LdaNamedProperty), R(5), U8(4), U8(4),
B(Star), R(17),
- B(CallProperty0), R(17), R(5), U8(4),
+ B(CallProperty0), R(17), R(5), U8(6),
B(Star), R(6),
/* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
@@ -748,13 +748,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(17),
+ B(TestEqualStrict), R(7), U8(16),
B(JumpIfFalse), U8(61),
B(Ldar), R(9),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(16),
B(LdaConstant), U8(10),
B(Star), R(17),
@@ -831,7 +831,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(10),
B(CreateFunctionContext), U8(1),
@@ -873,10 +873,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 35 E> */ B(TestEqualStrictNoFeedback), R(10),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 30 S> */ B(LdaNamedProperty), R(4), U8(6), U8(6),
+ B(Abort), U8(42),
+ /* 30 S> */ B(LdaNamedProperty), R(4), U8(6), U8(4),
B(Star), R(16),
- B(CallProperty0), R(16), R(4), U8(4),
+ B(CallProperty0), R(16), R(4), U8(6),
B(Star), R(5),
/* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
@@ -947,13 +947,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(17),
+ B(TestEqualStrict), R(6), U8(16),
B(JumpIfFalse), U8(61),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(15),
B(LdaConstant), U8(14),
B(Star), R(16),
@@ -1052,9 +1052,9 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(5),
- /* 35 S> */ B(LdaNamedProperty), R(5), U8(1), U8(6),
+ /* 35 S> */ B(LdaNamedProperty), R(5), U8(1), U8(4),
B(Star), R(21),
- B(CallProperty0), R(21), R(5), U8(4),
+ B(CallProperty0), R(21), R(5), U8(6),
B(Star), R(6),
/* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
@@ -1110,13 +1110,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(17),
+ B(TestEqualStrict), R(7), U8(16),
B(JumpIfFalse), U8(61),
B(Ldar), R(9),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(20),
B(LdaConstant), U8(7),
B(Star), R(21),
@@ -1243,7 +1243,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(CreateFunctionContext), U8(1),
@@ -1276,10 +1276,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 40 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 35 S> */ B(LdaNamedProperty), R(4), U8(3), U8(6),
+ B(Abort), U8(42),
+ /* 35 S> */ B(LdaNamedProperty), R(4), U8(3), U8(4),
B(Star), R(21),
- B(CallProperty0), R(21), R(4), U8(4),
+ B(CallProperty0), R(21), R(4), U8(6),
B(Star), R(5),
/* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
@@ -1352,13 +1352,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(17),
+ B(TestEqualStrict), R(6), U8(16),
B(JumpIfFalse), U8(61),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(20),
B(LdaConstant), U8(9),
B(Star), R(21),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index f168f2f0cb..98fa04dad6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -32,9 +32,9 @@ parameter count: 1
bytecode array length: 11
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(0),
- /* 56 E> */ B(CallUndefinedReceiver0), R(0), U8(0),
+ /* 56 E> */ B(CallUndefinedReceiver0), R(0), U8(1),
/* 58 S> */ B(Return),
]
constant pool: [
@@ -52,11 +52,11 @@ parameter count: 1
bytecode array length: 16
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(0),
B(LdaSmi), I8(1),
B(Star), R(1),
- /* 67 E> */ B(CallUndefinedReceiver1), R(0), R(1), U8(0),
+ /* 67 E> */ B(CallUndefinedReceiver1), R(0), R(1), U8(1),
/* 70 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index bf2eb53a4f..8068bc1fe8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -22,7 +22,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -70,7 +70,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -139,7 +139,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(10),
B(Mov), R(closure), R(11),
@@ -177,10 +177,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 30 E> */ B(TestEqualStrictNoFeedback), R(10),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
- /* 25 S> */ B(LdaNamedProperty), R(4), U8(7), U8(7),
+ B(Abort), U8(42),
+ /* 25 S> */ B(LdaNamedProperty), R(4), U8(7), U8(5),
B(Star), R(15),
- B(CallProperty0), R(15), R(4), U8(5),
+ B(CallProperty0), R(15), R(4), U8(7),
B(Star), R(5),
/* 25 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
@@ -251,13 +251,13 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(18),
+ B(TestEqualStrict), R(6), U8(17),
B(JumpIfFalse), U8(61),
B(Ldar), R(8),
- B(TestTypeOf), U8(5),
+ B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(137),
+ B(Wide), B(LdaSmi), I16(143),
B(Star), R(14),
B(LdaConstant), U8(15),
B(Star), R(15),
@@ -342,7 +342,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -363,13 +363,13 @@ bytecodes: [
/* 38 E> */ B(Throw),
B(Ldar), R(2),
/* 54 S> */ B(Return),
- /* 43 S> */ B(LdaGlobal), U8(4), U8(2),
+ /* 43 S> */ B(LdaGlobal), U8(4), U8(0),
B(Star), R(8),
- /* 50 E> */ B(CallUndefinedReceiver0), R(8), U8(0),
+ /* 50 E> */ B(CallUndefinedReceiver0), R(8), U8(2),
B(Star), R(6),
B(LdaNamedProperty), R(6), U8(5), U8(4),
B(Star), R(7),
- B(CallProperty0), R(7), R(6), U8(16),
+ B(CallProperty0), R(7), R(6), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
@@ -382,39 +382,39 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(1),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(Ldar), R(3),
B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(1),
B(LdaNamedProperty), R(4), U8(9), U8(8),
B(Star), R(8),
- B(CallProperty1), R(8), R(4), R(5), U8(22),
+ B(CallProperty1), R(8), R(4), R(5), U8(10),
B(Jump), U8(65),
- B(LdaNamedProperty), R(4), U8(10), U8(6),
+ B(LdaNamedProperty), R(4), U8(10), U8(12),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
B(Star), R(8),
- B(CallProperty1), R(8), R(4), R(5), U8(18),
+ B(CallProperty1), R(8), R(4), R(5), U8(14),
B(Jump), U8(48),
B(Ldar), R(5),
/* 54 S> */ B(Return),
- B(LdaNamedProperty), R(4), U8(11), U8(10),
+ B(LdaNamedProperty), R(4), U8(11), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
B(Star), R(8),
- B(CallProperty1), R(8), R(4), R(5), U8(24),
+ B(CallProperty1), R(8), R(4), R(5), U8(18),
B(Jump), U8(28),
- B(LdaNamedProperty), R(4), U8(10), U8(6),
+ B(LdaNamedProperty), R(4), U8(10), U8(20),
B(Star), R(8),
B(JumpIfUndefined), U8(15),
B(JumpIfNull), U8(13),
- B(CallProperty0), R(8), R(4), U8(20),
+ B(CallProperty0), R(8), R(4), U8(22),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
B(Star), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(12), U8(12),
+ B(LdaNamedProperty), R(2), U8(12), U8(24),
B(JumpIfToBooleanTrue), U8(33),
B(Ldar), R(2),
B(SuspendGenerator), R(0), R(0), U8(8), U8(1),
@@ -427,7 +427,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(3),
B(JumpLoop), U8(139), I8(0),
- B(LdaNamedProperty), R(2), U8(13), U8(14),
+ B(LdaNamedProperty), R(2), U8(13), U8(26),
B(Star), R(4),
B(LdaSmi), I8(1),
B(TestEqualStrictNoFeedback), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index 2e0b987b22..302f883cfb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -18,8 +18,8 @@ bytecode array length: 10
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Inc), U8(4),
- /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(2),
+ B(Inc), U8(2),
+ /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(3),
/* 47 S> */ B(Return),
]
constant pool: [
@@ -40,10 +40,10 @@ bytecode array length: 16
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
- B(ToNumber), U8(4),
+ B(ToNumeric), U8(2),
B(Star), R(0),
- B(Dec), U8(4),
- /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(2),
+ B(Dec), U8(2),
+ /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(3),
B(Ldar), R(0),
/* 47 S> */ B(Return),
]
@@ -65,8 +65,8 @@ bytecode array length: 10
bytecodes: [
/* 27 E> */ B(StackCheck),
/* 46 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Dec), U8(4),
- /* 55 E> */ B(StaGlobalStrict), U8(0), U8(2),
+ B(Dec), U8(2),
+ /* 55 E> */ B(StaGlobalStrict), U8(0), U8(3),
/* 67 S> */ B(Return),
]
constant pool: [
@@ -87,10 +87,10 @@ bytecode array length: 16
bytecodes: [
/* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
- B(ToNumber), U8(4),
+ B(ToNumeric), U8(2),
B(Star), R(0),
- B(Inc), U8(4),
- /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(2),
+ B(Inc), U8(2),
+ /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(3),
B(Ldar), R(0),
/* 53 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
index 5ffe8caa6f..d6b6bb6c60 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
@@ -55,1056 +55,11 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 48 S> */ B(LdaConstant), U8(1),
+ /* 48 S> */ B(LdaConstant), U8(0),
/* 60 S> */ B(Return),
]
constant pool: [
HEAP_NUMBER_TYPE [3.14],
- HEAP_NUMBER_TYPE [3.14],
-]
-handlers: [
-]
-
----
-snippet: "
- var a;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414;
- a = 1.414; a = 3.14;
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1033
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 41 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 52 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 63 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 74 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 85 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 96 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 107 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 118 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 129 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 140 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 151 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 162 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 173 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 184 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 195 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 206 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 217 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 228 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 239 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 250 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 261 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 272 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 283 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 294 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 305 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 316 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 327 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 338 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 349 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 360 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 371 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 382 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 393 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 404 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 415 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 426 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 437 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 448 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 459 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 470 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 481 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 492 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 503 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 514 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 525 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 536 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 547 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 558 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 569 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 580 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 591 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 602 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 613 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 624 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 635 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 646 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 657 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 668 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 679 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 690 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 701 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 712 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 723 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 734 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 745 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 756 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 767 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 778 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 789 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 800 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 811 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 822 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 833 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 844 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 855 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 866 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 877 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 888 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 899 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 910 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 921 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 932 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 943 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 954 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 965 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 976 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 987 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 998 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 1009 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 1020 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 1031 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 1042 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 1053 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 1064 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 1075 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 1086 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1097 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1108 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1119 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1130 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1141 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1152 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1163 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1174 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1185 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1196 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1207 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1218 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1229 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1240 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1251 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1262 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1273 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1284 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1295 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1306 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1317 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1328 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1339 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1350 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1361 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1372 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1383 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1394 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1405 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1416 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1427 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1438 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1449 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1460 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1471 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1482 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1493 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1504 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1515 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1526 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1537 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1548 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1559 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1570 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1581 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1592 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1603 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1614 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1625 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1636 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1647 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1658 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1669 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1680 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1691 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1702 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1713 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1724 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1735 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1746 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1757 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1768 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1779 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1790 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1801 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1812 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1823 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1834 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1845 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 1856 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 1867 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 1878 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 1889 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 1900 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 1911 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 1922 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 1933 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 1944 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 1955 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 1966 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 1977 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 1988 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 1999 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 2010 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 2021 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 2032 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 2043 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 2054 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 2065 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 2076 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 2087 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 2098 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 2109 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 2120 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 2131 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 2142 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 2153 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 2164 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 2175 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 2186 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2197 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2208 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2219 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2230 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2241 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2252 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2263 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2274 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2285 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2296 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2307 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2318 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2329 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2340 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2351 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2362 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2373 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2384 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2395 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2406 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2417 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2428 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2439 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2450 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2461 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2472 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2483 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2494 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2505 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2516 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2527 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2538 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2549 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2560 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2571 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2582 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2593 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2604 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2615 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2626 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2637 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2648 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2659 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2670 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2681 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2692 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2703 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2714 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2725 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2736 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2747 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2758 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2769 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 2780 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 2791 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 2802 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 2813 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 2824 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 2835 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 2846 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 2857 S> */ B(Wide), B(LdaConstant), U16(256),
- B(Star), R(0),
- B(LdaUndefined),
- /* 2867 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [1.414],
- HEAP_NUMBER_TYPE [3.14],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index 82ece6c8b5..e68211a189 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -396,7 +396,7 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 81
+bytecode array length: 82
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
@@ -435,7 +435,7 @@ bytecodes: [
/* 202 S> */ B(LdaSmi), I8(1),
/* 211 S> */ B(Return),
/* 216 S> */ B(Ldar), R(arg1),
- /* 222 E> */ B(TestInstanceOf), R(arg0),
+ /* 222 E> */ B(TestInstanceOf), R(arg0), U8(6),
B(JumpIfFalse), U8(5),
/* 238 S> */ B(LdaSmi), I8(1),
/* 247 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
deleted file mode 100644
index 16e648ec36..0000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
+++ /dev/null
@@ -1,1294 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: yes
-
----
-snippet: "
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.1;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.2;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.3;
- var x = 0.4;
- var x = 0.4;
- var x = 0.4;
- var x = 0.4;
- var x = 0.4;
- var x = 0.4;
- var x = 0.4;
- var x = 0.4;
- for (var i = 0; i < 3; i++) {
- if (i == 1) continue;
- if (i == 2) break;
- }
- return 3;
-"
-frame size: 2
-parameter count: 1
-bytecode array length: 1412
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 55 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 68 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 81 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 94 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 107 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 120 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 133 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 146 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 159 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 172 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 185 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 198 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 211 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 224 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 237 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 250 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 263 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 276 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 289 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 302 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 315 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 328 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 341 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 354 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 367 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 380 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 393 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 406 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 419 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 432 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 445 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 458 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 471 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 484 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 497 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 510 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 523 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 536 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 549 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 562 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 575 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 588 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 601 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 614 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 627 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 640 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 653 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 666 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 679 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 692 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 705 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 718 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 731 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 744 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 757 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 770 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 783 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 796 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 809 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 822 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 835 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 848 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 861 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 874 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 887 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 900 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 913 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 926 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 939 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 952 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 965 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 978 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 991 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 1004 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 1017 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 1030 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 1043 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 1056 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 1069 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 1082 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 1095 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 1108 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 1121 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 1134 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 1147 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 1160 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 1173 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 1186 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 1199 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 1212 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 1225 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 1238 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 1251 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 1264 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 1277 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1290 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1303 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1316 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1329 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1342 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1355 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1368 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1381 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1394 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1407 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1420 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1433 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1446 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1459 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1472 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1485 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1498 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1511 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1524 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1537 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1550 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1563 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1576 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1589 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1602 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1615 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1628 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1641 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1654 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1667 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1680 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1693 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1706 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1719 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1732 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1745 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1758 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1771 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1784 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1797 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1810 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1823 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1836 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1849 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1862 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1875 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1888 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1901 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1914 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1927 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1940 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1953 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1966 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1979 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1992 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 2005 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 2018 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 2031 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 2044 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 2057 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 2070 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 2083 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 2096 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 2109 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 2122 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 2135 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 2148 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 2161 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 2174 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 2187 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 2200 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 2213 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 2226 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 2239 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 2252 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 2265 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 2278 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 2291 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 2304 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 2317 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 2330 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 2343 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 2356 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 2369 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 2382 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 2395 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 2408 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 2421 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 2434 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 2447 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 2460 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 2473 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 2486 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 2499 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 2512 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 2525 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 2538 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 2551 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 2564 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 2577 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2590 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2603 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2616 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2629 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2642 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2655 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2668 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2681 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2694 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2707 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2720 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2733 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2746 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2759 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2772 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2785 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2798 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2811 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2824 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2837 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2850 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2863 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2876 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2889 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2902 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2915 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2928 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2941 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2954 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2967 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2980 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2993 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 3006 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 3019 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 3032 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 3045 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 3058 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 3071 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 3084 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 3097 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 3110 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 3123 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 3136 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 3149 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 3162 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 3175 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 3188 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 3201 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 3214 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 3227 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 3240 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 3253 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 3266 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 3279 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 3292 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 3305 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 3318 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 3331 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 3344 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 3357 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 3370 S> */ B(Wide), B(LdaConstant), U16(256),
- B(Star), R(0),
- /* 3383 S> */ B(Wide), B(LdaConstant), U16(257),
- B(Star), R(0),
- /* 3396 S> */ B(Wide), B(LdaConstant), U16(258),
- B(Star), R(0),
- /* 3409 S> */ B(Wide), B(LdaConstant), U16(259),
- B(Star), R(0),
- /* 3422 S> */ B(Wide), B(LdaConstant), U16(260),
- B(Star), R(0),
- /* 3435 S> */ B(Wide), B(LdaConstant), U16(261),
- B(Star), R(0),
- /* 3448 S> */ B(Wide), B(LdaConstant), U16(262),
- B(Star), R(0),
- /* 3461 S> */ B(Wide), B(LdaConstant), U16(263),
- B(Star), R(0),
- /* 3474 S> */ B(Wide), B(LdaConstant), U16(264),
- B(Star), R(0),
- /* 3487 S> */ B(Wide), B(LdaConstant), U16(265),
- B(Star), R(0),
- /* 3500 S> */ B(Wide), B(LdaConstant), U16(266),
- B(Star), R(0),
- /* 3513 S> */ B(Wide), B(LdaConstant), U16(267),
- B(Star), R(0),
- /* 3526 S> */ B(Wide), B(LdaConstant), U16(268),
- B(Star), R(0),
- /* 3539 S> */ B(Wide), B(LdaConstant), U16(269),
- B(Star), R(0),
- /* 3552 S> */ B(Wide), B(LdaConstant), U16(270),
- B(Star), R(0),
- /* 3565 S> */ B(Wide), B(LdaConstant), U16(271),
- B(Star), R(0),
- /* 3578 S> */ B(Wide), B(LdaConstant), U16(272),
- B(Star), R(0),
- /* 3591 S> */ B(Wide), B(LdaConstant), U16(273),
- B(Star), R(0),
- /* 3604 S> */ B(Wide), B(LdaConstant), U16(274),
- B(Star), R(0),
- /* 3617 S> */ B(Wide), B(LdaConstant), U16(275),
- B(Star), R(0),
- /* 3630 S> */ B(Wide), B(LdaConstant), U16(276),
- B(Star), R(0),
- /* 3643 S> */ B(Wide), B(LdaConstant), U16(277),
- B(Star), R(0),
- /* 3656 S> */ B(Wide), B(LdaConstant), U16(278),
- B(Star), R(0),
- /* 3669 S> */ B(Wide), B(LdaConstant), U16(279),
- B(Star), R(0),
- /* 3682 S> */ B(Wide), B(LdaConstant), U16(280),
- B(Star), R(0),
- /* 3695 S> */ B(Wide), B(LdaConstant), U16(281),
- B(Star), R(0),
- /* 3708 S> */ B(Wide), B(LdaConstant), U16(282),
- B(Star), R(0),
- /* 3721 S> */ B(Wide), B(LdaConstant), U16(283),
- B(Star), R(0),
- /* 3734 S> */ B(Wide), B(LdaConstant), U16(284),
- B(Star), R(0),
- /* 3747 S> */ B(Wide), B(LdaConstant), U16(285),
- B(Star), R(0),
- /* 3760 S> */ B(Wide), B(LdaConstant), U16(286),
- B(Star), R(0),
- /* 3773 S> */ B(Wide), B(LdaConstant), U16(287),
- B(Star), R(0),
- /* 3786 S> */ B(Wide), B(LdaConstant), U16(288),
- B(Star), R(0),
- /* 3799 S> */ B(Wide), B(LdaConstant), U16(289),
- B(Star), R(0),
- /* 3812 S> */ B(Wide), B(LdaConstant), U16(290),
- B(Star), R(0),
- /* 3825 S> */ B(Wide), B(LdaConstant), U16(291),
- B(Star), R(0),
- /* 3838 S> */ B(Wide), B(LdaConstant), U16(292),
- B(Star), R(0),
- /* 3851 S> */ B(Wide), B(LdaConstant), U16(293),
- B(Star), R(0),
- /* 3864 S> */ B(Wide), B(LdaConstant), U16(294),
- B(Star), R(0),
- /* 3877 S> */ B(Wide), B(LdaConstant), U16(295),
- B(Star), R(0),
- /* 3890 S> */ B(Wide), B(LdaConstant), U16(296),
- B(Star), R(0),
- /* 3903 S> */ B(Wide), B(LdaConstant), U16(297),
- B(Star), R(0),
- /* 3916 S> */ B(Wide), B(LdaConstant), U16(298),
- B(Star), R(0),
- /* 3929 S> */ B(Wide), B(LdaConstant), U16(299),
- B(Star), R(0),
- /* 3942 S> */ B(Wide), B(LdaConstant), U16(300),
- B(Star), R(0),
- /* 3955 S> */ B(Wide), B(LdaConstant), U16(301),
- B(Star), R(0),
- /* 3968 S> */ B(Wide), B(LdaConstant), U16(302),
- B(Star), R(0),
- /* 3981 S> */ B(Wide), B(LdaConstant), U16(303),
- B(Star), R(0),
- /* 3994 S> */ B(Wide), B(LdaConstant), U16(304),
- B(Star), R(0),
- /* 4007 S> */ B(Wide), B(LdaConstant), U16(305),
- B(Star), R(0),
- /* 4020 S> */ B(Wide), B(LdaConstant), U16(306),
- B(Star), R(0),
- /* 4033 S> */ B(Wide), B(LdaConstant), U16(307),
- B(Star), R(0),
- /* 4046 S> */ B(Wide), B(LdaConstant), U16(308),
- B(Star), R(0),
- /* 4059 S> */ B(Wide), B(LdaConstant), U16(309),
- B(Star), R(0),
- /* 4072 S> */ B(Wide), B(LdaConstant), U16(310),
- B(Star), R(0),
- /* 4085 S> */ B(Wide), B(LdaConstant), U16(311),
- B(Star), R(0),
- /* 4103 S> */ B(LdaZero),
- B(Star), R(1),
- /* 4108 S> */ B(LdaSmi), I8(3),
- /* 4108 E> */ B(TestLessThan), R(1), U8(0),
- B(Wide), B(JumpIfFalse), U16(39),
- /* 4090 E> */ B(StackCheck),
- /* 4122 S> */ B(LdaSmi), I8(1),
- /* 4128 E> */ B(TestEqual), R(1), U8(2),
- B(Wide), B(JumpIfFalse), U16(7),
- /* 4134 S> */ B(Wide), B(Jump), U16(16),
- /* 4146 S> */ B(LdaSmi), I8(2),
- /* 4152 E> */ B(TestEqual), R(1), U8(3),
- B(Wide), B(JumpIfFalse), U16(7),
- /* 4158 S> */ B(Wide), B(Jump), U16(12),
- /* 4114 S> */ B(Ldar), R(1),
- B(Inc), U8(1),
- B(Star), R(1),
- B(JumpLoop), U8(42), I8(0),
- /* 4167 S> */ B(LdaSmi), I8(3),
- /* 4176 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.1],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.2],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.3],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
- HEAP_NUMBER_TYPE [0.4],
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index 516276856a..e90b425c59 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -23,7 +23,7 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
- /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
+ /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(0), U8(1),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -38,7 +38,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 14 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(0),
+ /* 14 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 35 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
/* 44 S> */ B(Return),
]
@@ -67,7 +67,7 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
- /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
+ /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(0), U8(1),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -82,7 +82,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 14 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(0),
+ /* 14 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(2), U8(4), U8(1),
B(TypeOf),
/* 51 S> */ B(Return),
@@ -114,7 +114,7 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
/* 14 S> */ B(LdaSmi), I8(20),
/* 16 E> */ B(StaLookupSlot), U8(0), U8(0),
- /* 22 S> */ B(LdaLookupGlobalSlot), U8(1), U8(2), U8(1),
+ /* 22 S> */ B(LdaLookupGlobalSlot), U8(1), U8(0), U8(1),
B(Star), R(2),
B(LdaConstant), U8(2),
B(Star), R(3),
@@ -129,7 +129,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 29 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(0),
+ /* 29 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 38 S> */ B(Return),
]
constant pool: [
@@ -162,7 +162,7 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(5),
/* 38 E> */ B(StackCheck),
- /* 44 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
+ /* 44 S> */ B(LdaLookupGlobalSlot), U8(0), U8(0), U8(1),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -177,7 +177,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 44 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(0),
+ /* 44 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 66 S> */ B(LdaLookupContextSlot), U8(2), U8(6), U8(1),
/* 75 S> */ B(Return),
]
@@ -211,7 +211,7 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(5),
/* 34 E> */ B(StackCheck),
- /* 40 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
+ /* 40 S> */ B(LdaLookupGlobalSlot), U8(0), U8(0), U8(1),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -226,7 +226,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(2),
- /* 40 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(0),
+ /* 40 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
/* 71 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
deleted file mode 100644
index 9eaa6ad260..0000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
+++ /dev/null
@@ -1,4218 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: no
-test function name: f
-
----
-snippet: "
- var f;
- var x = 1;
- function f1() {
- eval(\"function t() {\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"return x;\" +
- \"};\" +
- \"f = t; f();\"
- );
- }
- f1();
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1034
-bytecodes: [
- /* 10 E> */ B(StackCheck),
- /* 22 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 34 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 46 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 58 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 70 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 82 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 94 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 106 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 118 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 130 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 142 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 154 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 166 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 178 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 190 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 202 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 214 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 226 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 238 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 250 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 262 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 274 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 286 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 298 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 310 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 322 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 334 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 346 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 358 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 370 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 382 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 394 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 406 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 418 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 430 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 442 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 454 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 466 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 478 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 490 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 502 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 514 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 526 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 538 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 550 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 562 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 574 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 586 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 598 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 610 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 622 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 634 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 646 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 658 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 670 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 682 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 694 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 706 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 718 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 730 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 742 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 754 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 766 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 778 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 790 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 802 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 814 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 826 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 838 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 850 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 862 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 874 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 886 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 898 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 910 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 922 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 934 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 946 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 958 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 970 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 982 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 994 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 1006 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 1018 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 1030 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 1042 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 1054 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 1066 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 1078 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 1090 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 1102 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 1114 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 1126 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 1138 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 1150 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 1162 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1174 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1186 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1198 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1210 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1222 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1234 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1246 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1258 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1270 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1282 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1294 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1306 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1318 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1330 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1342 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1354 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1366 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1378 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1390 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1402 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1414 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1426 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1438 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1450 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1462 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1474 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1486 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1498 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1510 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1522 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1534 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1546 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1558 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1570 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1582 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1594 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1606 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1618 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1630 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1642 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1654 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1666 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1678 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1690 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1702 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1714 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1726 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1738 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1750 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1762 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1774 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1786 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1798 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1810 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1822 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1834 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1846 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1858 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1870 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1882 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1894 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1906 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1918 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1930 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1942 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1954 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1966 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1978 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1990 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 2002 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 2014 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 2026 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 2038 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 2050 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 2062 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 2074 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 2086 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 2098 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 2110 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 2122 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 2134 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 2146 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 2158 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 2170 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 2182 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 2194 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 2206 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 2218 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 2230 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 2242 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 2254 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 2266 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 2278 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 2290 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 2302 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 2314 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 2326 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 2338 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 2350 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 2362 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2374 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2386 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2398 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2410 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2422 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2434 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2446 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2458 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2470 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2482 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2494 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2506 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2518 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2530 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2542 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2554 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2566 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2578 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2590 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2602 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2614 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2626 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2638 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2650 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2662 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2674 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2686 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2698 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2710 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2722 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2734 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2746 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2758 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2770 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2782 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2794 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2806 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2818 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2830 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2842 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2854 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2866 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2878 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2890 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2902 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2914 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2926 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2938 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2950 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2962 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2974 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2986 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2998 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 3010 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 3022 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 3034 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 3046 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 3058 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 3070 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 3082 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlot), U16(256), U16(0), U16(1),
- /* 3095 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
-]
-handlers: [
-]
-
----
-snippet: "
- var f;
- var x = 1;
- function f1() {
- eval(\"function t() {\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"return typeof x;\" +
- \"};\" +
- \"f = t; f();\"
- );
- }
- f1();
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1035
-bytecodes: [
- /* 10 E> */ B(StackCheck),
- /* 22 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 34 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 46 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 58 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 70 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 82 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 94 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 106 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 118 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 130 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 142 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 154 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 166 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 178 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 190 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 202 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 214 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 226 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 238 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 250 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 262 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 274 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 286 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 298 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 310 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 322 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 334 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 346 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 358 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 370 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 382 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 394 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 406 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 418 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 430 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 442 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 454 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 466 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 478 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 490 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 502 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 514 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 526 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 538 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 550 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 562 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 574 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 586 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 598 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 610 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 622 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 634 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 646 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 658 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 670 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 682 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 694 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 706 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 718 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 730 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 742 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 754 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 766 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 778 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 790 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 802 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 814 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 826 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 838 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 850 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 862 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 874 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 886 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 898 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 910 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 922 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 934 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 946 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 958 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 970 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 982 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 994 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 1006 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 1018 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 1030 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 1042 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 1054 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 1066 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 1078 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 1090 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 1102 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 1114 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 1126 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 1138 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 1150 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 1162 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1174 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1186 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1198 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1210 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1222 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1234 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1246 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1258 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1270 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1282 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1294 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1306 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1318 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1330 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1342 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1354 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1366 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1378 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1390 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1402 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1414 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1426 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1438 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1450 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1462 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1474 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1486 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1498 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1510 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1522 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1534 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1546 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1558 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1570 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1582 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1594 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1606 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1618 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1630 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1642 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1654 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1666 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1678 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1690 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1702 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1714 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1726 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1738 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1750 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1762 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1774 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1786 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1798 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1810 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1822 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1834 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1846 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1858 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1870 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1882 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1894 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1906 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1918 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1930 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1942 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1954 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1966 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1978 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1990 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 2002 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 2014 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 2026 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 2038 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 2050 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 2062 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 2074 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 2086 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 2098 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 2110 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 2122 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 2134 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 2146 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 2158 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 2170 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 2182 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 2194 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 2206 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 2218 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 2230 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 2242 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 2254 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 2266 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 2278 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 2290 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 2302 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 2314 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 2326 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 2338 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 2350 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 2362 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2374 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2386 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2398 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2410 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2422 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2434 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2446 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2458 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2470 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2482 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2494 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2506 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2518 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2530 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2542 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2554 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2566 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2578 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2590 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2602 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2614 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2626 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2638 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2650 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2662 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2674 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2686 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2698 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2710 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2722 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2734 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2746 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2758 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2770 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2782 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2794 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2806 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2818 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2830 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2842 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2854 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2866 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2878 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2890 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2902 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2914 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2926 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2938 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2950 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2962 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2974 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2986 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2998 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 3010 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 3022 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 3034 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 3046 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 3058 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 3070 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 3082 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlotInsideTypeof), U16(256), U16(0), U16(1),
- B(TypeOf),
- /* 3102 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
-]
-handlers: [
-]
-
----
-snippet: "
- var f;
- var x = 1;
- function f1() {
- eval(\"function t() {\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"x = 10;\" +
- \"};\" +
- \"f = t; f();\"
- );
- }
- f1();
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1034
-bytecodes: [
- /* 10 E> */ B(StackCheck),
- /* 22 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 34 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 46 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 58 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 70 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 82 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 94 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 106 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 118 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 130 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 142 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 154 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 166 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 178 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 190 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 202 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 214 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 226 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 238 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 250 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 262 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 274 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 286 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 298 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 310 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 322 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 334 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 346 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 358 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 370 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 382 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 394 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 406 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 418 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 430 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 442 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 454 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 466 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 478 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 490 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 502 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 514 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 526 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 538 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 550 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 562 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 574 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 586 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 598 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 610 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 622 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 634 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 646 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 658 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 670 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 682 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 694 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 706 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 718 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 730 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 742 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 754 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 766 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 778 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 790 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 802 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 814 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 826 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 838 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 850 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 862 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 874 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 886 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 898 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 910 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 922 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 934 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 946 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 958 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 970 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 982 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 994 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 1006 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 1018 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 1030 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 1042 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 1054 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 1066 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 1078 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 1090 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 1102 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 1114 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 1126 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 1138 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 1150 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 1162 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1174 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1186 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1198 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1210 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1222 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1234 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1246 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1258 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1270 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1282 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1294 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1306 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1318 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1330 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1342 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1354 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1366 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1378 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1390 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1402 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1414 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1426 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1438 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1450 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1462 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1474 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1486 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1498 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1510 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1522 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1534 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1546 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1558 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1570 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1582 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1594 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1606 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1618 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1630 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1642 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1654 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1666 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1678 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1690 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1702 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1714 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1726 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1738 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1750 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1762 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1774 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1786 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1798 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1810 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1822 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1834 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1846 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1858 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1870 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1882 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1894 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1906 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1918 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1930 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1942 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1954 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1966 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1978 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1990 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 2002 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 2014 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 2026 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 2038 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 2050 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 2062 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 2074 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 2086 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 2098 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 2110 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 2122 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 2134 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 2146 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 2158 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 2170 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 2182 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 2194 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 2206 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 2218 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 2230 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 2242 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 2254 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 2266 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 2278 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 2290 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 2302 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 2314 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 2326 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 2338 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 2350 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 2362 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2374 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2386 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2398 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2410 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2422 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2434 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2446 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2458 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2470 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2482 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2494 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2506 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2518 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2530 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2542 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2554 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2566 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2578 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2590 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2602 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2614 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2626 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2638 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2650 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2662 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2674 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2686 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2698 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2710 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2722 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2734 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2746 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2758 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2770 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2782 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2794 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2806 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2818 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2830 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2842 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2854 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2866 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2878 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2890 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2902 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2914 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2926 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2938 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2950 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2962 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2974 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2986 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2998 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 3010 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 3022 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 3034 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 3046 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 3058 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 3070 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 3082 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 3086 S> */ B(LdaSmi), I8(10),
- /* 3088 E> */ B(Wide), B(StaLookupSlot), U16(256), U8(0),
- B(LdaUndefined),
- /* 3093 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
-]
-handlers: [
-]
-
----
-snippet: "
- var f;
- var x = 1;
- function f1() {
- eval(\"function t() {\" +
- \"'use strict';\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"var y = 2.3;\" +
- \"x = 10;\" +
- \"};\" +
- \"f = t; f();\"
- );
- }
- f1();
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1034
-bytecodes: [
- /* 10 E> */ B(StackCheck),
- /* 35 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 47 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 59 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 71 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 83 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 95 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 107 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 119 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 131 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 143 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 155 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 167 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 179 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 191 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 203 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 215 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 227 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 239 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 251 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 263 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 275 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 287 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 299 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 311 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 323 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 335 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 347 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 359 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 371 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 383 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 395 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 407 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 419 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 431 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 443 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 455 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 467 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 479 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 491 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 503 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 515 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 527 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 539 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 551 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 563 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 575 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 587 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 599 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 611 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 623 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 635 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 647 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 659 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 671 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 683 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 695 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 707 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 719 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 731 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 743 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 755 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 767 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 779 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 791 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 803 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 815 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 827 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 839 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 851 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 863 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 875 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 887 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 899 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 911 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 923 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 935 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 947 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 959 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 971 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 983 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 995 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 1007 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 1019 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 1031 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 1043 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 1055 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 1067 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 1079 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 1091 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 1103 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 1115 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 1127 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 1139 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 1151 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 1163 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 1175 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1187 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1199 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1211 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1223 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1235 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1247 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1259 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1271 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1283 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1295 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1307 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1319 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1331 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1343 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1355 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1367 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1379 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1391 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1403 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1415 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1427 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1439 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1451 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1463 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1475 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1487 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1499 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1511 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1523 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1535 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1547 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1559 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1571 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1583 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1595 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1607 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1619 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1631 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1643 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1655 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1667 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1679 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1691 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1703 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1715 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1727 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1739 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1751 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1763 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1775 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1787 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1799 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1811 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1823 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1835 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1847 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1859 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1871 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1883 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1895 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1907 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1919 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1931 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1943 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1955 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1967 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1979 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1991 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 2003 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 2015 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 2027 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 2039 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 2051 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 2063 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 2075 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 2087 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 2099 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 2111 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 2123 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 2135 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 2147 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 2159 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 2171 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 2183 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 2195 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 2207 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 2219 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 2231 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 2243 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 2255 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 2267 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 2279 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 2291 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 2303 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 2315 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 2327 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 2339 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 2351 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 2363 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 2375 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2387 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2399 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2411 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2423 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2435 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2447 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2459 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2471 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2483 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2495 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2507 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2519 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2531 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2543 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2555 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2567 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2579 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2591 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2603 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2615 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2627 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2639 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2651 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2663 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2675 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2687 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2699 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2711 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2723 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2735 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2747 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2759 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2771 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2783 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2795 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2807 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2819 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2831 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2843 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2855 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2867 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2879 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2891 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2903 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2915 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2927 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2939 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2951 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2963 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2975 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2987 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2999 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 3011 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 3023 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 3035 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 3047 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 3059 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 3071 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 3083 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 3095 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 3099 S> */ B(LdaSmi), I8(10),
- /* 3101 E> */ B(Wide), B(StaLookupSlot), U16(256), U8(1),
- B(LdaUndefined),
- /* 3106 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- HEAP_NUMBER_TYPE [2.3],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 7ee726bb85..3be8bc5158 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -22,7 +22,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -79,7 +79,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -138,7 +138,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -218,7 +218,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -259,7 +259,7 @@ bytecodes: [
/* 34 S> */ B(LdaUndefined),
/* 34 E> */ B(StaCurrentContextSlot), U8(4),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), U8(1),
+ B(ToNumeric), U8(1),
B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
@@ -296,7 +296,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -339,7 +339,7 @@ bytecodes: [
/* 34 S> */ B(LdaUndefined),
/* 34 E> */ B(StaCurrentContextSlot), U8(4),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), U8(1),
+ B(ToNumeric), U8(1),
B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
@@ -376,7 +376,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -419,7 +419,7 @@ bytecodes: [
/* 36 S> */ B(LdaUndefined),
/* 36 E> */ B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), U8(1),
+ B(ToNumeric), U8(1),
B(Star), R(4),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
@@ -454,7 +454,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -505,9 +505,9 @@ handlers: [
snippet: "
export default (class {});
"
-frame size: 8
+frame size: 7
parameter count: 2
-bytecode array length: 140
+bytecode array length: 128
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
@@ -516,7 +516,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -548,19 +548,16 @@ bytecodes: [
/* 26 S> */ B(Return),
B(Ldar), R(3),
B(StaCurrentContextSlot), U8(5),
- B(CreateClosure), U8(4), U8(0), U8(0),
- B(Star), R(3),
B(LdaTheHole),
- B(Star), R(4),
- B(LdaSmi), I8(16),
B(Star), R(6),
- B(LdaSmi), I8(24),
- B(Star), R(7),
+ B(CreateClosure), U8(5), U8(0), U8(0),
+ B(Star), R(3),
+ B(LdaConstant), U8(4),
+ B(Star), R(4),
B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Ldar), R(5),
B(StaModuleVariable), I8(1), U8(0),
B(LdaCurrentContextSlot), U8(5),
/* 26 S> */ B(Return),
@@ -570,6 +567,7 @@ constant pool: [
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -590,7 +588,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -647,7 +645,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -705,7 +703,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -739,15 +737,15 @@ bytecodes: [
/* 45 S> */ B(Return),
/* 27 S> */ B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(4),
- /* 31 E> */ B(LdaNamedProperty), R(4), U8(4), U8(2),
+ /* 31 E> */ B(LdaNamedProperty), R(4), U8(4), U8(0),
B(Star), R(3),
B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(5),
B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(6),
- /* 42 E> */ B(LdaNamedProperty), R(6), U8(5), U8(4),
+ /* 42 E> */ B(LdaNamedProperty), R(6), U8(5), U8(2),
B(Star), R(6),
- /* 31 E> */ B(CallProperty2), R(3), R(4), R(5), R(6), U8(0),
+ /* 31 E> */ B(CallProperty2), R(3), R(4), R(5), R(6), U8(4),
B(StaCurrentContextSlot), U8(6),
B(LdaCurrentContextSlot), U8(6),
/* 45 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 9f701feb05..6a2e1a8c7f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -10,34 +10,31 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(...[1, 2, 3]);
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 57
+bytecode array length: 45
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaSmi), I8(34),
B(Star), R(5),
- B(LdaSmi), I8(88),
- B(Star), R(6),
+ B(CreateClosure), U8(1), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- /* 89 S> */ B(CreateArrayLiteral), U8(1), U8(3), U8(37),
+ B(Mov), R(4), R(0),
+ B(Mov), R(0), R(1),
+ /* 89 S> */ B(CreateArrayLiteral), U8(2), U8(1), U8(37),
B(Star), R(3),
B(Ldar), R(1),
- /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(1), U8(1),
+ /* 89 E> */ B(ConstructWithSpread), R(2), R(3), U8(1), U8(2),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
TUPLE2_TYPE,
]
@@ -49,36 +46,33 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3]);
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 60
+bytecode array length: 48
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaSmi), I8(34),
B(Star), R(5),
- B(LdaSmi), I8(88),
- B(Star), R(6),
+ B(CreateClosure), U8(1), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(Star), R(0),
- B(Star), R(1),
+ B(Mov), R(4), R(0),
+ B(Mov), R(0), R(1),
/* 89 S> */ B(LdaZero),
B(Star), R(3),
- B(CreateArrayLiteral), U8(1), U8(3), U8(37),
+ B(CreateArrayLiteral), U8(2), U8(1), U8(37),
B(Star), R(4),
B(Ldar), R(1),
- /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(2), U8(1),
+ /* 89 E> */ B(ConstructWithSpread), R(2), R(3), U8(2), U8(2),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
TUPLE2_TYPE,
]
@@ -90,42 +84,38 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3], 4);
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 81
+bytecode array length: 66
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaSmi), I8(34),
B(Star), R(5),
- B(LdaSmi), I8(88),
- B(Star), R(6),
+ B(CreateClosure), U8(1), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- /* 89 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ B(Mov), R(4), R(0),
+ B(Mov), R(0), R(1),
+ /* 89 S> */ B(CreateArrayLiteral), U8(2), U8(1), U8(37),
B(Star), R(3),
- B(CreateArrayLiteral), U8(2), U8(2), U8(37),
+ B(CreateArrayLiteral), U8(3), U8(2), U8(37),
B(Star), R(4),
B(CallJSRuntime), U8(%spread_iterable), R(4), U8(1),
B(Star), R(4),
- B(CreateArrayLiteral), U8(3), U8(3), U8(37),
+ B(CreateArrayLiteral), U8(4), U8(3), U8(37),
B(Star), R(5),
B(CallJSRuntime), U8(%spread_arguments), R(3), U8(3),
B(Star), R(3),
- B(Mov), R(1), R(2),
B(CallJSRuntime), U8(%reflect_construct), R(2), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
constant pool: [
+ FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
TUPLE2_TYPE,
TUPLE2_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index 242d988f63..63014f70dc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -75,8 +75,8 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(1), U8(41), R(1),
- /* 69 E> */ B(AddSmi), I8(1), U8(0),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ /* 69 E> */ B(AddSmi), I8(1), U8(1),
B(StaNamedOwnProperty), R(1), U8(1), U8(2),
B(Ldar), R(1),
/* 75 S> */ B(Return),
@@ -97,8 +97,8 @@ parameter count: 1
bytecode array length: 17
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(1), U8(41), R(0),
- B(CreateClosure), U8(1), U8(0), U8(2),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ B(CreateClosure), U8(1), U8(1), U8(2),
B(StaNamedOwnProperty), R(0), U8(2), U8(2),
B(Ldar), R(0),
/* 66 S> */ B(Return),
@@ -120,8 +120,8 @@ parameter count: 1
bytecode array length: 17
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(1), U8(41), R(0),
- B(CreateClosure), U8(1), U8(0), U8(2),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ B(CreateClosure), U8(1), U8(1), U8(2),
B(StaNamedOwnProperty), R(0), U8(2), U8(2),
B(Ldar), R(0),
/* 67 S> */ B(Return),
@@ -143,10 +143,10 @@ parameter count: 1
bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(1), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
- B(CreateClosure), U8(2), U8(0), U8(2),
+ B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(3),
B(LdaNull),
B(Star), R(4),
@@ -174,12 +174,12 @@ parameter count: 1
bytecode array length: 36
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
- B(CreateClosure), U8(2), U8(0), U8(2),
+ B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(3),
- B(CreateClosure), U8(3), U8(1), U8(2),
+ B(CreateClosure), U8(3), U8(2), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
@@ -206,12 +206,12 @@ parameter count: 1
bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(1), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
B(LdaNull),
B(Star), R(3),
- B(CreateClosure), U8(2), U8(0), U8(2),
+ B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
@@ -306,13 +306,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 26
+bytecode array length: 28
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
/* 64 E> */ B(StaNamedOwnProperty), R(1), U8(2), U8(1),
+ B(Ldar), R(0),
/* 68 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(3),
@@ -367,13 +368,13 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(41), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
/* 60 E> */ B(ToName), R(2),
B(LdaConstant), U8(2),
- B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(3),
+ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
B(LdaConstant), U8(3),
B(Star), R(3),
- B(CreateClosure), U8(4), U8(0), U8(2),
+ B(CreateClosure), U8(4), U8(3), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
@@ -381,7 +382,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
B(LdaConstant), U8(3),
B(Star), R(3),
- B(CreateClosure), U8(5), U8(1), U8(2),
+ B(CreateClosure), U8(5), U8(4), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
deleted file mode 100644
index dba4ae1811..0000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
+++ /dev/null
@@ -1,1051 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: yes
-
----
-snippet: "
- var a;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- return { name: 'string', val: 9.2 };
-"
-frame size: 2
-parameter count: 1
-bytecode array length: 1037
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 41 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 51 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 61 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 71 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 81 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 91 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 101 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 111 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 121 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 131 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 141 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 151 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 161 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 171 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 181 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 191 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 201 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 211 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 221 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 231 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 241 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 251 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 261 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 271 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 281 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 291 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 301 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 311 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 321 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 331 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 341 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 351 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 361 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 371 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 381 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 391 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 401 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 411 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 421 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 431 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 441 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 451 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 461 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 471 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 481 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 491 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 501 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 511 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 521 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 531 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 541 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 551 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 561 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 571 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 581 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 591 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 601 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 611 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 621 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 631 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 641 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 651 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 661 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 671 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 681 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 691 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 701 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 711 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 721 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 731 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 741 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 751 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 761 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 771 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 781 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 791 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 801 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 811 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 821 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 831 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 841 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 851 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 861 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 871 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 881 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 891 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 901 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 911 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 921 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 931 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 941 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 951 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 961 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 971 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 981 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 991 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1001 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1011 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1021 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1031 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1041 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1051 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1061 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1071 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1081 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1091 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1101 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1111 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1121 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1131 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1141 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1151 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1161 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1171 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1181 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1191 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1201 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1211 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1221 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1231 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1241 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1251 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1261 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1271 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1281 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1291 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1301 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1311 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1321 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1331 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1341 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1351 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1361 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1371 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1381 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1391 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1401 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1411 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1421 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1431 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1441 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1451 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1461 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1471 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1481 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1491 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1501 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1511 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1521 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1531 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1541 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1551 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1561 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1571 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1581 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1591 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1601 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1611 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1621 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1631 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1641 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1651 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1661 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1671 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1681 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 1691 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 1701 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 1711 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 1721 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 1731 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 1741 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 1751 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 1761 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 1771 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 1781 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 1791 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 1801 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 1811 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 1821 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 1831 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 1841 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 1851 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 1861 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 1871 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 1881 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 1891 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 1901 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 1911 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 1921 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 1931 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 1941 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 1951 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 1961 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 1971 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 1981 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 1991 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2001 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2011 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2021 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2031 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2041 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2051 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2061 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2071 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2081 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2091 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2101 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2111 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2121 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2131 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2141 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2151 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2161 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2171 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2181 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2191 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2201 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2211 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2221 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2231 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2241 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2251 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2261 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2271 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2281 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2291 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2301 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2311 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2321 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2331 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2341 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2351 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2361 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2371 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2381 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2391 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2401 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2411 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2421 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2431 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2441 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2451 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2461 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2471 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2481 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2491 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2501 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2511 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2521 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 2531 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 2541 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 2551 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 2561 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 2571 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 2581 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 2591 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateObjectLiteral), U16(256), U16(0), U8(41), R16(1),
- B(Ldar), R(1),
- /* 2637 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- FIXED_ARRAY_TYPE,
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index 52a2553bb9..c08ca8886a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -16,9 +16,9 @@ parameter count: 2
bytecode array length: 12
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
- /* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(0),
+ /* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(2),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -37,9 +37,9 @@ parameter count: 4
bytecode array length: 14
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 31 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 31 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
- /* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(0),
+ /* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(2),
/* 42 S> */ B(Return),
]
constant pool: [
@@ -58,12 +58,12 @@ parameter count: 3
bytecode array length: 21
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 28 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 28 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
B(Ldar), R(arg1),
- /* 35 E> */ B(Add), R(arg1), U8(4),
+ /* 35 E> */ B(Add), R(arg1), U8(2),
B(Star), R(2),
- /* 28 E> */ B(CallProperty2), R(0), R(arg0), R(2), R(arg1), U8(0),
+ /* 28 E> */ B(CallProperty2), R(0), R(arg0), R(2), R(arg1), U8(3),
/* 43 S> */ B(Return),
]
constant pool: [
@@ -339,9 +339,9 @@ bytecodes: [
/* 1144 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
/* 1153 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1162 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
- /* 1178 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(258),
+ /* 1178 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
B(Star), R(0),
- /* 1178 E> */ B(Wide), B(CallProperty0), R16(0), R16(arg0), U16(256),
+ /* 1178 E> */ B(Wide), B(CallProperty0), R16(0), R16(arg0), U16(258),
/* 1185 S> */ B(Return),
]
constant pool: [
@@ -360,23 +360,23 @@ parameter count: 2
bytecode array length: 51
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ /* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(2),
B(LdaSmi), I8(1),
B(Star), R(4),
- /* 25 E> */ B(CallProperty1), R(2), R(arg0), R(4), U8(4),
+ /* 25 E> */ B(CallProperty1), R(2), R(arg0), R(4), U8(2),
B(Star), R(2),
- /* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(8),
+ /* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(4),
B(Star), R(1),
B(LdaSmi), I8(2),
B(Star), R(3),
- /* 33 E> */ B(CallProperty1), R(1), R(2), R(3), U8(2),
+ /* 33 E> */ B(CallProperty1), R(1), R(2), R(3), U8(6),
B(Star), R(1),
- /* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(10),
+ /* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(8),
B(Star), R(0),
B(LdaSmi), I8(3),
B(Star), R(2),
- /* 41 E> */ B(CallProperty1), R(0), R(1), R(2), U8(0),
+ /* 41 E> */ B(CallProperty1), R(0), R(1), R(2), U8(10),
/* 49 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index c431b0e2e6..69f63eb8b4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -50,13 +50,13 @@ parameter count: 1
bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(2), U8(0),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
B(Star), R(1),
- /* 48 E> */ B(LdaNamedProperty), R(1), U8(1), U8(3),
+ /* 48 E> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
B(Star), R(0),
B(LdaConstant), U8(2),
B(Star), R(2),
- /* 48 E> */ B(CallProperty1), R(0), R(1), R(2), U8(0),
+ /* 48 E> */ B(CallProperty1), R(0), R(1), R(2), U8(3),
/* 61 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
deleted file mode 100644
index 83162ff30f..0000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
+++ /dev/null
@@ -1,1050 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: yes
-
----
-snippet: "
- var a;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- a = 1.23;
- return /ab+d/;
-"
-frame size: 1
-parameter count: 1
-bytecode array length: 1033
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 41 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 51 S> */ B(LdaConstant), U8(1),
- B(Star), R(0),
- /* 61 S> */ B(LdaConstant), U8(2),
- B(Star), R(0),
- /* 71 S> */ B(LdaConstant), U8(3),
- B(Star), R(0),
- /* 81 S> */ B(LdaConstant), U8(4),
- B(Star), R(0),
- /* 91 S> */ B(LdaConstant), U8(5),
- B(Star), R(0),
- /* 101 S> */ B(LdaConstant), U8(6),
- B(Star), R(0),
- /* 111 S> */ B(LdaConstant), U8(7),
- B(Star), R(0),
- /* 121 S> */ B(LdaConstant), U8(8),
- B(Star), R(0),
- /* 131 S> */ B(LdaConstant), U8(9),
- B(Star), R(0),
- /* 141 S> */ B(LdaConstant), U8(10),
- B(Star), R(0),
- /* 151 S> */ B(LdaConstant), U8(11),
- B(Star), R(0),
- /* 161 S> */ B(LdaConstant), U8(12),
- B(Star), R(0),
- /* 171 S> */ B(LdaConstant), U8(13),
- B(Star), R(0),
- /* 181 S> */ B(LdaConstant), U8(14),
- B(Star), R(0),
- /* 191 S> */ B(LdaConstant), U8(15),
- B(Star), R(0),
- /* 201 S> */ B(LdaConstant), U8(16),
- B(Star), R(0),
- /* 211 S> */ B(LdaConstant), U8(17),
- B(Star), R(0),
- /* 221 S> */ B(LdaConstant), U8(18),
- B(Star), R(0),
- /* 231 S> */ B(LdaConstant), U8(19),
- B(Star), R(0),
- /* 241 S> */ B(LdaConstant), U8(20),
- B(Star), R(0),
- /* 251 S> */ B(LdaConstant), U8(21),
- B(Star), R(0),
- /* 261 S> */ B(LdaConstant), U8(22),
- B(Star), R(0),
- /* 271 S> */ B(LdaConstant), U8(23),
- B(Star), R(0),
- /* 281 S> */ B(LdaConstant), U8(24),
- B(Star), R(0),
- /* 291 S> */ B(LdaConstant), U8(25),
- B(Star), R(0),
- /* 301 S> */ B(LdaConstant), U8(26),
- B(Star), R(0),
- /* 311 S> */ B(LdaConstant), U8(27),
- B(Star), R(0),
- /* 321 S> */ B(LdaConstant), U8(28),
- B(Star), R(0),
- /* 331 S> */ B(LdaConstant), U8(29),
- B(Star), R(0),
- /* 341 S> */ B(LdaConstant), U8(30),
- B(Star), R(0),
- /* 351 S> */ B(LdaConstant), U8(31),
- B(Star), R(0),
- /* 361 S> */ B(LdaConstant), U8(32),
- B(Star), R(0),
- /* 371 S> */ B(LdaConstant), U8(33),
- B(Star), R(0),
- /* 381 S> */ B(LdaConstant), U8(34),
- B(Star), R(0),
- /* 391 S> */ B(LdaConstant), U8(35),
- B(Star), R(0),
- /* 401 S> */ B(LdaConstant), U8(36),
- B(Star), R(0),
- /* 411 S> */ B(LdaConstant), U8(37),
- B(Star), R(0),
- /* 421 S> */ B(LdaConstant), U8(38),
- B(Star), R(0),
- /* 431 S> */ B(LdaConstant), U8(39),
- B(Star), R(0),
- /* 441 S> */ B(LdaConstant), U8(40),
- B(Star), R(0),
- /* 451 S> */ B(LdaConstant), U8(41),
- B(Star), R(0),
- /* 461 S> */ B(LdaConstant), U8(42),
- B(Star), R(0),
- /* 471 S> */ B(LdaConstant), U8(43),
- B(Star), R(0),
- /* 481 S> */ B(LdaConstant), U8(44),
- B(Star), R(0),
- /* 491 S> */ B(LdaConstant), U8(45),
- B(Star), R(0),
- /* 501 S> */ B(LdaConstant), U8(46),
- B(Star), R(0),
- /* 511 S> */ B(LdaConstant), U8(47),
- B(Star), R(0),
- /* 521 S> */ B(LdaConstant), U8(48),
- B(Star), R(0),
- /* 531 S> */ B(LdaConstant), U8(49),
- B(Star), R(0),
- /* 541 S> */ B(LdaConstant), U8(50),
- B(Star), R(0),
- /* 551 S> */ B(LdaConstant), U8(51),
- B(Star), R(0),
- /* 561 S> */ B(LdaConstant), U8(52),
- B(Star), R(0),
- /* 571 S> */ B(LdaConstant), U8(53),
- B(Star), R(0),
- /* 581 S> */ B(LdaConstant), U8(54),
- B(Star), R(0),
- /* 591 S> */ B(LdaConstant), U8(55),
- B(Star), R(0),
- /* 601 S> */ B(LdaConstant), U8(56),
- B(Star), R(0),
- /* 611 S> */ B(LdaConstant), U8(57),
- B(Star), R(0),
- /* 621 S> */ B(LdaConstant), U8(58),
- B(Star), R(0),
- /* 631 S> */ B(LdaConstant), U8(59),
- B(Star), R(0),
- /* 641 S> */ B(LdaConstant), U8(60),
- B(Star), R(0),
- /* 651 S> */ B(LdaConstant), U8(61),
- B(Star), R(0),
- /* 661 S> */ B(LdaConstant), U8(62),
- B(Star), R(0),
- /* 671 S> */ B(LdaConstant), U8(63),
- B(Star), R(0),
- /* 681 S> */ B(LdaConstant), U8(64),
- B(Star), R(0),
- /* 691 S> */ B(LdaConstant), U8(65),
- B(Star), R(0),
- /* 701 S> */ B(LdaConstant), U8(66),
- B(Star), R(0),
- /* 711 S> */ B(LdaConstant), U8(67),
- B(Star), R(0),
- /* 721 S> */ B(LdaConstant), U8(68),
- B(Star), R(0),
- /* 731 S> */ B(LdaConstant), U8(69),
- B(Star), R(0),
- /* 741 S> */ B(LdaConstant), U8(70),
- B(Star), R(0),
- /* 751 S> */ B(LdaConstant), U8(71),
- B(Star), R(0),
- /* 761 S> */ B(LdaConstant), U8(72),
- B(Star), R(0),
- /* 771 S> */ B(LdaConstant), U8(73),
- B(Star), R(0),
- /* 781 S> */ B(LdaConstant), U8(74),
- B(Star), R(0),
- /* 791 S> */ B(LdaConstant), U8(75),
- B(Star), R(0),
- /* 801 S> */ B(LdaConstant), U8(76),
- B(Star), R(0),
- /* 811 S> */ B(LdaConstant), U8(77),
- B(Star), R(0),
- /* 821 S> */ B(LdaConstant), U8(78),
- B(Star), R(0),
- /* 831 S> */ B(LdaConstant), U8(79),
- B(Star), R(0),
- /* 841 S> */ B(LdaConstant), U8(80),
- B(Star), R(0),
- /* 851 S> */ B(LdaConstant), U8(81),
- B(Star), R(0),
- /* 861 S> */ B(LdaConstant), U8(82),
- B(Star), R(0),
- /* 871 S> */ B(LdaConstant), U8(83),
- B(Star), R(0),
- /* 881 S> */ B(LdaConstant), U8(84),
- B(Star), R(0),
- /* 891 S> */ B(LdaConstant), U8(85),
- B(Star), R(0),
- /* 901 S> */ B(LdaConstant), U8(86),
- B(Star), R(0),
- /* 911 S> */ B(LdaConstant), U8(87),
- B(Star), R(0),
- /* 921 S> */ B(LdaConstant), U8(88),
- B(Star), R(0),
- /* 931 S> */ B(LdaConstant), U8(89),
- B(Star), R(0),
- /* 941 S> */ B(LdaConstant), U8(90),
- B(Star), R(0),
- /* 951 S> */ B(LdaConstant), U8(91),
- B(Star), R(0),
- /* 961 S> */ B(LdaConstant), U8(92),
- B(Star), R(0),
- /* 971 S> */ B(LdaConstant), U8(93),
- B(Star), R(0),
- /* 981 S> */ B(LdaConstant), U8(94),
- B(Star), R(0),
- /* 991 S> */ B(LdaConstant), U8(95),
- B(Star), R(0),
- /* 1001 S> */ B(LdaConstant), U8(96),
- B(Star), R(0),
- /* 1011 S> */ B(LdaConstant), U8(97),
- B(Star), R(0),
- /* 1021 S> */ B(LdaConstant), U8(98),
- B(Star), R(0),
- /* 1031 S> */ B(LdaConstant), U8(99),
- B(Star), R(0),
- /* 1041 S> */ B(LdaConstant), U8(100),
- B(Star), R(0),
- /* 1051 S> */ B(LdaConstant), U8(101),
- B(Star), R(0),
- /* 1061 S> */ B(LdaConstant), U8(102),
- B(Star), R(0),
- /* 1071 S> */ B(LdaConstant), U8(103),
- B(Star), R(0),
- /* 1081 S> */ B(LdaConstant), U8(104),
- B(Star), R(0),
- /* 1091 S> */ B(LdaConstant), U8(105),
- B(Star), R(0),
- /* 1101 S> */ B(LdaConstant), U8(106),
- B(Star), R(0),
- /* 1111 S> */ B(LdaConstant), U8(107),
- B(Star), R(0),
- /* 1121 S> */ B(LdaConstant), U8(108),
- B(Star), R(0),
- /* 1131 S> */ B(LdaConstant), U8(109),
- B(Star), R(0),
- /* 1141 S> */ B(LdaConstant), U8(110),
- B(Star), R(0),
- /* 1151 S> */ B(LdaConstant), U8(111),
- B(Star), R(0),
- /* 1161 S> */ B(LdaConstant), U8(112),
- B(Star), R(0),
- /* 1171 S> */ B(LdaConstant), U8(113),
- B(Star), R(0),
- /* 1181 S> */ B(LdaConstant), U8(114),
- B(Star), R(0),
- /* 1191 S> */ B(LdaConstant), U8(115),
- B(Star), R(0),
- /* 1201 S> */ B(LdaConstant), U8(116),
- B(Star), R(0),
- /* 1211 S> */ B(LdaConstant), U8(117),
- B(Star), R(0),
- /* 1221 S> */ B(LdaConstant), U8(118),
- B(Star), R(0),
- /* 1231 S> */ B(LdaConstant), U8(119),
- B(Star), R(0),
- /* 1241 S> */ B(LdaConstant), U8(120),
- B(Star), R(0),
- /* 1251 S> */ B(LdaConstant), U8(121),
- B(Star), R(0),
- /* 1261 S> */ B(LdaConstant), U8(122),
- B(Star), R(0),
- /* 1271 S> */ B(LdaConstant), U8(123),
- B(Star), R(0),
- /* 1281 S> */ B(LdaConstant), U8(124),
- B(Star), R(0),
- /* 1291 S> */ B(LdaConstant), U8(125),
- B(Star), R(0),
- /* 1301 S> */ B(LdaConstant), U8(126),
- B(Star), R(0),
- /* 1311 S> */ B(LdaConstant), U8(127),
- B(Star), R(0),
- /* 1321 S> */ B(LdaConstant), U8(128),
- B(Star), R(0),
- /* 1331 S> */ B(LdaConstant), U8(129),
- B(Star), R(0),
- /* 1341 S> */ B(LdaConstant), U8(130),
- B(Star), R(0),
- /* 1351 S> */ B(LdaConstant), U8(131),
- B(Star), R(0),
- /* 1361 S> */ B(LdaConstant), U8(132),
- B(Star), R(0),
- /* 1371 S> */ B(LdaConstant), U8(133),
- B(Star), R(0),
- /* 1381 S> */ B(LdaConstant), U8(134),
- B(Star), R(0),
- /* 1391 S> */ B(LdaConstant), U8(135),
- B(Star), R(0),
- /* 1401 S> */ B(LdaConstant), U8(136),
- B(Star), R(0),
- /* 1411 S> */ B(LdaConstant), U8(137),
- B(Star), R(0),
- /* 1421 S> */ B(LdaConstant), U8(138),
- B(Star), R(0),
- /* 1431 S> */ B(LdaConstant), U8(139),
- B(Star), R(0),
- /* 1441 S> */ B(LdaConstant), U8(140),
- B(Star), R(0),
- /* 1451 S> */ B(LdaConstant), U8(141),
- B(Star), R(0),
- /* 1461 S> */ B(LdaConstant), U8(142),
- B(Star), R(0),
- /* 1471 S> */ B(LdaConstant), U8(143),
- B(Star), R(0),
- /* 1481 S> */ B(LdaConstant), U8(144),
- B(Star), R(0),
- /* 1491 S> */ B(LdaConstant), U8(145),
- B(Star), R(0),
- /* 1501 S> */ B(LdaConstant), U8(146),
- B(Star), R(0),
- /* 1511 S> */ B(LdaConstant), U8(147),
- B(Star), R(0),
- /* 1521 S> */ B(LdaConstant), U8(148),
- B(Star), R(0),
- /* 1531 S> */ B(LdaConstant), U8(149),
- B(Star), R(0),
- /* 1541 S> */ B(LdaConstant), U8(150),
- B(Star), R(0),
- /* 1551 S> */ B(LdaConstant), U8(151),
- B(Star), R(0),
- /* 1561 S> */ B(LdaConstant), U8(152),
- B(Star), R(0),
- /* 1571 S> */ B(LdaConstant), U8(153),
- B(Star), R(0),
- /* 1581 S> */ B(LdaConstant), U8(154),
- B(Star), R(0),
- /* 1591 S> */ B(LdaConstant), U8(155),
- B(Star), R(0),
- /* 1601 S> */ B(LdaConstant), U8(156),
- B(Star), R(0),
- /* 1611 S> */ B(LdaConstant), U8(157),
- B(Star), R(0),
- /* 1621 S> */ B(LdaConstant), U8(158),
- B(Star), R(0),
- /* 1631 S> */ B(LdaConstant), U8(159),
- B(Star), R(0),
- /* 1641 S> */ B(LdaConstant), U8(160),
- B(Star), R(0),
- /* 1651 S> */ B(LdaConstant), U8(161),
- B(Star), R(0),
- /* 1661 S> */ B(LdaConstant), U8(162),
- B(Star), R(0),
- /* 1671 S> */ B(LdaConstant), U8(163),
- B(Star), R(0),
- /* 1681 S> */ B(LdaConstant), U8(164),
- B(Star), R(0),
- /* 1691 S> */ B(LdaConstant), U8(165),
- B(Star), R(0),
- /* 1701 S> */ B(LdaConstant), U8(166),
- B(Star), R(0),
- /* 1711 S> */ B(LdaConstant), U8(167),
- B(Star), R(0),
- /* 1721 S> */ B(LdaConstant), U8(168),
- B(Star), R(0),
- /* 1731 S> */ B(LdaConstant), U8(169),
- B(Star), R(0),
- /* 1741 S> */ B(LdaConstant), U8(170),
- B(Star), R(0),
- /* 1751 S> */ B(LdaConstant), U8(171),
- B(Star), R(0),
- /* 1761 S> */ B(LdaConstant), U8(172),
- B(Star), R(0),
- /* 1771 S> */ B(LdaConstant), U8(173),
- B(Star), R(0),
- /* 1781 S> */ B(LdaConstant), U8(174),
- B(Star), R(0),
- /* 1791 S> */ B(LdaConstant), U8(175),
- B(Star), R(0),
- /* 1801 S> */ B(LdaConstant), U8(176),
- B(Star), R(0),
- /* 1811 S> */ B(LdaConstant), U8(177),
- B(Star), R(0),
- /* 1821 S> */ B(LdaConstant), U8(178),
- B(Star), R(0),
- /* 1831 S> */ B(LdaConstant), U8(179),
- B(Star), R(0),
- /* 1841 S> */ B(LdaConstant), U8(180),
- B(Star), R(0),
- /* 1851 S> */ B(LdaConstant), U8(181),
- B(Star), R(0),
- /* 1861 S> */ B(LdaConstant), U8(182),
- B(Star), R(0),
- /* 1871 S> */ B(LdaConstant), U8(183),
- B(Star), R(0),
- /* 1881 S> */ B(LdaConstant), U8(184),
- B(Star), R(0),
- /* 1891 S> */ B(LdaConstant), U8(185),
- B(Star), R(0),
- /* 1901 S> */ B(LdaConstant), U8(186),
- B(Star), R(0),
- /* 1911 S> */ B(LdaConstant), U8(187),
- B(Star), R(0),
- /* 1921 S> */ B(LdaConstant), U8(188),
- B(Star), R(0),
- /* 1931 S> */ B(LdaConstant), U8(189),
- B(Star), R(0),
- /* 1941 S> */ B(LdaConstant), U8(190),
- B(Star), R(0),
- /* 1951 S> */ B(LdaConstant), U8(191),
- B(Star), R(0),
- /* 1961 S> */ B(LdaConstant), U8(192),
- B(Star), R(0),
- /* 1971 S> */ B(LdaConstant), U8(193),
- B(Star), R(0),
- /* 1981 S> */ B(LdaConstant), U8(194),
- B(Star), R(0),
- /* 1991 S> */ B(LdaConstant), U8(195),
- B(Star), R(0),
- /* 2001 S> */ B(LdaConstant), U8(196),
- B(Star), R(0),
- /* 2011 S> */ B(LdaConstant), U8(197),
- B(Star), R(0),
- /* 2021 S> */ B(LdaConstant), U8(198),
- B(Star), R(0),
- /* 2031 S> */ B(LdaConstant), U8(199),
- B(Star), R(0),
- /* 2041 S> */ B(LdaConstant), U8(200),
- B(Star), R(0),
- /* 2051 S> */ B(LdaConstant), U8(201),
- B(Star), R(0),
- /* 2061 S> */ B(LdaConstant), U8(202),
- B(Star), R(0),
- /* 2071 S> */ B(LdaConstant), U8(203),
- B(Star), R(0),
- /* 2081 S> */ B(LdaConstant), U8(204),
- B(Star), R(0),
- /* 2091 S> */ B(LdaConstant), U8(205),
- B(Star), R(0),
- /* 2101 S> */ B(LdaConstant), U8(206),
- B(Star), R(0),
- /* 2111 S> */ B(LdaConstant), U8(207),
- B(Star), R(0),
- /* 2121 S> */ B(LdaConstant), U8(208),
- B(Star), R(0),
- /* 2131 S> */ B(LdaConstant), U8(209),
- B(Star), R(0),
- /* 2141 S> */ B(LdaConstant), U8(210),
- B(Star), R(0),
- /* 2151 S> */ B(LdaConstant), U8(211),
- B(Star), R(0),
- /* 2161 S> */ B(LdaConstant), U8(212),
- B(Star), R(0),
- /* 2171 S> */ B(LdaConstant), U8(213),
- B(Star), R(0),
- /* 2181 S> */ B(LdaConstant), U8(214),
- B(Star), R(0),
- /* 2191 S> */ B(LdaConstant), U8(215),
- B(Star), R(0),
- /* 2201 S> */ B(LdaConstant), U8(216),
- B(Star), R(0),
- /* 2211 S> */ B(LdaConstant), U8(217),
- B(Star), R(0),
- /* 2221 S> */ B(LdaConstant), U8(218),
- B(Star), R(0),
- /* 2231 S> */ B(LdaConstant), U8(219),
- B(Star), R(0),
- /* 2241 S> */ B(LdaConstant), U8(220),
- B(Star), R(0),
- /* 2251 S> */ B(LdaConstant), U8(221),
- B(Star), R(0),
- /* 2261 S> */ B(LdaConstant), U8(222),
- B(Star), R(0),
- /* 2271 S> */ B(LdaConstant), U8(223),
- B(Star), R(0),
- /* 2281 S> */ B(LdaConstant), U8(224),
- B(Star), R(0),
- /* 2291 S> */ B(LdaConstant), U8(225),
- B(Star), R(0),
- /* 2301 S> */ B(LdaConstant), U8(226),
- B(Star), R(0),
- /* 2311 S> */ B(LdaConstant), U8(227),
- B(Star), R(0),
- /* 2321 S> */ B(LdaConstant), U8(228),
- B(Star), R(0),
- /* 2331 S> */ B(LdaConstant), U8(229),
- B(Star), R(0),
- /* 2341 S> */ B(LdaConstant), U8(230),
- B(Star), R(0),
- /* 2351 S> */ B(LdaConstant), U8(231),
- B(Star), R(0),
- /* 2361 S> */ B(LdaConstant), U8(232),
- B(Star), R(0),
- /* 2371 S> */ B(LdaConstant), U8(233),
- B(Star), R(0),
- /* 2381 S> */ B(LdaConstant), U8(234),
- B(Star), R(0),
- /* 2391 S> */ B(LdaConstant), U8(235),
- B(Star), R(0),
- /* 2401 S> */ B(LdaConstant), U8(236),
- B(Star), R(0),
- /* 2411 S> */ B(LdaConstant), U8(237),
- B(Star), R(0),
- /* 2421 S> */ B(LdaConstant), U8(238),
- B(Star), R(0),
- /* 2431 S> */ B(LdaConstant), U8(239),
- B(Star), R(0),
- /* 2441 S> */ B(LdaConstant), U8(240),
- B(Star), R(0),
- /* 2451 S> */ B(LdaConstant), U8(241),
- B(Star), R(0),
- /* 2461 S> */ B(LdaConstant), U8(242),
- B(Star), R(0),
- /* 2471 S> */ B(LdaConstant), U8(243),
- B(Star), R(0),
- /* 2481 S> */ B(LdaConstant), U8(244),
- B(Star), R(0),
- /* 2491 S> */ B(LdaConstant), U8(245),
- B(Star), R(0),
- /* 2501 S> */ B(LdaConstant), U8(246),
- B(Star), R(0),
- /* 2511 S> */ B(LdaConstant), U8(247),
- B(Star), R(0),
- /* 2521 S> */ B(LdaConstant), U8(248),
- B(Star), R(0),
- /* 2531 S> */ B(LdaConstant), U8(249),
- B(Star), R(0),
- /* 2541 S> */ B(LdaConstant), U8(250),
- B(Star), R(0),
- /* 2551 S> */ B(LdaConstant), U8(251),
- B(Star), R(0),
- /* 2561 S> */ B(LdaConstant), U8(252),
- B(Star), R(0),
- /* 2571 S> */ B(LdaConstant), U8(253),
- B(Star), R(0),
- /* 2581 S> */ B(LdaConstant), U8(254),
- B(Star), R(0),
- /* 2591 S> */ B(LdaConstant), U8(255),
- B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateRegExpLiteral), U16(256), U16(0), U8(0),
- /* 2615 S> */ B(Return),
-]
-constant pool: [
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- HEAP_NUMBER_TYPE [1.23],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["ab+d"],
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 4194925e41..ec2d310302 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -99,7 +99,7 @@ bytecodes: [
B(TestEqual), R(2), U8(3),
B(JumpIfFalse), U8(54),
/* 17 E> */ B(StackCheck),
- /* 48 S> */ B(LdaLookupGlobalSlot), U8(2), U8(6), U8(1),
+ /* 48 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(3),
B(Star), R(7),
B(LdaConstant), U8(3),
B(Star), R(8),
@@ -114,7 +114,7 @@ bytecodes: [
B(Mov), R(closure), R(11),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(9), U8(6),
B(Star), R(7),
- /* 48 E> */ B(CallUndefinedReceiver1), R(7), R(8), U8(4),
+ /* 48 E> */ B(CallUndefinedReceiver1), R(7), R(8), U8(6),
B(LdaZero),
B(Star), R(2),
B(LdaCurrentContextSlot), U8(4),
@@ -188,9 +188,9 @@ bytecodes: [
B(TestEqual), R(3), U8(3),
B(JumpIfFalse), U8(22),
/* 17 E> */ B(StackCheck),
- /* 48 S> */ B(CreateClosure), U8(1), U8(6), U8(2),
+ /* 48 S> */ B(CreateClosure), U8(1), U8(4), U8(2),
B(Star), R(5),
- /* 74 E> */ B(CallUndefinedReceiver0), R(5), U8(4),
+ /* 74 E> */ B(CallUndefinedReceiver0), R(5), U8(5),
B(LdaZero),
B(Star), R(3),
B(LdaCurrentContextSlot), U8(4),
@@ -231,25 +231,25 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(3),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(67),
+ B(LdaSmi), I8(73),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kNewTypeError), R(4), U8(2),
/* 28 E> */ B(Throw),
- /* 37 S> */ B(LdaNamedProperty), R(3), U8(1), U8(3),
+ /* 37 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
B(Star), R(1),
- /* 37 S> */ B(LdaNamedProperty), R(3), U8(2), U8(5),
+ /* 37 S> */ B(LdaNamedProperty), R(3), U8(2), U8(3),
B(Star), R(2),
/* 55 S> */ B(LdaZero),
- /* 55 E> */ B(TestGreaterThan), R(2), U8(7),
+ /* 55 E> */ B(TestGreaterThan), R(2), U8(5),
B(JumpIfFalse), U8(19),
/* 17 E> */ B(StackCheck),
/* 75 S> */ B(Ldar), R(2),
- /* 77 E> */ B(Add), R(1), U8(9),
+ /* 77 E> */ B(Add), R(1), U8(6),
B(Star), R(0),
/* 62 S> */ B(Ldar), R(2),
- B(Dec), U8(8),
+ B(Dec), U8(7),
B(Star), R(2),
B(JumpLoop), U8(20), I8(0),
B(LdaUndefined),
@@ -281,7 +281,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(3),
B(Mov), R(closure), R(4),
@@ -342,7 +342,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(2),
B(Mov), R(closure), R(3),
@@ -370,7 +370,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 31 E> */ B(TestEqualStrictNoFeedback), R(2),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
+ B(Abort), U8(42),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(56),
@@ -518,7 +518,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(43),
+ B(Abort), U8(42),
B(LdaSmi), I8(-2),
B(Star), R(3),
B(Mov), R(closure), R(4),
@@ -537,7 +537,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 36 E> */ B(TestEqualStrictNoFeedback), R(3),
B(JumpIfTrue), U8(4),
- B(Abort), U8(43),
+ B(Abort), U8(42),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(59),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 0d36442d47..cf04f8e0c2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -17,7 +17,7 @@ snippet: "
test = new B(1, 2, 3).constructor;
})();
"
-frame size: 4
+frame size: 5
parameter count: 1
bytecode array length: 19
bytecodes: [
@@ -26,9 +26,9 @@ bytecodes: [
B(Mov), R(closure), R(1),
/* 93 E> */ B(StackCheck),
/* 93 S> */ B(Ldar), R(1),
- B(GetSuperConstructor), R(3),
+ B(GetSuperConstructor), R(4),
B(Ldar), R(0),
- /* 93 E> */ B(ConstructWithSpread), R(3), R(2), U8(1), U8(0),
+ /* 93 E> */ B(ConstructWithSpread), R(4), R(2), U8(1), U8(0),
/* 93 S> */ B(Return),
]
constant pool: [
@@ -49,7 +49,7 @@ snippet: "
test = new B(1, 2, 3).constructor;
})();
"
-frame size: 7
+frame size: 8
parameter count: 1
bytecode array length: 40
bytecodes: [
@@ -59,12 +59,12 @@ bytecodes: [
/* 128 E> */ B(StackCheck),
B(Mov), R(2), R(3),
/* 140 S> */ B(Ldar), R(closure),
- B(GetSuperConstructor), R(4),
+ B(GetSuperConstructor), R(5),
B(LdaSmi), I8(1),
- B(Star), R(5),
+ B(Star), R(6),
B(Ldar), R(0),
- B(Mov), R(2), R(6),
- /* 140 E> */ B(ConstructWithSpread), R(4), R(5), U8(2), U8(0),
+ B(Mov), R(2), R(7),
+ /* 140 E> */ B(ConstructWithSpread), R(5), R(6), U8(2), U8(0),
B(Star), R(4),
B(Ldar), R(this),
/* 140 E> */ B(ThrowSuperAlreadyCalledIfNotHole),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index a853183351..ca596e7a5e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -25,7 +25,7 @@ bytecodes: [
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(7),
B(Jump), U8(8),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -60,7 +60,7 @@ bytecodes: [
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -97,7 +97,7 @@ bytecodes: [
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(8),
B(Jump), U8(12),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -134,7 +134,7 @@ bytecodes: [
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
- B(TestEqualStrict), R(1), U8(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(6),
B(Jump), U8(6),
/* 66 S> */ B(Jump), U8(10),
@@ -172,7 +172,7 @@ bytecodes: [
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
- B(TestEqualStrict), R(1), U8(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 74 S> */ B(LdaSmi), I8(1),
@@ -313,7 +313,7 @@ bytecodes: [
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrueConstant), U8(0),
B(JumpConstant), U8(1),
/* 68 S> */ B(LdaSmi), I8(2),
@@ -478,18 +478,18 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(0), U8(3),
+ B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(4),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(32),
B(Jump), U8(34),
/* 70 S> */ B(Ldar), R(0),
- /* 79 E> */ B(AddSmi), I8(1), U8(0),
+ /* 79 E> */ B(AddSmi), I8(1), U8(1),
B(Star), R(2),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(2), U8(2),
B(JumpIfTrue), U8(4),
B(Jump), U8(8),
/* 101 S> */ B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
index 6e4eec57b9..3e7bb57f05 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
@@ -12,12 +12,11 @@ snippet: "
f = function f() {};
f();
"
-frame size: 1
+frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 3
bytecodes: [
/* 21 E> */ B(StackCheck),
- B(Mov), R(closure), R(0),
B(LdaUndefined),
/* 25 S> */ B(Return),
]
@@ -36,8 +35,8 @@ frame size: 1
parameter count: 1
bytecode array length: 7
bytecodes: [
- /* 21 E> */ B(StackCheck),
B(Mov), R(closure), R(0),
+ /* 21 E> */ B(StackCheck),
/* 26 S> */ B(Ldar), R(0),
/* 35 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index a78163a562..4b56b6302b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -21,8 +21,8 @@ bytecodes: [
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
/* 0 E> */ B(StackCheck),
- /* 8 S> */ B(CreateObjectLiteral), U8(1), U8(3), U8(41), R(1),
- B(CreateClosure), U8(2), U8(2), U8(0),
+ /* 8 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(41), R(1),
+ B(CreateClosure), U8(2), U8(3), U8(0),
B(StaNamedOwnProperty), R(1), U8(3), U8(4),
B(Ldar), R(1),
/* 8 E> */ B(StaGlobalSloppy), U8(4), U8(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index 2297a7fdc2..b8b4c4bde8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -101,8 +101,8 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
B(Star), R(0),
- /* 64 S> */ B(Mul), R(0), U8(0),
- /* 68 E> */ B(SubSmi), I8(1), U8(1),
+ /* 64 S> */ B(Mul), R(0), U8(1),
+ /* 68 E> */ B(SubSmi), I8(1), U8(0),
B(LdaUndefined),
B(Star), R(1),
/* 83 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 94450515ce..6d7309b732 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -905,11 +905,11 @@ bytecodes: [
B(JumpIfFalse), U8(31),
/* 1518 E> */ B(StackCheck),
/* 1555 S> */ B(Wide), B(Ldar), R16(128),
- /* 1561 E> */ B(Add), R(1), U8(2),
+ /* 1561 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(157),
B(Star), R(1),
/* 1548 S> */ B(Wide), B(Ldar), R16(128),
- B(Inc), U8(1),
+ B(Inc), U8(2),
B(Wide), B(Star), R16(128),
B(JumpLoop), U8(36), I8(0),
/* 1567 S> */ B(Wide), B(Ldar), R16(128),
@@ -1097,17 +1097,17 @@ bytecodes: [
B(JumpIfNull), U8(72),
B(Wide), B(ToObject), R16(157),
B(Wide), B(ForInEnumerate), R16(157),
- B(Wide), B(ForInPrepare), R16(158), U16(1),
+ B(Wide), B(ForInPrepare), R16(158), U16(0),
B(LdaZero),
B(Wide), B(Star), R16(161),
/* 1526 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
B(JumpIfFalse), U8(45),
- B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(1),
+ B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(0),
B(JumpIfUndefined), U8(22),
B(Wide), B(Star), R16(128),
/* 1521 E> */ B(StackCheck),
/* 1541 S> */ B(Wide), B(Ldar), R16(128),
- /* 1547 E> */ B(Add), R(1), U8(0),
+ /* 1547 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(162),
B(Star), R(1),
/* 1544 E> */ B(Wide), B(ForInStep), R16(161),
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 081be123bc..c6b1a01ff7 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -44,6 +44,7 @@ class ProgramOptions final {
top_level_(false),
do_expressions_(false),
async_iteration_(false),
+ public_fields_(false),
verbose_(false) {}
bool Validate() const;
@@ -63,6 +64,7 @@ class ProgramOptions final {
bool top_level() const { return top_level_; }
bool do_expressions() const { return do_expressions_; }
bool async_iteration() const { return async_iteration_; }
+ bool public_fields() const { return public_fields_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
std::vector<std::string> input_filenames() const { return input_filenames_; }
@@ -80,6 +82,7 @@ class ProgramOptions final {
bool top_level_;
bool do_expressions_;
bool async_iteration_;
+ bool public_fields_;
bool verbose_;
std::vector<std::string> input_filenames_;
std::string output_filename_;
@@ -169,6 +172,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.do_expressions_ = true;
} else if (strcmp(argv[i], "--async-iteration") == 0) {
options.async_iteration_ = true;
+ } else if (strcmp(argv[i], "--public-fields") == 0) {
+ options.public_fields_ = true;
} else if (strcmp(argv[i], "--verbose") == 0) {
options.verbose_ = true;
} else if (strncmp(argv[i], "--output=", 9) == 0) {
@@ -273,6 +278,8 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
do_expressions_ = ParseBoolean(line.c_str() + 16);
} else if (line.compare(0, 17, "async iteration: ") == 0) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
+ } else if (line.compare(0, 15, "public fields: ") == 0) {
+ public_fields_ = ParseBoolean(line.c_str() + 15);
} else if (line == "---") {
break;
} else if (line.empty()) {
@@ -296,12 +303,13 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
if (top_level_) stream << "\ntop level: yes";
if (do_expressions_) stream << "\ndo expressions: yes";
if (async_iteration_) stream << "\nasync iteration: yes";
+ if (public_fields_) stream << "\npublic fields: yes";
stream << "\n\n";
}
V8InitializationScope::V8InitializationScope(const char* exec_path)
- : platform_(v8::platform::CreateDefaultPlatform()) {
+ : platform_(v8::platform::NewDefaultPlatform()) {
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
@@ -400,6 +408,7 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
if (options.async_iteration()) i::FLAG_harmony_async_iteration = true;
+ if (options.public_fields()) i::FLAG_harmony_public_fields = true;
stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
options.PrintHeader(stream);
@@ -409,6 +418,7 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
i::FLAG_harmony_do_expressions = false;
i::FLAG_harmony_async_iteration = false;
+ i::FLAG_harmony_public_fields = false;
}
bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
@@ -456,6 +466,7 @@ void PrintUsage(const char* exec_path) {
" --top-level Process top level code, not the top-level function.\n"
" --do-expressions Enable harmony_do_expressions flag.\n"
" --async-iteration Enable harmony_async_iteration flag.\n"
+ " --public-fields Enable harmony_public_fields flag.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
"stdout.\n"
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.cc b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
index 9cff95af5b..2fcc292b1c 100644
--- a/deps/v8/test/cctest/interpreter/source-position-matcher.cc
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
@@ -150,7 +150,7 @@ bool SourcePositionMatcher::CompareExpressionPositions(
for (size_t i = 0; i < original_positions->size(); ++i) {
PositionTableEntry original = original_positions->at(i);
PositionTableEntry optimized = original_positions->at(i);
- CHECK(original.source_position > 0);
+ CHECK_GT(original.source_position, 0);
if ((original.is_statement || optimized.is_statement) ||
(original.source_position != optimized.source_position) ||
(original.source_position < 0)) {
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 50e7034686..ed8098ddab 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -331,10 +331,6 @@ TEST(HeapNumberConstants) {
"var a = 1.2; return 2.6;\n",
"var a = 3.14; return 3.14;\n",
-
- "var a;" //
- REPEAT_256("\na = 1.414;") //
- " a = 3.14;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
@@ -950,25 +946,6 @@ TEST(BasicLoops) {
LoadGolden("BasicLoops.golden")));
}
-TEST(JumpsRequiringConstantWideOperands) {
- InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
- REPEAT_256("var x = 0.1;\n")
- REPEAT_32("var x = 0.2;\n")
- REPEAT_16("var x = 0.3;\n")
- REPEAT_8("var x = 0.4;\n")
- "for (var i = 0; i < 3; i++) {\n"
- " if (i == 1) continue;\n"
- " if (i == 2) break;\n"
- "}\n"
- "return 3;\n",
- };
-
- CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("JumpsRequiringConstantWideOperands.golden")));
-}
-
TEST(UnaryOperators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -1197,20 +1174,6 @@ TEST(RegExpLiterals) {
LoadGolden("RegExpLiterals.golden")));
}
-TEST(RegExpLiteralsWide) {
- InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate());
-
- const char* snippets[] = {
- "var a;" //
- REPEAT_256("\na = 1.23;") //
- "\nreturn /ab+d/;\n",
- };
-
- CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("RegExpLiteralsWide.golden")));
-}
-
TEST(ArrayLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -1229,20 +1192,6 @@ TEST(ArrayLiterals) {
LoadGolden("ArrayLiterals.golden")));
}
-TEST(ArrayLiteralsWide) {
- InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate());
-
- const char* snippets[] = {
- "var a;" //
- REPEAT_256("\na = 1.23;") //
- "\nreturn [ 1 , 2 ];\n",
- };
-
- CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("ArrayLiteralsWide.golden")));
-}
-
TEST(ObjectLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -1283,19 +1232,6 @@ TEST(ObjectLiterals) {
LoadGolden("ObjectLiterals.golden")));
}
-TEST(ObjectLiteralsWide) {
- InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
- "var a;" //
- REPEAT_256("\na = 1.23;") //
- "\nreturn { name: 'string', val: 9.2 };\n",
- };
-
- CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("ObjectLiteralsWide.golden")));
-}
-
TEST(TopLevelObjectLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -2016,41 +1952,6 @@ TEST(LookupSlotInEval) {
CHECK(CompareTexts(actual, LoadGolden("LookupSlotInEval.golden")));
}
-TEST(LookupSlotWideInEval) {
- InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate());
- printer.set_wrap(false);
- printer.set_test_function_name("f");
-
- const char* snippets[] = {
- REPEAT_256(" \"var y = 2.3;\" +\n") //
- " \"return x;\" +\n",
-
- REPEAT_256(" \"var y = 2.3;\" +\n") //
- " \"return typeof x;\" +\n",
-
- REPEAT_256(" \"var y = 2.3;\" +\n") //
- " \"x = 10;\" +\n",
-
- " \"'use strict';\" +\n" //
- REPEAT_256(" \"var y = 2.3;\" +\n") //
- " \"x = 10;\" +\n",
- };
-
- std::string actual = BuildActual(printer, snippets,
- "var f;\n"
- "var x = 1;\n"
- "function f1() {\n"
- " eval(\"function t() {\" +\n",
-
- " \"};\" +\n"
- " \"f = t; f();\"\n);\n"
- "}\n"
- "f1();");
-
- CHECK(CompareTexts(actual, LoadGolden("LookupSlotWideInEval.golden")));
-}
-
TEST(DeleteLookupSlotInEval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -2332,6 +2233,70 @@ TEST(ClassAndSuperClass) {
LoadGolden("ClassAndSuperClass.golden")));
}
+TEST(ClassFields) {
+ bool old_flag = i::FLAG_harmony_public_fields;
+ i::FLAG_harmony_public_fields = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+ const char* snippets[] = {
+ "{\n"
+ " class A {\n"
+ " a;\n"
+ " ['b'];\n"
+ " static c;\n"
+ " static ['d'];\n"
+ " }\n"
+ "\n"
+ " class B {\n"
+ " a = 1;\n"
+ " ['b'] = this.a;\n"
+ " static c = 3;\n"
+ " static ['d'] = this.c;\n"
+ " }\n"
+ " new A;\n"
+ " new B;\n"
+ "}\n",
+
+ "{\n"
+ " class A extends class {} {\n"
+ " a;\n"
+ " ['b'];\n"
+ " static c;\n"
+ " static ['d'];\n"
+ " }\n"
+ "\n"
+ " class B extends class {} {\n"
+ " a = 1;\n"
+ " ['b'] = this.a;\n"
+ " static c = 3;\n"
+ " static ['d'] = this.c;\n"
+ " foo() { return 1; }\n"
+ " constructor() {\n"
+ " super();\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " class C extends B {\n"
+ " a = 1;\n"
+ " ['b'] = this.a;\n"
+ " static c = 3;\n"
+ " static ['d'] = super.foo();\n"
+ " constructor() {\n"
+ " (() => super())();\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " new A;\n"
+ " new B;\n"
+ " new C;\n"
+ "}\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ClassFields.golden")));
+ i::FLAG_harmony_public_fields = old_flag;
+}
+
TEST(Generators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index 8d1f551ee7..b706b7c480 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -26,7 +26,7 @@ class InvokeIntrinsicHelper {
template <class... A>
Handle<Object> Invoke(A... args) {
CHECK(IntrinsicsHelper::IsSupported(function_id_));
- BytecodeArrayBuilder builder(isolate_, zone_, sizeof...(args), 0, 0);
+ BytecodeArrayBuilder builder(zone_, sizeof...(args), 0, 0);
RegisterList reg_list(builder.Receiver().index(), sizeof...(args));
builder.CallRuntime(function_id_, reg_list).Return();
InterpreterTester tester(isolate_, builder.ToBytecodeArray(isolate_));
@@ -216,14 +216,6 @@ TEST(IntrinsicAsStubCall) {
*has_property_helper.Invoke(
has_property_helper.NewObject("'y'"),
has_property_helper.NewObject("({ x: 20 })")));
-
- InvokeIntrinsicHelper sub_string_helper(isolate, handles.main_zone(),
- Runtime::kInlineSubString);
- CHECK(sub_string_helper
- .Invoke(sub_string_helper.NewObject("'foobar'"),
- sub_string_helper.NewObject("3"),
- sub_string_helper.NewObject("6"))
- ->SameValue(*sub_string_helper.NewObject("'bar'")));
}
TEST(ClassOf) {
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index e1134e85b1..6185925ab4 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -35,7 +35,7 @@ TEST(InterpreterReturn) {
Zone* zone = handles.main_zone();
Handle<Object> undefined_value = isolate->factory()->undefined_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -51,7 +51,7 @@ TEST(InterpreterLoadUndefined) {
Zone* zone = handles.main_zone();
Handle<Object> undefined_value = isolate->factory()->undefined_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadUndefined().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -67,7 +67,7 @@ TEST(InterpreterLoadNull) {
Zone* zone = handles.main_zone();
Handle<Object> null_value = isolate->factory()->null_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadNull().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -83,7 +83,7 @@ TEST(InterpreterLoadTheHole) {
Zone* zone = handles.main_zone();
Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadTheHole().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -99,7 +99,7 @@ TEST(InterpreterLoadTrue) {
Zone* zone = handles.main_zone();
Handle<Object> true_value = isolate->factory()->true_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadTrue().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -115,7 +115,7 @@ TEST(InterpreterLoadFalse) {
Zone* zone = handles.main_zone();
Handle<Object> false_value = isolate->factory()->false_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadFalse().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -132,7 +132,7 @@ TEST(InterpreterLoadLiteral) {
// Small Smis.
for (int i = -128; i < 128; i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(i)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -144,7 +144,7 @@ TEST(InterpreterLoadLiteral) {
// Large Smis.
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(0x12345678)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -160,9 +160,9 @@ TEST(InterpreterLoadLiteral) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
- builder.LoadLiteral(ast_factory.NewNumber(-2.1e19)).Return();
+ builder.LoadLiteral(-2.1e19).Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -178,7 +178,7 @@ TEST(InterpreterLoadLiteral) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
const AstRawString* raw_string = ast_factory.GetOneByteString("String");
builder.LoadLiteral(raw_string).Return();
@@ -199,7 +199,7 @@ TEST(InterpreterLoadStoreRegisters) {
Zone* zone = handles.main_zone();
Handle<Object> true_value = isolate->factory()->true_value();
for (int i = 0; i <= kMaxInt8; i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, i + 1);
+ BytecodeArrayBuilder builder(zone, 1, i + 1);
Register reg(i);
builder.LoadTrue()
@@ -282,10 +282,10 @@ TEST(InterpreterShiftOpsSmi) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -320,10 +320,10 @@ TEST(InterpreterBinaryOpsSmi) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -359,24 +359,21 @@ TEST(InterpreterBinaryOpsHeapNumber) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
Register reg(0);
double lhs = lhs_inputs[l];
double rhs = rhs_inputs[r];
- builder.LoadLiteral(ast_factory.NewNumber(lhs))
+ builder.LoadLiteral(lhs)
.StoreAccumulatorInRegister(reg)
- .LoadLiteral(ast_factory.NewNumber(rhs))
+ .LoadLiteral(rhs)
.BinaryOperation(kArithmeticOperators[o], reg, GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -390,6 +387,55 @@ TEST(InterpreterBinaryOpsHeapNumber) {
}
}
+namespace {
+
+struct LiteralForTest {
+ enum Type { kString, kHeapNumber, kSmi, kTrue, kFalse, kUndefined, kNull };
+
+ explicit LiteralForTest(const AstRawString* string)
+ : type(kString), string(string) {}
+ explicit LiteralForTest(double number) : type(kHeapNumber), number(number) {}
+ explicit LiteralForTest(int smi) : type(kSmi), smi(smi) {}
+ explicit LiteralForTest(Type type) : type(type) {}
+
+ Type type;
+ union {
+ const AstRawString* string;
+ double number;
+ int smi;
+ };
+};
+
+void LoadLiteralForTest(BytecodeArrayBuilder* builder,
+ const LiteralForTest& value) {
+ switch (value.type) {
+ case LiteralForTest::kString:
+ builder->LoadLiteral(value.string);
+ return;
+ case LiteralForTest::kHeapNumber:
+ builder->LoadLiteral(value.number);
+ return;
+ case LiteralForTest::kSmi:
+ builder->LoadLiteral(Smi::FromInt(value.smi));
+ return;
+ case LiteralForTest::kTrue:
+ builder->LoadTrue();
+ return;
+ case LiteralForTest::kFalse:
+ builder->LoadFalse();
+ return;
+ case LiteralForTest::kUndefined:
+ builder->LoadUndefined();
+ return;
+ case LiteralForTest::kNull:
+ builder->LoadNull();
+ return;
+ }
+ UNREACHABLE();
+}
+
+} // anonymous namespace
+
TEST(InterpreterStringAdd) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -400,53 +446,51 @@ TEST(InterpreterStringAdd) {
struct TestCase {
const AstRawString* lhs;
- const AstValue* rhs;
+ LiteralForTest rhs;
Handle<Object> expected_value;
int32_t expected_feedback;
} test_cases[] = {
{ast_factory.GetOneByteString("a"),
- ast_factory.NewString(ast_factory.GetOneByteString("b")),
+ LiteralForTest(ast_factory.GetOneByteString("b")),
factory->NewStringFromStaticChars("ab"),
BinaryOperationFeedback::kString},
{ast_factory.GetOneByteString("aaaaaa"),
- ast_factory.NewString(ast_factory.GetOneByteString("b")),
+ LiteralForTest(ast_factory.GetOneByteString("b")),
factory->NewStringFromStaticChars("aaaaaab"),
BinaryOperationFeedback::kString},
{ast_factory.GetOneByteString("aaa"),
- ast_factory.NewString(ast_factory.GetOneByteString("bbbbb")),
+ LiteralForTest(ast_factory.GetOneByteString("bbbbb")),
factory->NewStringFromStaticChars("aaabbbbb"),
BinaryOperationFeedback::kString},
{ast_factory.GetOneByteString(""),
- ast_factory.NewString(ast_factory.GetOneByteString("b")),
+ LiteralForTest(ast_factory.GetOneByteString("b")),
factory->NewStringFromStaticChars("b"),
BinaryOperationFeedback::kString},
{ast_factory.GetOneByteString("a"),
- ast_factory.NewString(ast_factory.GetOneByteString("")),
+ LiteralForTest(ast_factory.GetOneByteString("")),
factory->NewStringFromStaticChars("a"),
BinaryOperationFeedback::kString},
- {ast_factory.GetOneByteString("1.11"), ast_factory.NewNumber(2.5),
+ {ast_factory.GetOneByteString("1.11"), LiteralForTest(2.5),
factory->NewStringFromStaticChars("1.112.5"),
BinaryOperationFeedback::kAny},
- {ast_factory.GetOneByteString("-1.11"), ast_factory.NewNumber(2.56),
+ {ast_factory.GetOneByteString("-1.11"), LiteralForTest(2.56),
factory->NewStringFromStaticChars("-1.112.56"),
BinaryOperationFeedback::kAny},
- {ast_factory.GetOneByteString(""), ast_factory.NewNumber(2.5),
+ {ast_factory.GetOneByteString(""), LiteralForTest(2.5),
factory->NewStringFromStaticChars("2.5"), BinaryOperationFeedback::kAny},
};
for (size_t i = 0; i < arraysize(test_cases); i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
Register reg(0);
- builder.LoadLiteral(test_cases[i].lhs)
- .StoreAccumulatorInRegister(reg)
- .LoadLiteral(test_cases[i].rhs)
- .BinaryOperation(Token::Value::ADD, reg, GetIndex(slot))
- .Return();
+ builder.LoadLiteral(test_cases[i].lhs).StoreAccumulatorInRegister(reg);
+ LoadLiteralForTest(&builder, test_cases[i].rhs);
+ builder.BinaryOperation(Token::Value::ADD, reg, GetIndex(slot)).Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -466,7 +510,7 @@ TEST(InterpreterParameter1) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadAccumulatorWithRegister(builder.Receiver()).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -491,16 +535,16 @@ TEST(InterpreterParameter8) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 8, 0);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot4 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot5 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot6 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 8, 0, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot3 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot4 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot5 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot6 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -545,125 +589,114 @@ TEST(InterpreterBinaryOpTypeFeedback) {
struct BinaryOpExpectation {
Token::Value op;
- const AstValue* arg1;
- const AstValue* arg2;
+ LiteralForTest arg1;
+ LiteralForTest arg2;
Handle<Object> result;
int32_t feedback;
};
BinaryOpExpectation const kTestCases[] = {
// ADD
- {Token::Value::ADD, ast_factory.NewSmi(2), ast_factory.NewSmi(3),
+ {Token::Value::ADD, LiteralForTest(2), LiteralForTest(3),
Handle<Smi>(Smi::FromInt(5), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::ADD, ast_factory.NewSmi(Smi::kMaxValue),
- ast_factory.NewSmi(1),
+ {Token::Value::ADD, LiteralForTest(Smi::kMaxValue), LiteralForTest(1),
isolate->factory()->NewHeapNumber(Smi::kMaxValue + 1.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::ADD, ast_factory.NewNumber(3.1415), ast_factory.NewSmi(3),
+ {Token::Value::ADD, LiteralForTest(3.1415), LiteralForTest(3),
isolate->factory()->NewHeapNumber(3.1415 + 3),
BinaryOperationFeedback::kNumber},
- {Token::Value::ADD, ast_factory.NewNumber(3.1415),
- ast_factory.NewNumber(1.4142),
+ {Token::Value::ADD, LiteralForTest(3.1415), LiteralForTest(1.4142),
isolate->factory()->NewHeapNumber(3.1415 + 1.4142),
BinaryOperationFeedback::kNumber},
- {Token::Value::ADD,
- ast_factory.NewString(ast_factory.GetOneByteString("foo")),
- ast_factory.NewString(ast_factory.GetOneByteString("bar")),
+ {Token::Value::ADD, LiteralForTest(ast_factory.GetOneByteString("foo")),
+ LiteralForTest(ast_factory.GetOneByteString("bar")),
isolate->factory()->NewStringFromAsciiChecked("foobar"),
BinaryOperationFeedback::kString},
- {Token::Value::ADD, ast_factory.NewSmi(2),
- ast_factory.NewString(ast_factory.GetOneByteString("2")),
+ {Token::Value::ADD, LiteralForTest(2),
+ LiteralForTest(ast_factory.GetOneByteString("2")),
isolate->factory()->NewStringFromAsciiChecked("22"),
BinaryOperationFeedback::kAny},
// SUB
- {Token::Value::SUB, ast_factory.NewSmi(2), ast_factory.NewSmi(3),
+ {Token::Value::SUB, LiteralForTest(2), LiteralForTest(3),
Handle<Smi>(Smi::FromInt(-1), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::SUB,
- ast_factory.NewSmi(static_cast<uint32_t>(Smi::kMinValue)),
- ast_factory.NewSmi(1),
+ {Token::Value::SUB, LiteralForTest(Smi::kMinValue), LiteralForTest(1),
isolate->factory()->NewHeapNumber(Smi::kMinValue - 1.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::SUB, ast_factory.NewNumber(3.1415), ast_factory.NewSmi(3),
+ {Token::Value::SUB, LiteralForTest(3.1415), LiteralForTest(3),
isolate->factory()->NewHeapNumber(3.1415 - 3),
BinaryOperationFeedback::kNumber},
- {Token::Value::SUB, ast_factory.NewNumber(3.1415),
- ast_factory.NewNumber(1.4142),
+ {Token::Value::SUB, LiteralForTest(3.1415), LiteralForTest(1.4142),
isolate->factory()->NewHeapNumber(3.1415 - 1.4142),
BinaryOperationFeedback::kNumber},
- {Token::Value::SUB, ast_factory.NewSmi(2),
- ast_factory.NewString(ast_factory.GetOneByteString("1")),
+ {Token::Value::SUB, LiteralForTest(2),
+ LiteralForTest(ast_factory.GetOneByteString("1")),
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny},
// MUL
- {Token::Value::MUL, ast_factory.NewSmi(2), ast_factory.NewSmi(3),
+ {Token::Value::MUL, LiteralForTest(2), LiteralForTest(3),
Handle<Smi>(Smi::FromInt(6), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::MUL,
- ast_factory.NewSmi(static_cast<uint32_t>(Smi::kMinValue)),
- ast_factory.NewSmi(2),
+ {Token::Value::MUL, LiteralForTest(Smi::kMinValue), LiteralForTest(2),
isolate->factory()->NewHeapNumber(Smi::kMinValue * 2.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::MUL, ast_factory.NewNumber(3.1415), ast_factory.NewSmi(3),
+ {Token::Value::MUL, LiteralForTest(3.1415), LiteralForTest(3),
isolate->factory()->NewHeapNumber(3 * 3.1415),
BinaryOperationFeedback::kNumber},
- {Token::Value::MUL, ast_factory.NewNumber(3.1415),
- ast_factory.NewNumber(1.4142),
+ {Token::Value::MUL, LiteralForTest(3.1415), LiteralForTest(1.4142),
isolate->factory()->NewHeapNumber(3.1415 * 1.4142),
BinaryOperationFeedback::kNumber},
- {Token::Value::MUL, ast_factory.NewSmi(2),
- ast_factory.NewString(ast_factory.GetOneByteString("1")),
+ {Token::Value::MUL, LiteralForTest(2),
+ LiteralForTest(ast_factory.GetOneByteString("1")),
Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kAny},
// DIV
- {Token::Value::DIV, ast_factory.NewSmi(6), ast_factory.NewSmi(3),
+ {Token::Value::DIV, LiteralForTest(6), LiteralForTest(3),
Handle<Smi>(Smi::FromInt(2), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::DIV, ast_factory.NewSmi(3), ast_factory.NewSmi(2),
+ {Token::Value::DIV, LiteralForTest(3), LiteralForTest(2),
isolate->factory()->NewHeapNumber(3.0 / 2.0),
BinaryOperationFeedback::kSignedSmallInputs},
- {Token::Value::DIV, ast_factory.NewNumber(3.1415), ast_factory.NewSmi(3),
+ {Token::Value::DIV, LiteralForTest(3.1415), LiteralForTest(3),
isolate->factory()->NewHeapNumber(3.1415 / 3),
BinaryOperationFeedback::kNumber},
- {Token::Value::DIV, ast_factory.NewNumber(3.1415),
- ast_factory.NewNumber(-std::numeric_limits<double>::infinity()),
+ {Token::Value::DIV, LiteralForTest(3.1415),
+ LiteralForTest(-std::numeric_limits<double>::infinity()),
isolate->factory()->NewHeapNumber(-0.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::DIV, ast_factory.NewSmi(2),
- ast_factory.NewString(ast_factory.GetOneByteString("1")),
+ {Token::Value::DIV, LiteralForTest(2),
+ LiteralForTest(ast_factory.GetOneByteString("1")),
Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kAny},
// MOD
- {Token::Value::MOD, ast_factory.NewSmi(5), ast_factory.NewSmi(3),
+ {Token::Value::MOD, LiteralForTest(5), LiteralForTest(3),
Handle<Smi>(Smi::FromInt(2), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::MOD, ast_factory.NewSmi(static_cast<uint32_t>(-4)),
- ast_factory.NewSmi(2), isolate->factory()->NewHeapNumber(-0.0),
+ {Token::Value::MOD, LiteralForTest(-4), LiteralForTest(2),
+ isolate->factory()->NewHeapNumber(-0.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::MOD, ast_factory.NewNumber(3.1415), ast_factory.NewSmi(3),
+ {Token::Value::MOD, LiteralForTest(3.1415), LiteralForTest(3),
isolate->factory()->NewHeapNumber(fmod(3.1415, 3.0)),
BinaryOperationFeedback::kNumber},
- {Token::Value::MOD, ast_factory.NewNumber(-3.1415),
- ast_factory.NewNumber(-1.4142),
+ {Token::Value::MOD, LiteralForTest(-3.1415), LiteralForTest(-1.4142),
isolate->factory()->NewHeapNumber(fmod(-3.1415, -1.4142)),
BinaryOperationFeedback::kNumber},
- {Token::Value::MOD, ast_factory.NewSmi(3),
- ast_factory.NewString(ast_factory.GetOneByteString("-2")),
+ {Token::Value::MOD, LiteralForTest(3),
+ LiteralForTest(ast_factory.GetOneByteString("-2")),
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
for (const BinaryOpExpectation& test_case : kTestCases) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
i::FeedbackVectorSpec feedback_spec(zone);
- i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
Register reg(0);
- builder.LoadLiteral(test_case.arg1)
- .StoreAccumulatorInRegister(reg)
- .LoadLiteral(test_case.arg2)
- .BinaryOperation(test_case.op, reg, GetIndex(slot0))
- .Return();
+ LoadLiteralForTest(&builder, test_case.arg1);
+ builder.StoreAccumulatorInRegister(reg);
+ LoadLiteralForTest(&builder, test_case.arg2);
+ builder.BinaryOperation(test_case.op, reg, GetIndex(slot0)).Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -688,7 +721,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
struct BinaryOpExpectation {
Token::Value op;
- const AstValue* arg1;
+ LiteralForTest arg1;
int32_t arg2;
Handle<Object> result;
int32_t feedback;
@@ -696,84 +729,77 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
BinaryOpExpectation const kTestCases[] = {
// ADD
- {Token::Value::ADD, ast_factory.NewSmi(2), 42,
+ {Token::Value::ADD, LiteralForTest(2), 42,
Handle<Smi>(Smi::FromInt(44), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::ADD, ast_factory.NewSmi(2), Smi::kMaxValue,
+ {Token::Value::ADD, LiteralForTest(2), Smi::kMaxValue,
isolate->factory()->NewHeapNumber(Smi::kMaxValue + 2.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::ADD, ast_factory.NewNumber(3.1415), 2,
+ {Token::Value::ADD, LiteralForTest(3.1415), 2,
isolate->factory()->NewHeapNumber(3.1415 + 2.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::ADD,
- ast_factory.NewString(ast_factory.GetOneByteString("2")), 2,
+ {Token::Value::ADD, LiteralForTest(ast_factory.GetOneByteString("2")), 2,
isolate->factory()->NewStringFromAsciiChecked("22"),
BinaryOperationFeedback::kAny},
// SUB
- {Token::Value::SUB, ast_factory.NewSmi(2), 42,
+ {Token::Value::SUB, LiteralForTest(2), 42,
Handle<Smi>(Smi::FromInt(-40), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::SUB,
- ast_factory.NewSmi(static_cast<uint32_t>(Smi::kMinValue)), 1,
+ {Token::Value::SUB, LiteralForTest(Smi::kMinValue), 1,
isolate->factory()->NewHeapNumber(Smi::kMinValue - 1.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::SUB, ast_factory.NewNumber(3.1415), 2,
+ {Token::Value::SUB, LiteralForTest(3.1415), 2,
isolate->factory()->NewHeapNumber(3.1415 - 2.0),
BinaryOperationFeedback::kNumber},
- {Token::Value::SUB,
- ast_factory.NewString(ast_factory.GetOneByteString("2")), 2,
+ {Token::Value::SUB, LiteralForTest(ast_factory.GetOneByteString("2")), 2,
Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
// BIT_OR
- {Token::Value::BIT_OR, ast_factory.NewSmi(4), 1,
+ {Token::Value::BIT_OR, LiteralForTest(4), 1,
Handle<Smi>(Smi::FromInt(5), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::BIT_OR, ast_factory.NewNumber(3.1415), 8,
+ {Token::Value::BIT_OR, LiteralForTest(3.1415), 8,
Handle<Smi>(Smi::FromInt(11), isolate),
BinaryOperationFeedback::kNumber},
- {Token::Value::BIT_OR,
- ast_factory.NewString(ast_factory.GetOneByteString("2")), 1,
- Handle<Smi>(Smi::FromInt(3), isolate), BinaryOperationFeedback::kAny},
+ {Token::Value::BIT_OR, LiteralForTest(ast_factory.GetOneByteString("2")),
+ 1, Handle<Smi>(Smi::FromInt(3), isolate), BinaryOperationFeedback::kAny},
// BIT_AND
- {Token::Value::BIT_AND, ast_factory.NewSmi(3), 1,
+ {Token::Value::BIT_AND, LiteralForTest(3), 1,
Handle<Smi>(Smi::FromInt(1), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::BIT_AND, ast_factory.NewNumber(3.1415), 2,
+ {Token::Value::BIT_AND, LiteralForTest(3.1415), 2,
Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kNumber},
- {Token::Value::BIT_AND,
- ast_factory.NewString(ast_factory.GetOneByteString("2")), 1,
- Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
+ {Token::Value::BIT_AND, LiteralForTest(ast_factory.GetOneByteString("2")),
+ 1, Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
// SHL
- {Token::Value::SHL, ast_factory.NewSmi(3), 1,
+ {Token::Value::SHL, LiteralForTest(3), 1,
Handle<Smi>(Smi::FromInt(6), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::SHL, ast_factory.NewNumber(3.1415), 2,
+ {Token::Value::SHL, LiteralForTest(3.1415), 2,
Handle<Smi>(Smi::FromInt(12), isolate),
BinaryOperationFeedback::kNumber},
- {Token::Value::SHL,
- ast_factory.NewString(ast_factory.GetOneByteString("2")), 1,
+ {Token::Value::SHL, LiteralForTest(ast_factory.GetOneByteString("2")), 1,
Handle<Smi>(Smi::FromInt(4), isolate), BinaryOperationFeedback::kAny},
// SAR
- {Token::Value::SAR, ast_factory.NewSmi(3), 1,
+ {Token::Value::SAR, LiteralForTest(3), 1,
Handle<Smi>(Smi::FromInt(1), isolate),
BinaryOperationFeedback::kSignedSmall},
- {Token::Value::SAR, ast_factory.NewNumber(3.1415), 2,
+ {Token::Value::SAR, LiteralForTest(3.1415), 2,
Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kNumber},
- {Token::Value::SAR,
- ast_factory.NewString(ast_factory.GetOneByteString("2")), 1,
+ {Token::Value::SAR, LiteralForTest(ast_factory.GetOneByteString("2")), 1,
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
for (const BinaryOpExpectation& test_case : kTestCases) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
i::FeedbackVectorSpec feedback_spec(zone);
- i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
Register reg(0);
- builder.LoadLiteral(test_case.arg1)
- .StoreAccumulatorInRegister(reg)
+ LoadLiteralForTest(&builder, test_case.arg1);
+ builder.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(test_case.arg2))
.BinaryOperation(test_case.op, reg, GetIndex(slot0))
.Return();
@@ -814,13 +840,13 @@ TEST(InterpreterUnaryOpFeedback) {
{Token::Value::INC, smi_one, smi_max, number, str},
{Token::Value::DEC, smi_one, smi_min, number, str}};
for (TestCase const& test_case : kTestCases) {
- BytecodeArrayBuilder builder(isolate, zone, 4, 0);
-
i::FeedbackVectorSpec feedback_spec(zone);
- i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
- i::FeedbackSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
- i::FeedbackSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
- i::FeedbackSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 4, 0, &feedback_spec);
+
+ i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
+ i::FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
+ i::FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
+ i::FeedbackSlot slot3 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
@@ -878,12 +904,12 @@ TEST(InterpreterBitwiseTypeFeedback) {
Token::Value::SHL, Token::Value::SHR, Token::Value::SAR};
for (Token::Value op : kBitwiseBinaryOperators) {
- BytecodeArrayBuilder builder(isolate, zone, 4, 0);
-
i::FeedbackVectorSpec feedback_spec(zone);
- i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
- i::FeedbackSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
- i::FeedbackSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 4, 0, &feedback_spec);
+
+ i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
+ i::FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
+ i::FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
@@ -929,7 +955,7 @@ TEST(InterpreterParameter1Assign) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(5))
.StoreAccumulatorInRegister(builder.Receiver())
@@ -1054,7 +1080,7 @@ TEST(InterpreterLoadNamedProperty) {
const AstRawString* name = ast_factory.GetOneByteString("val");
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0, &feedback_spec);
builder.LoadNamedProperty(builder.Receiver(), name, GetIndex(slot)).Return();
ast_factory.Internalize(isolate);
@@ -1106,7 +1132,7 @@ TEST(InterpreterLoadKeyedProperty) {
const AstRawString* key = ast_factory.GetOneByteString("key");
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
builder.LoadLiteral(key)
.LoadKeyedProperty(builder.Receiver(), GetIndex(slot))
@@ -1141,17 +1167,18 @@ TEST(InterpreterStoreNamedProperty) {
isolate->heap()->HashSeed());
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddStoreICSlot(SLOPPY);
+ FeedbackSlot slot = feedback_spec.AddStoreICSlot(LanguageMode::kStrict);
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
const AstRawString* name = ast_factory.GetOneByteString("val");
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0, &feedback_spec);
builder.LoadLiteral(Smi::FromInt(999))
- .StoreNamedProperty(builder.Receiver(), name, GetIndex(slot), STRICT)
+ .StoreNamedProperty(builder.Receiver(), name, GetIndex(slot),
+ LanguageMode::kStrict)
.Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1203,20 +1230,20 @@ TEST(InterpreterStoreKeyedProperty) {
isolate->heap()->HashSeed());
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddKeyedStoreICSlot(SLOPPY);
+ FeedbackSlot slot = feedback_spec.AddKeyedStoreICSlot(LanguageMode::kSloppy);
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
const AstRawString* name = ast_factory.GetOneByteString("val");
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
builder.LoadLiteral(name)
.StoreAccumulatorInRegister(Register(0))
.LoadLiteral(Smi::FromInt(999))
.StoreKeyedProperty(builder.Receiver(), Register(0), GetIndex(slot),
- i::SLOPPY)
+ i::LanguageMode::kSloppy)
.Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1268,7 +1295,7 @@ TEST(InterpreterCall) {
// Check with no args.
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(1);
builder.LoadNamedProperty(builder.Receiver(), name, slot_index)
@@ -1292,7 +1319,7 @@ TEST(InterpreterCall) {
// Check that receiver is passed properly.
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(1);
builder.LoadNamedProperty(builder.Receiver(), name, slot_index)
@@ -1317,7 +1344,7 @@ TEST(InterpreterCall) {
// Check with two parameters (+ receiver).
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 4);
+ BytecodeArrayBuilder builder(zone, 1, 4, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(3);
@@ -1350,7 +1377,7 @@ TEST(InterpreterCall) {
// Check with 10 parameters (+ receiver).
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 12);
+ BytecodeArrayBuilder builder(zone, 1, 12, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(11);
@@ -1358,25 +1385,25 @@ TEST(InterpreterCall) {
.StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(builder.Receiver())
.StoreAccumulatorInRegister(args[0])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("a")))
+ .LoadLiteral(ast_factory.GetOneByteString("a"))
.StoreAccumulatorInRegister(args[1])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("b")))
+ .LoadLiteral(ast_factory.GetOneByteString("b"))
.StoreAccumulatorInRegister(args[2])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("c")))
+ .LoadLiteral(ast_factory.GetOneByteString("c"))
.StoreAccumulatorInRegister(args[3])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("d")))
+ .LoadLiteral(ast_factory.GetOneByteString("d"))
.StoreAccumulatorInRegister(args[4])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("e")))
+ .LoadLiteral(ast_factory.GetOneByteString("e"))
.StoreAccumulatorInRegister(args[5])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("f")))
+ .LoadLiteral(ast_factory.GetOneByteString("f"))
.StoreAccumulatorInRegister(args[6])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("g")))
+ .LoadLiteral(ast_factory.GetOneByteString("g"))
.StoreAccumulatorInRegister(args[7])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("h")))
+ .LoadLiteral(ast_factory.GetOneByteString("h"))
.StoreAccumulatorInRegister(args[8])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("i")))
+ .LoadLiteral(ast_factory.GetOneByteString("i"))
.StoreAccumulatorInRegister(args[9])
- .LoadLiteral(ast_factory.NewString(ast_factory.GetOneByteString("j")))
+ .LoadLiteral(ast_factory.GetOneByteString("j"))
.StoreAccumulatorInRegister(args[10]);
builder.CallProperty(reg, args, call_slot_index);
@@ -1427,12 +1454,12 @@ TEST(InterpreterJumps) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 2);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 2, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -1464,14 +1491,14 @@ TEST(InterpreterConditionalJumps) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 2);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot4 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 2, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot3 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot4 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -1514,14 +1541,14 @@ TEST(InterpreterConditionalJumps2) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 2);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot4 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 2, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot3 = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot4 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -1565,10 +1592,10 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 257);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 257, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -1579,7 +1606,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
builder.StoreAccumulatorInRegister(reg);
// Consume all 8-bit operands
for (int i = 1; i <= 256; i++) {
- builder.LoadLiteral(ast_factory.NewNumber(i + 0.5));
+ builder.LoadLiteral(i + 0.5);
builder.BinaryOperation(Token::Value::ADD, reg, GetIndex(slot));
builder.StoreAccumulatorInRegister(reg);
}
@@ -1626,7 +1653,7 @@ TEST(InterpreterJumpWith32BitOperand) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1);
Register reg(0);
BytecodeLabel done;
@@ -1635,7 +1662,7 @@ TEST(InterpreterJumpWith32BitOperand) {
// Consume all 16-bit constant pool entries. Make sure to use doubles so that
// the jump can't re-use an integer.
for (int i = 1; i <= 65536; i++) {
- builder.LoadLiteral(ast_factory.NewNumber(i + 0.5));
+ builder.LoadLiteral(i + 0.5);
}
builder.Jump(&done);
builder.LoadLiteral(Smi::kZero);
@@ -1716,10 +1743,10 @@ TEST(InterpreterSmiComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddCompareICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -1764,17 +1791,17 @@ TEST(InterpreterHeapNumberComparisons) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddCompareICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
Register r0(0);
- builder.LoadLiteral(ast_factory.NewNumber(inputs[i]))
+ builder.LoadLiteral(inputs[i])
.StoreAccumulatorInRegister(r0)
- .LoadLiteral(ast_factory.NewNumber(inputs[j]))
+ .LoadLiteral(inputs[j])
.CompareOperation(comparison, r0, GetIndex(slot))
.Return();
@@ -1814,11 +1841,11 @@ TEST(InterpreterStringComparisons) {
const char* rhs = inputs[j].c_str();
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ FeedbackSlot slot = feedback_spec.AddCompareICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register r0(0);
builder.LoadLiteral(ast_factory.GetOneByteString(lhs))
.StoreAccumulatorInRegister(r0)
@@ -1853,9 +1880,9 @@ static void LoadStringAndAddSpace(BytecodeArrayBuilder* builder,
Register string_reg = builder->register_allocator()->NewRegister();
(*builder)
- .LoadLiteral(ast_factory->NewString(ast_factory->GetOneByteString(cstr)))
+ .LoadLiteral(ast_factory->GetOneByteString(cstr))
.StoreAccumulatorInRegister(string_reg)
- .LoadLiteral(ast_factory->NewString(ast_factory->GetOneByteString(" ")))
+ .LoadLiteral(ast_factory->GetOneByteString(" "))
.BinaryOperation(Token::Value::ADD, string_reg,
GetIndex(string_add_slot));
}
@@ -1893,12 +1920,11 @@ TEST(InterpreterMixedComparisons) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
-
FeedbackVectorSpec feedback_spec(zone);
- FeedbackSlot string_add_slot =
- feedback_spec.AddInterpreterBinaryOpICSlot();
- FeedbackSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ BytecodeArrayBuilder builder(zone, 1, 0, &feedback_spec);
+
+ FeedbackSlot string_add_slot = feedback_spec.AddBinaryOpICSlot();
+ FeedbackSlot slot = feedback_spec.AddCompareICSlot();
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
@@ -1908,13 +1934,11 @@ TEST(InterpreterMixedComparisons) {
if (which_side == kRhsIsString) {
// Comparison with HeapNumber on the lhs and String on the rhs.
- builder.LoadLiteral(ast_factory.NewNumber(lhs))
- .StoreAccumulatorInRegister(lhs_reg);
+ builder.LoadLiteral(lhs).StoreAccumulatorInRegister(lhs_reg);
if (string_type == kInternalizedStringConstant) {
// rhs string is internalized.
- builder.LoadLiteral(ast_factory.NewString(
- ast_factory.GetOneByteString(rhs_cstr)));
+ builder.LoadLiteral(ast_factory.GetOneByteString(rhs_cstr));
} else {
CHECK_EQ(string_type, kComputedString);
// rhs string is not internalized (append a space to the end).
@@ -1928,8 +1952,7 @@ TEST(InterpreterMixedComparisons) {
if (string_type == kInternalizedStringConstant) {
// lhs string is internalized
- builder.LoadLiteral(ast_factory.NewString(
- ast_factory.GetOneByteString(lhs_cstr)));
+ builder.LoadLiteral(ast_factory.GetOneByteString(lhs_cstr));
} else {
CHECK_EQ(string_type, kComputedString);
// lhs string is not internalized (append a space to the end).
@@ -1938,7 +1961,7 @@ TEST(InterpreterMixedComparisons) {
}
builder.StoreAccumulatorInRegister(lhs_reg);
- builder.LoadLiteral(ast_factory.NewNumber(rhs));
+ builder.LoadLiteral(rhs);
}
builder.CompareOperation(comparison, lhs_reg, GetIndex(slot))
@@ -2073,7 +2096,7 @@ TEST(InterpreterCompareTypeOf) {
LiteralFlag literal_flag = kLiterals[l];
if (literal_flag == LiteralFlag::kOther) continue;
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
builder.LoadAccumulatorWithRegister(builder.Receiver())
.CompareTypeOf(kLiterals[l])
.Return();
@@ -2095,27 +2118,32 @@ TEST(InterpreterInstanceOf) {
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
Handle<i::String> name = factory->NewStringFromAsciiChecked("cons");
- Handle<i::JSFunction> func = factory->NewFunction(name);
+ Handle<i::JSFunction> func = factory->NewFunctionForTest(name);
Handle<i::JSObject> instance = factory->NewJSObject(func);
Handle<i::Object> other = factory->NewNumber(3.3333);
Handle<i::Object> cases[] = {Handle<i::Object>::cast(instance), other};
for (size_t i = 0; i < arraysize(cases); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ FeedbackVectorSpec feedback_spec(zone);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register r0(0);
size_t case_entry = builder.AllocateDeferredConstantPoolEntry();
builder.SetDeferredConstantPoolEntry(case_entry, cases[i]);
builder.LoadConstantPoolEntry(case_entry).StoreAccumulatorInRegister(r0);
+ FeedbackSlot slot = feedback_spec.AddInstanceOfSlot();
+ Handle<i::FeedbackMetadata> metadata =
+ NewFeedbackMetadata(isolate, &feedback_spec);
+
size_t func_entry = builder.AllocateDeferredConstantPoolEntry();
builder.SetDeferredConstantPoolEntry(func_entry, func);
builder.LoadConstantPoolEntry(func_entry)
- .CompareOperation(Token::Value::INSTANCEOF, r0)
+ .CompareOperation(Token::Value::INSTANCEOF, r0, GetIndex(slot))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -2137,7 +2165,7 @@ TEST(InterpreterTestIn) {
const char* properties[] = {"length", "fuzzle", "x", "0"};
for (size_t i = 0; i < arraysize(properties); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(isolate, zone, 1, 1);
+ BytecodeArrayBuilder builder(zone, 1, 1);
Register r0(0);
builder.LoadLiteral(ast_factory.GetOneByteString(properties[i]))
@@ -2165,7 +2193,7 @@ TEST(InterpreterUnaryNot) {
Zone* zone = handles.main_zone();
for (size_t i = 1; i < 10; i++) {
bool expected_value = ((i & 1) == 1);
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
Register r0(0);
builder.LoadFalse();
@@ -2189,26 +2217,24 @@ TEST(InterpreterUnaryNotNonBoolean) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- std::pair<const AstValue*, bool> object_type_tuples[] = {
- std::make_pair(ast_factory.NewUndefined(), true),
- std::make_pair(ast_factory.NewNull(), true),
- std::make_pair(ast_factory.NewBoolean(false), true),
- std::make_pair(ast_factory.NewBoolean(true), false),
- std::make_pair(ast_factory.NewNumber(9.1), false),
- std::make_pair(ast_factory.NewNumber(0), true),
- std::make_pair(
- ast_factory.NewString(ast_factory.GetOneByteString("hello")), false),
- std::make_pair(ast_factory.NewString(ast_factory.GetOneByteString("")),
- true),
+ std::pair<LiteralForTest, bool> object_type_tuples[] = {
+ std::make_pair(LiteralForTest(LiteralForTest::kUndefined), true),
+ std::make_pair(LiteralForTest(LiteralForTest::kNull), true),
+ std::make_pair(LiteralForTest(LiteralForTest::kFalse), true),
+ std::make_pair(LiteralForTest(LiteralForTest::kTrue), false),
+ std::make_pair(LiteralForTest(9.1), false),
+ std::make_pair(LiteralForTest(0), true),
+ std::make_pair(LiteralForTest(ast_factory.GetOneByteString("hello")),
+ false),
+ std::make_pair(LiteralForTest(ast_factory.GetOneByteString("")), true),
};
for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0);
+ BytecodeArrayBuilder builder(zone, 1, 0);
Register r0(0);
- builder.LoadLiteral(object_type_tuples[i].first);
- builder.LogicalNot(ToBooleanMode::kConvertToBoolean);
- builder.Return();
+ LoadLiteralForTest(&builder, object_type_tuples[i].first);
+ builder.LogicalNot(ToBooleanMode::kConvertToBoolean).Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array);
@@ -2251,7 +2277,7 @@ TEST(InterpreterCallRuntime) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 2);
+ BytecodeArrayBuilder builder(zone, 1, 2);
RegisterList args = builder.register_allocator()->NewRegisterList(2);
builder.LoadLiteral(Smi::FromInt(15))
@@ -2274,7 +2300,7 @@ TEST(InterpreterInvokeIntrinsic) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 2);
+ BytecodeArrayBuilder builder(zone, 1, 2);
builder.LoadLiteral(Smi::FromInt(15))
.StoreAccumulatorInRegister(Register(0))
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 1d0ead6246..dd3f30d621 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -42,7 +42,7 @@ TEST(TestTraceObject) {
uint8_t category_enabled_flag = 41;
trace_object.Initialize('X', &category_enabled_flag, "Test.Trace",
"Test.Scope", 42, 123, 0, nullptr, nullptr, nullptr,
- nullptr, 0, 1729, 4104);
+ nullptr, 0);
CHECK_EQ('X', trace_object.phase());
CHECK_EQ(category_enabled_flag, *trace_object.category_enabled_flag());
CHECK_EQ(std::string("Test.Trace"), std::string(trace_object.name()));
@@ -96,7 +96,7 @@ TEST(TestTraceBufferRingBuffer) {
CHECK_NOT_NULL(trace_object);
trace_object->Initialize('X', &category_enabled_flag, names[i].c_str(),
"Test.Scope", 42, 123, 0, nullptr, nullptr,
- nullptr, nullptr, 0, 1729, 4104);
+ nullptr, nullptr, 0);
trace_object = ring_buffer->GetEventByHandle(handles[i]);
CHECK_NOT_NULL(trace_object);
CHECK_EQ('X', trace_object->phase());
@@ -130,35 +130,40 @@ TEST(TestTraceBufferRingBuffer) {
TEST(TestJSONTraceWriter) {
std::ostringstream stream;
- v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
- i::V8::SetPlatformForTesting(default_platform);
// Create a scope for the tracing controller to terminate the trace writer.
{
- TracingController tracing_controller;
- static_cast<v8::platform::DefaultPlatform*>(default_platform)
- ->SetTracingController(&tracing_controller);
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ std::unique_ptr<v8::Platform> default_platform(
+ v8::platform::NewDefaultPlatform());
+ i::V8::SetPlatformForTesting(default_platform.get());
+ auto tracing =
+ base::make_unique<v8::platform::tracing::TracingController>();
+ v8::platform::tracing::TracingController* tracing_controller =
+ tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
+ ->SetTracingController(std::move(tracing));
TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
- tracing_controller.Initialize(ring_buffer);
+ tracing_controller->Initialize(ring_buffer);
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8-cat");
- tracing_controller.StartTracing(trace_config);
+ tracing_controller->StartTracing(trace_config);
TraceObject trace_object;
trace_object.InitializeForTesting(
- 'X', tracing_controller.GetCategoryGroupEnabled("v8-cat"), "Test0",
+ 'X', tracing_controller->GetCategoryGroupEnabled("v8-cat"), "Test0",
v8::internal::tracing::kGlobalScope, 42, 123, 0, nullptr, nullptr,
nullptr, nullptr, TRACE_EVENT_FLAG_HAS_ID, 11, 22, 100, 50, 33, 44);
writer->AppendTraceEvent(&trace_object);
trace_object.InitializeForTesting(
- 'Y', tracing_controller.GetCategoryGroupEnabled("v8-cat"), "Test1",
+ 'Y', tracing_controller->GetCategoryGroupEnabled("v8-cat"), "Test1",
v8::internal::tracing::kGlobalScope, 43, 456, 0, nullptr, nullptr,
nullptr, nullptr, 0, 55, 66, 110, 55, 77, 88);
writer->AppendTraceEvent(&trace_object);
- tracing_controller.StopTracing();
+ tracing_controller->StopTracing();
+ i::V8::SetPlatformForTesting(old_platform);
}
std::string trace_str = stream.str();
@@ -170,32 +175,32 @@ TEST(TestJSONTraceWriter) {
"\"Test1\",\"dur\":77,\"tdur\":88,\"args\":{}}]}";
CHECK_EQ(expected_trace_str, trace_str);
-
- i::V8::SetPlatformForTesting(old_platform);
}
TEST(TestTracingController) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
- i::V8::SetPlatformForTesting(default_platform);
+ std::unique_ptr<v8::Platform> default_platform(
+ v8::platform::NewDefaultPlatform());
+ i::V8::SetPlatformForTesting(default_platform.get());
- TracingController tracing_controller;
- static_cast<v8::platform::DefaultPlatform*>(default_platform)
- ->SetTracingController(&tracing_controller);
+ auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ v8::platform::tracing::TracingController* tracing_controller = tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
+ ->SetTracingController(std::move(tracing));
MockTraceWriter* writer = new MockTraceWriter();
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
- tracing_controller.Initialize(ring_buffer);
+ tracing_controller->Initialize(ring_buffer);
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
- tracing_controller.StartTracing(trace_config);
+ tracing_controller->StartTracing(trace_config);
TRACE_EVENT0("v8", "v8.Test");
// cat category is not included in default config
TRACE_EVENT0("cat", "v8.Test2");
TRACE_EVENT0("v8", "v8.Test3");
- tracing_controller.StopTracing();
+ tracing_controller->StopTracing();
CHECK_EQ(2u, writer->events().size());
CHECK_EQ(std::string("v8.Test"), writer->events()[0]);
@@ -220,10 +225,6 @@ void GetJSONStrings(std::vector<std::string>& ret, std::string str,
TEST(TestTracingControllerMultipleArgsAndCopy) {
std::ostringstream stream;
- v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
- i::V8::SetPlatformForTesting(default_platform);
-
uint64_t aa = 11;
unsigned int bb = 22;
uint16_t cc = 33;
@@ -246,17 +247,25 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
// Create a scope for the tracing controller to terminate the trace writer.
{
- TracingController tracing_controller;
- static_cast<v8::platform::DefaultPlatform*>(default_platform)
- ->SetTracingController(&tracing_controller);
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ std::unique_ptr<v8::Platform> default_platform(
+ v8::platform::NewDefaultPlatform());
+ i::V8::SetPlatformForTesting(default_platform.get());
+
+ auto tracing =
+ base::make_unique<v8::platform::tracing::TracingController>();
+ v8::platform::tracing::TracingController* tracing_controller =
+ tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
+ ->SetTracingController(std::move(tracing));
TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
- tracing_controller.Initialize(ring_buffer);
+ tracing_controller->Initialize(ring_buffer);
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
- tracing_controller.StartTracing(trace_config);
+ tracing_controller->StartTracing(trace_config);
TRACE_EVENT1("v8", "v8.Test.aa", "aa", aa);
TRACE_EVENT1("v8", "v8.Test.bb", "bb", bb);
@@ -296,7 +305,9 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
std::move(trace_event_arg), "a2",
new ConvertableToTraceFormatMock(123));
- tracing_controller.StopTracing();
+ tracing_controller->StopTracing();
+
+ i::V8::SetPlatformForTesting(old_platform);
}
std::string trace_str = stream.str();
@@ -337,8 +348,6 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
CHECK_EQ(all_args[21], "\"mm1\":\"INIT\",\"mm2\":\"\\\"INIT\\\"\"");
CHECK_EQ(all_args[22], "\"a1\":[42,42]");
CHECK_EQ(all_args[23], "\"a1\":[42,42],\"a2\":[123,123]");
-
- i::V8::SetPlatformForTesting(old_platform);
}
namespace {
@@ -356,58 +365,60 @@ class TraceStateObserverImpl : public TracingController::TraceStateObserver {
TEST(TracingObservers) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
- i::V8::SetPlatformForTesting(default_platform);
-
- v8::platform::tracing::TracingController tracing_controller;
- static_cast<v8::platform::DefaultPlatform*>(default_platform)
- ->SetTracingController(&tracing_controller);
+ std::unique_ptr<v8::Platform> default_platform(
+ v8::platform::NewDefaultPlatform());
+ i::V8::SetPlatformForTesting(default_platform.get());
+
+ auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ v8::platform::tracing::TracingController* tracing_controller = tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
+ ->SetTracingController(std::move(tracing));
MockTraceWriter* writer = new MockTraceWriter();
v8::platform::tracing::TraceBuffer* ring_buffer =
v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
writer);
- tracing_controller.Initialize(ring_buffer);
+ tracing_controller->Initialize(ring_buffer);
v8::platform::tracing::TraceConfig* trace_config =
new v8::platform::tracing::TraceConfig();
trace_config->AddIncludedCategory("v8");
TraceStateObserverImpl observer;
- tracing_controller.AddTraceStateObserver(&observer);
+ tracing_controller->AddTraceStateObserver(&observer);
CHECK_EQ(0, observer.enabled_count);
CHECK_EQ(0, observer.disabled_count);
- tracing_controller.StartTracing(trace_config);
+ tracing_controller->StartTracing(trace_config);
CHECK_EQ(1, observer.enabled_count);
CHECK_EQ(0, observer.disabled_count);
TraceStateObserverImpl observer2;
- tracing_controller.AddTraceStateObserver(&observer2);
+ tracing_controller->AddTraceStateObserver(&observer2);
CHECK_EQ(1, observer2.enabled_count);
CHECK_EQ(0, observer2.disabled_count);
- tracing_controller.RemoveTraceStateObserver(&observer2);
+ tracing_controller->RemoveTraceStateObserver(&observer2);
CHECK_EQ(1, observer2.enabled_count);
CHECK_EQ(0, observer2.disabled_count);
- tracing_controller.StopTracing();
+ tracing_controller->StopTracing();
CHECK_EQ(1, observer.enabled_count);
CHECK_EQ(1, observer.disabled_count);
CHECK_EQ(1, observer2.enabled_count);
CHECK_EQ(0, observer2.disabled_count);
- tracing_controller.RemoveTraceStateObserver(&observer);
+ tracing_controller->RemoveTraceStateObserver(&observer);
CHECK_EQ(1, observer.enabled_count);
CHECK_EQ(1, observer.disabled_count);
trace_config = new v8::platform::tracing::TraceConfig();
- tracing_controller.StartTracing(trace_config);
- tracing_controller.StopTracing();
+ tracing_controller->StartTracing(trace_config);
+ tracing_controller->StopTracing();
CHECK_EQ(1, observer.enabled_count);
CHECK_EQ(1, observer.disabled_count);
diff --git a/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js b/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js
index 522a3726ea..cc3c8184d5 100644
--- a/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js
+++ b/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js
@@ -39,7 +39,8 @@ function parseState(s) {
function LogProcessor() {
LogReader.call(this, {
'code-creation': {
- parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
+ parsers: [null, parseInt, parseInt, parseInt, parseInt,
+ null, 'var-args'],
processor: this.processCodeCreation },
'code-move': { parsers: [parseInt, parseInt],
processor: this.processCodeMove },
@@ -55,8 +56,10 @@ function LogProcessor() {
LogProcessor.prototype.__proto__ = LogReader.prototype;
LogProcessor.prototype.processCodeCreation = function(
- type, kind, start, size, name, maybe_func) {
- if (type != "LazyCompile" && type != "Script" && type != "Function") return;
+ type, kind, timestamp, start, size, name, maybe_func) {
+ if (type != "LazyCompile" && type != "Script" && type != "Function") {
+ return;
+ }
// Scripts will compile into anonymous functions starting at 1:1. Adjust the
// name here so that it matches corrsponding function's name during the heap
// traversal.
@@ -66,9 +69,9 @@ LogProcessor.prototype.processCodeCreation = function(
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
var state = parseState(maybe_func[1]);
- this.profile.addFuncCode(type, name, start, size, funcAddr, state);
+ this.profile.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
} else {
- this.profile.addCode(type, name, start, size);
+ this.profile.addCode(type, name, timestamp, start, size);
}
};
@@ -100,6 +103,7 @@ function RunTest() {
logging_processor.profile.codeMap_.getAllDynamicEntriesWithAddresses();
if (logging_entries.length === 0)
return "logging_entries.length === 0";
+
var traversal_processor = new LogProcessor();
for ( ; pos < log_lines_length; ++pos) {
line = log_lines[pos];
@@ -170,6 +174,7 @@ function RunTest() {
return [equal, comparison];
}
+
var result = RunTest();
if (typeof result !== "string") {
var out = [];
@@ -182,6 +187,13 @@ if (typeof result !== "string") {
(c[2] ? c[2] : "---") + " " +
(c[3] ? c[3] : "---"));
}
+ out.push("================================================");
+ out.push("MAKE SURE TO USE A CLEAN ISOLATE!");
+ out.push("Use tools/test.py");
+ out.push("================================================");
+ out.push("* Lines are the same");
+ out.push("--- Line is missing");
+ out.push("================================================");
}
result[0] ? true : out.join("\n");
} else {
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 3db7fb99d6..74630c6c7e 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/ast/ast.h"
#include "src/compiler.h"
#include "src/objects-inl.h"
@@ -37,7 +38,6 @@ TEST(PreParserScopeAnalysis) {
i::FLAG_aggressive_lazy_inner_functions = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
- i::HandleScope scope(isolate);
LocalContext env;
struct {
@@ -45,77 +45,49 @@ TEST(PreParserScopeAnalysis) {
bool strict_outer;
bool strict_test_function;
bool arrow;
- std::vector<unsigned> location; // "Directions" to the relevant scope.
} outers[] = {
// Normal case (test function at the laziness boundary):
- {"(function outer() { function test(%s) { %s \n"
- "function skippable() { } } })();",
- false,
- false,
- false,
- {0, 0}},
-
- {"(function outer() { let test2 = function test(%s) { %s \n"
- "function skippable() { } } })();",
- false,
- false,
- false,
- {0, 0}},
+ {"function test(%s) { %s function skippable() { } } test;", false, false,
+ false},
+
+ {"var test2 = function test(%s) { %s function skippable() { } }; test2",
+ false, false, false},
// Arrow functions (they can never be at the laziness boundary):
- {"(function outer() { function inner() { (%s) => { %s } \n"
- "function skippable() { } } })();",
- false,
- false,
- true,
- {0, 0}},
-
- // Repeat the above mentioned cases w/ outer function declaring itself
- // strict:
- {"(function outer() { 'use strict'; function test(%s) { %s \n"
- "function skippable() { } } })();",
- true,
- false,
- false,
- {0, 0}},
-
- {"(function outer() { 'use strict'; function inner() { "
- "(%s) => { %s } \nfunction skippable() { } } })();",
- true,
- false,
- true,
- {0, 0}},
+ {"function test() { (%s) => { %s }; function skippable() { } } test;",
+ false, false, true},
+
+ // Repeat the above mentioned cases with global 'use strict'
+ {"'use strict'; function test(%s) { %s function skippable() { } } test;",
+ true, false, false},
+
+ {"'use strict'; var test2 = function test(%s) { %s \n"
+ "function skippable() { } }; test2",
+ true, false, false},
+
+ {"'use strict'; function test() { (%s) => { %s };\n"
+ "function skippable() { } } test;",
+ true, false, true},
// ... and with the test function declaring itself strict:
- {"(function outer() { function test(%s) { 'use strict'; %s \n"
- "function skippable() { } } })();",
- false,
- true,
- false,
- {0, 0}},
-
- {"(function outer() { function inner() { "
- "(%s) => { 'use strict'; %s } \nfunction skippable() { } } })();",
- false,
- true,
- true,
- {0, 0}},
+ {"function test(%s) { 'use strict'; %s function skippable() { } } test;",
+ false, true, false},
+
+ {"var test2 = function test(%s) { 'use strict'; %s \n"
+ "function skippable() { } }; test2",
+ false, true, false},
+
+ {"function test() { 'use strict'; (%s) => { %s };\n"
+ "function skippable() { } } test;",
+ false, true, true},
// Methods containing skippable functions.
- {"class MyClass { constructor(%s) { %s \n"
- "function skippable() { } } }",
- true,
- true,
- false,
- {0, 0}},
-
- {"class MyClass { test(%s) { %s \n"
- "function skippable() { } } }",
- true,
- true,
- false,
- // The default constructor is scope 0 inside the class.
- {0, 1}},
+ {"function get_method() {\n"
+ " class MyClass { test_method(%s) { %s function skippable() { } } }\n"
+ " var o = new MyClass(); return o.test_method;\n"
+ "}\n"
+ "get_method();",
+ true, true, false},
// FIXME(marja): Generators and async functions
};
@@ -135,11 +107,16 @@ TEST(PreParserScopeAnalysis) {
Inner(const char* p, const char* s, SkipTests skip, Bailout bailout)
: params(p), source(s), skip(skip), bailout(bailout) {}
+ Inner(const char* s, std::function<void()> p, std::function<void()> e)
+ : source(s), prologue(p), epilogue(e) {}
+
const char* params = "";
const char* source;
SkipTests skip = DONT_SKIP;
PreciseMaybeAssigned precise_maybe_assigned = PreciseMaybeAssigned::YES;
Bailout bailout = Bailout::NO;
+ std::function<void()> prologue = nullptr;
+ std::function<void()> epilogue = nullptr;
} inners[] = {
// Simple cases
{"var1;"},
@@ -175,20 +152,20 @@ TEST(PreParserScopeAnalysis) {
// Functions.
{"function f1() { let var2; }"},
- {"var var1 = function f1() { let var2; }"},
- {"let var1 = function f1() { let var2; }"},
- {"const var1 = function f1() { let var2; }"},
- {"var var1 = function() { let var2; }"},
- {"let var1 = function() { let var2; }"},
- {"const var1 = function() { let var2; }"},
+ {"var var1 = function f1() { let var2; };"},
+ {"let var1 = function f1() { let var2; };"},
+ {"const var1 = function f1() { let var2; };"},
+ {"var var1 = function() { let var2; };"},
+ {"let var1 = function() { let var2; };"},
+ {"const var1 = function() { let var2; };"},
{"function *f1() { let var2; }"},
- {"let var1 = function *f1() { let var2; }"},
- {"let var1 = function*() { let var2; }"},
+ {"let var1 = function *f1() { let var2; };"},
+ {"let var1 = function*() { let var2; };"},
{"async function f1() { let var2; }"},
- {"let var1 = async function f1() { let var2; }"},
- {"let var1 = async function() { let var2; }"},
+ {"let var1 = async function f1() { let var2; };"},
+ {"let var1 = async function() { let var2; };"},
// Redeclarations.
{"var var1; var var1;"},
@@ -216,15 +193,15 @@ TEST(PreParserScopeAnalysis) {
{"arguments = 5;", SKIP_STRICT},
{"if (true) { arguments; }"},
{"if (true) { arguments = 5; }", SKIP_STRICT},
- {"() => { arguments; }"},
+ {"() => { arguments; };"},
{"var1, var2, var3", "arguments;"},
{"var1, var2, var3", "arguments = 5;", SKIP_STRICT},
- {"var1, var2, var3", "() => { arguments; }"},
- {"var1, var2, var3", "() => { arguments = 5; }", SKIP_STRICT},
+ {"var1, var2, var3", "() => { arguments; };"},
+ {"var1, var2, var3", "() => { arguments = 5; };", SKIP_STRICT},
{"this;"},
{"if (true) { this; }"},
- {"() => { this; }"},
+ {"() => { this; };"},
// Variable called "arguments"
{"var arguments;", SKIP_STRICT},
@@ -532,21 +509,21 @@ TEST(PreParserScopeAnalysis) {
{"{name1: var1}", "name1 = 16;", SKIP_STRICT_FUNCTION},
{"{var1}", "var1 = 16;", SKIP_STRICT_FUNCTION},
- {"[var1]", "() => { var1; }", SKIP_STRICT_FUNCTION},
- {"{name1: var1}", "() => { var1; }", SKIP_STRICT_FUNCTION},
- {"{name1: var1}", "() => { name1; }", SKIP_STRICT_FUNCTION},
- {"{var1}", "() => { var1; }", SKIP_STRICT_FUNCTION},
+ {"[var1]", "() => { var1; };", SKIP_STRICT_FUNCTION},
+ {"{name1: var1}", "() => { var1; };", SKIP_STRICT_FUNCTION},
+ {"{name1: var1}", "() => { name1; };", SKIP_STRICT_FUNCTION},
+ {"{var1}", "() => { var1; };", SKIP_STRICT_FUNCTION},
{"[var1, var2, var3]", "", SKIP_STRICT_FUNCTION},
{"{name1: var1, name2: var2, name3: var3}", "", SKIP_STRICT_FUNCTION},
{"{var1, var2, var3}", "", SKIP_STRICT_FUNCTION},
- {"[var1, var2, var3]", "() => { var2 = 16;}", SKIP_STRICT_FUNCTION},
- {"{name1: var1, name2: var2, name3: var3}", "() => { var2 = 16;}",
+ {"[var1, var2, var3]", "() => { var2 = 16;};", SKIP_STRICT_FUNCTION},
+ {"{name1: var1, name2: var2, name3: var3}", "() => { var2 = 16;};",
SKIP_STRICT_FUNCTION},
- {"{name1: var1, name2: var2, name3: var3}", "() => { name2 = 16;}",
+ {"{name1: var1, name2: var2, name3: var3}", "() => { name2 = 16;};",
SKIP_STRICT_FUNCTION},
- {"{var1, var2, var3}", "() => { var2 = 16;}", SKIP_STRICT_FUNCTION},
+ {"{var1, var2, var3}", "() => { var2 = 16;};", SKIP_STRICT_FUNCTION},
// Nesting destructuring.
{"[var1, [var2, var3], {var4, name5: [var5, var6]}]", "",
@@ -562,9 +539,9 @@ TEST(PreParserScopeAnalysis) {
// Destructuring rest. Because we can.
{"var1, ...[var2]", "", SKIP_STRICT_FUNCTION},
- {"var1, ...[var2]", "() => { var2; }", SKIP_STRICT_FUNCTION},
+ {"var1, ...[var2]", "() => { var2; };", SKIP_STRICT_FUNCTION},
{"var1, ...{0: var2}", "", SKIP_STRICT_FUNCTION},
- {"var1, ...{0: var2}", "() => { var2; }", SKIP_STRICT_FUNCTION},
+ {"var1, ...{0: var2}", "() => { var2; };", SKIP_STRICT_FUNCTION},
{"var1, ...[]", "", SKIP_STRICT_FUNCTION},
{"var1, ...{}", "", SKIP_STRICT_FUNCTION},
{"var1, ...[var2, var3]", "", SKIP_STRICT_FUNCTION},
@@ -583,16 +560,16 @@ TEST(PreParserScopeAnalysis) {
PreciseMaybeAssigned::NO},
// Locals shadowing parameters.
- {"var1, var2", "var var1 = 16; () => { var1 = 17; }"},
+ {"var1, var2", "var var1 = 16; () => { var1 = 17; };"},
// Locals shadowing destructuring parameters and the rest parameter.
- {"[var1, var2]", "var var1 = 16; () => { var1 = 17; }",
+ {"[var1, var2]", "var var1 = 16; () => { var1 = 17; };",
SKIP_STRICT_FUNCTION},
- {"{var1, var2}", "var var1 = 16; () => { var1 = 17; }",
+ {"{var1, var2}", "var var1 = 16; () => { var1 = 17; };",
SKIP_STRICT_FUNCTION},
- {"var1, var2, ...var3", "var var3 = 16; () => { var3 = 17; }",
+ {"var1, var2, ...var3", "var var3 = 16; () => { var3 = 17; };",
SKIP_STRICT_FUNCTION},
- {"var1, var2 = var1", "var var1 = 16; () => { var1 = 17; }",
+ {"var1, var2 = var1", "var var1 = 16; () => { var1 = 17; };",
SKIP_STRICT_FUNCTION, PreciseMaybeAssigned::NO},
// Hoisted sloppy block function shadowing a parameter.
@@ -632,13 +609,13 @@ TEST(PreParserScopeAnalysis) {
// Classes
{"class MyClass {}"},
- {"var1 = class MyClass {}"},
- {"var var1 = class MyClass {}"},
- {"let var1 = class MyClass {}"},
- {"const var1 = class MyClass {}"},
- {"var var1 = class {}"},
- {"let var1 = class {}"},
- {"const var1 = class {}"},
+ {"var1 = class MyClass {};"},
+ {"var var1 = class MyClass {};"},
+ {"let var1 = class MyClass {};"},
+ {"const var1 = class MyClass {};"},
+ {"var var1 = class {};"},
+ {"let var1 = class {};"},
+ {"const var1 = class {};"},
{"class MyClass { constructor() {} }"},
{"class MyClass { constructor() { var var1; } }"},
@@ -678,6 +655,16 @@ TEST(PreParserScopeAnalysis) {
{"class MyClass extends MyBase { static m() { var var1 = 11; } }"},
{"class MyClass extends MyBase { static m() { var var1; function foo() { "
"var1 = 11; } } }"},
+
+ {"class X { ['bar'] = 1; }; new X;",
+ [] { i::FLAG_harmony_public_fields = true; },
+ [] { i::FLAG_harmony_public_fields = false; }},
+ {"class X { static ['foo'] = 2; }; new X;",
+ [] { i::FLAG_harmony_public_fields = true; },
+ [] { i::FLAG_harmony_public_fields = false; }},
+ {"class X { ['bar'] = 1; static ['foo'] = 2; }; new X;",
+ [] { i::FLAG_harmony_public_fields = true; },
+ [] { i::FLAG_harmony_public_fields = false; }},
};
for (unsigned outer_ix = 0; outer_ix < arraysize(outers); ++outer_ix) {
@@ -701,84 +688,77 @@ TEST(PreParserScopeAnalysis) {
int source_len = Utf8LengthHelper(inners[inner_ix].source);
int len = code_len + params_len + source_len;
+ if (inners[inner_ix].prologue != nullptr) {
+ inners[inner_ix].prologue();
+ }
+
i::ScopedVector<char> program(len + 1);
i::SNPrintF(program, code, inners[inner_ix].params,
inners[inner_ix].source);
+ i::HandleScope scope(isolate);
+
i::Handle<i::String> source =
factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- // First compile with the lazy inner function and extract the scope data.
- i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo lazy_info(script);
-
- // No need to run scope analysis; preparser scope data is produced when
- // parsing.
- CHECK(i::parsing::ParseProgram(&lazy_info, isolate));
-
- // Retrieve the scope data we produced.
- i::Scope* scope_with_data = i::ScopeTestHelper::FindScope(
- lazy_info.literal()->scope(), outers[outer_ix].location);
- i::ProducedPreParsedScopeData* produced_data =
- scope_with_data->AsDeclarationScope()
- ->produced_preparsed_scope_data();
- i::MaybeHandle<i::PreParsedScopeData> maybe_produced_data_on_heap =
- produced_data->Serialize(isolate);
+ // Compile and run the script to get a pointer to the lazy function.
+ v8::Local<v8::Value> v = CompileRun(program.start());
+ i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+ i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+ i::Handle<i::SharedFunctionInfo> shared = i::handle(f->shared());
+
if (inners[inner_ix].bailout == Bailout::BAILOUT_IF_OUTER_SLOPPY &&
!outers[outer_ix].strict_outer) {
- DCHECK(maybe_produced_data_on_heap.is_null());
+ CHECK(!shared->HasPreParsedScopeData());
continue;
}
- DCHECK(!maybe_produced_data_on_heap.is_null());
- i::Handle<i::PreParsedScopeData> produced_data_on_heap =
- maybe_produced_data_on_heap.ToHandleChecked();
-
- // Then parse eagerly and check against the scope data.
- script = factory->NewScript(source);
-
- i::ParseInfo eager_normal(script);
- eager_normal.set_allow_lazy_parsing(false);
-
- CHECK(i::parsing::ParseProgram(&eager_normal, isolate));
- CHECK(i::Compiler::Analyze(&eager_normal));
-
- // Compare the allocation of the variables in two cases: 1) normal scope
- // allocation 2) allocation based on the preparse data.
-
- i::Scope* normal_scope = i::ScopeTestHelper::FindScope(
- eager_normal.literal()->scope(), outers[outer_ix].location);
- CHECK_NULL(normal_scope->sibling());
- CHECK(normal_scope->is_function_scope());
-
- i::ParseInfo eager_using_scope_data(script);
- eager_using_scope_data.set_allow_lazy_parsing(false);
-
- CHECK(i::parsing::ParseProgram(&eager_using_scope_data, isolate));
- // Don't run scope analysis (that would obviously decide the correct
- // allocation for the variables).
-
- i::Scope* unallocated_scope = i::ScopeTestHelper::FindScope(
- eager_using_scope_data.literal()->scope(), outers[outer_ix].location);
- CHECK_NULL(unallocated_scope->sibling());
- CHECK(unallocated_scope->is_function_scope());
-
- // Mark all inner functions as "skipped", so that we don't try to restore
- // data for them. No test should contain eager functions, because we
- // cannot properly decide whether we have or don't have data for them.
- i::ScopeTestHelper::MarkInnerFunctionsAsSkipped(unallocated_scope);
- i::ConsumedPreParsedScopeData* consumed_preparsed_scope_data =
- lazy_info.consumed_preparsed_scope_data();
- consumed_preparsed_scope_data->SetData(produced_data_on_heap);
- consumed_preparsed_scope_data->SkipFunctionDataForTesting();
- consumed_preparsed_scope_data->RestoreScopeAllocationData(
- unallocated_scope->AsDeclarationScope());
- i::ScopeTestHelper::AllocateWithoutVariableResolution(unallocated_scope);
+ CHECK(shared->HasPreParsedScopeData());
+ i::Handle<i::PreParsedScopeData> produced_data_on_heap(
+ i::PreParsedScopeData::cast(shared->preparsed_scope_data()));
+
+ // Parse the lazy function using the scope data.
+ i::ParseInfo using_scope_data(shared);
+ using_scope_data.set_lazy_compile();
+ using_scope_data.consumed_preparsed_scope_data()->SetData(
+ produced_data_on_heap);
+ CHECK(i::parsing::ParseFunction(&using_scope_data, shared, isolate));
+
+ // Verify that we skipped at least one function inside that scope.
+ i::DeclarationScope* scope_with_skipped_functions =
+ using_scope_data.literal()->scope();
+ CHECK(i::ScopeTestHelper::HasSkippedFunctionInside(
+ scope_with_skipped_functions));
+
+ // Do scope allocation (based on the preparsed scope data).
+ i::DeclarationScope::Analyze(&using_scope_data);
+
+ // Parse the lazy function again eagerly to produce baseline data.
+ i::ParseInfo not_using_scope_data(shared);
+ not_using_scope_data.set_lazy_compile();
+ CHECK(i::parsing::ParseFunction(&not_using_scope_data, shared, isolate));
+
+ // Verify that we didn't skip anything (there's no preparsed scope data,
+ // so we cannot skip).
+ i::DeclarationScope* scope_without_skipped_functions =
+ not_using_scope_data.literal()->scope();
+ CHECK(!i::ScopeTestHelper::HasSkippedFunctionInside(
+ scope_without_skipped_functions));
+
+ // Do normal scope allocation.
+ i::DeclarationScope::Analyze(&not_using_scope_data);
+
+ // Verify that scope allocation gave the same results when parsing w/ the
+ // scope data (and skipping functions), and when parsing without.
i::ScopeTestHelper::CompareScopes(
- normal_scope, unallocated_scope,
+ scope_without_skipped_functions, scope_with_skipped_functions,
inners[inner_ix].precise_maybe_assigned == PreciseMaybeAssigned::YES);
+
+ if (inners[inner_ix].epilogue != nullptr) {
+ inners[inner_ix].epilogue();
+ }
}
}
}
@@ -819,6 +799,20 @@ TEST(ProducingAndConsumingByteData) {
bytes.WriteUint8(0);
bytes.OverwriteFirstUint32(2017);
bytes.WriteUint8(100);
+ // Write quarter bytes between uint8s and uint32s to verify they're stored
+ // correctly.
+ bytes.WriteQuarter(3);
+ bytes.WriteQuarter(0);
+ bytes.WriteQuarter(2);
+ bytes.WriteQuarter(1);
+ bytes.WriteQuarter(0);
+ bytes.WriteUint8(50);
+ bytes.WriteQuarter(0);
+ bytes.WriteQuarter(1);
+ bytes.WriteQuarter(2);
+ bytes.WriteUint32(50);
+ // End with a lonely quarter.
+ bytes.WriteQuarter(2);
i::Handle<i::PodArray<uint8_t>> data_on_heap = bytes.Serialize(isolate);
i::ConsumedPreParsedScopeData::ByteData bytes_for_reading;
@@ -833,4 +827,15 @@ TEST(ProducingAndConsumingByteData) {
CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
}
diff --git a/deps/v8/test/cctest/print-extension.cc b/deps/v8/test/cctest/print-extension.cc
index dcac79752f..226d37ac31 100644
--- a/deps/v8/test/cctest/print-extension.cc
+++ b/deps/v8/test/cctest/print-extension.cc
@@ -41,7 +41,7 @@ void PrintExtension::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i != 0) printf(" ");
v8::HandleScope scope(args.GetIsolate());
v8::String::Utf8Value str(args.GetIsolate(), args[i]);
- if (*str == NULL) return;
+ if (*str == nullptr) return;
printf("%s", *str);
}
printf("\n");
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index df5cec79ce..aa75a481f5 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -74,7 +74,7 @@ void ProfilerExtension::StopProfiling(
void ProfilerExtension::CollectSample(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- profiler_->CollectSample();
+ v8::CpuProfiler::CollectSample(args.GetIsolate());
}
} // namespace internal
diff --git a/deps/v8/test/cctest/scope-test-helper.h b/deps/v8/test/cctest/scope-test-helper.h
index 7101ad63ab..b5b6df41fe 100644
--- a/deps/v8/test/cctest/scope-test-helper.h
+++ b/deps/v8/test/cctest/scope-test-helper.h
@@ -17,10 +17,6 @@ class ScopeTestHelper {
return var->scope()->MustAllocateInContext(var);
}
- static void AllocateWithoutVariableResolution(Scope* scope) {
- scope->AllocateVariablesRecursively();
- }
-
static void CompareScopes(Scope* baseline, Scope* scope,
bool precise_maybe_assigned) {
CHECK_EQ(baseline->scope_type(), scope->scope_type());
@@ -109,6 +105,20 @@ class ScopeTestHelper {
MarkInnerFunctionsAsSkipped(inner);
}
}
+
+ static bool HasSkippedFunctionInside(Scope* scope) {
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE &&
+ scope->AsDeclarationScope()->is_skipped_function()) {
+ return true;
+ }
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ if (HasSkippedFunctionInside(inner)) {
+ return true;
+ }
+ }
+ return false;
+ }
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 070a1a0817..07da7a55a0 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -334,7 +334,7 @@ THREADED_TEST(DirectCall) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetAccessor(v8_str("xxx"), CheckAccessorArgsCorrect, NULL,
+ obj->SetAccessor(v8_str("xxx"), CheckAccessorArgsCorrect, nullptr,
v8_str("data"));
v8::Local<v8::Object> inst =
obj->NewInstance(context.local()).ToLocalChecked();
@@ -363,7 +363,7 @@ THREADED_TEST(EmptyResult) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8_str("data"));
+ obj->SetAccessor(v8_str("xxx"), EmptyGetter, nullptr, v8_str("data"));
v8::Local<v8::Object> inst =
obj->NewInstance(context.local()).ToLocalChecked();
CHECK(
@@ -384,7 +384,7 @@ THREADED_TEST(NoReuseRegress) {
v8::HandleScope scope(isolate);
{
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8_str("data"));
+ obj->SetAccessor(v8_str("xxx"), EmptyGetter, nullptr, v8_str("data"));
LocalContext context;
v8::Local<v8::Object> inst =
obj->NewInstance(context.local()).ToLocalChecked();
@@ -400,7 +400,7 @@ THREADED_TEST(NoReuseRegress) {
}
{
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetAccessor(v8_str("xxx"), CheckAccessorArgsCorrect, NULL,
+ obj->SetAccessor(v8_str("xxx"), CheckAccessorArgsCorrect, nullptr,
v8_str("data"));
LocalContext context;
v8::Local<v8::Object> inst =
@@ -596,7 +596,7 @@ THREADED_TEST(JSONStringifyNamedInterceptorObject) {
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
- JSONStringifyGetter, NULL, NULL, NULL, JSONStringifyEnumerator));
+ JSONStringifyGetter, nullptr, nullptr, nullptr, JSONStringifyEnumerator));
CHECK(env->Global()
->Set(env.local(), v8_str("obj"),
obj->NewInstance(env.local()).ToLocalChecked())
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index f31b03670a..b1a3bef421 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -17,7 +17,7 @@ using v8::Task;
#include "src/allocation.h"
#include "src/zone/accounting-allocator.h"
-// ASAN isn't configured to return NULL, so skip all of these tests.
+// ASAN isn't configured to return nullptr, so skip all of these tests.
#if !defined(V8_USE_ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
!defined(THREAD_SANITIZER)
@@ -54,7 +54,7 @@ size_t GetHugeMemoryAmount() {
static size_t huge_memory = 0;
if (!huge_memory) {
for (int i = 0; i < 100; i++) {
- huge_memory |= bit_cast<size_t>(v8::internal::GetRandomMmapAddr());
+ huge_memory |= bit_cast<size_t>(v8::base::OS::GetRandomMmapAddr());
}
// Make it larger than the available address space.
huge_memory *= 2;
@@ -122,7 +122,7 @@ TEST(AlignedAllocOOM) {
// On failure, this won't return, since an AlignedAlloc failure is fatal.
// In that case, behavior is checked in OnAlignedAllocOOM before exit.
void* result = v8::internal::AlignedAlloc(GetHugeMemoryAmount(),
- v8::base::OS::AllocateAlignment());
+ v8::base::OS::AllocatePageSize());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
@@ -143,7 +143,7 @@ TEST(AlignedAllocVirtualMemoryOOM) {
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result;
bool success = v8::internal::AlignedAllocVirtualMemory(
- GetHugeMemoryAmount(), v8::base::OS::AllocateAlignment(), nullptr,
+ GetHugeMemoryAmount(), v8::base::OS::AllocatePageSize(), nullptr,
&result);
// On a few systems, allocation somehow succeeds.
CHECK_IMPLIES(success, result.IsReserved());
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 85fb3bd93c..fad8c0b79b 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -245,7 +245,7 @@ TEST(RedeclareAccessor) {
v8::Local<v8::ObjectTemplate> object_template = templ->InstanceTemplate();
object_template->SetAccessor(
- v8_str("foo"), NULL, Setter, v8::Local<v8::Value>(),
+ v8_str("foo"), nullptr, Setter, v8::Local<v8::Value>(),
v8::AccessControl::DEFAULT, v8::PropertyAttribute::DontDelete);
v8::Local<v8::Context> ctx =
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index e511f57db2..63f3bc42fb 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -2284,7 +2284,7 @@ THREADED_TEST(PrePropertyHandler) {
desc->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
PrePropertyHandlerGet, 0, PrePropertyHandlerQuery));
is_bootstrapping = true;
- LocalContext env(NULL, desc->InstanceTemplate());
+ LocalContext env(nullptr, desc->InstanceTemplate());
is_bootstrapping = false;
CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
v8::Local<Value> result_pre = CompileRun("pre");
@@ -3252,10 +3252,10 @@ THREADED_TEST(Deleter) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX, NULL,
- NULL, PDeleter, NULL));
+ obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ NoBlockGetterX, nullptr, nullptr, PDeleter, nullptr));
obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- NoBlockGetterI, NULL, NULL, IDeleter, NULL));
+ NoBlockGetterI, nullptr, nullptr, IDeleter, nullptr));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("k"),
@@ -3361,10 +3361,10 @@ THREADED_TEST(Enumerators) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetHandler(
- v8::NamedPropertyHandlerConfiguration(GetK, NULL, NULL, NULL, NamedEnum));
+ obj->SetHandler(v8::NamedPropertyHandlerConfiguration(GetK, nullptr, nullptr,
+ nullptr, NamedEnum));
obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- IndexedGetK, NULL, NULL, NULL, IndexedEnum));
+ IndexedGetK, nullptr, nullptr, nullptr, IndexedEnum));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("k"),
@@ -4453,9 +4453,9 @@ THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
obj_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- NULL, NULL, NULL, NULL, IndexedPropertyEnumerator));
+ nullptr, nullptr, nullptr, nullptr, IndexedPropertyEnumerator));
obj_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
- NULL, NULL, NULL, NULL, NamedPropertyEnumerator));
+ nullptr, nullptr, nullptr, nullptr, NamedPropertyEnumerator));
LocalContext context;
v8::Local<v8::Object> global = context->Global();
@@ -4520,7 +4520,7 @@ THREADED_TEST(GetOwnPropertyNamesWithIndexedInterceptorExceptions_regress4026) {
obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
// First just try a failing indexed interceptor.
obj_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- NULL, NULL, NULL, NULL, IndexedPropertyEnumeratorException));
+ nullptr, nullptr, nullptr, nullptr, IndexedPropertyEnumeratorException));
LocalContext context;
v8::Local<v8::Object> global = context->Global();
@@ -4566,7 +4566,7 @@ THREADED_TEST(GetOwnPropertyNamesWithNamedInterceptorExceptions_regress4026) {
obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
// First just try a failing indexed interceptor.
obj_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
- NULL, NULL, NULL, NULL, NamedPropertyEnumeratorException));
+ nullptr, nullptr, nullptr, nullptr, NamedPropertyEnumeratorException));
LocalContext context;
v8::Local<v8::Object> global = context->Global();
@@ -5068,7 +5068,7 @@ THREADED_TEST(EnumeratorsAndUnenumerableNamedProperties) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
- ConcatNamedPropertyGetter, NULL, RestrictiveNamedQuery, NULL,
+ ConcatNamedPropertyGetter, nullptr, RestrictiveNamedQuery, nullptr,
EnumCallbackWithNames));
LocalContext context;
context->Global()
@@ -5119,7 +5119,7 @@ THREADED_TEST(EnumeratorsAndUnenumerableNamedPropertiesWithoutSet) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
- ConcatNamedPropertyGetter, NULL, QueryInterceptorForFoo, NULL,
+ ConcatNamedPropertyGetter, nullptr, QueryInterceptorForFoo, nullptr,
EnumCallbackWithNames));
LocalContext context;
context->Global()
@@ -5141,7 +5141,7 @@ THREADED_TEST(EnumeratorsAndUnenumerableIndexedPropertiesArgumentsElements) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- ConcatIndexedPropertyGetter, NULL, RestrictiveIndexedQuery, NULL,
+ ConcatIndexedPropertyGetter, nullptr, RestrictiveIndexedQuery, nullptr,
SloppyArgsIndexedPropertyEnumerator));
LocalContext context;
context->Global()
@@ -5176,7 +5176,7 @@ THREADED_TEST(EnumeratorsAndUnenumerableIndexedProperties) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- ConcatIndexedPropertyGetter, NULL, RestrictiveIndexedQuery, NULL,
+ ConcatIndexedPropertyGetter, nullptr, RestrictiveIndexedQuery, nullptr,
EnumCallbackWithIndices));
LocalContext context;
context->Global()
@@ -5208,7 +5208,8 @@ THREADED_TEST(EnumeratorsAndForIn) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
- ConcatNamedPropertyGetter, NULL, RestrictiveNamedQuery, NULL, NamedEnum));
+ ConcatNamedPropertyGetter, nullptr, RestrictiveNamedQuery, nullptr,
+ NamedEnum));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("obj"),
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 99ab0bfaa1..a12a00da35 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -72,6 +72,7 @@ using ::v8::Local;
using ::v8::Maybe;
using ::v8::Message;
using ::v8::MessageCallback;
+using ::v8::Module;
using ::v8::Name;
using ::v8::None;
using ::v8::Object;
@@ -434,7 +435,7 @@ THREADED_TEST(Script) {
class TestResource: public String::ExternalStringResource {
public:
- explicit TestResource(uint16_t* data, int* counter = NULL,
+ explicit TestResource(uint16_t* data, int* counter = nullptr,
bool owning_data = true)
: data_(data), length_(0), counter_(counter), owning_data_(owning_data) {
while (data[length_]) ++length_;
@@ -442,7 +443,7 @@ class TestResource: public String::ExternalStringResource {
~TestResource() {
if (owning_data_) i::DeleteArray(data_);
- if (counter_ != NULL) ++*counter_;
+ if (counter_ != nullptr) ++*counter_;
}
const uint16_t* data() const {
@@ -463,7 +464,7 @@ class TestResource: public String::ExternalStringResource {
class TestOneByteResource : public String::ExternalOneByteStringResource {
public:
- explicit TestOneByteResource(const char* data, int* counter = NULL,
+ explicit TestOneByteResource(const char* data, int* counter = nullptr,
size_t offset = 0)
: orig_data_(data),
data_(data + offset),
@@ -472,7 +473,7 @@ class TestOneByteResource : public String::ExternalOneByteStringResource {
~TestOneByteResource() {
i::DeleteArray(orig_data_);
- if (counter_ != NULL) ++*counter_;
+ if (counter_ != nullptr) ++*counter_;
}
const char* data() const {
@@ -677,12 +678,12 @@ TEST(MakingExternalUnalignedOneByteString) {
// Turn into external string with unaligned resource data.
const char* c_cons = "_abcdefghijklmnopqrstuvwxyz";
- bool success =
- cons->MakeExternal(new TestOneByteResource(i::StrDup(c_cons), NULL, 1));
+ bool success = cons->MakeExternal(
+ new TestOneByteResource(i::StrDup(c_cons), nullptr, 1));
CHECK(success);
const char* c_slice = "_bcdefghijklmnopqrstuvwxyz";
- success =
- slice->MakeExternal(new TestOneByteResource(i::StrDup(c_slice), NULL, 1));
+ success = slice->MakeExternal(
+ new TestOneByteResource(i::StrDup(c_slice), nullptr, 1));
CHECK(success);
// Trigger GCs and force evacuation.
@@ -1523,7 +1524,7 @@ THREADED_TEST(BigInteger) {
// The code will not be run in that case, due to the "if" guard.
int32_t value =
static_cast<int32_t>(static_cast<uint32_t>(i::Smi::kMaxValue) + 1);
- CHECK(value > i::Smi::kMaxValue);
+ CHECK_GT(value, i::Smi::kMaxValue);
CHECK(!i::Smi::IsValid(value));
Local<v8::Integer> value_obj = v8::Integer::New(isolate, value);
@@ -2599,7 +2600,7 @@ static void CallFunctionRecursivelyCall(
->Get(context, v8_str("callFunctionRecursively"))
.ToLocalChecked();
args.GetReturnValue().Set(function.As<Function>()
- ->Call(context, args.This(), 0, NULL)
+ ->Call(context, args.This(), 0, nullptr)
.ToLocalChecked());
}
@@ -2612,7 +2613,7 @@ THREADED_TEST(DeepCrossLanguageRecursion) {
v8::FunctionTemplate::New(isolate, CallScriptRecursivelyCall));
global->Set(v8_str("callFunctionRecursively"),
v8::FunctionTemplate::New(isolate, CallFunctionRecursivelyCall));
- LocalContext env(NULL, global);
+ LocalContext env(nullptr, global);
CHECK(env->Global()
->Set(env.local(), v8_str("depth"), v8::Integer::New(isolate, 0))
@@ -2717,7 +2718,7 @@ THREADED_TEST(GlobalObjectInternalFields) {
v8::HandleScope scope(isolate);
Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(1);
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
v8::Local<v8::Object> global_proxy = env->Global();
v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(1, global->InternalFieldCount());
@@ -2758,7 +2759,7 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
.ToLocalChecked();
CHECK_EQ(1, obj->InternalFieldCount());
- CheckAlignedPointerInInternalField(obj, NULL);
+ CheckAlignedPointerInInternalField(obj, nullptr);
int* heap_allocated = new int[100];
CheckAlignedPointerInInternalField(obj, heap_allocated);
@@ -2828,7 +2829,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- CheckAlignedPointerInEmbedderData(&env, 0, NULL);
+ CheckAlignedPointerInEmbedderData(&env, 0, nullptr);
int* heap_allocated = new int[100];
CheckAlignedPointerInEmbedderData(&env, 1, heap_allocated);
@@ -2939,7 +2940,7 @@ void GlobalProxyIdentityHash(bool set_in_js) {
CHECK_EQ(hash1, hash2);
{
// Re-attach global proxy to a new context, hash should stay the same.
- LocalContext env2(NULL, Local<ObjectTemplate>(), global_proxy);
+ LocalContext env2(nullptr, Local<ObjectTemplate>(), global_proxy);
int hash3 = global_proxy->GetIdentityHash();
CHECK_EQ(hash1, hash3);
}
@@ -3527,7 +3528,7 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
- CHECK(data != NULL);
+ CHECK_NOT_NULL(data);
CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
v8::Local<v8::Value> result = CompileRun("ab.byteLength");
@@ -3781,8 +3782,8 @@ THREADED_TEST(ArrayBuffer_AllocationInformation) {
ScopedArrayBufferContents contents(ab->Externalize());
// Array buffers should have normal allocation mode.
- CHECK(contents.AllocationMode() ==
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
+ CHECK_EQ(contents.AllocationMode(),
+ v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
// The allocation must contain the buffer (normally they will be equal, but
// this is not required by the contract).
CHECK_NOT_NULL(contents.AllocationBase());
@@ -3830,7 +3831,7 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
- CHECK(data != NULL);
+ CHECK_NOT_NULL(data);
CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
v8::Local<v8::Value> result = CompileRun("ab.byteLength");
@@ -4867,86 +4868,6 @@ TEST(MessageHandler5) {
}
-TEST(NativeWeakMap) {
- v8::Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- Local<v8::NativeWeakMap> weak_map(v8::NativeWeakMap::New(isolate));
- CHECK(!weak_map.IsEmpty());
-
- LocalContext env;
- Local<Object> value = v8::Object::New(isolate);
-
- Local<Object> local1 = v8::Object::New(isolate);
- CHECK(!weak_map->Has(local1));
- CHECK(weak_map->Get(local1)->IsUndefined());
- weak_map->Set(local1, value);
- CHECK(weak_map->Has(local1));
- CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
-
- WeakCallCounter counter(1234);
- WeakCallCounterAndPersistent<Value> o1(&counter);
- WeakCallCounterAndPersistent<Value> o2(&counter);
- WeakCallCounterAndPersistent<Value> s1(&counter);
- {
- HandleScope scope(isolate);
- Local<v8::Object> obj1 = v8::Object::New(isolate);
- Local<v8::Object> obj2 = v8::Object::New(isolate);
- Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
-
- weak_map->Set(obj1, value);
- weak_map->Set(obj2, value);
- weak_map->Set(sym1, value);
-
- o1.handle.Reset(isolate, obj1);
- o2.handle.Reset(isolate, obj2);
- s1.handle.Reset(isolate, sym1);
-
- CHECK(weak_map->Has(local1));
- CHECK(weak_map->Has(obj1));
- CHECK(weak_map->Has(obj2));
- CHECK(weak_map->Has(sym1));
-
- CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
- CHECK(value->Equals(env.local(), weak_map->Get(obj1)).FromJust());
- CHECK(value->Equals(env.local(), weak_map->Get(obj2)).FromJust());
- CHECK(value->Equals(env.local(), weak_map->Get(sym1)).FromJust());
- }
- CcTest::CollectAllGarbage();
- {
- HandleScope scope(isolate);
- CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
- CHECK(value->Equals(env.local(),
- weak_map->Get(Local<Value>::New(isolate, o1.handle)))
- .FromJust());
- CHECK(value->Equals(env.local(),
- weak_map->Get(Local<Value>::New(isolate, o2.handle)))
- .FromJust());
- CHECK(value->Equals(env.local(),
- weak_map->Get(Local<Value>::New(isolate, s1.handle)))
- .FromJust());
- }
-
- o1.handle.SetWeak(&o1, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- o2.handle.SetWeak(&o2, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- s1.handle.SetWeak(&s1, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
-
- CcTest::CollectAllGarbage();
- CHECK_EQ(3, counter.NumberOfWeakCalls());
-
- CHECK(o1.handle.IsEmpty());
- CHECK(o2.handle.IsEmpty());
- CHECK(s1.handle.IsEmpty());
-
- CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
- CHECK(weak_map->Delete(local1));
- CHECK(!weak_map->Has(local1));
- CHECK(weak_map->Get(local1)->IsUndefined());
-}
-
-
THREADED_TEST(GetSetProperty) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -5255,7 +5176,7 @@ THREADED_TEST(FunctionCall) {
->Get(context.local(), v8_str("ReturnThisStrict"))
.ToLocalChecked());
- v8::Local<Value>* args0 = NULL;
+ v8::Local<Value>* args0 = nullptr;
Local<v8::Array> a0 = Local<v8::Array>::Cast(
Foo->Call(context.local(), Foo, 0, args0).ToLocalChecked());
CHECK_EQ(0u, a0->Length());
@@ -5322,47 +5243,49 @@ THREADED_TEST(FunctionCall) {
.FromJust());
Local<v8::Value> r1 =
- ReturnThisSloppy->Call(context.local(), v8::Undefined(isolate), 0, NULL)
+ ReturnThisSloppy
+ ->Call(context.local(), v8::Undefined(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(r1->StrictEquals(context->Global()));
Local<v8::Value> r2 =
- ReturnThisSloppy->Call(context.local(), v8::Null(isolate), 0, NULL)
+ ReturnThisSloppy->Call(context.local(), v8::Null(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(r2->StrictEquals(context->Global()));
Local<v8::Value> r3 =
- ReturnThisSloppy->Call(context.local(), v8_num(42), 0, NULL)
+ ReturnThisSloppy->Call(context.local(), v8_num(42), 0, nullptr)
.ToLocalChecked();
CHECK(r3->IsNumberObject());
CHECK_EQ(42.0, r3.As<v8::NumberObject>()->ValueOf());
Local<v8::Value> r4 =
- ReturnThisSloppy->Call(context.local(), v8_str("hello"), 0, NULL)
+ ReturnThisSloppy->Call(context.local(), v8_str("hello"), 0, nullptr)
.ToLocalChecked();
CHECK(r4->IsStringObject());
CHECK(r4.As<v8::StringObject>()->ValueOf()->StrictEquals(v8_str("hello")));
Local<v8::Value> r5 =
- ReturnThisSloppy->Call(context.local(), v8::True(isolate), 0, NULL)
+ ReturnThisSloppy->Call(context.local(), v8::True(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(r5->IsBooleanObject());
CHECK(r5.As<v8::BooleanObject>()->ValueOf());
Local<v8::Value> r6 =
- ReturnThisStrict->Call(context.local(), v8::Undefined(isolate), 0, NULL)
+ ReturnThisStrict
+ ->Call(context.local(), v8::Undefined(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(r6->IsUndefined());
Local<v8::Value> r7 =
- ReturnThisStrict->Call(context.local(), v8::Null(isolate), 0, NULL)
+ ReturnThisStrict->Call(context.local(), v8::Null(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(r7->IsNull());
Local<v8::Value> r8 =
- ReturnThisStrict->Call(context.local(), v8_num(42), 0, NULL)
+ ReturnThisStrict->Call(context.local(), v8_num(42), 0, nullptr)
.ToLocalChecked();
CHECK(r8->StrictEquals(v8_num(42)));
Local<v8::Value> r9 =
- ReturnThisStrict->Call(context.local(), v8_str("hello"), 0, NULL)
+ ReturnThisStrict->Call(context.local(), v8_str("hello"), 0, nullptr)
.ToLocalChecked();
CHECK(r9->StrictEquals(v8_str("hello")));
Local<v8::Value> r10 =
- ReturnThisStrict->Call(context.local(), v8::True(isolate), 0, NULL)
+ ReturnThisStrict->Call(context.local(), v8::True(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(r10->StrictEquals(v8::True(isolate)));
}
@@ -5383,7 +5306,7 @@ THREADED_TEST(ConstructCall) {
Local<Function> Foo = Local<Function>::Cast(
context->Global()->Get(context.local(), v8_str("Foo")).ToLocalChecked());
- v8::Local<Value>* args0 = NULL;
+ v8::Local<Value>* args0 = nullptr;
Local<v8::Array> a0 = Local<v8::Array>::Cast(
Foo->NewInstance(context.local(), 0, args0).ToLocalChecked());
CHECK_EQ(0u, a0->Length());
@@ -5462,52 +5385,45 @@ THREADED_TEST(ConversionNumber) {
CHECK_EQ(5312874545152.0,
obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(0, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(0u ==
- obj->ToUint32(env.local())
- .ToLocalChecked()
- ->Value()); // NOLINT - no CHECK_EQ for unsigned.
+ CHECK_EQ(0, obj->ToUint32(env.local()).ToLocalChecked()->Value());
// Large number.
CompileRun("var obj = -1234567890123;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK_EQ(-1234567890123.0,
obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(-1912276171, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(2382691125u ==
- obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
+ CHECK_EQ(2382691125, obj->ToUint32(env.local()).ToLocalChecked()->Value());
// Small positive integer.
CompileRun("var obj = 42;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK_EQ(42.0, obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(42, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(42u == obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
+ CHECK_EQ(42, obj->ToUint32(env.local()).ToLocalChecked()->Value());
// Negative integer.
CompileRun("var obj = -37;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK_EQ(-37.0, obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(-37, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(4294967259u ==
- obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
+ CHECK_EQ(4294967259, obj->ToUint32(env.local()).ToLocalChecked()->Value());
// Positive non-int32 integer.
CompileRun("var obj = 0x81234567;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK_EQ(2166572391.0, obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(-2128394905, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(2166572391u ==
- obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
+ CHECK_EQ(2166572391, obj->ToUint32(env.local()).ToLocalChecked()->Value());
// Fraction.
CompileRun("var obj = 42.3;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK_EQ(42.3, obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(42, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(42u == obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
+ CHECK_EQ(42, obj->ToUint32(env.local()).ToLocalChecked()->Value());
// Large negative fraction.
CompileRun("var obj = -5726623061.75;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK_EQ(-5726623061.75,
obj->ToNumber(env.local()).ToLocalChecked()->Value());
CHECK_EQ(-1431655765, obj->ToInt32(env.local()).ToLocalChecked()->Value());
- CHECK(2863311531u ==
- obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
+ CHECK_EQ(2863311531, obj->ToUint32(env.local()).ToLocalChecked()->Value());
}
@@ -6483,7 +6399,7 @@ THREADED_TEST(SimplePropertyRead) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+ templ->SetAccessor(v8_str("x"), GetXValue, nullptr, v8_str("donut"));
CHECK(context->Global()
->Set(context.local(), v8_str("obj"),
templ->NewInstance(context.local()).ToLocalChecked())
@@ -6501,7 +6417,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+ templ->SetAccessor(v8_str("x"), GetXValue, nullptr, v8_str("donut"));
CHECK(context->Global()
->Set(context.local(), v8_str("obj"),
templ->NewInstance(context.local()).ToLocalChecked())
@@ -6553,7 +6469,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+ templ->SetAccessor(v8_str("x"), GetXValue, nullptr, v8_str("donut"));
LocalContext context;
CHECK(context->Global()
->Set(context.local(), v8_str("obj"),
@@ -6625,7 +6541,7 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
CHECK(CompileRun("obj2.x")->IsUndefined());
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
v8_str("donut"))
.FromJust());
@@ -6633,7 +6549,7 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
CHECK(CompileRun("obj2.x")->IsUndefined());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
v8_str("donut"))
.FromJust());
@@ -6661,11 +6577,11 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
v8_str("donut"))
.FromJust());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
v8_str("donut"))
.FromJust());
@@ -6689,13 +6605,13 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectString("obj2.x", "z");
CHECK(!GetGlobalProperty(&context, "obj1")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
- v8_str("donut"))
- .FromJust());
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
+ v8_str("donut"))
+ .FromJust());
CHECK(!GetGlobalProperty(&context, "obj2")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
- v8_str("donut"))
- .FromJust());
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
+ v8_str("donut"))
+ .FromJust());
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
@@ -6715,11 +6631,11 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
CompileRun("var obj2 = {};");
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
v8_str("donut"), v8::DEFAULT, v8::DontDelete)
.FromJust());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
v8_str("donut"), v8::DEFAULT, v8::DontDelete)
.FromJust());
@@ -6730,13 +6646,13 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
CHECK(!GetGlobalProperty(&context, "obj1")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
- v8_str("donut"))
- .FromJust());
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
+ v8_str("donut"))
+ .FromJust());
CHECK(!GetGlobalProperty(&context, "obj2")
- ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
- v8_str("donut"))
- .FromJust());
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, nullptr,
+ v8_str("donut"))
+ .FromJust());
{
v8::TryCatch try_catch(isolate);
@@ -6786,11 +6702,11 @@ THREADED_TEST(ElementAPIAccessor) {
CompileRun("var obj2 = {};");
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(context.local(), v8_str("239"), Get239Value, NULL,
+ ->SetAccessor(context.local(), v8_str("239"), Get239Value, nullptr,
v8_str("donut"))
.FromJust());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(context.local(), v8_str("239"), Get239Value, NULL,
+ ->SetAccessor(context.local(), v8_str("239"), Get239Value, nullptr,
v8_str("donut"))
.FromJust());
@@ -6842,7 +6758,7 @@ THREADED_TEST(SetterOnly) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
+ templ->SetAccessor(v8_str("x"), nullptr, SetXValue, v8_str("donut"));
LocalContext context;
CHECK(context->Global()
->Set(context.local(), v8_str("obj"),
@@ -6865,8 +6781,9 @@ THREADED_TEST(NoAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"), static_cast<v8::AccessorGetterCallback>(NULL),
- NULL, v8_str("donut"));
+ templ->SetAccessor(v8_str("x"),
+ static_cast<v8::AccessorGetterCallback>(nullptr), nullptr,
+ v8_str("donut"));
LocalContext context;
CHECK(context->Global()
->Set(context.local(), v8_str("obj"),
@@ -7226,7 +7143,7 @@ TEST(StackTraceInExtension) {
TEST(NullExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("nulltest", NULL));
+ v8::RegisterExtension(new Extension("nulltest", nullptr));
const char* extension_names[] = {"nulltest"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7236,10 +7153,9 @@ TEST(NullExtensions) {
.FromJust());
}
-
static const char* kEmbeddedExtensionSource =
"function Ret54321(){return 54321;}~~@@$"
- "$%% THIS IS A SERIES OF NON-NULL-TERMINATED STRINGS.";
+ "$%% THIS IS A SERIES OF NON-nullptr-TERMINATED STRINGS.";
static const int kEmbeddedExtensionSourceValidLen = 34;
@@ -7250,7 +7166,7 @@ TEST(ExtensionMissingSourceLength) {
const char* extension_names[] = {"srclentest_fail"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
- CHECK(0 == *context);
+ CHECK_NULL(*context);
}
@@ -7273,7 +7189,7 @@ TEST(ExtensionWithSourceLength) {
.FromJust());
} else {
// Anything but exactly the right length should fail to compile.
- CHECK(0 == *context);
+ CHECK_NULL(*context);
}
}
}
@@ -7621,7 +7537,7 @@ THREADED_TEST(NativeFunctionConstructCall) {
static const char* last_location;
static const char* last_message;
void StoringErrorCallback(const char* location, const char* message) {
- if (last_location == NULL) {
+ if (last_location == nullptr) {
last_location = location;
last_message = message;
}
@@ -7637,7 +7553,7 @@ TEST(ErrorReporting) {
v8::RegisterExtension(new Extension("A", "", 1, aDeps));
static const char* bDeps[] = {"A"};
v8::RegisterExtension(new Extension("B", "", 1, bDeps));
- last_location = NULL;
+ last_location = nullptr;
v8::ExtensionConfiguration config(1, bDeps);
v8::Local<Context> context = Context::New(CcTest::isolate(), &config);
CHECK(context.IsEmpty());
@@ -7695,84 +7611,6 @@ struct FlagAndPersistent {
v8::Global<v8::Object> handle;
};
-
-static void SetFlag(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
- data.GetParameter()->flag = true;
- data.GetParameter()->handle.Reset();
-}
-
-
-static void IndependentWeakHandle(bool global_gc, bool interlinked) {
- i::FLAG_stress_incremental_marking = false;
- // Parallel scavenge introduces too much fragmentation.
- i::FLAG_parallel_scavenge = false;
- v8::Isolate* iso = CcTest::isolate();
- v8::HandleScope scope(iso);
- v8::Local<Context> context = Context::New(iso);
- Context::Scope context_scope(context);
-
- FlagAndPersistent object_a, object_b;
-
- size_t big_heap_size = 0;
- size_t big_array_size = 0;
-
- {
- v8::HandleScope handle_scope(iso);
- Local<Object> a(v8::Object::New(iso));
- Local<Object> b(v8::Object::New(iso));
- object_a.handle.Reset(iso, a);
- object_b.handle.Reset(iso, b);
- if (interlinked) {
- a->Set(context, v8_str("x"), b).FromJust();
- b->Set(context, v8_str("x"), a).FromJust();
- }
- if (global_gc) {
- CcTest::CollectAllGarbage();
- } else {
- CcTest::CollectGarbage(i::NEW_SPACE);
- }
- v8::Local<Value> big_array = v8::Array::New(CcTest::isolate(), 5000);
- // Verify that we created an array where the space was reserved up front.
- big_array_size =
- v8::internal::JSArray::cast(*v8::Utils::OpenHandle(*big_array))
- ->elements()
- ->Size();
- CHECK_LE(20000, big_array_size);
- a->Set(context, v8_str("y"), big_array).FromJust();
- big_heap_size = CcTest::heap()->SizeOfObjects();
- }
-
- object_a.flag = false;
- object_b.flag = false;
- object_a.handle.SetWeak(&object_a, &SetFlag,
- v8::WeakCallbackType::kParameter);
- object_b.handle.SetWeak(&object_b, &SetFlag,
- v8::WeakCallbackType::kParameter);
- CHECK(!object_b.handle.IsIndependent());
- object_a.handle.MarkIndependent();
- object_b.handle.MarkIndependent();
- CHECK(object_b.handle.IsIndependent());
- if (global_gc) {
- CcTest::CollectAllGarbage();
- } else {
- CcTest::CollectGarbage(i::NEW_SPACE);
- }
- // A single GC should be enough to reclaim the memory, since we are using
- // phantom handles.
- CHECK_GT(big_heap_size - big_array_size, CcTest::heap()->SizeOfObjects());
- CHECK(object_a.flag);
- CHECK(object_b.flag);
-}
-
-
-TEST(IndependentWeakHandle) {
- IndependentWeakHandle(false, false);
- IndependentWeakHandle(false, true);
- IndependentWeakHandle(true, false);
- IndependentWeakHandle(true, true);
-}
-
-
class Trivial {
public:
explicit Trivial(int x) : x_(x) {}
@@ -7846,9 +7684,6 @@ void InternalFieldCallback(bool global_gc) {
handle.SetWeak<v8::Persistent<v8::Object>>(
&handle, CheckInternalFields, v8::WeakCallbackType::kInternalFields);
- if (!global_gc) {
- handle.MarkIndependent();
- }
}
if (global_gc) {
CcTest::CollectAllGarbage();
@@ -7868,130 +7703,6 @@ THREADED_TEST(InternalFieldCallback) {
InternalFieldCallback(true);
}
-
-static void ResetUseValueAndSetFlag(
- const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
- // Blink will reset the handle, and then use the other handle, so they
- // can't use the same backing slot.
- data.GetParameter()->handle.Reset();
- data.GetParameter()->flag = true;
-}
-
-void v8::internal::heap::HeapTester::ResetWeakHandle(bool global_gc) {
- using v8::Context;
- using v8::Local;
- using v8::Object;
-
- v8::Isolate* iso = CcTest::isolate();
- v8::HandleScope scope(iso);
- v8::Local<Context> context = Context::New(iso);
- Context::Scope context_scope(context);
-
- FlagAndPersistent object_a, object_b;
-
- {
- v8::HandleScope handle_scope(iso);
- Local<Object> a(v8::Object::New(iso));
- Local<Object> b(v8::Object::New(iso));
- object_a.handle.Reset(iso, a);
- object_b.handle.Reset(iso, b);
- if (global_gc) {
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- } else {
- CcTest::CollectGarbage(i::NEW_SPACE);
- }
- }
-
- object_a.flag = false;
- object_b.flag = false;
- object_a.handle.SetWeak(&object_a, &ResetUseValueAndSetFlag,
- v8::WeakCallbackType::kParameter);
- object_b.handle.SetWeak(&object_b, &ResetUseValueAndSetFlag,
- v8::WeakCallbackType::kParameter);
- if (!global_gc) {
- object_a.handle.MarkIndependent();
- object_b.handle.MarkIndependent();
- CHECK(object_b.handle.IsIndependent());
- }
- if (global_gc) {
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- } else {
- CcTest::CollectGarbage(i::NEW_SPACE);
- }
- CHECK(object_a.flag);
- CHECK(object_b.flag);
-}
-
-
-THREADED_HEAP_TEST(ResetWeakHandle) {
- v8::internal::heap::HeapTester::ResetWeakHandle(false);
- v8::internal::heap::HeapTester::ResetWeakHandle(true);
-}
-
-static void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
-
-static void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
-
-static void ForceScavenge2(
- const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
- data.GetParameter()->flag = true;
- InvokeScavenge();
-}
-
-static void ForceScavenge1(
- const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
- data.GetParameter()->handle.Reset();
- data.SetSecondPassCallback(ForceScavenge2);
-}
-
-
-static void ForceMarkSweep2(
- const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
- data.GetParameter()->flag = true;
- InvokeMarkSweep();
-}
-
-static void ForceMarkSweep1(
- const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
- data.GetParameter()->handle.Reset();
- data.SetSecondPassCallback(ForceMarkSweep2);
-}
-
-
-THREADED_TEST(GCFromWeakCallbacks) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Locker locker(CcTest::isolate());
- v8::HandleScope scope(isolate);
- v8::Local<Context> context = Context::New(isolate);
- Context::Scope context_scope(context);
-
- static const int kNumberOfGCTypes = 2;
- typedef v8::WeakCallbackInfo<FlagAndPersistent>::Callback Callback;
- Callback gc_forcing_callback[kNumberOfGCTypes] = {&ForceScavenge1,
- &ForceMarkSweep1};
-
- typedef void (*GCInvoker)();
- GCInvoker invoke_gc[kNumberOfGCTypes] = {&InvokeScavenge, &InvokeMarkSweep};
-
- for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
- for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
- FlagAndPersistent object;
- {
- v8::HandleScope handle_scope(isolate);
- object.handle.Reset(isolate, v8::Object::New(isolate));
- }
- object.flag = false;
- object.handle.SetWeak(&object, gc_forcing_callback[inner_gc],
- v8::WeakCallbackType::kParameter);
- object.handle.MarkIndependent();
- invoke_gc[outer_gc]();
- EmptyMessageQueues(isolate);
- CHECK(object.flag);
- }
- }
-}
-
-
v8::Local<Function> args_fun;
@@ -8016,7 +7727,7 @@ THREADED_TEST(Arguments) {
v8::Local<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
global->Set(v8_str("f"),
v8::FunctionTemplate::New(isolate, ArgumentsTestCallback));
- LocalContext context(NULL, global);
+ LocalContext context(nullptr, global);
args_fun = context->Global()
->Get(context.local(), v8_str("f"))
.ToLocalChecked()
@@ -8516,9 +8227,9 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(0, buf[3]);
CHECK_EQ(0, strcmp("def", buf + 4));
- CHECK_EQ(0, str->WriteOneByte(NULL, 0, 0, String::NO_NULL_TERMINATION));
- CHECK_EQ(0, str->WriteUtf8(NULL, 0, 0, String::NO_NULL_TERMINATION));
- CHECK_EQ(0, str->Write(NULL, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->WriteOneByte(nullptr, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->WriteUtf8(nullptr, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->Write(nullptr, 0, 0, String::NO_NULL_TERMINATION));
}
@@ -9026,7 +8737,7 @@ static void TroubleCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
: arg_this->Get(context, v8_str("trouble_caller")).ToLocalChecked();
CHECK(trouble_callee->IsFunction());
args.GetReturnValue().Set(Function::Cast(*trouble_callee)
- ->Call(context, arg_this, 0, NULL)
+ ->Call(context, arg_this, 0, nullptr)
.FromMaybe(v8::Local<v8::Value>()));
}
@@ -9072,7 +8783,7 @@ TEST(ApiUncaughtException) {
global->Get(env.local(), v8_str("trouble_caller")).ToLocalChecked();
CHECK(trouble_caller->IsFunction());
Function::Cast(*trouble_caller)
- ->Call(env.local(), global, 0, NULL)
+ ->Call(env.local(), global, 0, nullptr)
.FromMaybe(v8::Local<v8::Value>());
CHECK_EQ(1, report_count);
isolate->RemoveMessageListeners(ApiUncaughtExceptionTestListener);
@@ -9119,7 +8830,9 @@ TEST(ExceptionInNativeScript) {
Local<Value> trouble =
global->Get(env.local(), v8_str("trouble")).ToLocalChecked();
CHECK(trouble->IsFunction());
- CHECK(Function::Cast(*trouble)->Call(env.local(), global, 0, NULL).IsEmpty());
+ CHECK(Function::Cast(*trouble)
+ ->Call(env.local(), global, 0, nullptr)
+ .IsEmpty());
isolate->RemoveMessageListeners(ExceptionInNativeScriptTestListener);
}
@@ -9213,7 +8926,7 @@ TEST(SecurityHandler) {
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(SecurityTestCallback, v8_num(42));
// Create an environment
- v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, nullptr, global_template);
context0->Enter();
v8::Local<v8::Object> global0 = context0->Global();
@@ -9229,7 +8942,7 @@ TEST(SecurityHandler) {
// Create another environment, should fail security checks.
v8::HandleScope scope1(isolate);
- v8::Local<Context> context1 = Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context1 = Context::New(isolate, nullptr, global_template);
context1->Enter();
v8::Local<v8::Object> global1 = context1->Global();
@@ -9299,7 +9012,7 @@ THREADED_TEST(SecurityChecks) {
// Enter env2
Context::Scope scope_env2(env2);
Local<Value> result = Function::Cast(*spy)
- ->Call(env2, env2->Global(), 0, NULL)
+ ->Call(env2, env2->Global(), 0, nullptr)
.ToLocalChecked();
CHECK(result->IsFunction());
}
@@ -9310,7 +9023,9 @@ THREADED_TEST(SecurityChecks) {
// Call cross_domain_call, it should throw an exception
v8::TryCatch try_catch(env1->GetIsolate());
- CHECK(Function::Cast(*spy2)->Call(env2, env2->Global(), 0, NULL).IsEmpty());
+ CHECK(Function::Cast(*spy2)
+ ->Call(env2, env2->Global(), 0, nullptr)
+ .IsEmpty());
CHECK(try_catch.HasCaught());
}
}
@@ -9636,7 +9351,7 @@ TEST(ContextDetachGlobal) {
CHECK(get_prop->IsFunction());
v8::TryCatch try_catch(env1->GetIsolate());
Local<Value> r = Function::Cast(*get_prop)
- ->Call(env1.local(), global1, 0, NULL)
+ ->Call(env1.local(), global1, 0, nullptr)
.ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK_EQ(1, r->Int32Value(env1.local()).FromJust());
@@ -9730,7 +9445,7 @@ TEST(DetachedAccesses) {
inner_global_template ->SetAccessorProperty(
v8_str("this_x"), FunctionTemplate::New(env1->GetIsolate(), GetThisX));
v8::Local<Context> env2 =
- Context::New(env1->GetIsolate(), NULL, inner_global_template);
+ Context::New(env1->GetIsolate(), nullptr, inner_global_template);
Local<Value> foo = v8_str("foo");
@@ -9949,7 +9664,7 @@ TEST(AccessControl) {
v8::DEFAULT);
// Create an environment
- v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, nullptr, global_template);
context0->Enter();
v8::Local<v8::Object> global0 = context0->Global();
@@ -10119,7 +9834,7 @@ TEST(AccessControlES5) {
v8::DEFAULT);
// Create an environment
- v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, nullptr, global_template);
context0->Enter();
v8::Local<v8::Object> global0 = context0->Global();
@@ -10199,7 +9914,7 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
// Create an environment
- v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
+ v8::Local<Context> context0 = Context::New(isolate, nullptr, obj_template);
context0->Enter();
v8::Local<v8::Object> global0 = context0->Global();
@@ -10290,7 +10005,7 @@ THREADED_TEST(CrossDomainAccessors) {
global_template->SetAccessor(v8_str("unreachable"), UnreachableGetter, 0,
v8::Local<Value>(), v8::DEFAULT);
- v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, nullptr, global_template);
context0->Enter();
Local<v8::Object> global = context0->Global();
@@ -10521,7 +10236,7 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
"thrown";
{
- LocalContext env(NULL, instance_template);
+ LocalContext env(nullptr, instance_template);
// Hold on to the global object so it can be used again in another
// environment initialization.
global_object = env->Global();
@@ -10536,7 +10251,7 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
{
// Create new environment reusing the global object.
- LocalContext env(NULL, instance_template, global_object);
+ LocalContext env(nullptr, instance_template, global_object);
Local<Value> value = CompileRun("x");
CHECK_EQ(42, value->Int32Value(env.local()).FromJust());
value = CompileRun("f()");
@@ -10635,7 +10350,7 @@ THREADED_TEST(CallKnownGlobalReceiver) {
Local<Value> foo;
{
- LocalContext env(NULL, instance_template);
+ LocalContext env(nullptr, instance_template);
// Hold on to the global object so it can be used again in another
// environment initialization.
global_object = env->Global();
@@ -10644,7 +10359,7 @@ THREADED_TEST(CallKnownGlobalReceiver) {
{
// Create new environment reusing the global object.
- LocalContext env(NULL, instance_template, global_object);
+ LocalContext env(nullptr, instance_template, global_object);
CHECK(env->Global()->Set(env.local(), v8_str("foo"), foo).FromJust());
CompileRun("foo()");
}
@@ -10694,7 +10409,7 @@ THREADED_TEST(ShadowObject) {
v8::HandleScope handle_scope(isolate);
Local<ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
- LocalContext context(NULL, global_template);
+ LocalContext context(nullptr, global_template);
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
t->InstanceTemplate()->SetHandler(
@@ -11959,24 +11674,25 @@ THREADED_TEST(CallAsFunction) {
.ToLocalChecked();
Local<v8::Value> a1 =
- instance->CallAsFunction(context.local(), v8::Undefined(isolate), 0,
- NULL)
+ instance
+ ->CallAsFunction(context.local(), v8::Undefined(isolate), 0,
+ nullptr)
.ToLocalChecked();
CHECK(a1->StrictEquals(instance));
Local<v8::Value> a2 =
- instance->CallAsFunction(context.local(), v8::Null(isolate), 0, NULL)
+ instance->CallAsFunction(context.local(), v8::Null(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(a2->StrictEquals(instance));
Local<v8::Value> a3 =
- instance->CallAsFunction(context.local(), v8_num(42), 0, NULL)
+ instance->CallAsFunction(context.local(), v8_num(42), 0, nullptr)
.ToLocalChecked();
CHECK(a3->StrictEquals(instance));
Local<v8::Value> a4 =
- instance->CallAsFunction(context.local(), v8_str("hello"), 0, NULL)
+ instance->CallAsFunction(context.local(), v8_str("hello"), 0, nullptr)
.ToLocalChecked();
CHECK(a4->StrictEquals(instance));
Local<v8::Value> a5 =
- instance->CallAsFunction(context.local(), v8::True(isolate), 0, NULL)
+ instance->CallAsFunction(context.local(), v8::True(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(a5->StrictEquals(instance));
}
@@ -12000,55 +11716,59 @@ THREADED_TEST(CallAsFunction) {
.ToLocalChecked());
Local<v8::Value> a1 =
- ReturnThisSloppy->CallAsFunction(context.local(),
- v8::Undefined(isolate), 0, NULL)
+ ReturnThisSloppy
+ ->CallAsFunction(context.local(), v8::Undefined(isolate), 0,
+ nullptr)
.ToLocalChecked();
CHECK(a1->StrictEquals(context->Global()));
Local<v8::Value> a2 =
- ReturnThisSloppy->CallAsFunction(context.local(), v8::Null(isolate), 0,
- NULL)
+ ReturnThisSloppy
+ ->CallAsFunction(context.local(), v8::Null(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(a2->StrictEquals(context->Global()));
Local<v8::Value> a3 =
- ReturnThisSloppy->CallAsFunction(context.local(), v8_num(42), 0, NULL)
+ ReturnThisSloppy
+ ->CallAsFunction(context.local(), v8_num(42), 0, nullptr)
.ToLocalChecked();
CHECK(a3->IsNumberObject());
CHECK_EQ(42.0, a3.As<v8::NumberObject>()->ValueOf());
Local<v8::Value> a4 =
- ReturnThisSloppy->CallAsFunction(context.local(), v8_str("hello"), 0,
- NULL)
+ ReturnThisSloppy
+ ->CallAsFunction(context.local(), v8_str("hello"), 0, nullptr)
.ToLocalChecked();
CHECK(a4->IsStringObject());
CHECK(a4.As<v8::StringObject>()->ValueOf()->StrictEquals(v8_str("hello")));
Local<v8::Value> a5 =
- ReturnThisSloppy->CallAsFunction(context.local(), v8::True(isolate), 0,
- NULL)
+ ReturnThisSloppy
+ ->CallAsFunction(context.local(), v8::True(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(a5->IsBooleanObject());
CHECK(a5.As<v8::BooleanObject>()->ValueOf());
Local<v8::Value> a6 =
- ReturnThisStrict->CallAsFunction(context.local(),
- v8::Undefined(isolate), 0, NULL)
+ ReturnThisStrict
+ ->CallAsFunction(context.local(), v8::Undefined(isolate), 0,
+ nullptr)
.ToLocalChecked();
CHECK(a6->IsUndefined());
Local<v8::Value> a7 =
- ReturnThisStrict->CallAsFunction(context.local(), v8::Null(isolate), 0,
- NULL)
+ ReturnThisStrict
+ ->CallAsFunction(context.local(), v8::Null(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(a7->IsNull());
Local<v8::Value> a8 =
- ReturnThisStrict->CallAsFunction(context.local(), v8_num(42), 0, NULL)
+ ReturnThisStrict
+ ->CallAsFunction(context.local(), v8_num(42), 0, nullptr)
.ToLocalChecked();
CHECK(a8->StrictEquals(v8_num(42)));
Local<v8::Value> a9 =
- ReturnThisStrict->CallAsFunction(context.local(), v8_str("hello"), 0,
- NULL)
+ ReturnThisStrict
+ ->CallAsFunction(context.local(), v8_str("hello"), 0, nullptr)
.ToLocalChecked();
CHECK(a9->StrictEquals(v8_str("hello")));
Local<v8::Value> a10 =
- ReturnThisStrict->CallAsFunction(context.local(), v8::True(isolate), 0,
- NULL)
+ ReturnThisStrict
+ ->CallAsFunction(context.local(), v8::True(isolate), 0, nullptr)
.ToLocalChecked();
CHECK(a10->StrictEquals(v8::True(isolate)));
}
@@ -12416,7 +12136,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
proto_templ->Set(v8_str("method"), method_templ);
v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
v8::Local<v8::Function> fun =
@@ -12454,7 +12174,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
fun_templ->SetHiddenPrototype(true);
v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
v8::Local<v8::Function> fun =
@@ -12495,7 +12215,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
fun_templ->SetHiddenPrototype(true);
v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
v8::Local<v8::Function> fun =
@@ -12546,7 +12266,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
fun_templ->SetHiddenPrototype(true);
v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
v8::Local<v8::Function> fun =
@@ -12597,7 +12317,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
fun_templ->SetHiddenPrototype(true);
v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
v8::Local<v8::Function> fun =
@@ -12652,7 +12372,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
fun_templ->SetHiddenPrototype(true);
v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
v8::Local<v8::Function> fun =
@@ -13061,11 +12781,11 @@ THREADED_TEST(ExceptionsDoNotPropagatePastTryCatch) {
CHECK(
context->Global()->Set(context.local(), v8_str("func"), func).FromJust());
- MessageCallback callbacks[] =
- { NULL, WebKitLike, ThrowViaApi, ThrowFromJS, WithTryCatch };
+ MessageCallback callbacks[] = {nullptr, WebKitLike, ThrowViaApi, ThrowFromJS,
+ WithTryCatch};
for (unsigned i = 0; i < sizeof(callbacks)/sizeof(callbacks[0]); i++) {
MessageCallback callback = callbacks[i];
- if (callback != NULL) {
+ if (callback != nullptr) {
isolate->AddMessageListener(callback);
}
// Some small number to control number of times message handler should
@@ -13075,7 +12795,7 @@ THREADED_TEST(ExceptionsDoNotPropagatePastTryCatch) {
"var thrown = false;\n"
"try { func(); } catch(e) { thrown = true; }\n"
"thrown\n");
- if (callback != NULL) {
+ if (callback != nullptr) {
isolate->RemoveMessageListeners(callback);
}
}
@@ -13127,7 +12847,7 @@ THREADED_TEST(Overriding) {
// Add 'i' as an accessor to the instance template with ReadOnly attributes
// but the attribute does not have effect because it is duplicated with
- // NULL setter.
+ // nullptr setter.
child_instance_templ->SetAccessor(v8_str("i"), ChildGetter, 0,
v8::Local<Value>(), v8::DEFAULT,
v8::ReadOnly);
@@ -13837,7 +13557,7 @@ void ApiTestFuzzer::TearDown() {
fuzzing_ = false;
for (int i = 0; i < RegisterThreadedTest::count(); i++) {
ApiTestFuzzer *fuzzer = RegisterThreadedTest::nth(i)->fuzzer_;
- if (fuzzer != NULL) fuzzer->Join();
+ if (fuzzer != nullptr) fuzzer->Join();
}
}
@@ -14022,7 +13742,7 @@ THREADED_TEST(LockUnlockLock) {
static int GetGlobalObjectsCount() {
int count = 0;
i::HeapIterator it(CcTest::heap());
- for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
+ for (i::HeapObject* object = it.next(); object != nullptr; object = it.next())
if (object->IsJSGlobalObject()) {
i::JSGlobalObject* g = i::JSGlobalObject::cast(object);
// Skip dummy global object.
@@ -14314,12 +14034,12 @@ struct SymbolInfo {
class SetFunctionEntryHookTest {
public:
SetFunctionEntryHookTest() {
- CHECK(instance_ == NULL);
+ CHECK_NULL(instance_);
instance_ = this;
}
~SetFunctionEntryHookTest() {
CHECK(instance_ == this);
- instance_ = NULL;
+ instance_ = nullptr;
}
void Reset() {
symbols_.clear();
@@ -14329,7 +14049,7 @@ class SetFunctionEntryHookTest {
void RunTest();
void OnJitEvent(const v8::JitCodeEvent* event);
static void JitEvent(const v8::JitCodeEvent* event) {
- CHECK(instance_ != NULL);
+ CHECK_NOT_NULL(instance_);
instance_->OnJitEvent(event);
}
@@ -14337,12 +14057,12 @@ class SetFunctionEntryHookTest {
uintptr_t return_addr_location);
static void EntryHook(uintptr_t function,
uintptr_t return_addr_location) {
- CHECK(instance_ != NULL);
+ CHECK_NOT_NULL(instance_);
instance_->OnEntryHook(function, return_addr_location);
}
static void RuntimeCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(instance_ != NULL);
+ CHECK_NOT_NULL(instance_);
args.GetReturnValue().Set(v8_num(42));
}
void RunLoopInNewEnv(v8::Isolate* isolate);
@@ -14369,8 +14089,7 @@ class SetFunctionEntryHookTest {
static SetFunctionEntryHookTest* instance_;
};
-SetFunctionEntryHookTest* SetFunctionEntryHookTest::instance_ = NULL;
-
+SetFunctionEntryHookTest* SetFunctionEntryHookTest::instance_ = nullptr;
// Returns true if addr is in the range [start, start+len).
static bool Overlaps(i::Address start, size_t len, i::Address addr) {
@@ -14410,19 +14129,19 @@ void SetFunctionEntryHookTest::InsertSymbolAt(i::Address addr,
void SetFunctionEntryHookTest::OnJitEvent(const v8::JitCodeEvent* event) {
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
- CHECK(event->code_start != NULL);
- CHECK_NE(0, static_cast<int>(event->code_len));
- CHECK(event->name.str != NULL);
- size_t symbol_id = symbols_.size();
-
- // Record the new symbol.
- SymbolInfo& info = symbols_[symbol_id];
- info.id = symbol_id;
- info.size = event->code_len;
- info.name.assign(event->name.str, event->name.str + event->name.len);
-
- // And record it's location.
- InsertSymbolAt(reinterpret_cast<i::Address>(event->code_start), &info);
+ CHECK_NOT_NULL(event->code_start);
+ CHECK_NE(0, static_cast<int>(event->code_len));
+ CHECK_NOT_NULL(event->name.str);
+ size_t symbol_id = symbols_.size();
+
+ // Record the new symbol.
+ SymbolInfo& info = symbols_[symbol_id];
+ info.id = symbol_id;
+ info.size = event->code_len;
+ info.name.assign(event->name.str, event->name.str + event->name.len);
+
+ // And record it's location.
+ InsertSymbolAt(reinterpret_cast<i::Address>(event->code_start), &info);
}
break;
@@ -14455,7 +14174,7 @@ void SetFunctionEntryHookTest::OnEntryHook(
// Get the function's code object.
i::Code* function_code = i::Code::GetCodeFromTargetAddress(
reinterpret_cast<i::Address>(function));
- CHECK(function_code != NULL);
+ CHECK_NOT_NULL(function_code);
// Then try and look up the caller's code object.
i::Address caller = *reinterpret_cast<i::Address*>(return_addr_location);
@@ -14491,15 +14210,14 @@ SymbolInfo* SetFunctionEntryHookTest::FindSymbolForAddr(i::Address addr) {
}
// If not a direct hit, it'll have to be the previous symbol.
- if (it == symbol_locations_.begin())
- return NULL;
+ if (it == symbol_locations_.begin()) return nullptr;
--it;
size_t offs = addr - it->first;
if (offs < it->second->size)
return it->second;
- return NULL;
+ return nullptr;
}
@@ -14512,15 +14230,14 @@ int SetFunctionEntryHookTest::CountInvocations(
SymbolInfo* function = it->first.second;
// Filter out non-matching functions.
- if (function_name != NULL) {
+ if (function_name != nullptr) {
if (function->name.find(function_name) == std::string::npos)
continue;
}
// Filter out non-matching callers.
- if (caller_name != NULL) {
- if (caller == NULL)
- continue;
+ if (caller_name != nullptr) {
+ if (caller == nullptr) continue;
if (caller->name.find(caller_name) == std::string::npos)
continue;
}
@@ -14591,27 +14308,27 @@ void SetFunctionEntryHookTest::RunTest() {
// Check the expected invocation counts.
if (i::FLAG_always_opt) {
- CHECK_EQ(2, CountInvocations(NULL, "bar"));
+ CHECK_EQ(2, CountInvocations(nullptr, "bar"));
CHECK_EQ(200, CountInvocations("bar", "foo"));
- CHECK_EQ(200, CountInvocations(NULL, "foo"));
+ CHECK_EQ(200, CountInvocations(nullptr, "foo"));
} else if (i::FLAG_opt) {
// For ignition we don't see the actual functions being called, instead
// we see the InterpreterEntryTrampoline at least 102 times
// (100 unoptimized calls to foo, and 2 calls to bar).
- CHECK_LE(102, CountInvocations(NULL, "InterpreterEntryTrampoline"));
+ CHECK_LE(102, CountInvocations(nullptr, "InterpreterEntryTrampoline"));
// We should also see the calls to the optimized function foo.
- CHECK_EQ(100, CountInvocations(NULL, "foo"));
+ CHECK_EQ(100, CountInvocations(nullptr, "foo"));
} else {
// For ignition without an optimizing compiler, we should only see the
// InterpreterEntryTrampoline.
// (200 unoptimized calls to foo, and 2 calls to bar).
- CHECK_LE(202, CountInvocations(NULL, "InterpreterEntryTrampoline"));
+ CHECK_LE(202, CountInvocations(nullptr, "InterpreterEntryTrampoline"));
}
// Verify that we have an entry hook on some specific stubs.
- CHECK_NE(0, CountInvocations(NULL, "CEntryStub"));
- CHECK_NE(0, CountInvocations(NULL, "JSEntryStub"));
- CHECK_NE(0, CountInvocations(NULL, "JSEntryTrampoline"));
+ CHECK_NE(0, CountInvocations(nullptr, "CEntryStub"));
+ CHECK_NE(0, CountInvocations(nullptr, "JSEntryStub"));
+ CHECK_NE(0, CountInvocations(nullptr, "JSEntryTrampoline"));
}
isolate->Dispose();
@@ -14647,8 +14364,8 @@ TEST(SetFunctionEntryHook) {
test.RunTest();
}
-static v8::base::HashMap* code_map = NULL;
-static v8::base::HashMap* jitcode_line_info = NULL;
+static v8::base::HashMap* code_map = nullptr;
+static v8::base::HashMap* jitcode_line_info = nullptr;
static int saw_bar = 0;
static int move_events = 0;
@@ -14696,24 +14413,24 @@ static bool FunctionNameIs(const char* expected,
static void event_handler(const v8::JitCodeEvent* event) {
- CHECK(event != NULL);
- CHECK(code_map != NULL);
- CHECK(jitcode_line_info != NULL);
+ CHECK_NOT_NULL(event);
+ CHECK_NOT_NULL(code_map);
+ CHECK_NOT_NULL(jitcode_line_info);
class DummyJitCodeLineInfo {
};
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
- CHECK(event->code_start != NULL);
- CHECK_NE(0, static_cast<int>(event->code_len));
- CHECK(event->name.str != NULL);
- v8::base::HashMap::Entry* entry = code_map->LookupOrInsert(
- event->code_start, i::ComputePointerHash(event->code_start));
- entry->value = reinterpret_cast<void*>(event->code_len);
-
- if (FunctionNameIs("bar", event)) {
- ++saw_bar;
+ CHECK_NOT_NULL(event->code_start);
+ CHECK_NE(0, static_cast<int>(event->code_len));
+ CHECK_NOT_NULL(event->name.str);
+ v8::base::HashMap::Entry* entry = code_map->LookupOrInsert(
+ event->code_start, i::ComputePointerHash(event->code_start));
+ entry->value = reinterpret_cast<void*>(event->code_len);
+
+ if (FunctionNameIs("bar", event)) {
+ ++saw_bar;
}
}
break;
@@ -14729,7 +14446,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
// before its existence can be logged.
v8::base::HashMap::Entry* entry =
code_map->Lookup(event->code_start, hash);
- if (entry != NULL) {
+ if (entry != nullptr) {
++move_events;
CHECK_EQ(reinterpret_cast<void*>(event->code_len), entry->value);
@@ -14764,21 +14481,21 @@ static void event_handler(const v8::JitCodeEvent* event) {
// data structure is created before during CODE_START_LINE_INFO_RECORDING
// event. And delete it in CODE_END_LINE_INFO_RECORDING event handling.
case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
- CHECK(event->user_data != NULL);
- uint32_t hash = i::ComputePointerHash(event->user_data);
- v8::base::HashMap::Entry* entry =
- jitcode_line_info->Lookup(event->user_data, hash);
- CHECK(entry != NULL);
- delete reinterpret_cast<DummyJitCodeLineInfo*>(event->user_data);
+ CHECK_NOT_NULL(event->user_data);
+ uint32_t hash = i::ComputePointerHash(event->user_data);
+ v8::base::HashMap::Entry* entry =
+ jitcode_line_info->Lookup(event->user_data, hash);
+ CHECK_NOT_NULL(entry);
+ delete reinterpret_cast<DummyJitCodeLineInfo*>(event->user_data);
}
break;
case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
- CHECK(event->user_data != NULL);
- uint32_t hash = i::ComputePointerHash(event->user_data);
- v8::base::HashMap::Entry* entry =
- jitcode_line_info->Lookup(event->user_data, hash);
- CHECK(entry != NULL);
+ CHECK_NOT_NULL(event->user_data);
+ uint32_t hash = i::ComputePointerHash(event->user_data);
+ v8::base::HashMap::Entry* entry =
+ jitcode_line_info->Lookup(event->user_data, hash);
+ CHECK_NOT_NULL(entry);
}
break;
@@ -14861,13 +14578,13 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// Force code movement.
heap->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
- isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, nullptr);
CHECK_LE(kIterations, saw_bar);
CHECK_LT(0, move_events);
- code_map = NULL;
- jitcode_line_info = NULL;
+ code_map = nullptr;
+ jitcode_line_info = nullptr;
}
isolate->Exit();
@@ -14893,16 +14610,16 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
event_handler);
- isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, nullptr);
- jitcode_line_info = NULL;
+ jitcode_line_info = nullptr;
// We expect that we got some events. Note that if we could get code removal
// notifications, we could compare two collections, one created by listening
// from the time of creation of an isolate, and the other by subscribing
// with EnumExisting.
CHECK_LT(0u, code.occupancy());
- code_map = NULL;
+ code_map = nullptr;
}
isolate->Exit();
@@ -15179,7 +14896,7 @@ THREADED_TEST(PropertyEnumeration) {
v8::Local<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4u, elms->Length());
int elmc0 = 0;
- const char** elmv0 = NULL;
+ const char** elmv0 = nullptr;
CheckProperties(
isolate,
elms->Get(context.local(), v8::Integer::New(isolate, 0)).ToLocalChecked(),
@@ -15239,7 +14956,7 @@ THREADED_TEST(PropertyEnumeration2) {
v8::Local<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4u, elms->Length());
int elmc0 = 0;
- const char** elmv0 = NULL;
+ const char** elmv0 = nullptr;
CheckProperties(
isolate,
elms->Get(context.local(), v8::Integer::New(isolate, 0)).ToLocalChecked(),
@@ -15685,10 +15402,10 @@ TEST(CompileExternalTwoByteSource) {
"0.5",
"-0.5", // This mainly testes PushBack in the Scanner.
"--0.5", // This mainly testes PushBack in the Scanner.
- NULL};
+ nullptr};
// Compile the sources as external two byte strings.
- for (int i = 0; one_byte_sources[i] != NULL; i++) {
+ for (int i = 0; one_byte_sources[i] != nullptr; i++) {
uint16_t* two_byte_string = AsciiToTwoByteString(one_byte_sources[i]);
TestResource* uc16_resource = new TestResource(two_byte_string);
v8::Local<v8::String> source =
@@ -16318,8 +16035,8 @@ TEST(DefineProperty) {
THREADED_TEST(GetCurrentContextWhenNotInContext) {
i::Isolate* isolate = CcTest::i_isolate();
- CHECK(isolate != NULL);
- CHECK(isolate->context() == NULL);
+ CHECK_NOT_NULL(isolate);
+ CHECK_NULL(isolate->context());
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::HandleScope scope(v8_isolate);
// The following should not crash, but return an empty handle.
@@ -17120,7 +16837,7 @@ THREADED_TEST(StackTrace) {
v8::String::Utf8Value stack(
context->GetIsolate(),
try_catch.StackTrace(context.local()).ToLocalChecked());
- CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
+ CHECK_NOT_NULL(strstr(*stack, "at foo (stack-trace-test"));
}
@@ -17132,13 +16849,13 @@ void checkStackFrame(const char* expected_script_name,
v8::HandleScope scope(CcTest::isolate());
v8::String::Utf8Value func_name(CcTest::isolate(), frame->GetFunctionName());
v8::String::Utf8Value script_name(CcTest::isolate(), frame->GetScriptName());
- if (*script_name == NULL) {
+ if (*script_name == nullptr) {
// The situation where there is no associated script, like for evals.
- CHECK(expected_script_name == NULL);
+ CHECK_NULL(expected_script_name);
} else {
- CHECK(strstr(*script_name, expected_script_name) != NULL);
+ CHECK_NOT_NULL(strstr(*script_name, expected_script_name));
}
- CHECK(strstr(*func_name, expected_func_name) != NULL);
+ CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
CHECK_EQ(expected_line_number, frame->GetLineNumber());
CHECK_EQ(expected_column, frame->GetColumn());
CHECK_EQ(is_eval, frame->IsEval());
@@ -17157,7 +16874,7 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
const int kDisplayNameIsNotString = 6;
const int kFunctionNameIsNotString = 7;
- CHECK(args.Length() == 1);
+ CHECK_EQ(args.Length(), 1);
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
int testGroup = args[0]->Int32Value(context).FromJust();
@@ -17169,7 +16886,7 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
stackTrace->GetFrame(0));
checkStackFrame(origin, "foo", 6, 3, false, true, stackTrace->GetFrame(1));
// This is the source string inside the eval which has the call to foo.
- checkStackFrame(NULL, "", 1, 1, true, false, stackTrace->GetFrame(2));
+ checkStackFrame(nullptr, "", 1, 1, true, false, stackTrace->GetFrame(2));
// The last frame is an anonymous function which has the initial eval call.
checkStackFrame(origin, "", 8, 7, false, false, stackTrace->GetFrame(3));
} else if (testGroup == kDetailedTest) {
@@ -17182,7 +16899,7 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
stackTrace->GetFrame(1));
bool is_eval = true;
// This is the source string inside the eval which has the call to baz.
- checkStackFrame(NULL, "", 1, 1, is_eval, false, stackTrace->GetFrame(2));
+ checkStackFrame(nullptr, "", 1, 1, is_eval, false, stackTrace->GetFrame(2));
// The last frame is an anonymous function which has the initial eval call.
checkStackFrame(origin, "", 10, 1, false, false, stackTrace->GetFrame(3));
} else if (testGroup == kFunctionName) {
@@ -17342,7 +17059,9 @@ TEST(CaptureStackTraceForUncaughtException) {
Local<Value> trouble =
global->Get(env.local(), v8_str("bar")).ToLocalChecked();
CHECK(trouble->IsFunction());
- CHECK(Function::Cast(*trouble)->Call(env.local(), global, 0, NULL).IsEmpty());
+ CHECK(Function::Cast(*trouble)
+ ->Call(env.local(), global, 0, nullptr)
+ .IsEmpty());
isolate->SetCaptureStackTraceForUncaughtExceptions(false);
isolate->RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
CHECK_EQ(1, report_count);
@@ -18109,7 +17828,7 @@ TEST(ScriptIdInStackTrace) {
v8::Local<v8::Script> script = CompileWithOrigin(scriptSource, "test");
script->Run(context.local()).ToLocalChecked();
for (int i = 0; i < 2; i++) {
- CHECK(scriptIdInStack[i] != v8::Message::kNoScriptIdInfo);
+ CHECK_NE(scriptIdInStack[i], v8::Message::kNoScriptIdInfo);
CHECK_EQ(scriptIdInStack[i], script->GetUnboundScript()->GetId());
}
}
@@ -18251,7 +17970,7 @@ TEST(PromiseHook) {
auto init_promise = global->Get(context, v8_str("init")).ToLocalChecked();
CHECK(GetPromise("p")->Equals(env.local(), init_promise).FromJust());
auto init_promise_obj = v8::Local<v8::Promise>::Cast(init_promise);
- CHECK(init_promise_obj->State() == v8::Promise::PromiseState::kPending);
+ CHECK_EQ(init_promise_obj->State(), v8::Promise::PromiseState::kPending);
CHECK(!init_promise_obj->HasHandler());
promise_hook_data->Reset();
@@ -18570,7 +18289,7 @@ TEST(DynamicWithSourceURLInStackTraceString) {
v8::String::Utf8Value stack(
context->GetIsolate(),
try_catch.StackTrace(context.local()).ToLocalChecked());
- CHECK(strstr(*stack, "at foo (source_url:3:5)") != NULL);
+ CHECK_NOT_NULL(strstr(*stack, "at foo (source_url:3:5)"));
}
@@ -18855,13 +18574,13 @@ TEST(VisitExternalStrings) {
v8::Local<v8::String> string0 =
v8::String::NewExternalTwoByte(env->GetIsolate(), resource[0])
.ToLocalChecked();
- resource[1] = new TestResource(two_byte_string, NULL, false);
+ resource[1] = new TestResource(two_byte_string, nullptr, false);
v8::Local<v8::String> string1 =
v8::String::NewExternalTwoByte(env->GetIsolate(), resource[1])
.ToLocalChecked();
// Externalized symbol.
- resource[2] = new TestResource(two_byte_string, NULL, false);
+ resource[2] = new TestResource(two_byte_string, nullptr, false);
v8::Local<v8::String> string2 =
v8::String::NewFromUtf8(env->GetIsolate(), string,
v8::NewStringType::kInternalized)
@@ -19767,7 +19486,7 @@ TEST(Regress618) {
}
}
-v8::Isolate* gc_callbacks_isolate = NULL;
+v8::Isolate* gc_callbacks_isolate = nullptr;
int prologue_call_count = 0;
int epilogue_call_count = 0;
int prologue_call_count_second = 0;
@@ -20096,8 +19815,8 @@ TEST(ContainsOnlyOneByte) {
string_contents[length-1] = 0;
// Simple case.
Local<String> string =
- String::NewExternalTwoByte(isolate,
- new TestResource(string_contents, NULL, false))
+ String::NewExternalTwoByte(
+ isolate, new TestResource(string_contents, nullptr, false))
.ToLocalChecked();
CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
// Counter example.
@@ -20117,8 +19836,8 @@ TEST(ContainsOnlyOneByte) {
balanced = String::Concat(balanced, right);
Local<String> cons_strings[] = {left, balanced, right};
Local<String> two_byte =
- String::NewExternalTwoByte(isolate,
- new TestResource(string_contents, NULL, false))
+ String::NewExternalTwoByte(
+ isolate, new TestResource(string_contents, nullptr, false))
.ToLocalChecked();
USE(two_byte); USE(cons_strings);
for (size_t i = 0; i < arraysize(cons_strings); i++) {
@@ -20141,8 +19860,8 @@ TEST(ContainsOnlyOneByte) {
int shift = 8 + (i % 7);
string_contents[alignment + i] = 1 << shift;
string = String::NewExternalTwoByte(
- isolate,
- new TestResource(string_contents + alignment, NULL, false))
+ isolate, new TestResource(string_contents + alignment,
+ nullptr, false))
.ToLocalChecked();
CHECK_EQ(size, string->Length());
CHECK(!string->ContainsOnlyOneByte());
@@ -20182,7 +19901,7 @@ TEST(GCInFailedAccessCheckCallback) {
global_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Create a context and set an x property on it's global object.
- LocalContext context0(NULL, global_template);
+ LocalContext context0(nullptr, global_template);
CHECK(context0->Global()
->Set(context0.local(), v8_str("x"), v8_num(42))
.FromJust());
@@ -20190,7 +19909,7 @@ TEST(GCInFailedAccessCheckCallback) {
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
- LocalContext context1(NULL, global_template);
+ LocalContext context1(nullptr, global_template);
CHECK(context1->Global()
->Set(context1.local(), v8_str("other"), global0)
.FromJust());
@@ -20238,8 +19957,9 @@ TEST(GCInFailedAccessCheckCallback) {
try_catch.Reset();
// DefineAccessor.
- CHECK(global0->SetAccessor(context1.local(), v8_str("x"), GetXValue, NULL,
- v8_str("x"))
+ CHECK(global0
+ ->SetAccessor(context1.local(), v8_str("x"), GetXValue, nullptr,
+ v8_str("x"))
.IsNothing());
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -20281,7 +20001,7 @@ TEST(GCInFailedAccessCheckCallback) {
// Reset the failed access check callback so it does not influence
// the other tests.
- isolate->SetFailedAccessCheckCallbackFunction(NULL);
+ isolate->SetFailedAccessCheckCallbackFunction(nullptr);
}
@@ -20290,12 +20010,12 @@ TEST(IsolateNewDispose) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
- CHECK(isolate != NULL);
+ CHECK_NOT_NULL(isolate);
CHECK(current_isolate != isolate);
CHECK(current_isolate == CcTest::isolate());
isolate->SetFatalErrorHandler(StoringErrorCallback);
- last_location = last_message = NULL;
+ last_location = last_message = nullptr;
isolate->Dispose();
CHECK(!last_location);
CHECK(!last_message);
@@ -20313,7 +20033,7 @@ UNINITIALIZED_TEST(DisposeIsolateWhenInUse) {
// Run something in this isolate.
ExpectTrue("true");
isolate->SetFatalErrorHandler(StoringErrorCallback);
- last_location = last_message = NULL;
+ last_location = last_message = nullptr;
// Still entered, should fail.
isolate->Dispose();
CHECK(last_location);
@@ -20341,10 +20061,10 @@ static void BreakArrayGuarantees(const char* script) {
v8::Context::Scope context_scope(context);
v8::internal::Isolate* i_isolate =
reinterpret_cast<v8::internal::Isolate*>(isolate1);
- CHECK(i_isolate->IsFastArrayConstructorPrototypeChainIntact());
+ CHECK(i_isolate->IsNoElementsProtectorIntact());
// Run something in new isolate.
CompileRun(script);
- CHECK(!i_isolate->IsFastArrayConstructorPrototypeChainIntact());
+ CHECK(!i_isolate->IsNoElementsProtectorIntact());
}
isolate1->Exit();
isolate1->Dispose();
@@ -20477,7 +20197,7 @@ TEST(RunTwoIsolatesOnSingleThread) {
isolate1->Exit();
isolate2->SetFatalErrorHandler(StoringErrorCallback);
- last_location = last_message = NULL;
+ last_location = last_message = nullptr;
isolate1->Dispose();
CHECK(!last_location);
@@ -20618,19 +20338,19 @@ class InitDefaultIsolateThread : public v8::base::Thread {
break;
case SetFatalHandler:
- isolate->SetFatalErrorHandler(NULL);
+ isolate->SetFatalErrorHandler(nullptr);
break;
case SetCounterFunction:
- CcTest::isolate()->SetCounterFunction(NULL);
+ CcTest::isolate()->SetCounterFunction(nullptr);
break;
case SetCreateHistogramFunction:
- CcTest::isolate()->SetCreateHistogramFunction(NULL);
+ CcTest::isolate()->SetCreateHistogramFunction(nullptr);
break;
case SetAddHistogramSampleFunction:
- CcTest::isolate()->SetAddHistogramSampleFunction(NULL);
+ CcTest::isolate()->SetAddHistogramSampleFunction(nullptr);
break;
}
isolate->Exit();
@@ -20999,8 +20719,8 @@ TEST(NamedEnumeratorAndForIn) {
v8::Context::Scope context_scope(context.local());
v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
- tmpl->SetHandler(v8::NamedPropertyHandlerConfiguration(Getter, NULL, NULL,
- NULL, Enumerator));
+ tmpl->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ Getter, nullptr, nullptr, nullptr, Enumerator));
CHECK(context->Global()
->Set(context.local(), v8_str("o"),
tmpl->NewInstance(context.local()).ToLocalChecked())
@@ -21029,7 +20749,7 @@ TEST(DefinePropertyPostDetach) {
"})")
.As<Function>();
context->DetachGlobal();
- CHECK(define_property->Call(context.local(), proxy, 0, NULL).IsEmpty());
+ CHECK(define_property->Call(context.local(), proxy, 0, nullptr).IsEmpty());
}
@@ -21341,7 +21061,7 @@ TEST(IndexedInterceptorWithStringProto) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- NULL, NULL, HasOwnPropertyIndexedPropertyQuery));
+ nullptr, nullptr, HasOwnPropertyIndexedPropertyQuery));
LocalContext context;
CHECK(context->Global()
->Set(context.local(), v8_str("obj"),
@@ -21925,9 +21645,7 @@ static void MicrotaskTwo(const v8::FunctionCallbackInfo<Value>& info) {
CompileRun("ext2Calls++;");
}
-
-void* g_passed_to_three = NULL;
-
+void* g_passed_to_three = nullptr;
static void MicrotaskThree(void* data) {
g_passed_to_three = data;
@@ -21969,7 +21687,7 @@ TEST(EnqueueMicrotask) {
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- g_passed_to_three = NULL;
+ g_passed_to_three = nullptr;
env->GetIsolate()->EnqueueMicrotask(MicrotaskThree);
CompileRun("1+1;");
CHECK(!g_passed_to_three);
@@ -21986,7 +21704,7 @@ TEST(EnqueueMicrotask) {
CHECK_EQ(&dummy, g_passed_to_three);
CHECK_EQ(3, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- g_passed_to_three = NULL;
+ g_passed_to_three = nullptr;
}
@@ -22291,7 +22009,7 @@ int* LookupCounter(const char* name) {
} else if (strcmp(name, "c:V8.MegamorphicStubCacheUpdates") == 0) {
return &updates_counter;
}
- return NULL;
+ return nullptr;
}
template <typename Stub, typename... Args>
@@ -23083,7 +22801,7 @@ class ThreadInterruptTest {
// Setup signal handler
memset(&action, 0, sizeof(action));
action.sa_handler = SignalHandler;
- sigaction(SIGCHLD, &action, NULL);
+ sigaction(SIGCHLD, &action, nullptr);
// Send signal
kill(getpid(), SIGCHLD);
@@ -23133,7 +22851,7 @@ TEST(JSONStringifyAccessCheck) {
global_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Create a context and set an x property on it's global object.
- LocalContext context0(NULL, global_template);
+ LocalContext context0(nullptr, global_template);
v8::Local<v8::Object> global0 = context0->Global();
global0->Set(context0.local(), v8_str("x"), v8_num(42)).FromJust();
ExpectString("JSON.stringify(this)", "{\"x\":42}");
@@ -23150,7 +22868,7 @@ TEST(JSONStringifyAccessCheck) {
}
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
- LocalContext context1(NULL, global_template);
+ LocalContext context1(nullptr, global_template);
CHECK(context1->Global()
->Set(context1.local(), v8_str("other"), global0)
.FromJust());
@@ -23230,13 +22948,13 @@ TEST(AccessCheckThrows) {
global_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Create a context and set an x property on it's global object.
- LocalContext context0(NULL, global_template);
+ LocalContext context0(nullptr, global_template);
v8::Local<v8::Object> global0 = context0->Global();
CHECK(global0->Set(context0.local(), v8_str("x"), global0).FromJust());
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
- LocalContext context1(NULL, global_template);
+ LocalContext context1(nullptr, global_template);
CHECK(context1->Global()
->Set(context1.local(), v8_str("other"), global0)
.FromJust());
@@ -23271,8 +22989,8 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%GetProperty(other, 'x')");
CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 0)");
CheckCorrectThrow("%AddNamedProperty(other, 'x', 'foo', 1)");
- STATIC_ASSERT(i::SLOPPY == 0);
- STATIC_ASSERT(i::STRICT == 1);
+ STATIC_ASSERT(static_cast<int>(i::LanguageMode::kSloppy) == 0);
+ STATIC_ASSERT(static_cast<int>(i::LanguageMode::kStrict) == 1);
CheckCorrectThrow("%DeleteProperty(other, 'x', 0)"); // 0 == SLOPPY
CheckCorrectThrow("%DeleteProperty(other, 'x', 1)"); // 1 == STRICT
CheckCorrectThrow("%DeleteProperty(other, '1', 0)");
@@ -23286,7 +23004,7 @@ TEST(AccessCheckThrows) {
// Reset the failed access check callback so it does not influence
// the other tests.
- isolate->SetFailedAccessCheckCallbackFunction(NULL);
+ isolate->SetFailedAccessCheckCallbackFunction(nullptr);
}
TEST(AccessCheckInIC) {
@@ -23315,13 +23033,13 @@ TEST(AccessCheckInIC) {
global_template->SetAccessCheckCallback(AccessCounter);
// Create a context and set an x property on its global object.
- LocalContext context0(isolate, NULL, global_template);
+ LocalContext context0(isolate, nullptr, global_template);
v8::Local<v8::Object> global0 = context0->Global();
CHECK(global0->Set(context0.local(), v8_str("x"), global0).FromJust());
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
- LocalContext context1(isolate, NULL, global_template);
+ LocalContext context1(isolate, nullptr, global_template);
CHECK(context1->Global()
->Set(context1.local(), v8_str("other"), global0)
.FromJust());
@@ -23546,9 +23264,7 @@ class RequestInterruptTestWithNativeAccessor
virtual void TestBody() {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
t->InstanceTemplate()->SetNativeDataProperty(
- v8_str("shouldContinue"),
- &ShouldContinueNativeGetter,
- NULL,
+ v8_str("shouldContinue"), &ShouldContinueNativeGetter, nullptr,
v8::External::New(isolate_, this));
CHECK(env_->Global()
->Set(env_.local(), v8_str("Klass"),
@@ -23755,7 +23471,7 @@ TEST(RequestInterruptSmallScripts) {
v8::HandleScope scope(isolate);
interrupt_was_called = false;
- isolate->RequestInterrupt(&SmallScriptsInterruptCallback, NULL);
+ isolate->RequestInterrupt(&SmallScriptsInterruptCallback, nullptr);
CompileRun("(function(x){return x;})(1);");
CHECK(interrupt_was_called);
}
@@ -23925,7 +23641,7 @@ class ApiCallOptimizationChecker {
}
// Global object must pass checks.
Local<v8::Context> context =
- v8::Context::New(isolate, NULL, signature_template);
+ v8::Context::New(isolate, nullptr, signature_template);
v8::Context::Scope context_scope(context);
// Install regular object that can pass signature checks.
Local<Object> function_receiver =
@@ -24544,15 +24260,15 @@ TEST(PromiseStateAndValue) {
"var resolver;"
"new Promise((res, rej) => { resolver = res; })");
v8::Local<v8::Promise> promise = v8::Local<v8::Promise>::Cast(result);
- CHECK(promise->State() == v8::Promise::PromiseState::kPending);
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kPending);
CompileRun("resolver('fulfilled')");
- CHECK(promise->State() == v8::Promise::PromiseState::kFulfilled);
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kFulfilled);
CHECK(v8_str("fulfilled")->SameValue(promise->Result()));
result = CompileRun("Promise.reject('rejected')");
promise = v8::Local<v8::Promise>::Cast(result);
- CHECK(promise->State() == v8::Promise::PromiseState::kRejected);
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kRejected);
CHECK(v8_str("rejected")->SameValue(promise->Result()));
}
@@ -24760,14 +24476,14 @@ TEST(ScriptPositionInfo) {
void CheckMagicComments(v8::Isolate* isolate, Local<Script> script,
const char* expected_source_url,
const char* expected_source_mapping_url) {
- if (expected_source_url != NULL) {
+ if (expected_source_url != nullptr) {
v8::String::Utf8Value url(isolate,
script->GetUnboundScript()->GetSourceURL());
CHECK_EQ(0, strcmp(expected_source_url, *url));
} else {
CHECK(script->GetUnboundScript()->GetSourceURL()->IsUndefined());
}
- if (expected_source_mapping_url != NULL) {
+ if (expected_source_mapping_url != nullptr) {
v8::String::Utf8Value url(
isolate, script->GetUnboundScript()->GetSourceMappingURL());
CHECK_EQ(0, strcmp(expected_source_mapping_url, *url));
@@ -24792,11 +24508,11 @@ TEST(ScriptSourceURLAndSourceMappingURL) {
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar1.js\n",
- "bar1.js", NULL);
+ "bar1.js", nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceMappingURL=bar2.js\n",
- NULL, "bar2.js");
+ nullptr, "bar2.js");
// Both sourceURL and sourceMappingURL.
SourceURLHelper(isolate,
@@ -24810,58 +24526,58 @@ TEST(ScriptSourceURLAndSourceMappingURL) {
"function foo() {}\n"
"//# sourceURL=ignoreme.js\n"
"//# sourceURL=bar5.js\n",
- "bar5.js", NULL);
+ "bar5.js", nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceMappingURL=ignoreme.js\n"
"//# sourceMappingURL=bar6.js\n",
- NULL, "bar6.js");
+ nullptr, "bar6.js");
// SourceURL or sourceMappingURL in the middle of the script.
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar7.js\n"
"function baz() {}\n",
- "bar7.js", NULL);
+ "bar7.js", nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceMappingURL=bar8.js\n"
"function baz() {}\n",
- NULL, "bar8.js");
+ nullptr, "bar8.js");
// Too much whitespace.
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar9.js\n"
"//# sourceMappingURL=bar10.js\n",
- NULL, NULL);
+ nullptr, nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL =bar11.js\n"
"//# sourceMappingURL =bar12.js\n",
- NULL, NULL);
+ nullptr, nullptr);
// Disallowed characters in value.
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar13 .js \n"
"//# sourceMappingURL=bar14 .js \n",
- NULL, NULL);
+ nullptr, nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar15\t.js \n"
"//# sourceMappingURL=bar16\t.js \n",
- NULL, NULL);
+ nullptr, nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar17'.js \n"
"//# sourceMappingURL=bar18'.js \n",
- NULL, NULL);
+ nullptr, nullptr);
SourceURLHelper(isolate,
"function foo() {}\n"
"//# sourceURL=bar19\".js \n"
"//# sourceMappingURL=bar20\".js \n",
- NULL, NULL);
+ nullptr, nullptr);
// Not too much whitespace.
SourceURLHelper(isolate,
@@ -24909,13 +24625,13 @@ TEST(GetOwnPropertyDescriptor) {
.ToLocalChecked());
CHECK(v8_num(13)
->Equals(env.local(),
- get->Call(env.local(), x, 0, NULL).ToLocalChecked())
+ get->Call(env.local(), x, 0, nullptr).ToLocalChecked())
.FromJust());
Local<Value> args[] = {v8_num(14)};
set->Call(env.local(), x, 1, args).ToLocalChecked();
CHECK(v8_num(14)
->Equals(env.local(),
- get->Call(env.local(), x, 0, NULL).ToLocalChecked())
+ get->Call(env.local(), x, 0, nullptr).ToLocalChecked())
.FromJust());
desc =
x->GetOwnPropertyDescriptor(env.local(), Symbol::GetToStringTag(isolate))
@@ -24992,7 +24708,7 @@ class TestSourceStream : public v8::ScriptCompiler::ExternalSourceStream {
virtual size_t GetMoreData(const uint8_t** src) {
// Unlike in real use cases, this function will never block.
- if (chunks_[index_] == NULL) {
+ if (chunks_[index_] == nullptr) {
return 0;
}
// Copy the data, since the caller takes ownership of it.
@@ -25009,12 +24725,12 @@ class TestSourceStream : public v8::ScriptCompiler::ExternalSourceStream {
// too).
static char* FullSourceString(const char** chunks) {
size_t total_len = 0;
- for (size_t i = 0; chunks[i] != NULL; ++i) {
+ for (size_t i = 0; chunks[i] != nullptr; ++i) {
total_len += strlen(chunks[i]);
}
char* full_string = new char[total_len + 1];
size_t offset = 0;
- for (size_t i = 0; chunks[i] != NULL; ++i) {
+ for (size_t i = 0; chunks[i] != nullptr; ++i) {
size_t len = strlen(chunks[i]);
memcpy(full_string + offset, chunks[i], len);
offset += len;
@@ -25034,8 +24750,8 @@ void RunStreamingTest(const char** chunks,
v8::ScriptCompiler::StreamedSource::Encoding encoding =
v8::ScriptCompiler::StreamedSource::ONE_BYTE,
bool expected_success = true,
- const char* expected_source_url = NULL,
- const char* expected_source_mapping_url = NULL) {
+ const char* expected_source_url = nullptr,
+ const char* expected_source_mapping_url = nullptr) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -25077,19 +24793,16 @@ TEST(StreamingSimpleScript) {
// This script is unrealistically small, since no one chunk is enough to fill
// the backing buffer of Scanner, let alone overflow it.
const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
- NULL};
+ nullptr};
RunStreamingTest(chunks);
}
TEST(StreamingScriptConstantArray) {
// When run with Ignition, tests that the streaming parser canonicalizes
// handles so that they are only added to the constant pool array once.
- const char* chunks[] = {"var a = {};",
- "var b = {};",
- "var c = 'testing';",
- "var d = 'testing';",
- "13;",
- NULL};
+ const char* chunks[] = {
+ "var a = {};", "var b = {};", "var c = 'testing';",
+ "var d = 'testing';", "13;", nullptr};
RunStreamingTest(chunks);
}
@@ -25107,7 +24820,7 @@ TEST(StreamingScriptEvalShadowing) {
" return g();\n"
" })()\n"
"})()\n";
- const char* chunks[] = {chunk1, NULL};
+ const char* chunks[] = {chunk1, nullptr};
RunStreamingTest(chunks);
}
@@ -25127,7 +24840,7 @@ TEST(StreamingBiggerScript) {
" for (i = 0; i < 13; ++i) { result = result + 1; }\n"
" return result;\n"
"}\n";
- const char* chunks[] = {chunk1, "foo(); ", NULL};
+ const char* chunks[] = {chunk1, "foo(); ", nullptr};
RunStreamingTest(chunks);
}
@@ -25139,7 +24852,7 @@ TEST(StreamingScriptWithParseError) {
" // This will result in a parse error.\n"
" var if else then foo";
char chunk2[] = " 13\n";
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE,
false);
@@ -25150,7 +24863,7 @@ TEST(StreamingScriptWithParseError) {
" // This will be parsed successfully.\n"
" function foo() { return ";
char chunk2[] = " 13; }\n";
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks);
}
@@ -25167,7 +24880,7 @@ TEST(StreamingUtf8Script) {
" var foob\xec\x92\x81r = 13;\n"
" return foob\xec\x92\x81r;\n"
"}\n";
- const char* chunks[] = {chunk1, "foo(); ", NULL};
+ const char* chunks[] = {chunk1, "foo(); ", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -25177,7 +24890,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersSanityCheck) {
// characters is correct. Here is an UTF-8 character which will take three
// bytes.
const char* reference = "\xec\x92\x81";
- CHECK(3u == strlen(reference)); // NOLINT - no CHECK_EQ for unsigned.
+ CHECK_EQ(3, strlen(reference));
char chunk1[] =
"function foo() {\n"
@@ -25191,7 +24904,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersSanityCheck) {
for (int i = 0; i < 3; ++i) {
chunk2[i] = reference[i];
}
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -25212,7 +24925,7 @@ TEST(StreamingUtf8ScriptWithSplitCharacters) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -25239,7 +24952,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
chunk2[0] = reference[0];
chunk2[1] = reference[1];
chunk3[0] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
// The small chunk is at the end of a character
@@ -25257,7 +24970,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
// Case 2: the script ends with a multi-byte character. Make sure that it's
@@ -25266,7 +24979,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
char chunk1[] =
"var foob\xec\x92\x81 = 13;\n"
"foob\xec\x92\x81";
- const char* chunks[] = {chunk1, NULL};
+ const char* chunks[] = {chunk1, nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
}
@@ -25290,7 +25003,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk3[0] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -25298,7 +25011,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
TEST(StreamingProducesParserCache) {
const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
- NULL};
+ nullptr};
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -25317,8 +25030,8 @@ TEST(StreamingProducesParserCache) {
delete task;
const v8::ScriptCompiler::CachedData* cached_data = source.GetCachedData();
- CHECK(cached_data != NULL);
- CHECK(cached_data->data != NULL);
+ CHECK_NOT_NULL(cached_data);
+ CHECK_NOT_NULL(cached_data->data);
CHECK(!cached_data->rejected);
CHECK_GT(cached_data->length, 0);
}
@@ -25335,7 +25048,7 @@ TEST(StreamingWithDebuggingEnabledLate) {
" };",
" foo(2);",
"}",
- NULL};
+ nullptr};
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -25388,7 +25101,7 @@ TEST(StreamingScriptWithInvalidUtf8) {
"}\n";
for (int i = 0; i < 5; ++i) chunk1[strlen(chunk1) - 5 + i] = reference[i];
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
}
@@ -25409,7 +25122,7 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -25430,7 +25143,7 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit2) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -25447,7 +25160,7 @@ TEST(StreamingWithHarmonyScopes) {
// Then stream a script which (erroneously) tries to introduce the same
// variable again.
- const char* chunks[] = {"\"use strict\"; let x = 2;", NULL};
+ const char* chunks[] = {"\"use strict\"; let x = 2;", nullptr};
v8::TryCatch try_catch(isolate);
v8::ScriptCompiler::StreamedSource source(
@@ -25589,8 +25302,8 @@ TEST(ParserCacheRejectedGracefully) {
USE(script);
const v8::ScriptCompiler::CachedData* original_cached_data =
source.GetCachedData();
- CHECK(original_cached_data != NULL);
- CHECK(original_cached_data->data != NULL);
+ CHECK_NOT_NULL(original_cached_data);
+ CHECK_NOT_NULL(original_cached_data->data);
CHECK(!original_cached_data->rejected);
CHECK_GT(original_cached_data->length, 0);
// Recompiling the same script with it won't reject the data.
@@ -25606,7 +25319,7 @@ TEST(ParserCacheRejectedGracefully) {
USE(script);
const v8::ScriptCompiler::CachedData* new_cached_data =
source_with_cached_data.GetCachedData();
- CHECK(new_cached_data != NULL);
+ CHECK_NOT_NULL(new_cached_data);
CHECK(!new_cached_data->rejected);
}
// Compile an incompatible script with the cached data. The new script doesn't
@@ -25626,7 +25339,7 @@ TEST(ParserCacheRejectedGracefully) {
USE(script);
const v8::ScriptCompiler::CachedData* new_cached_data =
source_with_cached_data.GetCachedData();
- CHECK(new_cached_data != NULL);
+ CHECK_NOT_NULL(new_cached_data);
CHECK(new_cached_data->rejected);
}
}
@@ -25751,7 +25464,7 @@ TEST(ClassPrototypeCreationContext) {
TEST(SimpleStreamingScriptWithSourceURL) {
const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo();\n",
- "//# sourceURL=bar2.js\n", NULL};
+ "//# sourceURL=bar2.js\n", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
"bar2.js");
}
@@ -25759,7 +25472,7 @@ TEST(SimpleStreamingScriptWithSourceURL) {
TEST(StreamingScriptWithSplitSourceURL) {
const char* chunks[] = {"function foo() { ret", "urn 13; } f",
- "oo();\n//# sourceURL=b", "ar2.js\n", NULL};
+ "oo();\n//# sourceURL=b", "ar2.js\n", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
"bar2.js");
}
@@ -25767,19 +25480,22 @@ TEST(StreamingScriptWithSplitSourceURL) {
TEST(StreamingScriptWithSourceMappingURLInTheMiddle) {
const char* chunks[] = {"function foo() { ret", "urn 13; }\n//#",
- " sourceMappingURL=bar2.js\n", "foo();", NULL};
- RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true, NULL,
- "bar2.js");
+ " sourceMappingURL=bar2.js\n", "foo();", nullptr};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
+ nullptr, "bar2.js");
}
TEST(NewStringRangeError) {
+ // This test uses a lot of memory and fails with flaky OOM when run
+ // with --stress-incremental-marking on TSAN.
+ i::FLAG_stress_incremental_marking = false;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
const int length = i::String::kMaxLength + 1;
const int buffer_size = length * sizeof(uint16_t);
void* buffer = malloc(buffer_size);
- if (buffer == NULL) return;
+ if (buffer == nullptr) return;
memset(buffer, 'A', buffer_size);
{
v8::TryCatch try_catch(isolate);
@@ -26323,8 +26039,8 @@ THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
ScopedSharedArrayBufferContents contents(ab->Externalize());
// Array buffers should have normal allocation mode.
- CHECK(contents.AllocationMode() ==
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
+ CHECK_EQ(contents.AllocationMode(),
+ v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
// The allocation must contain the buffer (normally they will be equal, but
// this is not required by the contract).
CHECK_NOT_NULL(contents.AllocationBase());
@@ -26349,7 +26065,7 @@ TEST(AbortOnUncaughtExceptionNoAbort) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
i::FLAG_abort_on_uncaught_exception = true;
isolate->SetAbortOnUncaughtExceptionCallback(NoAbortOnUncaughtException);
@@ -26360,7 +26076,7 @@ TEST(AbortOnUncaughtExceptionNoAbort) {
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
global_object->Get(env.local(), v8_str("boom")).ToLocalChecked());
- CHECK(foo->Call(env.local(), global_object, 0, NULL).IsEmpty());
+ CHECK(foo->Call(env.local(), global_object, 0, nullptr).IsEmpty());
CHECK_EQ(1, nb_uncaught_exception_callback_calls);
}
@@ -26832,7 +26548,7 @@ static void CallEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Context::Scope scope(call_eval_context);
args.GetReturnValue().Set(
call_eval_bound_function
- ->Call(call_eval_context, call_eval_context->Global(), 0, NULL)
+ ->Call(call_eval_context, call_eval_context->Global(), 0, nullptr)
.ToLocalChecked());
}
@@ -26865,8 +26581,8 @@ TEST(EvalInAccessCheckedContext) {
obj_template->SetAccessCheckCallback(AccessAlwaysAllowed);
- v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
- v8::Local<Context> context1 = Context::New(isolate, NULL, obj_template);
+ v8::Local<Context> context0 = Context::New(isolate, nullptr, obj_template);
+ v8::Local<Context> context1 = Context::New(isolate, nullptr, obj_template);
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -27053,7 +26769,7 @@ THREADED_TEST(GlobalAccessorInfo) {
v8::String::NewFromUtf8(isolate, "prop", v8::NewStringType::kInternalized)
.ToLocalChecked(),
&ensure_receiver_is_global_proxy);
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
CompileRun("for (var i = 0; i < 10; i++) this.prop");
CompileRun("for (var i = 0; i < 10; i++) prop");
}
@@ -27231,6 +26947,58 @@ TEST(DynamicImport) {
CHECK(result->Equals(i::String::cast(promise->result())));
}
+void HostInitializeImportMetaObjectCallbackStatic(Local<Context> context,
+ Local<Module> module,
+ Local<Object> meta) {
+ CHECK(!module.IsEmpty());
+
+ meta->CreateDataProperty(context, v8_str("foo"), v8_str("bar")).ToChecked();
+}
+
+v8::MaybeLocal<Module> UnexpectedModuleResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer) {
+ CHECK_WITH_MSG(false, "Unexpected call to resolve callback");
+}
+
+TEST(ImportMeta) {
+ i::FLAG_harmony_dynamic_import = true;
+ i::FLAG_harmony_import_meta = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallbackStatic);
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Local<String> url = v8_str("www.google.com");
+ Local<String> source_text = v8_str("import.meta;");
+ v8::ScriptOrigin origin(url, Local<v8::Integer>(), Local<v8::Integer>(),
+ Local<v8::Boolean>(), Local<v8::Integer>(),
+ Local<v8::Value>(), Local<v8::Boolean>(),
+ Local<v8::Boolean>(), True(isolate));
+ v8::ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ i::Handle<i::Object> meta =
+ i_isolate->RunHostInitializeImportMetaObjectCallback(
+ v8::Utils::OpenHandle(*module));
+ CHECK(meta->IsJSObject());
+ Local<Object> meta_obj = Local<Object>::Cast(v8::Utils::ToLocal(meta));
+ CHECK(meta_obj->Get(context.local(), v8_str("foo"))
+ .ToLocalChecked()
+ ->IsString());
+ CHECK(meta_obj->Get(context.local(), v8_str("zapp"))
+ .ToLocalChecked()
+ ->IsUndefined());
+
+ module->InstantiateModule(context.local(), UnexpectedModuleResolveCallback)
+ .ToChecked();
+ Local<Value> result = module->Evaluate(context.local()).ToLocalChecked();
+ CHECK(result->StrictEquals(Local<v8::Value>::Cast(v8::Utils::ToLocal(meta))));
+}
+
TEST(GlobalTemplateWithDoubleProperty) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -27293,3 +27061,19 @@ TEST(PrimitiveArray) {
CHECK(array->Get(3)->IsBoolean());
CHECK(array->Get(4)->IsNull());
}
+
+TEST(PersistentValueMap) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+
+ v8::PersistentValueMap<
+ std::string, v8::Value,
+ v8::DefaultPersistentValueMapTraits<std::string, v8::Value>>
+ map(isolate);
+ v8::Local<v8::Value> value =
+ v8::String::NewFromUtf8(isolate, "value",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ map.Set("key", value);
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 5f405548f0..169f927f74 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -36,7 +36,9 @@
#include "src/macro-assembler.h"
#include "src/ostreams.h"
#include "src/v8.h"
+#include "test/cctest/assembler-helper-arm.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
namespace v8 {
namespace internal {
@@ -44,13 +46,6 @@ namespace test_assembler_arm {
using base::RandomNumberGenerator;
-// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
-typedef Object* (*F5)(uint32_t p0, void* p1, void* p2, int p3, int p4);
-
#define __ assm.
TEST(0) {
@@ -58,7 +53,7 @@ TEST(0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ add(r0, r0, Operand(r1));
__ mov(pc, Operand(lr));
@@ -71,7 +66,7 @@ TEST(0) {
OFStream os(stdout);
code->Print(os);
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -84,7 +79,7 @@ TEST(1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
__ mov(r1, Operand(r0));
@@ -108,7 +103,7 @@ TEST(1) {
OFStream os(stdout);
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -121,7 +116,7 @@ TEST(2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
__ mov(r1, Operand(r0));
@@ -154,7 +149,7 @@ TEST(2) {
OFStream os(stdout);
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -174,7 +169,7 @@ TEST(3) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
__ mov(ip, Operand(sp));
@@ -202,7 +197,7 @@ TEST(3) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
t.i = 100000;
t.c = 10;
t.s = 1000;
@@ -244,7 +239,7 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
if (CpuFeatures::IsSupported(VFPv3)) {
@@ -334,7 +329,7 @@ TEST(4) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -377,7 +372,7 @@ TEST(5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(&assm, ARMv7);
@@ -397,7 +392,7 @@ TEST(5) {
OFStream os(stdout);
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(isolate, f, 0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -412,7 +407,7 @@ TEST(6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
__ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
@@ -429,7 +424,7 @@ TEST(6) {
OFStream os(stdout);
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -450,7 +445,7 @@ static void TestRoundingMode(VCVTTypes types,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label wrong_exception;
@@ -498,7 +493,7 @@ static void TestRoundingMode(VCVTTypes types,
OFStream os(stdout);
code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
::printf("res = %d\n", res);
@@ -649,7 +644,7 @@ TEST(8) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -681,7 +676,7 @@ TEST(8) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 fn = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii fn = FUNCTION_CAST<F_ppiii>(code->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -755,7 +750,7 @@ TEST(9) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -791,7 +786,7 @@ TEST(9) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 fn = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii fn = FUNCTION_CAST<F_ppiii>(code->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -865,7 +860,7 @@ TEST(10) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -897,7 +892,7 @@ TEST(10) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 fn = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii fn = FUNCTION_CAST<F_ppiii>(code->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -956,7 +951,7 @@ TEST(11) {
i.a = 0xabcd0001;
i.b = 0xabcd0000;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// Test HeapObject untagging.
__ ldr(r1, MemOperand(r0, offsetof(I, a)));
@@ -992,7 +987,7 @@ TEST(11) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
Object* dummy = CALL_GENERATED_CODE(isolate, f, &i, 0, 0, 0, 0);
USE(dummy);
@@ -1009,7 +1004,7 @@ TEST(12) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label target;
__ b(eq, &target);
__ b(ne, &target);
@@ -1045,7 +1040,7 @@ TEST(13) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
if (CpuFeatures::IsSupported(VFPv3)) {
@@ -1119,7 +1114,7 @@ TEST(13) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -1160,7 +1155,7 @@ TEST(14) {
T t;
// Create a function that makes the four basic operations.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// Ensure FPSCR state (as JSEntryStub does).
Label fpscr_done;
@@ -1192,7 +1187,7 @@ TEST(14) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
t.left = bit_cast<double>(kHoleNanInt64);
t.right = 1;
t.add_result = 0;
@@ -1206,7 +1201,7 @@ TEST(14) {
#ifdef DEBUG
const uint64_t kArmNanInt64 =
(static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
- CHECK(kArmNanInt64 != kHoleNanInt64);
+ CHECK_NE(kArmNanInt64, kHoleNanInt64);
#endif
// With VFP2 the sign of the canonicalized Nan is undefined. So
// we remove the sign bit for the upper tests.
@@ -1340,7 +1335,7 @@ TEST(15) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles, floats, and SIMD values.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(NEON)) {
CpuFeatureScope scope(&assm, NEON);
@@ -2073,7 +2068,7 @@ TEST(15) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x21222324;
@@ -2316,7 +2311,7 @@ TEST(16) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ stm(db_w, sp, r4.bit() | lr.bit());
@@ -2350,7 +2345,7 @@ TEST(16) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x11121300;
@@ -2377,7 +2372,7 @@ TEST(17) {
HandleScope scope(isolate);
// Generate a code segment that will be longer than 2^24 bytes.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
for (size_t i = 0; i < 1 << 23 ; ++i) { // 2^23
__ nop();
}
@@ -2402,7 +2397,7 @@ TEST(sdiv) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
struct T {
int32_t dividend;
@@ -2431,7 +2426,7 @@ TEST(sdiv) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
Object* dummy;
TEST_SDIV(0, kMinInt, 0);
TEST_SDIV(0, 1024, 0);
@@ -2466,7 +2461,7 @@ TEST(udiv) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
struct T {
uint32_t dividend;
@@ -2495,7 +2490,7 @@ TEST(udiv) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
Object* dummy;
TEST_UDIV(0u, 0, 0);
TEST_UDIV(0u, 1024, 0);
@@ -2525,7 +2520,7 @@ TEST(smmla) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, z, 0);
@@ -2551,7 +2546,7 @@ TEST(smmul) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
@@ -2577,7 +2572,7 @@ TEST(sxtb) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
@@ -2603,7 +2598,7 @@ TEST(sxtab) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
@@ -2629,7 +2624,7 @@ TEST(sxth) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
@@ -2655,7 +2650,7 @@ TEST(sxtah) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
@@ -2681,7 +2676,7 @@ TEST(uxtb) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
@@ -2707,7 +2702,7 @@ TEST(uxtab) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
@@ -2733,7 +2728,7 @@ TEST(uxth) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
@@ -2759,7 +2754,7 @@ TEST(uxtah) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
@@ -2803,8 +2798,8 @@ TEST(rbit) {
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- Object* dummy = NULL;
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ Object* dummy = nullptr;
TEST_RBIT(0xffffffff, 0xffffffff);
TEST_RBIT(0x00000000, 0x00000000);
TEST_RBIT(0xffff0000, 0x0000ffff);
@@ -2825,7 +2820,7 @@ TEST(code_relative_offset) {
// Initialize a code object that will contain the code.
Handle<HeapObject> code_object(isolate->heap()->undefined_value(), isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label start, target_away, target_faraway;
@@ -2880,7 +2875,7 @@ TEST(code_relative_offset) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, code_object);
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 21, 0, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -2893,7 +2888,7 @@ TEST(msr_mrs) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// Create a helper function:
// void TestMsrMrs(uint32_t nzcv,
@@ -2924,7 +2919,7 @@ TEST(msr_mrs) {
OFStream os(stdout);
code->Print(os);
#endif
- F5 f = FUNCTION_CAST<F5>(code->entry());
+ F_ippii f = FUNCTION_CAST<F_ippii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -2978,7 +2973,7 @@ TEST(ARMv8_float32_vrintX) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the floats.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
@@ -3025,7 +3020,7 @@ TEST(ARMv8_float32_vrintX) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3083,7 +3078,7 @@ TEST(ARMv8_vrintX) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
@@ -3130,7 +3125,7 @@ TEST(ARMv8_vrintX) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3175,7 +3170,7 @@ TEST(ARMv8_vsel) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// Used to indicate whether a condition passed or failed.
static constexpr float kResultPass = 1.0f;
@@ -3270,7 +3265,7 @@ TEST(ARMv8_vsel) {
OFStream os(stdout);
code->Print(os);
#endif
- F5 f = FUNCTION_CAST<F5>(code->entry());
+ F_ippii f = FUNCTION_CAST<F_ippii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3328,7 +3323,7 @@ TEST(ARMv8_vminmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
struct Inputs {
double left_;
@@ -3364,7 +3359,7 @@ TEST(ARMv8_vminmax_f64) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3410,7 +3405,7 @@ TEST(ARMv8_vminmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
struct Inputs {
float left_;
@@ -3446,7 +3441,7 @@ TEST(ARMv8_vminmax_f32) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3487,7 +3482,7 @@ TEST(ARMv8_vminmax_f32) {
}
template <typename T, typename Inputs, typename Results>
-static F4 GenerateMacroFloatMinMax(MacroAssembler& assm) {
+static F_ppiii GenerateMacroFloatMinMax(MacroAssembler& assm) {
T a = T::from_code(0); // d0/s0
T b = T::from_code(1); // d1/s1
T c = T::from_code(2); // d2/s2
@@ -3578,7 +3573,7 @@ static F4 GenerateMacroFloatMinMax(MacroAssembler& assm) {
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<F4>(code->entry());
+ return FUNCTION_CAST<F_ppiii>(code->entry());
}
TEST(macro_float_minmax_f64) {
@@ -3587,7 +3582,7 @@ TEST(macro_float_minmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0, CodeObjectRequired::kYes);
struct Inputs {
double left_;
@@ -3605,7 +3600,7 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- F4 f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
+ F_ppiii f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
Object* dummy = nullptr;
USE(dummy);
@@ -3655,7 +3650,7 @@ TEST(macro_float_minmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0, CodeObjectRequired::kYes);
struct Inputs {
float left_;
@@ -3673,7 +3668,7 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- F4 f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
+ F_ppiii f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
Object* dummy = nullptr;
USE(dummy);
@@ -3729,7 +3724,7 @@ TEST(unaligned_loads) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ ldrh(ip, MemOperand(r1, r2));
__ str(ip, MemOperand(r0, offsetof(T, ldrh)));
__ ldrsh(ip, MemOperand(r1, r2));
@@ -3746,7 +3741,7 @@ TEST(unaligned_loads) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3779,7 +3774,7 @@ TEST(unaligned_stores) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ strh(r3, MemOperand(r0, r2));
__ str(r3, MemOperand(r1, r2));
__ bx(lr);
@@ -3792,7 +3787,7 @@ TEST(unaligned_stores) {
OFStream os(stdout);
code->Print(os);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
Object* dummy = nullptr;
USE(dummy);
@@ -3836,7 +3831,7 @@ TEST(vswp) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
typedef struct {
uint64_t vswp_d0;
@@ -3895,7 +3890,7 @@ TEST(vswp) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(minus_one, t.vswp_d0);
@@ -3919,7 +3914,7 @@ TEST(regress4292_b) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
@@ -3934,7 +3929,7 @@ TEST(regress4292_bl) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
@@ -3949,7 +3944,7 @@ TEST(regress4292_blx) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
@@ -3964,7 +3959,7 @@ TEST(regress4292_CheckConstPool) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ mov(r0, Operand(isolate->factory()->infinity_value()));
__ BlockConstPoolFor(1019);
for (int i = 0; i < 1019; ++i) __ nop();
@@ -3976,7 +3971,7 @@ TEST(use_scratch_register_scope) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// The assembler should have ip as a scratch by default.
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
@@ -3993,6 +3988,179 @@ TEST(use_scratch_register_scope) {
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
}
+TEST(split_add_immediate) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ {
+ Assembler assm(isolate, nullptr, 0);
+ __ mov(r1, r0);
+ // Re-use the destination as a scratch.
+ __ add(r0, r1, Operand(0x12345678));
+ __ blx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
+ uint32_t res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ ::printf("f() = 0x%x\n", res);
+ CHECK_EQ(0x12345678, res);
+ }
+
+ {
+ Assembler assm(isolate, nullptr, 0);
+ // Use ip as a scratch.
+ __ add(r0, r0, Operand(0x12345678));
+ __ blx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
+ uint32_t res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ ::printf("f() = 0x%x\n", res);
+ CHECK_EQ(0x12345678, res);
+ }
+
+ {
+ Assembler assm(isolate, nullptr, 0);
+ UseScratchRegisterScope temps(&assm);
+ Register reserved = temps.Acquire();
+ USE(reserved);
+ // If ip is not available, split the operation into multiple additions.
+ __ add(r0, r0, Operand(0x12345678));
+ __ blx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
+ uint32_t res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ ::printf("f() = 0x%x\n", res);
+ CHECK_EQ(0x12345678, res);
+ }
+}
+
+namespace {
+
+std::vector<Float32> Float32Inputs() {
+ std::vector<Float32> inputs;
+ FOR_FLOAT32_INPUTS(f) {
+ inputs.push_back(Float32::FromBits(bit_cast<uint32_t>(*f)));
+ }
+ FOR_UINT32_INPUTS(bits) { inputs.push_back(Float32::FromBits(*bits)); }
+ return inputs;
+}
+
+std::vector<Float64> Float64Inputs() {
+ std::vector<Float64> inputs;
+ FOR_FLOAT64_INPUTS(f) {
+ inputs.push_back(Float64::FromBits(bit_cast<uint64_t>(*f)));
+ }
+ FOR_UINT64_INPUTS(bits) { inputs.push_back(Float64::FromBits(*bits)); }
+ return inputs;
+}
+
+} // namespace
+
+TEST(vabs_32) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ __ vmov(s0, r0);
+ __ vabs(s0, s0);
+ __ vmov(r0, s0);
+ }));
+
+ for (Float32 f32 : Float32Inputs()) {
+ Float32 res = Float32::FromBits(reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, f32.get_bits(), 0, 0, 0, 0)));
+ Float32 exp = Float32::FromBits(f32.get_bits() & ~(1 << 31));
+ CHECK_EQ(exp.get_bits(), res.get_bits());
+ }
+}
+
+TEST(vabs_64) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ __ vmov(d0, r0, r1);
+ __ vabs(d0, d0);
+ __ vmov(r1, r0, d0);
+ }));
+
+ for (Float64 f64 : Float64Inputs()) {
+ uint32_t p0 = static_cast<uint32_t>(f64.get_bits());
+ uint32_t p1 = static_cast<uint32_t>(f64.get_bits() >> 32);
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, p0, p1, 0, 0, 0));
+ Float64 exp = Float64::FromBits(f64.get_bits() & ~(1ull << 63));
+ // We just get back the top word, so only compare that one.
+ CHECK_EQ(exp.get_bits() >> 32, res);
+ }
+}
+
+TEST(vneg_32) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ __ vmov(s0, r0);
+ __ vneg(s0, s0);
+ __ vmov(r0, s0);
+ }));
+
+ for (Float32 f32 : Float32Inputs()) {
+ Float32 res = Float32::FromBits(reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, f32.get_bits(), 0, 0, 0, 0)));
+ Float32 exp = Float32::FromBits(f32.get_bits() ^ (1 << 31));
+ CHECK_EQ(exp.get_bits(), res.get_bits());
+ }
+}
+
+TEST(vneg_64) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ __ vmov(d0, r0, r1);
+ __ vneg(d0, d0);
+ __ vmov(r1, r0, d0);
+ }));
+
+ for (Float64 f64 : Float64Inputs()) {
+ uint32_t p0 = static_cast<uint32_t>(f64.get_bits());
+ uint32_t p1 = static_cast<uint32_t>(f64.get_bits() >> 32);
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, p0, p1, 0, 0, 0));
+ Float64 exp = Float64::FromBits(f64.get_bits() ^ (1ull << 63));
+ // We just get back the top word, so only compare that one.
+ CHECK_EQ(exp.get_bits() >> 32, res);
+ }
+}
+
#undef __
} // namespace test_assembler_arm
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 9ebe524a6f..62f7ccf2c6 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -122,14 +122,14 @@ static void InitializeVM() {
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
- CHECK(isolate != NULL); \
+ CHECK_NOT_NULL(isolate); \
byte* buf = new byte[buf_size]; \
MacroAssembler masm(isolate, buf, buf_size, \
v8::internal::CodeObjectRequired::kYes); \
Decoder<DispatchingDecoderVisitor>* decoder = \
new Decoder<DispatchingDecoderVisitor>(); \
Simulator simulator(decoder); \
- PrintDisassembler* pdis = NULL; \
+ PrintDisassembler* pdis = nullptr; \
RegisterDump core;
/* if (Cctest::trace_sim()) { \
@@ -166,7 +166,7 @@ static void InitializeVM() {
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
- __ GetCode(masm.isolate(), NULL);
+ __ GetCode(masm.isolate(), nullptr);
#define TEARDOWN() \
delete pdis; \
@@ -174,15 +174,14 @@ static void InitializeVM() {
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- CHECK(isolate != NULL); \
- size_t actual_size; \
- byte* buf = static_cast<byte*>( \
- v8::base::OS::Allocate(buf_size, &actual_size, true)); \
- MacroAssembler masm(isolate, buf, static_cast<unsigned>(actual_size), \
- v8::internal::CodeObjectRequired::kYes); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK_NOT_NULL(isolate); \
+ size_t allocated; \
+ byte* buf = AllocateAssemblerBuffer(&allocated, buf_size); \
+ MacroAssembler masm(isolate, buf, static_cast<int>(allocated), \
+ v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
#define RESET() \
@@ -212,10 +211,9 @@ static void InitializeVM() {
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
- __ GetCode(masm.isolate(), NULL);
+ __ GetCode(masm.isolate(), nullptr);
-#define TEARDOWN() \
- v8::base::OS::Free(buf, actual_size);
+#define TEARDOWN() CHECK(v8::base::OS::Free(buf, allocated));
#endif // ifdef USE_SIMULATOR.
@@ -10570,8 +10568,8 @@ TEST(fcvt_sd) {
float expected = test[i].expected;
// We only expect positive input.
- CHECK(std::signbit(in) == 0);
- CHECK(std::signbit(expected) == 0);
+ CHECK_EQ(std::signbit(in), 0);
+ CHECK_EQ(std::signbit(expected), 0);
SETUP();
START();
@@ -12368,7 +12366,7 @@ static void PushPopJsspSimpleHelper(int reg_count,
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, kNumberOfRegisters>();
auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
- RegList list = PopulateRegisterArray(NULL, x.data(), r.data(), reg_size,
+ RegList list = PopulateRegisterArray(nullptr, x.data(), r.data(), reg_size,
reg_count, allowed);
// The literal base is chosen to have two useful properties:
@@ -12409,7 +12407,7 @@ static void PushPopJsspSimpleHelper(int reg_count,
case 2: __ Push(r[1], r[0]); break;
case 1: __ Push(r[0]); break;
default:
- CHECK(i == 0);
+ CHECK_EQ(i, 0);
break;
}
break;
@@ -12552,7 +12550,7 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
// Work out which registers to use, based on reg_size.
auto v = CreateRegisterArray<VRegister, kNumberOfRegisters>();
auto d = CreateRegisterArray<VRegister, kNumberOfRegisters>();
- RegList list = PopulateVRegisterArray(NULL, d.data(), v.data(), reg_size,
+ RegList list = PopulateVRegisterArray(nullptr, d.data(), v.data(), reg_size,
reg_count, allowed);
// The literal base is chosen to have two useful properties:
@@ -12597,7 +12595,7 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
case 2: __ Push(v[1], v[0]); break;
case 1: __ Push(v[0]); break;
default:
- CHECK(i == 0);
+ CHECK_EQ(i, 0);
break;
}
break;
@@ -12721,7 +12719,7 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, 10>();
auto x = CreateRegisterArray<Register, 10>();
- PopulateRegisterArray(NULL, x.data(), r.data(), reg_size, 10, allowed);
+ PopulateRegisterArray(nullptr, x.data(), r.data(), reg_size, 10, allowed);
// Calculate some handy register lists.
RegList r0_to_r3 = 0;
@@ -12827,7 +12825,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
auto w = CreateRegisterArray<Register, kNumberOfRegisters>();
auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
RegList list =
- PopulateRegisterArray(w.data(), x.data(), NULL, 0, reg_count, allowed);
+ PopulateRegisterArray(w.data(), x.data(), nullptr, 0, reg_count, allowed);
// The number of W-sized slots we expect to pop. When we pop, we alternate
// between W and X registers, so we need reg_count*1.5 W-sized slots.
@@ -12965,7 +12963,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
}
next_is_64 = !next_is_64;
}
- CHECK(active_w_slots == 0);
+ CHECK_EQ(active_w_slots, 0);
// Drop memory to restore jssp.
__ Drop(claim, kByteSizeInBytes);
@@ -13263,6 +13261,346 @@ TEST(pop_queued) {
TEARDOWN();
}
+TEST(copy_slots_down) {
+ INIT_V8();
+ SETUP();
+
+ const uint64_t ones = 0x1111111111111111UL;
+ const uint64_t twos = 0x2222222222222222UL;
+ const uint64_t threes = 0x3333333333333333UL;
+ const uint64_t fours = 0x4444444444444444UL;
+
+ START();
+
+ // Test copying 12 slots down one slot.
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ __ Mov(x1, ones);
+ __ Mov(x2, twos);
+ __ Mov(x3, threes);
+ __ Mov(x4, fours);
+
+ __ Push(x1, x2, x3, x4);
+ __ Push(x1, x2, x1, x2);
+ __ Push(x3, x4, x3, x4);
+ __ Push(xzr);
+
+ __ Mov(x5, 0);
+ __ Mov(x6, 1);
+ __ Mov(x7, 12);
+ __ CopySlots(x5, x6, x7);
+
+ __ Pop(x4, x5, x6, x7);
+ __ Pop(x8, x9, x10, x11);
+ __ Pop(x12, x13, x14, x15);
+ __ Drop(1);
+
+ // Test copying one slot down one slot.
+ __ Push(x1, xzr, xzr);
+
+ __ Mov(x1, 1);
+ __ Mov(x2, 2);
+ __ Mov(x3, 1);
+ __ CopySlots(x1, x2, x3);
+
+ __ Drop(1);
+ __ Pop(x0);
+ __ Drop(1);
+
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(fours, x4);
+ CHECK_EQUAL_64(threes, x5);
+ CHECK_EQUAL_64(fours, x6);
+ CHECK_EQUAL_64(threes, x7);
+
+ CHECK_EQUAL_64(twos, x8);
+ CHECK_EQUAL_64(ones, x9);
+ CHECK_EQUAL_64(twos, x10);
+ CHECK_EQUAL_64(ones, x11);
+
+ CHECK_EQUAL_64(fours, x12);
+ CHECK_EQUAL_64(threes, x13);
+ CHECK_EQUAL_64(twos, x14);
+ CHECK_EQUAL_64(ones, x15);
+
+ CHECK_EQUAL_64(ones, x0);
+
+ TEARDOWN();
+}
+
+TEST(copy_slots_up) {
+ INIT_V8();
+ SETUP();
+
+ const uint64_t ones = 0x1111111111111111UL;
+ const uint64_t twos = 0x2222222222222222UL;
+ const uint64_t threes = 0x3333333333333333UL;
+
+ START();
+
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ __ Mov(x1, ones);
+ __ Mov(x2, twos);
+ __ Mov(x3, threes);
+
+ // Test copying one slot to the next slot higher in memory.
+ __ Push(xzr, x1);
+
+ __ Mov(x5, 1);
+ __ Mov(x6, 0);
+ __ Mov(x7, 1);
+ __ CopySlots(x5, x6, x7);
+
+ __ Drop(1);
+ __ Pop(x10);
+
+ // Test copying two slots to the next two slots higher in memory.
+ __ Push(xzr, xzr);
+ __ Push(x1, x2);
+
+ __ Mov(x5, 2);
+ __ Mov(x6, 0);
+ __ Mov(x7, 2);
+ __ CopySlots(x5, x6, x7);
+
+ __ Drop(2);
+ __ Pop(x11, x12);
+
+ // Test copying three slots to the next three slots higher in memory.
+ __ Push(xzr, xzr, xzr);
+ __ Push(x1, x2, x3);
+
+ __ Mov(x5, 3);
+ __ Mov(x6, 0);
+ __ Mov(x7, 3);
+ __ CopySlots(x5, x6, x7);
+
+ __ Drop(3);
+ __ Pop(x0, x1, x2);
+
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(ones, x10);
+ CHECK_EQUAL_64(twos, x11);
+ CHECK_EQUAL_64(ones, x12);
+ CHECK_EQUAL_64(threes, x0);
+ CHECK_EQUAL_64(twos, x1);
+ CHECK_EQUAL_64(ones, x2);
+
+ TEARDOWN();
+}
+
+TEST(copy_double_words_downwards_even) {
+ INIT_V8();
+ SETUP();
+
+ const uint64_t ones = 0x1111111111111111UL;
+ const uint64_t twos = 0x2222222222222222UL;
+ const uint64_t threes = 0x3333333333333333UL;
+ const uint64_t fours = 0x4444444444444444UL;
+
+ START();
+
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ // Test copying 12 slots up one slot.
+ __ Mov(x1, ones);
+ __ Mov(x2, twos);
+ __ Mov(x3, threes);
+ __ Mov(x4, fours);
+
+ __ Push(xzr);
+ __ Push(x1, x2, x3, x4);
+ __ Push(x1, x2, x1, x2);
+ __ Push(x3, x4, x3, x4);
+
+ __ SlotAddress(x5, 12);
+ __ SlotAddress(x6, 11);
+ __ Mov(x7, 12);
+ __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+
+ __ Drop(1);
+ __ Pop(x4, x5, x6, x7);
+ __ Pop(x8, x9, x10, x11);
+ __ Pop(x12, x13, x14, x15);
+
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(ones, x15);
+ CHECK_EQUAL_64(twos, x14);
+ CHECK_EQUAL_64(threes, x13);
+ CHECK_EQUAL_64(fours, x12);
+
+ CHECK_EQUAL_64(ones, x11);
+ CHECK_EQUAL_64(twos, x10);
+ CHECK_EQUAL_64(ones, x9);
+ CHECK_EQUAL_64(twos, x8);
+
+ CHECK_EQUAL_64(threes, x7);
+ CHECK_EQUAL_64(fours, x6);
+ CHECK_EQUAL_64(threes, x5);
+ CHECK_EQUAL_64(fours, x4);
+
+ TEARDOWN();
+}
+
+TEST(copy_double_words_downwards_odd) {
+ INIT_V8();
+ SETUP();
+
+ const uint64_t ones = 0x1111111111111111UL;
+ const uint64_t twos = 0x2222222222222222UL;
+ const uint64_t threes = 0x3333333333333333UL;
+ const uint64_t fours = 0x4444444444444444UL;
+ const uint64_t fives = 0x5555555555555555UL;
+
+ START();
+
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ // Test copying 13 slots up one slot.
+ __ Mov(x1, ones);
+ __ Mov(x2, twos);
+ __ Mov(x3, threes);
+ __ Mov(x4, fours);
+ __ Mov(x5, fives);
+
+ __ Push(xzr, x5);
+ __ Push(x1, x2, x3, x4);
+ __ Push(x1, x2, x1, x2);
+ __ Push(x3, x4, x3, x4);
+
+ __ SlotAddress(x5, 13);
+ __ SlotAddress(x6, 12);
+ __ Mov(x7, 13);
+ __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+
+ __ Drop(1);
+ __ Pop(x4);
+ __ Pop(x5, x6, x7, x8);
+ __ Pop(x9, x10, x11, x12);
+ __ Pop(x13, x14, x15, x16);
+
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(fives, x16);
+
+ CHECK_EQUAL_64(ones, x15);
+ CHECK_EQUAL_64(twos, x14);
+ CHECK_EQUAL_64(threes, x13);
+ CHECK_EQUAL_64(fours, x12);
+
+ CHECK_EQUAL_64(ones, x11);
+ CHECK_EQUAL_64(twos, x10);
+ CHECK_EQUAL_64(ones, x9);
+ CHECK_EQUAL_64(twos, x8);
+
+ CHECK_EQUAL_64(threes, x7);
+ CHECK_EQUAL_64(fours, x6);
+ CHECK_EQUAL_64(threes, x5);
+ CHECK_EQUAL_64(fours, x4);
+
+ TEARDOWN();
+}
+
+TEST(copy_noop) {
+ INIT_V8();
+ SETUP();
+
+ const uint64_t ones = 0x1111111111111111UL;
+ const uint64_t twos = 0x2222222222222222UL;
+ const uint64_t threes = 0x3333333333333333UL;
+ const uint64_t fours = 0x4444444444444444UL;
+ const uint64_t fives = 0x5555555555555555UL;
+
+ START();
+
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ __ Mov(x1, ones);
+ __ Mov(x2, twos);
+ __ Mov(x3, threes);
+ __ Mov(x4, fours);
+ __ Mov(x5, fives);
+
+ __ Push(xzr, x5, x5, xzr);
+ __ Push(x3, x4, x3, x4);
+ __ Push(x1, x2, x1, x2);
+ __ Push(x1, x2, x3, x4);
+
+ // src < dst, count == 0
+ __ SlotAddress(x5, 3);
+ __ SlotAddress(x6, 2);
+ __ Mov(x7, 0);
+ __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+
+ // dst < src, count == 0
+ __ SlotAddress(x5, 2);
+ __ SlotAddress(x6, 3);
+ __ Mov(x7, 0);
+ __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kDstLessThanSrc);
+
+ __ Pop(x1, x2, x3, x4);
+ __ Pop(x5, x6, x7, x8);
+ __ Pop(x9, x10, x11, x12);
+ __ Pop(x13, x14, x15, x16);
+
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(fours, x1);
+ CHECK_EQUAL_64(threes, x2);
+ CHECK_EQUAL_64(twos, x3);
+ CHECK_EQUAL_64(ones, x4);
+
+ CHECK_EQUAL_64(twos, x5);
+ CHECK_EQUAL_64(ones, x6);
+ CHECK_EQUAL_64(twos, x7);
+ CHECK_EQUAL_64(ones, x8);
+
+ CHECK_EQUAL_64(fours, x9);
+ CHECK_EQUAL_64(threes, x10);
+ CHECK_EQUAL_64(fours, x11);
+ CHECK_EQUAL_64(threes, x12);
+
+ CHECK_EQUAL_64(0, x13);
+ CHECK_EQUAL_64(fives, x14);
+ CHECK_EQUAL_64(fives, x15);
+ CHECK_EQUAL_64(0, x16);
+
+ TEARDOWN();
+}
TEST(jump_both_smi) {
INIT_V8();
@@ -15183,7 +15521,7 @@ static void AbsHelperX(int64_t value) {
__ Abs(x11, x1, &fail);
__ Abs(x12, x1, &fail, &next);
__ Bind(&next);
- __ Abs(x13, x1, NULL, &done);
+ __ Abs(x13, x1, nullptr, &done);
} else {
// labs is undefined for kXMinInt but our implementation in the
// MacroAssembler will return kXMinInt in such a case.
@@ -15192,7 +15530,7 @@ static void AbsHelperX(int64_t value) {
Label next;
// The result is not representable.
__ Abs(x10, x1);
- __ Abs(x11, x1, NULL, &fail);
+ __ Abs(x11, x1, nullptr, &fail);
__ Abs(x12, x1, &next, &fail);
__ Bind(&next);
__ Abs(x13, x1, &done);
@@ -15240,7 +15578,7 @@ static void AbsHelperW(int32_t value) {
__ Abs(w11, w1, &fail);
__ Abs(w12, w1, &fail, &next);
__ Bind(&next);
- __ Abs(w13, w1, NULL, &done);
+ __ Abs(w13, w1, nullptr, &done);
} else {
// abs is undefined for kWMinInt but our implementation in the
// MacroAssembler will return kWMinInt in such a case.
@@ -15249,7 +15587,7 @@ static void AbsHelperW(int32_t value) {
Label next;
// The result is not representable.
__ Abs(w10, w1);
- __ Abs(w11, w1, NULL, &fail);
+ __ Abs(w11, w1, nullptr, &fail);
__ Abs(w12, w1, &next, &fail);
__ Bind(&next);
__ Abs(w13, w1, &done);
@@ -15336,7 +15674,7 @@ TEST(pool_size) {
}
}
- CHECK(pool_count == 2);
+ CHECK_EQ(pool_count, 2);
TEARDOWN();
}
@@ -15513,3 +15851,23 @@ TEST(internal_reference_linked) {
} // namespace internal
} // namespace v8
+
+#undef __
+#undef BUF_SIZE
+#undef SETUP
+#undef INIT_V8
+#undef SETUP_SIZE
+#undef RESET
+#undef START_AFTER_RESET
+#undef START
+#undef RUN
+#undef END
+#undef TEARDOWN
+#undef CHECK_EQUAL_NZCV
+#undef CHECK_EQUAL_REGISTERS
+#undef CHECK_EQUAL_32
+#undef CHECK_EQUAL_FP32
+#undef CHECK_EQUAL_64
+#undef CHECK_EQUAL_FP64
+#undef CHECK_EQUAL_128
+#undef CHECK_CONSTANT_POOL_SIZE
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index ab4a72f790..e39489b93d 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -136,7 +136,7 @@ TEST(AssemblerIa322) {
// some relocated stuff here, not executed
__ mov(eax, isolate->factory()->true_value());
- __ jmp(NULL, RelocInfo::RUNTIME_ENTRY);
+ __ jmp(nullptr, RelocInfo::RUNTIME_ENTRY);
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -308,7 +308,7 @@ TEST(AssemblerIa3210) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label target;
__ j(equal, &target);
@@ -442,7 +442,7 @@ TEST(StackAlignmentForSSE2) {
global_template->Set(v8_str("do_sse2"),
v8::FunctionTemplate::New(isolate, DoSSE2));
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
CompileRun(
"function foo(vec) {"
" return do_sse2(vec);"
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index e191b1eb63..79a80c3a43 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -55,7 +55,8 @@ TEST(MIPS0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Addition.
__ addu(v0, a0, a1);
@@ -78,7 +79,8 @@ TEST(MIPS1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ mov(a1, a0);
@@ -114,7 +116,8 @@ TEST(MIPS2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label exit, error;
@@ -275,7 +278,8 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Double precision floating point instructions.
@@ -404,7 +408,8 @@ TEST(MIPS4) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -473,7 +478,8 @@ TEST(MIPS5) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Load all structure elements to registers.
@@ -542,7 +548,7 @@ TEST(MIPS6) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
// Basic word load/store.
@@ -623,7 +629,8 @@ TEST(MIPS7) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -714,7 +721,7 @@ TEST(MIPS8) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
// Basic word load.
@@ -799,7 +806,8 @@ TEST(MIPS9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(zero_reg));
@@ -834,7 +842,8 @@ TEST(MIPS10) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;
@@ -909,7 +918,7 @@ TEST(MIPS11) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// Test all combinations of LWL and vAddr.
__ lw(t0, MemOperand(a0, offsetof(T, reg_init)) );
@@ -1062,7 +1071,8 @@ TEST(MIPS12) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ mov(t6, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
@@ -1151,7 +1161,8 @@ TEST(MIPS13) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in)));
__ Cvt_d_uw(f10, t0, f4);
@@ -1229,7 +1240,8 @@ TEST(MIPS14) {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Save FCSR.
__ cfc1(a1, FCSR);
@@ -1335,7 +1347,7 @@ TEST(MIPS15) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label target;
__ beq(v0, v1, &target);
@@ -1353,7 +1365,7 @@ TEST(seleqz_selnez) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test {
@@ -1540,7 +1552,7 @@ TEST(rint_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -1646,7 +1658,7 @@ TEST(sel) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test {
@@ -1721,7 +1733,7 @@ TEST(rint_s) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -1826,7 +1838,7 @@ TEST(Cvt_d_uw) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_struct {
@@ -1968,7 +1980,7 @@ TEST(trunc_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -2043,7 +2055,7 @@ TEST(movz_movn) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -2173,7 +2185,7 @@ TEST(movt_movd) {
test.fcsr = 1 << (24+condition_flags[j]);
}
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
@@ -2227,7 +2239,8 @@ TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2304,7 +2317,8 @@ TEST(trunc_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2374,7 +2388,8 @@ TEST(round_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2444,7 +2459,7 @@ TEST(round_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -2518,7 +2533,8 @@ TEST(sub) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2591,7 +2607,8 @@ TEST(sqrt_rsqrt_recip) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2691,7 +2708,8 @@ TEST(neg) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2744,7 +2762,8 @@ TEST(mul) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2803,7 +2822,8 @@ TEST(mov) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2857,7 +2877,8 @@ TEST(floor_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2928,7 +2949,7 @@ TEST(floor_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -3001,7 +3022,8 @@ TEST(ceil_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3072,7 +3094,7 @@ TEST(ceil_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -3375,7 +3397,7 @@ TEST(BITSWAP) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ lw(a2, MemOperand(a0, offsetof(T, r1)));
__ nop();
@@ -3438,7 +3460,7 @@ TEST(class_fmt) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
@@ -3587,7 +3609,8 @@ TEST(ABS) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t fir;
@@ -3684,7 +3707,8 @@ TEST(ADD_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -3754,7 +3778,7 @@ TEST(C_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -3966,7 +3990,7 @@ TEST(CMP_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -4183,7 +4207,8 @@ TEST(CVT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float cvt_d_s_in;
@@ -4428,7 +4453,8 @@ TEST(DIV_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dOp1;
@@ -4550,7 +4576,8 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ align(v0, a0, a1, bp);
__ jr(ra);
@@ -4603,7 +4630,8 @@ uint32_t run_aluipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ aluipc(v0, offset);
__ jr(ra);
@@ -4657,7 +4685,8 @@ uint32_t run_auipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ auipc(v0, offset);
__ jr(ra);
@@ -4711,7 +4740,8 @@ uint32_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t7, t0, 0xffff; (0x250fffff)
@@ -4787,7 +4817,8 @@ uint32_t run_jic(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label get_program_counter, stop_execution;
__ push(ra);
@@ -4868,7 +4899,8 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
__ li(v0, 0);
@@ -4936,12 +4968,196 @@ TEST(r6_beqzc) {
}
}
+void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
+ MSARegister w, Register t0, Register t1) {
+ __ li(t0, static_cast<uint32_t>(elements[0] & 0xffffffff));
+ __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xffffffff));
+ __ insert_w(w, 0, t0);
+ __ insert_w(w, 1, t1);
+ __ li(t0, static_cast<uint32_t>(elements[1] & 0xffffffff));
+ __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xffffffff));
+ __ insert_w(w, 2, t0);
+ __ insert_w(w, 3, t1);
+}
+
+inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
+ Register a) {
+ __ st_d(w, MemOperand(a, 0));
+}
-uint32_t run_jialc(int16_t offset) {
+typedef union {
+ uint8_t b[16];
+ uint16_t h[8];
+ uint32_t w[4];
+ uint64_t d[2];
+} msa_reg_t;
+
+struct TestCaseMsaBranch {
+ uint64_t wt_lo;
+ uint64_t wt_hi;
+};
+
+template <typename Branch>
+void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
+ bool branched) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+
+ typedef struct {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+ } T;
+ T t = {0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x0000000000000000,
+ 0x0000000000000000};
+ msa_reg_t res;
+ Label do_not_move_w0_to_w2;
+
+ load_elements_of_vector(assm, &t.ws_lo, w0, t0, t1);
+ load_elements_of_vector(assm, &t.wd_lo, w2, t0, t1);
+ load_elements_of_vector(assm, &input->wt_lo, w1, t0, t1);
+ GenerateBranch(assm, do_not_move_w0_to_w2);
+ __ nop();
+ __ move_v(w2, w0);
+
+ __ bind(&do_not_move_w0_to_w2);
+ store_elements_of_vector(assm, w2, a0);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ if (branched) {
+ CHECK_EQ(t.wd_lo, res.d[0]);
+ CHECK_EQ(t.wd_hi, res.d[1]);
+ } else {
+ CHECK_EQ(t.ws_lo, res.d[0]);
+ CHECK_EQ(t.ws_hi, res.d[1]);
+ }
+}
+
+TEST(MSA_bz_bnz) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ TestCaseMsaBranch tz_v[] = {
+ {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ for (unsigned i = 0; i < arraysize(tz_v); ++i) {
+ run_bz_bnz(
+ &tz_v[i],
+ [](MacroAssembler& assm, Label& br_target) { __ bz_v(w1, &br_target); },
+ tz_v[i].wt_lo == 0 && tz_v[i].wt_hi == 0);
+ }
+
+#define TEST_BZ_DF(input_array, lanes, instruction, int_type) \
+ for (unsigned i = 0; i < arraysize(input_array); ++i) { \
+ int j; \
+ int_type* element = reinterpret_cast<int_type*>(&input_array[i]); \
+ for (j = 0; j < lanes; ++j) { \
+ if (element[j] == 0) { \
+ break; \
+ } \
+ } \
+ run_bz_bnz(&input_array[i], \
+ [](MacroAssembler& assm, Label& br_target) { \
+ __ instruction(w1, &br_target); \
+ }, \
+ j != lanes); \
+ }
+ TestCaseMsaBranch tz_b[] = {{0x0, 0x0},
+ {0xbc0000, 0x0},
+ {0x0, 0xab000000000000cd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_b, kMSALanesByte, bz_b, int8_t)
+
+ TestCaseMsaBranch tz_h[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_h, kMSALanesHalf, bz_h, int16_t)
+
+ TestCaseMsaBranch tz_w[] = {{0x0, 0x0},
+ {0xbcde123400000000, 0x0},
+ {0x0, 0x000000001234abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_w, kMSALanesWord, bz_w, int32_t)
+
+ TestCaseMsaBranch tz_d[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_d, kMSALanesDword, bz_d, int64_t)
+#undef TEST_BZ_DF
+
+ TestCaseMsaBranch tnz_v[] = {
+ {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ for (unsigned i = 0; i < arraysize(tnz_v); ++i) {
+ run_bz_bnz(&tnz_v[i],
+ [](MacroAssembler& assm, Label& br_target) {
+ __ bnz_v(w1, &br_target);
+ },
+ tnz_v[i].wt_lo != 0 || tnz_v[i].wt_hi != 0);
+ }
+
+#define TEST_BNZ_DF(input_array, lanes, instruction, int_type) \
+ for (unsigned i = 0; i < arraysize(input_array); ++i) { \
+ int j; \
+ int_type* element = reinterpret_cast<int_type*>(&input_array[i]); \
+ for (j = 0; j < lanes; ++j) { \
+ if (element[j] == 0) { \
+ break; \
+ } \
+ } \
+ run_bz_bnz(&input_array[i], \
+ [](MacroAssembler& assm, Label& br_target) { \
+ __ instruction(w1, &br_target); \
+ }, \
+ j == lanes); \
+ }
+ TestCaseMsaBranch tnz_b[] = {{0x0, 0x0},
+ {0xbc0000, 0x0},
+ {0x0, 0xab000000000000cd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_b, 16, bnz_b, int8_t)
+
+ TestCaseMsaBranch tnz_h[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_h, 8, bnz_h, int16_t)
+
+ TestCaseMsaBranch tnz_w[] = {{0x0, 0x0},
+ {0xbcde123400000000, 0x0},
+ {0x0, 0x000000001234abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_w, 4, bnz_w, int32_t)
+
+ TestCaseMsaBranch tnz_d[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_d, 2, bnz_d, int64_t)
+#undef TEST_BNZ_DF
+}
+
+uint32_t run_jialc(int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label main_block, get_program_counter;
__ push(ra);
@@ -5032,7 +5248,8 @@ static uint32_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ addiupc(v0, imm19);
__ jr(ra);
@@ -5086,7 +5303,8 @@ int32_t run_bc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5166,7 +5384,8 @@ int32_t run_balc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5221,7 +5440,8 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(t0, rs);
__ aui(v0, t0, offset);
@@ -5305,7 +5525,8 @@ uint32_t run_bal(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ mov(t0, ra);
__ bal(offset); // Equivalent for "BGEZAL zero_reg, offset".
@@ -5394,7 +5615,8 @@ void helper_madd_msub_maddf_msubf(F func) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
T x = std::sqrt(static_cast<T>(2.0));
T y = std::sqrt(static_cast<T>(3.0));
@@ -5516,7 +5738,8 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -5586,65 +5809,6 @@ TEST(Subu) {
}
}
-void load_uint64_elements_of_vector(MacroAssembler& assm,
- const uint64_t elements[], MSARegister w,
- Register t0, Register t1) {
- __ li(t0, static_cast<uint32_t>(elements[0] & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xffffffff));
- __ insert_w(w, 0, t0);
- __ insert_w(w, 1, t1);
- __ li(t0, static_cast<uint32_t>(elements[1] & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xffffffff));
- __ insert_w(w, 2, t0);
- __ insert_w(w, 3, t1);
-}
-
-void load_uint32_elements_of_vector(MacroAssembler& assm,
- const uint64_t elements[], MSARegister w,
- Register t0, Register t1) {
- const uint32_t* const element = reinterpret_cast<const uint32_t*>(elements);
- __ li(t0, element[0]);
- __ li(t1, element[1]);
- __ insert_w(w, 0, t0);
- __ insert_w(w, 1, t1);
- __ li(t0, element[2]);
- __ li(t1, element[3]);
- __ insert_w(w, 2, t0);
- __ insert_w(w, 3, t1);
-}
-
-void load_uint16_elements_of_vector(MacroAssembler& assm,
- const uint64_t elements[], MSARegister w,
- Register t0, Register t1) {
- const uint16_t* const element = reinterpret_cast<const uint16_t*>(elements);
- __ li(t0, element[0]);
- __ li(t1, element[1]);
- __ insert_h(w, 0, t0);
- __ insert_h(w, 1, t1);
- __ li(t0, element[2]);
- __ li(t1, element[3]);
- __ insert_h(w, 2, t0);
- __ insert_h(w, 3, t1);
- __ li(t0, element[4]);
- __ li(t1, element[5]);
- __ insert_h(w, 4, t0);
- __ insert_h(w, 5, t1);
- __ li(t0, element[6]);
- __ li(t1, element[7]);
- __ insert_h(w, 6, t0);
- __ insert_h(w, 7, t1);
-}
-
-inline void store_uint64_elements_of_vector(MacroAssembler& assm, MSARegister w,
- Register a, Register t) {
- __ st_d(w, MemOperand(a, 0));
-}
-
-inline void store_uint32_elements_of_vector(MacroAssembler& assm, MSARegister w,
- Register a, Register t) {
- __ st_w(w, MemOperand(a, 0));
-}
-
TEST(MSA_fill_copy) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -5660,7 +5824,8 @@ TEST(MSA_fill_copy) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -5725,7 +5890,8 @@ TEST(MSA_fill_copy_2) {
} T;
T t[2];
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -5794,7 +5960,8 @@ TEST(MSA_fill_copy_3) {
} T;
T t[2];
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -5833,19 +6000,13 @@ TEST(MSA_fill_copy_3) {
CHECK_EQ(0x5555555555555555, t[1].d0);
}
-typedef union {
- uint8_t b[16];
- uint16_t h[8];
- uint32_t w[4];
- uint64_t d[2];
-} msa_reg_t;
-
template <typename T>
void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
__ li(t0, -1);
@@ -5853,19 +6014,19 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
__ fill_w(w0, t0);
if (std::is_same<T, int8_t>::value) {
- DCHECK(n < 16);
+ DCHECK_LT(n, 16);
__ insert_b(w0, n, t1);
} else if (std::is_same<T, int16_t>::value) {
- DCHECK(n < 8);
+ DCHECK_LT(n, 8);
__ insert_h(w0, n, t1);
} else if (std::is_same<T, int32_t>::value) {
- DCHECK(n < 4);
+ DCHECK_LT(n, 4);
__ insert_w(w0, n, t1);
} else {
UNREACHABLE();
}
- store_uint64_elements_of_vector(assm, w0, a0, t2);
+ store_elements_of_vector(assm, w0, a0);
__ jr(ra);
__ nop();
@@ -5938,11 +6099,158 @@ TEST(MSA_insert) {
}
}
+TEST(MSA_move_v) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+ } T;
+ T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
+ 0x706e51290ac76fb9},
+ {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
+ 0x2686b73484c2e843},
+ {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
+ 0xa9913868fb819c59}};
+
+ for (unsigned i = 0; i < arraysize(t); ++i) {
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+
+ load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ __ move_v(w2, w0);
+ store_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ (CALL_GENERATED_CODE(isolate, f, &t[i].wd_lo, 0, 0, 0, 0));
+ CHECK_EQ(t[i].ws_lo, t[i].wd_lo);
+ CHECK_EQ(t[i].ws_hi, t[i].wd_hi);
+ }
+}
+
+template <typename ExpectFunc, typename OperFunc>
+void run_msa_sldi(OperFunc GenerateOperation,
+ ExpectFunc GenerateExpectedResult) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+ } T;
+ T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
+ 0x706e51290ac76fb9},
+ {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
+ 0x2686b73484c2e843},
+ {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
+ 0xa9913868fb819c59}};
+ uint64_t res[2];
+
+ for (unsigned i = 0; i < arraysize(t); ++i) {
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+ load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ GenerateOperation(assm);
+ store_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ (CALL_GENERATED_CODE(isolate, f, &res[0], 0, 0, 0, 0));
+ GenerateExpectedResult(reinterpret_cast<uint8_t*>(&t[i].ws_lo),
+ reinterpret_cast<uint8_t*>(&t[i].wd_lo));
+ CHECK_EQ(res[0], t[i].wd_lo);
+ CHECK_EQ(res[1], t[i].wd_hi);
+ }
+}
+
+TEST(MSA_sldi) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+ CcTest::InitializeVM();
+
+#define SLDI_DF(s, k) \
+ uint8_t v[32]; \
+ for (unsigned i = 0; i < s; i++) { \
+ v[i] = ws[s * k + i]; \
+ v[i + s] = wd[s * k + i]; \
+ } \
+ for (unsigned i = 0; i < s; i++) { \
+ wd[s * k + i] = v[i + n]; \
+ }
+
+ for (int n = 0; n < 16; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_b(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0)
+ });
+ }
+
+ for (int n = 0; n < 8; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_h(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ for (int k = 0; k < 2; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k)
+ }
+ });
+ }
+
+ for (int n = 0; n < 4; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_w(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ for (int k = 0; k < 4; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k)
+ }
+ });
+ }
+
+ for (int n = 0; n < 2; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_d(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ for (int k = 0; k < 8; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k)
+ }
+ });
+ }
+#undef SLDI_DF
+}
+
void run_msa_ctc_cfc(uint32_t value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
MSAControlRegister msareg = {kMSACSRRegister};
@@ -6003,7 +6311,8 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
uint64_t wd_lo = 0xf35862e13e38f8b0;
@@ -6059,7 +6368,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
UNREACHABLE();
}
- store_uint64_elements_of_vector(assm, w2, a0, t2);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6236,7 +6545,8 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, imm);
__ li(t0, source);
@@ -6287,7 +6597,8 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, 0xffffffff);
__ li(t0, source);
@@ -6343,17 +6654,18 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
int32_t i5 =
i5_sign_ext ? static_cast<int32_t>(input->i5 << 27) >> 27 : input->i5;
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
GenerateI5InstructionFunc(assm, i5);
- store_uint64_elements_of_vector(assm, w2, a0, t2);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6760,22 +7072,21 @@ struct TestCaseMsa2R {
uint64_t exp_res_hi;
};
-template <typename Func, typename FuncLoad, typename FuncStore>
+template <typename Func>
void run_msa_2r(const struct TestCaseMsa2R* input,
- Func Generate2RInstructionFunc,
- FuncLoad load_elements_of_vector,
- FuncStore store_elements_of_vector) {
+ Func Generate2RInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
load_elements_of_vector(assm, reinterpret_cast<const uint64_t*>(input), w0,
t0, t1);
Generate2RInstructionFunc(assm);
- store_elements_of_vector(assm, w2, a0, t2);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6791,17 +7102,8 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
(CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
- if (store_elements_of_vector == store_uint64_elements_of_vector) {
- CHECK_EQ(input->exp_res_lo, res.d[0]);
- CHECK_EQ(input->exp_res_hi, res.d[1]);
- } else if (store_elements_of_vector == store_uint32_elements_of_vector) {
- const uint32_t* exp_res =
- reinterpret_cast<const uint32_t*>(&input->exp_res_lo);
- CHECK_EQ(exp_res[0], res.w[0]);
- CHECK_EQ(exp_res[1], res.w[1]);
- CHECK_EQ(exp_res[2], res.w[2]);
- CHECK_EQ(exp_res[3], res.w[3]);
- }
+ CHECK_EQ(input->exp_res_lo, res.d[0]);
+ CHECK_EQ(input->exp_res_hi, res.d[1]);
}
TEST(MSA_pcnt) {
@@ -6852,14 +7154,10 @@ TEST(MSA_pcnt) {
{0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x20, 0x2a}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); });
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); });
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); });
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); });
}
}
@@ -6911,14 +7209,10 @@ TEST(MSA_nlzc) {
{0x00000000e338f8b0, 0x0754534acab32654, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); });
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); });
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); });
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); });
}
}
@@ -6970,14 +7264,10 @@ TEST(MSA_nloc) {
{0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); });
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); });
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); });
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); });
}
}
@@ -7038,13 +7328,11 @@ TEST(MSA_fclass) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fclass_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fclass_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fclass_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fclass_d(w2, w0); });
}
#undef BIT
@@ -7110,13 +7398,11 @@ TEST(MSA_ftrunc_s) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_I); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_I); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); });
}
}
@@ -7149,13 +7435,11 @@ TEST(MSA_ftrunc_u) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); });
}
}
@@ -7194,13 +7478,11 @@ TEST(MSA_fsqrt) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); });
}
}
@@ -7224,13 +7506,11 @@ TEST(MSA_frsqrt) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); });
}
}
@@ -7256,13 +7536,11 @@ TEST(MSA_frcp) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ frcp_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ frcp_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ frcp_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ frcp_d(w2, w0); });
}
}
@@ -7277,8 +7555,7 @@ void test_frint_s(size_t data_size, TestCaseMsa2RF_F_F tc_d[],
__ ctcmsa(msareg, t0);
__ frint_w(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ });
}
}
@@ -7293,8 +7570,7 @@ void test_frint_d(size_t data_size, TestCaseMsa2RF_D_D tc_d[],
__ ctcmsa(msareg, t0);
__ frint_d(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ });
}
}
@@ -7376,14 +7652,12 @@ TEST(MSA_flog2) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ flog2_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ flog2_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ flog2_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ flog2_d(w2, w0); });
}
}
@@ -7398,8 +7672,7 @@ void test_ftint_s_s(size_t data_size, TestCaseMsa2RF_F_I tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_s_w(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ });
}
}
@@ -7414,8 +7687,7 @@ void test_ftint_s_d(size_t data_size, TestCaseMsa2RF_D_I tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_s_d(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ });
}
}
@@ -7512,8 +7784,7 @@ void test_ftint_u_s(size_t data_size, TestCaseMsa2RF_F_U tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_u_w(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ });
}
}
@@ -7528,8 +7799,7 @@ void test_ftint_u_d(size_t data_size, TestCaseMsa2RF_D_U tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_u_d(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ });
}
}
@@ -7645,13 +7915,11 @@ TEST(MSA_ffint_u) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); });
}
}
@@ -7687,13 +7955,11 @@ TEST(MSA_ffint_s) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_I_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_I_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); });
}
}
@@ -7746,13 +8012,11 @@ TEST(MSA_fexupl) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fexupl_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupl_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fexupl_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupl_d(w2, w0); });
}
}
@@ -7781,13 +8045,11 @@ TEST(MSA_fexupr) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fexupr_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupr_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fexupr_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupr_d(w2, w0); });
}
}
@@ -7816,13 +8078,11 @@ TEST(MSA_ffql) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffql_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffql_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffql_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffql_d(w2, w0); });
}
}
@@ -7842,13 +8102,11 @@ TEST(MSA_ffqr) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffqr_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffqr_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffqr_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffqr_d(w2, w0); });
}
}
@@ -7868,17 +8126,18 @@ void run_msa_vector(struct TestCaseMsaVector* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
+ load_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
GenerateVectorInstructionFunc(assm);
- store_uint64_elements_of_vector(assm, w4, a0, t2);
+ store_elements_of_vector(assm, w4, a0);
__ jr(ra);
__ nop();
@@ -7957,16 +8216,17 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
GenerateInstructionFunc(assm, input->m);
- store_uint64_elements_of_vector(assm, w2, a0, t2);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -8433,13 +8693,14 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
GenerateVectorInstructionFunc(assm, input);
- store_uint64_elements_of_vector(assm, w0, a0, t2);
+ store_elements_of_vector(assm, w0, a0);
__ jr(ra);
__ nop();
@@ -8503,7 +8764,8 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
T in_test_vector[1024];
T out_test_vector[1024];
@@ -8567,7 +8829,6 @@ TEST(MSA_load_store_vector) {
__ st_d(w0, MemOperand(a1, i));
}
});
-#undef LDI_DF
}
struct TestCaseMsa3R {
@@ -8587,18 +8848,18 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- uint64_t expected;
- load_uint64_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
+ load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
GenerateI5InstructionFunc(assm);
- store_uint64_elements_of_vector(assm, w2, a0, t2);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -8614,14 +8875,12 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
(CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
- expected = GenerateOperationFunc(input->ws_lo, input->wt_lo, input->wd_lo);
- if (expected != Unpredictable) {
- CHECK_EQ(expected, res.d[0]);
+ GenerateOperationFunc(&input->ws_lo, &input->wt_lo, &input->wd_lo);
+ if (input->wd_lo != Unpredictable) {
+ CHECK_EQ(input->wd_lo, res.d[0]);
}
-
- expected = GenerateOperationFunc(input->ws_hi, input->wt_hi, input->wd_hi);
- if (expected != Unpredictable) {
- CHECK_EQ(expected, res.d[1]);
+ if (input->wd_hi != Unpredictable) {
+ CHECK_EQ(input->wd_hi, res.d[1]);
}
}
@@ -8659,479 +8918,630 @@ TEST(MSA_3R_instructions) {
{0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff,
0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff}};
-#define SLL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>((wt >> shift) & mask) % size_in_bits; \
- res |= (static_cast<uint64_t>(src_op << shift_op) & mask) << shift; \
- } \
- return res
-
-#define SRA_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = ((wt >> shift) & mask) % size_in_bits; \
- res |= \
- (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) & mask)) \
- << shift; \
- } \
- return res
-
-#define SRL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- res |= (static_cast<uint64_t>(src_op >> shift_op) & mask) << shift; \
- } \
- return res
-
-#define BCRL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- T r = (static_cast<T>(~(1ull << shift_op)) & src_op) & mask; \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BSET_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- T r = (static_cast<T>(1ull << shift_op) | src_op) & mask; \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BNEG_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- T r = (static_cast<T>(1ull << shift_op) ^ src_op) & mask; \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BINSL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- int bits = shift_op + 1; \
- T r; \
- if (bits == size_in_bits) { \
- r = static_cast<T>(ws_op); \
- } else { \
- uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \
- r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
- (static_cast<T>(~mask2) & wd_op)); \
- } \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BINSR_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- int bits = shift_op + 1; \
- T r; \
- if (bits == size_in_bits) { \
- r = static_cast<T>(ws_op); \
- } else { \
- uint64_t mask2 = (1ull << bits) - 1; \
- r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
- (static_cast<T>(~mask2) & wd_op)); \
- } \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define ADDV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op + wt_op) & mask) << shift; \
- } \
- return res
-
-#define SUBV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op - wt_op) & mask) << shift; \
- } \
- return res
-
-#define MAX_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Max<T>(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define MIN_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Min<T>(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define MAXA_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \
- mask) \
- << shift; \
- } \
- return res
-
-#define MINA_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Nabs(ws_op) > Nabs(wt_op) ? ws_op : wt_op) & \
- mask) \
- << shift; \
- } \
- return res
-
-#define CEQ_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define SLL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>((wt[i] >> shift) & mask) % size_in_bits; \
+ res |= (static_cast<uint64_t>(src_op << shift_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SRA_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = ((wt[i] >> shift) & mask) % size_in_bits; \
+ res |= (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) & \
+ mask)) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SRL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ res |= (static_cast<uint64_t>(src_op >> shift_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BCRL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(~(1ull << shift_op)) & src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BSET_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) | src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BNEG_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) ^ src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BINSL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ int bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BINSR_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ int bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = (1ull << bits) - 1; \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADDV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op + wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op - wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MAX_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Max<T>(ws_op, wt_op)) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MIN_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Min<T>(ws_op, wt_op)) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MAXA_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= \
- (static_cast<uint64_t>(!Compare(ws_op, wt_op) ? -1ull : 0ull) & mask) \
- << shift; \
- } \
- return res
-
-#define CLT_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= \
- (static_cast<uint64_t>((Compare(ws_op, wt_op) == -1) ? -1ull : 0ull) & \
- mask) \
- << shift; \
- } \
- return res
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
-#define CLE_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define MINA_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= \
- (static_cast<uint64_t>((Compare(ws_op, wt_op) != 1) ? -1ull : 0ull) & \
- mask) \
- << shift; \
- } \
- return res
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>(Nabs(ws_op) > Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
-#define ADD_A_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define CEQ_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \
- } \
- return res
-
-#define ADDS_A_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = Nabs(static_cast<T>((ws >> shift) & mask)); \
- T wt_op = Nabs(static_cast<T>((wt >> shift) & mask)); \
- T r; \
- if (ws_op < -std::numeric_limits<T>::max() - wt_op) { \
- r = std::numeric_limits<T>::max(); \
- } else { \
- r = -(ws_op + wt_op); \
- } \
- res |= (static_cast<uint64_t>(r) & mask) << shift; \
- } \
- return res
-
-#define ADDS_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(SaturateAdd(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define AVE_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & \
- mask)) \
- << shift; \
- } \
- return res
-
-#define AVER_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & \
- mask)) \
- << shift; \
- } \
- return res
-
-#define SUBS_DF(T, lanes, mask) \
- uint64_t res = 0; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(!Compare(ws_op, wt_op) ? -1ull : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define CLT_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>((Compare(ws_op, wt_op) == -1) ? -1ull \
+ : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define CLE_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>((Compare(ws_op, wt_op) != 1) ? -1ull \
+ : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADD_A_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(SaturateSub(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define SUBSUS_U_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- uT ws_op = static_cast<uT>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- T r; \
- if (wt_op > 0) { \
- uT wtu = static_cast<uT>(wt_op); \
- if (wtu > ws_op) { \
- r = 0; \
- } else { \
- r = static_cast<T>(ws_op - wtu); \
- } \
- } else { \
- if (ws_op > std::numeric_limits<uT>::max() + wt_op) { \
- r = static_cast<T>(std::numeric_limits<uT>::max()); \
- } else { \
- r = static_cast<T>(ws_op - wt_op); \
- } \
- } \
- res |= (static_cast<uint64_t>(r) & mask) << shift; \
- } \
- return res
-
-#define SUBSUU_S_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- uT ws_op = static_cast<uT>((ws >> shift) & mask); \
- uT wt_op = static_cast<uT>((wt >> shift) & mask); \
- uT wdu; \
- T r; \
- if (ws_op > wt_op) { \
- wdu = ws_op - wt_op; \
- if (wdu > std::numeric_limits<T>::max()) { \
- r = std::numeric_limits<T>::max(); \
- } else { \
- r = static_cast<T>(wdu); \
- } \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADDS_A_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = Nabs(static_cast<T>((ws[i] >> shift) & mask)); \
+ T wt_op = Nabs(static_cast<T>((wt[i] >> shift) & mask)); \
+ T r; \
+ if (ws_op < -std::numeric_limits<T>::max() - wt_op) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = -(ws_op + wt_op); \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADDS_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateAdd(ws_op, wt_op)) & mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define AVE_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>( \
+ ((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & mask)) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define AVER_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>( \
+ ((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & mask)) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBS_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateSub(ws_op, wt_op)) & mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBSUS_U_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ uT ws_op = static_cast<uT>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ T r; \
+ if (wt_op > 0) { \
+ uT wtu = static_cast<uT>(wt_op); \
+ if (wtu > ws_op) { \
+ r = 0; \
+ } else { \
+ r = static_cast<T>(ws_op - wtu); \
+ } \
+ } else { \
+ if (ws_op > std::numeric_limits<uT>::max() + wt_op) { \
+ r = static_cast<T>(std::numeric_limits<uT>::max()); \
+ } else { \
+ r = static_cast<T>(ws_op - wt_op); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBSUU_S_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ uT ws_op = static_cast<uT>((ws[i] >> shift) & mask); \
+ uT wt_op = static_cast<uT>((wt[i] >> shift) & mask); \
+ uT wdu; \
+ T r; \
+ if (ws_op > wt_op) { \
+ wdu = ws_op - wt_op; \
+ if (wdu > std::numeric_limits<T>::max()) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = static_cast<T>(wdu); \
+ } \
+ } else { \
+ wdu = wt_op - ws_op; \
+ CHECK(-std::numeric_limits<T>::max() == \
+ std::numeric_limits<T>::min() + 1); \
+ if (wdu <= std::numeric_limits<T>::max()) { \
+ r = -static_cast<T>(wdu); \
+ } else { \
+ r = std::numeric_limits<T>::min(); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ASUB_S_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op - wt_op)) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ASUB_U_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op > wt_op ? ws_op - wt_op \
+ : wt_op - ws_op) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MULV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op * wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MADDV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op + ws_op * wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MSUBV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op - ws_op * wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define DIV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+ res |= (static_cast<uint64_t>(ws_op / wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MOD_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+ res |= (static_cast<uint64_t>(wt_op != 0 ? ws_op % wt_op : 0) & mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SRAR_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = ((wt[i] >> shift) & mask) % size_in_bits; \
+ uint32_t bit = shift_op == 0 ? 0 : src_op >> (shift_op - 1) & 1; \
+ res |= (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) + \
+ bit) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define PCKEV_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[i] = wt_p[2 * i]; \
+ wd_p[i + lanes / 2] = ws_p[2 * i]; \
+ }
+
+#define PCKOD_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[i] = wt_p[2 * i + 1]; \
+ wd_p[i + lanes / 2] = ws_p[2 * i + 1]; \
+ }
+
+#define ILVL_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[i + lanes / 2]; \
+ wd_p[2 * i + 1] = ws_p[i + lanes / 2]; \
+ }
+
+#define ILVR_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[i]; \
+ wd_p[2 * i + 1] = ws_p[i]; \
+ }
+
+#define ILVEV_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[2 * i]; \
+ wd_p[2 * i + 1] = ws_p[2 * i]; \
+ }
+
+#define ILVOD_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[2 * i + 1]; \
+ wd_p[2 * i + 1] = ws_p[2 * i + 1]; \
+ }
+
+#define VSHF_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ const int mask_not_valid = 0xc0; \
+ const int mask_6bits = 0x3f; \
+ for (int i = 0; i < lanes; ++i) { \
+ if ((wd_p[i] & mask_not_valid)) { \
+ wd_p[i] = 0; \
} else { \
- wdu = wt_op - ws_op; \
- CHECK(-std::numeric_limits<T>::max() == \
- std::numeric_limits<T>::min() + 1); \
- if (wdu <= std::numeric_limits<T>::max()) { \
- r = -static_cast<T>(wdu); \
- } else { \
- r = std::numeric_limits<T>::min(); \
- } \
+ int k = (wd_p[i] & mask_6bits) % (lanes * 2); \
+ wd_p[i] = k > lanes ? ws_p[k - lanes] : wt_p[k]; \
} \
- res |= (static_cast<uint64_t>(r) & mask) << shift; \
- } \
- return res
-
-#define ASUB_S_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Abs(ws_op - wt_op)) & mask) << shift; \
- } \
- return res
-
-#define ASUB_U_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op > wt_op ? ws_op - wt_op \
- : wt_op - ws_op) & \
- mask) \
- << shift; \
- } \
- return res
-
-#define MULV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op * wt_op) & mask) << shift; \
- } \
- return res
-
-#define MADDV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- res |= (static_cast<uint64_t>(wd_op + ws_op * wt_op) & mask) << shift; \
- } \
- return res
-
-#define MSUBV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- res |= (static_cast<uint64_t>(wd_op - ws_op * wt_op) & mask) << shift; \
- } \
- return res
-
-#define DIV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- if (wt_op == 0) { \
- res = Unpredictable; \
- break; \
- } \
- res |= (static_cast<uint64_t>(ws_op / wt_op) & mask) << shift; \
- } \
- return res
+ }
-#define MOD_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- if (wt_op == 0) { \
- res = Unpredictable; \
- break; \
- } \
- res |= (static_cast<uint64_t>(wt_op != 0 ? ws_op % wt_op : 0) & mask) \
- << shift; \
- } \
- return res
+#define HADD_DF(T, T_small, lanes) \
+ T_small* ws_p = reinterpret_cast<T_small*>(ws); \
+ T_small* wt_p = reinterpret_cast<T_small*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes; ++i) { \
+ wd_p[i] = static_cast<T>(ws_p[2 * i + 1]) + static_cast<T>(wt_p[2 * i]); \
+ }
-#define SRAR_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = ((wt >> shift) & mask) % size_in_bits; \
- uint32_t bit = shift_op == 0 ? 0 : src_op >> (shift_op - 1) & 1; \
- res |= \
- (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) + bit) & \
- mask) \
- << shift; \
- } \
- return res
+#define HSUB_DF(T, T_small, lanes) \
+ T_small* ws_p = reinterpret_cast<T_small*>(ws); \
+ T_small* wt_p = reinterpret_cast<T_small*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes; ++i) { \
+ wd_p[i] = static_cast<T>(ws_p[2 * i + 1]) - static_cast<T>(wt_p[2 * i]); \
+ }
#define TEST_CASE(V) \
V(sll_b, SLL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
V(sll_h, SLL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(sll_w, SLL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
V(sll_d, SLL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
- V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
- V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
- V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
- V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
V(srl_b, SRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
V(srl_h, SRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(srl_w, SRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
@@ -9292,18 +9702,54 @@ TEST(MSA_3R_instructions) {
V(mod_u_h, MOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(mod_u_w, MOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
V(mod_u_d, MOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
- V(srar_b, SRAR_DF, int8_t, kMSALanesByte, UINT8_MAX) \
- V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
- V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \
- V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX) \
V(srlr_b, SRAR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
V(srlr_h, SRAR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(srlr_w, SRAR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
- V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX)
+ V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(pckev_b, PCKEV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(pckev_h, PCKEV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(pckev_w, PCKEV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(pckev_d, PCKEV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(pckod_b, PCKOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(pckod_h, PCKOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(pckod_w, PCKOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(pckod_d, PCKOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvl_b, ILVL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvl_h, ILVL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvl_w, ILVL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvl_d, ILVL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvr_b, ILVR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvr_h, ILVR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvr_w, ILVR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvr_d, ILVR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvev_b, ILVEV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvev_h, ILVEV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvev_w, ILVEV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvev_d, ILVEV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvod_b, ILVOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvod_h, ILVOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvod_w, ILVOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvod_d, ILVOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(vshf_b, VSHF_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(vshf_h, VSHF_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(vshf_w, VSHF_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(vshf_d, VSHF_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(hadd_s_h, HADD_DF, int16_t, int8_t, kMSALanesHalf) \
+ V(hadd_s_w, HADD_DF, int32_t, int16_t, kMSALanesWord) \
+ V(hadd_s_d, HADD_DF, int64_t, int32_t, kMSALanesDword) \
+ V(hadd_u_h, HADD_DF, uint16_t, uint8_t, kMSALanesHalf) \
+ V(hadd_u_w, HADD_DF, uint32_t, uint16_t, kMSALanesWord) \
+ V(hadd_u_d, HADD_DF, uint64_t, uint32_t, kMSALanesDword) \
+ V(hsub_s_h, HSUB_DF, int16_t, int8_t, kMSALanesHalf) \
+ V(hsub_s_w, HSUB_DF, int32_t, int16_t, kMSALanesWord) \
+ V(hsub_s_d, HSUB_DF, int64_t, int32_t, kMSALanesDword) \
+ V(hsub_u_h, HSUB_DF, uint16_t, uint8_t, kMSALanesHalf) \
+ V(hsub_u_w, HSUB_DF, uint32_t, uint16_t, kMSALanesWord) \
+ V(hsub_u_d, HSUB_DF, uint64_t, uint32_t, kMSALanesDword)
#define RUN_TEST(instr, verify, type, lanes, mask) \
run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \
- [](uint64_t ws, uint64_t wt, uint64_t wd) { \
+ [](uint64_t* ws, uint64_t* wt, uint64_t* wd) { \
verify(type, lanes, mask); \
});
@@ -9311,9 +9757,41 @@ TEST(MSA_3R_instructions) {
TEST_CASE(RUN_TEST)
}
+#define RUN_TEST2(instr, verify, type, lanes, mask) \
+ for (unsigned i = 0; i < arraysize(tc); i++) { \
+ for (unsigned j = 0; j < 3; j++) { \
+ for (unsigned k = 0; k < lanes; k++) { \
+ type* element = reinterpret_cast<type*>(&tc[i]); \
+ element[k + j * lanes] &= std::numeric_limits<type>::max(); \
+ } \
+ } \
+ } \
+ run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \
+ [](uint64_t* ws, uint64_t* wt, uint64_t* wd) { \
+ verify(type, lanes, mask); \
+ });
+
+#define TEST_CASE2(V) \
+ V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(srar_b, SRAR_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX)
+
+ for (size_t i = 0; i < arraysize(tc); ++i) {
+ TEST_CASE2(RUN_TEST2)
+ }
+
+#undef TEST_CASE
+#undef TEST_CASE2
#undef RUN_TEST
+#undef RUN_TEST2
#undef SLL_DF
#undef SRL_DF
+#undef SRA_DF
#undef BCRL_DF
#undef BSET_DF
#undef BNEG_DF
@@ -9344,8 +9822,690 @@ TEST(MSA_3R_instructions) {
#undef DIV_DF
#undef MOD_DF
#undef SRAR_DF
+#undef PCKEV_DF
+#undef PCKOD_DF
+#undef ILVL_DF
+#undef ILVR_DF
+#undef ILVEV_DF
+#undef ILVOD_DF
+#undef VSHF_DF
+#undef HADD_DF
+#undef HSUB_DF
} // namespace internal
+struct TestCaseMsa3RF {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wt_lo;
+ uint64_t wt_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+};
+
+struct ExpectedResult_MSA3RF {
+ uint64_t exp_res_lo;
+ uint64_t exp_res_hi;
+};
+
+template <typename Func>
+void run_msa_3rf(const struct TestCaseMsa3RF* input,
+ const struct ExpectedResult_MSA3RF* output,
+ Func Generate2RInstructionFunc) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+ msa_reg_t res;
+
+ load_elements_of_vector(
+ assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(
+ assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1);
+ load_elements_of_vector(
+ assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1);
+ Generate2RInstructionFunc(assm);
+ store_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+
+ CHECK_EQ(output->exp_res_lo, res.d[0]);
+ CHECK_EQ(output->exp_res_hi, res.d[1]);
+}
+
+struct TestCaseMsa3RF_F {
+ float ws_1, ws_2, ws_3, ws_4;
+ float wt_1, wt_2, wt_3, wt_4;
+ float wd_1, wd_2, wd_3, wd_4;
+};
+struct ExpRes_32I {
+ int32_t exp_res_1;
+ int32_t exp_res_2;
+ int32_t exp_res_3;
+ int32_t exp_res_4;
+};
+
+struct TestCaseMsa3RF_D {
+ double ws_lo, ws_hi;
+ double wt_lo, wt_hi;
+ double wd_lo, wd_hi;
+};
+struct ExpRes_64I {
+ int64_t exp_res_lo;
+ int64_t exp_res_hi;
+};
+
+TEST(MSA_floating_point_quiet_compare) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float qnan_f = std::numeric_limits<float>::quiet_NaN();
+ const double qnan_d = std::numeric_limits<double>::quiet_NaN();
+ const float inf_f = std::numeric_limits<float>::infinity();
+ const double inf_d = std::numeric_limits<double>::infinity();
+ const int32_t ones = -1;
+
+ const struct TestCaseMsa3RF_F tc_w[]{
+ {qnan_f, -qnan_f, inf_f, 2.14e9f, // ws
+ qnan_f, 0.f, qnan_f, -2.14e9f, // wt
+ 0, 0, 0, 0}, // wd
+ {inf_f, -inf_f, -3.4e38f, 1.5e-45f, -inf_f, -inf_f, -inf_f, inf_f, 0, 0,
+ 0, 0},
+ {0.f, 19.871e24f, -1.5e-45f, -1.5e-45f, -19.871e24f, 19.871e24f, 1.5e-45f,
+ -1.5e-45f, 0, 0, 0, 0}};
+
+ const struct TestCaseMsa3RF_D tc_d[]{
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {qnan_d, -qnan_d, qnan_f, 0., 0, 0},
+ {inf_d, 9.22e18, qnan_d, -9.22e18, 0, 0},
+ {inf_d, inf_d, -inf_d, inf_d, 0, 0},
+ {-2.3e-308, 5e-324, -inf_d, inf_d, 0, 0},
+ {0., 24.1e87, -1.6e308, 24.1e87, 0, 0},
+ {-5e-324, -5e-324, 5e-324, -5e-324, 0, 0}};
+
+ const struct ExpectedResult_MSA3RF exp_res_fcaf = {0, 0};
+ const struct ExpRes_32I exp_res_fcun_w[] = {
+ {ones, ones, ones, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}};
+ const struct ExpRes_64I exp_res_fcun_d[] = {{ones, ones}, {ones, 0}, {0, 0},
+ {0, 0}, {0, 0}, {0, 0}};
+ const struct ExpRes_32I exp_res_fceq_w[] = {
+ {0, 0, 0, 0}, {0, ones, 0, 0}, {0, ones, 0, ones}};
+ const struct ExpRes_64I exp_res_fceq_d[] = {{0, 0}, {0, 0}, {0, ones},
+ {0, 0}, {0, ones}, {0, ones}};
+ const struct ExpRes_32I exp_res_fcueq_w[] = {
+ {ones, ones, ones, 0}, {0, ones, 0, 0}, {0, ones, 0, ones}};
+ const struct ExpRes_64I exp_res_fcueq_d[] = {
+ {ones, ones}, {ones, 0}, {0, ones}, {0, 0}, {0, ones}, {0, ones}};
+ const struct ExpRes_32I exp_res_fclt_w[] = {
+ {0, 0, 0, 0}, {0, 0, 0, ones}, {0, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fclt_d[] = {{0, 0}, {0, 0}, {0, 0},
+ {0, ones}, {0, 0}, {ones, 0}};
+ const struct ExpRes_32I exp_res_fcult_w[] = {
+ {ones, ones, ones, 0}, {0, 0, 0, ones}, {0, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fcult_d[] = {
+ {ones, ones}, {ones, 0}, {0, 0}, {0, ones}, {0, 0}, {ones, 0}};
+ const struct ExpRes_32I exp_res_fcle_w[] = {
+ {0, 0, 0, 0}, {0, ones, 0, ones}, {0, ones, ones, ones}};
+ const struct ExpRes_64I exp_res_fcle_d[] = {
+ {0, 0}, {0, 0}, {0, ones}, {0, ones}, {0, ones}, {ones, ones}};
+ const struct ExpRes_32I exp_res_fcule_w[] = {
+ {ones, ones, ones, 0}, {0, ones, 0, ones}, {0, ones, ones, ones}};
+ const struct ExpRes_64I exp_res_fcule_d[] = {
+ {ones, ones}, {ones, 0}, {0, ones}, {0, ones}, {0, ones}, {ones, ones}};
+ const struct ExpRes_32I exp_res_fcor_w[] = {
+ {0, 0, 0, ones}, {ones, ones, ones, ones}, {ones, ones, ones, ones}};
+ const struct ExpRes_64I exp_res_fcor_d[] = {{0, 0}, {0, ones},
+ {ones, ones}, {ones, ones},
+ {ones, ones}, {ones, ones}};
+ const struct ExpRes_32I exp_res_fcune_w[] = {
+ {ones, ones, ones, ones}, {ones, 0, ones, ones}, {ones, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fcune_d[] = {{ones, ones}, {ones, ones},
+ {ones, 0}, {ones, ones},
+ {ones, 0}, {ones, 0}};
+ const struct ExpRes_32I exp_res_fcne_w[] = {
+ {0, 0, 0, ones}, {ones, 0, ones, ones}, {ones, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fcne_d[] = {
+ {0, 0}, {0, ones}, {ones, 0}, {ones, ones}, {ones, 0}, {ones, 0}};
+
+#define TEST_FP_QUIET_COMPARE_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+#define TEST_FP_QUIET_COMPARE_D(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FP_QUIET_COMPARE_W(fcaf_w, &tc_w[i], &exp_res_fcaf)
+ TEST_FP_QUIET_COMPARE_W(fcun_w, &tc_w[i], &exp_res_fcun_w[i])
+ TEST_FP_QUIET_COMPARE_W(fceq_w, &tc_w[i], &exp_res_fceq_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcueq_w, &tc_w[i], &exp_res_fcueq_w[i])
+ TEST_FP_QUIET_COMPARE_W(fclt_w, &tc_w[i], &exp_res_fclt_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcult_w, &tc_w[i], &exp_res_fcult_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcle_w, &tc_w[i], &exp_res_fcle_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcule_w, &tc_w[i], &exp_res_fcule_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcor_w, &tc_w[i], &exp_res_fcor_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcune_w, &tc_w[i], &exp_res_fcune_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcne_w, &tc_w[i], &exp_res_fcne_w[i])
+ }
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FP_QUIET_COMPARE_D(fcaf_d, &tc_d[i], &exp_res_fcaf)
+ TEST_FP_QUIET_COMPARE_D(fcun_d, &tc_d[i], &exp_res_fcun_d[i])
+ TEST_FP_QUIET_COMPARE_D(fceq_d, &tc_d[i], &exp_res_fceq_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcueq_d, &tc_d[i], &exp_res_fcueq_d[i])
+ TEST_FP_QUIET_COMPARE_D(fclt_d, &tc_d[i], &exp_res_fclt_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcult_d, &tc_d[i], &exp_res_fcult_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcle_d, &tc_d[i], &exp_res_fcle_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcule_d, &tc_d[i], &exp_res_fcule_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcor_d, &tc_d[i], &exp_res_fcor_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcune_d, &tc_d[i], &exp_res_fcune_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcne_d, &tc_d[i], &exp_res_fcne_d[i])
+ }
+#undef TEST_FP_QUIET_COMPARE_W
+#undef TEST_FP_QUIET_COMPARE_D
+}
+
+template <typename T>
+inline const T* fadd_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] + src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fsub_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] - src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fmul_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] * src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fdiv_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] / src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fmadd_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = std::fma(src1[i], src2[i], src3[i]);
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fmsub_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = std::fma(src1[i], -src2[i], src3[i]);
+ }
+ return dst;
+}
+
+TEST(MSA_floating_point_arithmetic) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_f = std::numeric_limits<float>::infinity();
+ const double inf_d = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ {0.3, -2.14e13f, inf_f, 0.f, // ws
+ -inf_f, std::sqrt(8.e-26f), -23.e34, -2.14e9f, // wt
+ -1e30f, 4.6e12f, 0, 2.14e9f}, // wd
+ {3.4e38f, -1.2e-38f, 1e19f, -1e19f, 3.4e38f, 1.2e-38f, -1e19f, -1e-19f,
+ 3.4e38f, 1.2e-38f * 3, 3.4e38f, -4e19f},
+ {-3e-31f, 3e10f, 1e25f, 123.f, 1e-14f, 1e-34f, 4e25f, 321.f, 3e-17f,
+ 2e-24f, 2.f, -123456.f}};
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {0.3, -2.14e103, -inf_d, std::sqrt(8.e-206), -1e30, 4.6e102},
+ {inf_d, 0., -23.e304, -2.104e9, 0, 2.104e9},
+ {3.4e307, -1.2e-307, 3.4e307, 1.2e-307, 3.4e307, 1.2e-307 * 3},
+ {1e154, -1e154, -1e154, -1e-154, 2.9e38, -4e19},
+ {-3e-301, 3e100, 1e-104, 1e-304, 3e-107, 2e-204},
+ {1e205, 123., 4e205, 321., 2., -123456.}};
+
+ struct ExpectedResult_MSA3RF dst_container;
+
+#define FP_ARITHMETIC_DF_W(instr, function, src1, src2, src3) \
+ run_msa_3rf( \
+ reinterpret_cast<const struct TestCaseMsa3RF*>(src1), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(function( \
+ src1, src2, src3, reinterpret_cast<float*>(&dst_container))), \
+ [](MacroAssembler& assm) { __ instr(w2, w0, w1); });
+
+#define FP_ARITHMETIC_DF_D(instr, function, src1, src2, src3) \
+ run_msa_3rf( \
+ reinterpret_cast<const struct TestCaseMsa3RF*>(src1), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(function( \
+ src1, src2, src3, reinterpret_cast<double*>(&dst_container))), \
+ [](MacroAssembler& assm) { __ instr(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ FP_ARITHMETIC_DF_W(fadd_w, fadd_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fsub_w, fsub_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fmul_w, fmul_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fdiv_w, fdiv_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fmadd_w, fmadd_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fmsub_w, fmsub_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ }
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ FP_ARITHMETIC_DF_D(fadd_d, fadd_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fsub_d, fsub_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fmul_d, fmul_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fdiv_d, fdiv_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fmadd_d, fmadd_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fmsub_d, fmsub_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ }
+#undef FP_ARITHMETIC_DF_W
+#undef FP_ARITHMETIC_DF_D
+}
+
+struct ExpRes_F {
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct ExpRes_D {
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_fmin_fmin_a_fmax_fmax_a) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_f = std::numeric_limits<float>::infinity();
+ const double inf_d = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ {0.3f, -2.14e13f, inf_f, -0.f, // ws
+ -inf_f, -std::sqrt(8.e26f), -23.e34f, -2.14e9f, // wt
+ 0, 0, 0, 0}, // wd
+ {3.4e38f, 1.2e-41f, 1e19f, 1e19f, // ws
+ 3.4e38f, -1.1e-41f, -1e-42f, -1e29f, // wt
+ 0, 0, 0, 0}}; // wd
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {0.3, -2.14e103, -inf_d, -std::sqrt(8e206), 0, 0},
+ {inf_d, -0., -23e304, -2.14e90, 0, 0},
+ {3.4e307, 1.2e-320, 3.4e307, -1.1e-320, 0, 0},
+ {1e154, 1e154, -1e-321, -1e174, 0, 0}};
+
+ const struct ExpRes_F exp_res_fmax_w[] = {{0.3f, -2.14e13f, inf_f, -0.f},
+ {3.4e38f, 1.2e-41f, 1e19f, 1e19f}};
+ const struct ExpRes_F exp_res_fmax_a_w[] = {
+ {-inf_f, -std::sqrt(8e26f), inf_f, -2.14e9f},
+ {3.4e38f, 1.2e-41f, 1e19f, -1e29f}};
+ const struct ExpRes_F exp_res_fmin_w[] = {
+ {-inf_f, -std::sqrt(8.e26f), -23e34f, -2.14e9f},
+ {3.4e38f, -1.1e-41f, -1e-42f, -1e29f}};
+ const struct ExpRes_F exp_res_fmin_a_w[] = {
+ {0.3, -2.14e13f, -23.e34f, -0.f}, {3.4e38f, -1.1e-41f, -1e-42f, 1e19f}};
+
+ const struct ExpRes_D exp_res_fmax_d[] = {
+ {0.3, -2.14e103}, {inf_d, -0.}, {3.4e307, 1.2e-320}, {1e154, 1e154}};
+ const struct ExpRes_D exp_res_fmax_a_d[] = {{-inf_d, -std::sqrt(8e206)},
+ {inf_d, -2.14e90},
+ {3.4e307, 1.2e-320},
+ {1e154, -1e174}};
+ const struct ExpRes_D exp_res_fmin_d[] = {{-inf_d, -std::sqrt(8e206)},
+ {-23e304, -2.14e90},
+ {3.4e307, -1.1e-320},
+ {-1e-321, -1e174}};
+ const struct ExpRes_D exp_res_fmin_a_d[] = {
+ {0.3, -2.14e103}, {-23e304, -0.}, {3.4e307, -1.1e-320}, {-1e-321, 1e154}};
+
+#define TEST_FP_MIN_MAX_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FP_MIN_MAX_D(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FP_MIN_MAX_W(fmax_w, &tc_w[i], &exp_res_fmax_w[i])
+ TEST_FP_MIN_MAX_W(fmax_a_w, &tc_w[i], &exp_res_fmax_a_w[i])
+ TEST_FP_MIN_MAX_W(fmin_w, &tc_w[i], &exp_res_fmin_w[i])
+ TEST_FP_MIN_MAX_W(fmin_a_w, &tc_w[i], &exp_res_fmin_a_w[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FP_MIN_MAX_D(fmax_d, &tc_d[i], &exp_res_fmax_d[i])
+ TEST_FP_MIN_MAX_D(fmax_a_d, &tc_d[i], &exp_res_fmax_a_d[i])
+ TEST_FP_MIN_MAX_D(fmin_d, &tc_d[i], &exp_res_fmin_d[i])
+ TEST_FP_MIN_MAX_D(fmin_a_d, &tc_d[i], &exp_res_fmin_a_d[i])
+ }
+#undef TEST_FP_MIN_MAX_W
+#undef TEST_FP_MIN_MAX_D
+}
+
+struct TestCaseMsa3RF_16I {
+ int16_t ws_1, ws_2, ws_3, ws_4, ws_5, ws_6, ws_7, ws_8;
+ int16_t wt_1, wt_2, wt_3, wt_4, wt_5, wt_6, wt_7, wt_8;
+ int16_t wd_1, wd_2, wd_3, wd_4, wd_5, wd_6, wd_7, wd_8;
+};
+struct ExpRes_16I {
+ int16_t exp_res_1;
+ int16_t exp_res_2;
+ int16_t exp_res_3;
+ int16_t exp_res_4;
+ int16_t exp_res_5;
+ int16_t exp_res_6;
+ int16_t exp_res_7;
+ int16_t exp_res_8;
+};
+
+struct TestCaseMsa3RF_32I {
+ int32_t ws_1, ws_2, ws_3, ws_4;
+ int32_t wt_1, wt_2, wt_3, wt_4;
+ int32_t wd_1, wd_2, wd_3, wd_4;
+};
+
+TEST(MSA_fixed_point_arithmetic) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const struct TestCaseMsa3RF tc_h[]{
+ {0x800080007fff7fff, 0xe1ed8000fad3863a, 0x80007fff00af7fff,
+ 0x800015a77fffa0eb, 0x7fff800080007fff, 0x80007fff1f207364},
+ {0x800080007fff006a, 0x002affc4329ad87b, 0x80007fff7fff00f3,
+ 0xffecffb4d0d7f429, 0x80007fff80007c33, 0x54ac6bbce53b8c91}};
+
+ const struct TestCaseMsa3RF tc_w[]{
+ {0x8000000080000000, 0x7fffffff7fffffff, 0x800000007fffffff,
+ 0x00001ff37fffffff, 0x7fffffff80000000, 0x800000007fffffff},
+ {0xe1ed035580000000, 0xfad3863aed462c0b, 0x8000000015a70aec,
+ 0x7fffffffa0ebd354, 0x800000007fffffff, 0xd0d7f4291f207364},
+ {0x8000000080000000, 0x7fffffff0000da1f, 0x800000007fffffff,
+ 0x7fffffff00f39c3b, 0x800000007fffffff, 0x800000007c33f2fd},
+ {0x0000ac33ffff329a, 0x54ac6bbce53bd87b, 0xffffe2b4d0d7f429,
+ 0x0355ed462c0b1ff3, 0xb5deb625939dd3f9, 0xe642adfa69519596}};
+
+ const struct ExpectedResult_MSA3RF exp_res_mul_q_h[] = {
+ {0x7fff800100ae7ffe, 0x1e13ea59fad35a74},
+ {0x7fff80017ffe0000, 0xffff0000ed5b03a7}};
+ const struct ExpectedResult_MSA3RF exp_res_madd_q_h[] = {
+ {0x7fff800080ae7fff, 0x9e136a5819f37fff},
+ {0x00000000fffe7c33, 0x54ab6bbcd2969038}};
+ const struct ExpectedResult_MSA3RF exp_res_msub_q_h[] = {
+ {0xffffffff80000000, 0x80007fff244c18ef},
+ {0x80007fff80007c32, 0x54ac6bbbf7df88e9}};
+ const struct ExpectedResult_MSA3RF exp_res_mulr_q_h[] = {
+ {0x7fff800100af7ffe, 0x1e13ea59fad35a75},
+ {0x7fff80017ffe0001, 0x00000000ed5b03a8}};
+ const struct ExpectedResult_MSA3RF exp_res_maddr_q_h[] = {
+ {0x7fff800080af7fff, 0x9e136a5819f37fff},
+ {0x00000000fffe7c34, 0x54ac6bbcd2969039}};
+ const struct ExpectedResult_MSA3RF exp_res_msubr_q_h[] = {
+ {0xffffffff80000001, 0x80007fff244d18ef},
+ {0x80007fff80007c32, 0x54ac6bbcf7e088e9}};
+
+ const struct ExpectedResult_MSA3RF exp_res_mul_q_w[] = {
+ {0x7fffffff80000001, 0x00001ff27ffffffe},
+ {0x1e12fcabea58f514, 0xfad3863a0de8dee1},
+ {0x7fffffff80000001, 0x7ffffffe0000019f},
+ {0xffffffff00004bab, 0x0234e1fbf6ca3ee0}};
+ const struct ExpectedResult_MSA3RF exp_res_madd_q_w[] = {
+ {0x7fffffff80000000, 0x80001ff27fffffff},
+ {0x9e12fcab6a58f513, 0xcbab7a632d095245},
+ {0x0000000000000000, 0xfffffffe7c33f49c},
+ {0xb5deb624939e1fa4, 0xe8778ff5601bd476}};
+ const struct ExpectedResult_MSA3RF exp_res_msub_q_w[] = {
+ {0xffffffffffffffff, 0x8000000000000000},
+ {0x800000007fffffff, 0xd6046dee11379482},
+ {0x800000007fffffff, 0x800000007c33f15d},
+ {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+ const struct ExpectedResult_MSA3RF exp_res_mulr_q_w[] = {
+ {0x7fffffff80000001, 0x00001ff37ffffffe},
+ {0x1e12fcabea58f514, 0xfad3863a0de8dee2},
+ {0x7fffffff80000001, 0x7ffffffe0000019f},
+ {0x0000000000004bac, 0x0234e1fcf6ca3ee1}};
+ const struct ExpectedResult_MSA3RF exp_res_maddr_q_w[] = {
+ {0x7fffffff80000000, 0x80001ff37fffffff},
+ {0x9e12fcab6a58f513, 0xcbab7a632d095246},
+ {0x0000000000000000, 0xfffffffe7c33f49c},
+ {0xb5deb625939e1fa5, 0xe8778ff6601bd477}};
+ const struct ExpectedResult_MSA3RF exp_res_msubr_q_w[] = {
+ {0xffffffffffffffff, 0x8000000000000001},
+ {0x800000007fffffff, 0xd6046def11379482},
+ {0x800000007fffffff, 0x800000007c33f15e},
+ {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+
+#define TEST_FIXED_POINT_DF_H(instruction, src, exp_res) \
+ run_msa_3rf((src), (exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FIXED_POINT_DF_W(instruction, src, exp_res) \
+ run_msa_3rf((src), (exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_h); i++) {
+ TEST_FIXED_POINT_DF_H(mul_q_h, &tc_h[i], &exp_res_mul_q_h[i])
+ TEST_FIXED_POINT_DF_H(madd_q_h, &tc_h[i], &exp_res_madd_q_h[i])
+ TEST_FIXED_POINT_DF_H(msub_q_h, &tc_h[i], &exp_res_msub_q_h[i])
+ TEST_FIXED_POINT_DF_H(mulr_q_h, &tc_h[i], &exp_res_mulr_q_h[i])
+ TEST_FIXED_POINT_DF_H(maddr_q_h, &tc_h[i], &exp_res_maddr_q_h[i])
+ TEST_FIXED_POINT_DF_H(msubr_q_h, &tc_h[i], &exp_res_msubr_q_h[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FIXED_POINT_DF_W(mul_q_w, &tc_w[i], &exp_res_mul_q_w[i])
+ TEST_FIXED_POINT_DF_W(madd_q_w, &tc_w[i], &exp_res_madd_q_w[i])
+ TEST_FIXED_POINT_DF_W(msub_q_w, &tc_w[i], &exp_res_msub_q_w[i])
+ TEST_FIXED_POINT_DF_W(mulr_q_w, &tc_w[i], &exp_res_mulr_q_w[i])
+ TEST_FIXED_POINT_DF_W(maddr_q_w, &tc_w[i], &exp_res_maddr_q_w[i])
+ TEST_FIXED_POINT_DF_W(msubr_q_w, &tc_w[i], &exp_res_msubr_q_w[i])
+ }
+#undef TEST_FIXED_POINT_DF_H
+#undef TEST_FIXED_POINT_DF_W
+}
+
+TEST(MSA_fexdo) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const float nan_float = std::numeric_limits<float>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ // ws_1, ws_2, ws_3, ws_4, wt_1, wt_2, wt_3, wt_4, wd_1, wd_2, wd_3, wd_4
+ {inf_float, nan_float, 66505.f, 65504.f, 6.2e-5f, 5e-5f, -32.42f,
+ -inf_float, 0, 0, 0, 0},
+ {-0.f, 0.f, 123.567f, -765.321f, -6e-8f, 5.9e-8f, 1e-7f, -1e-20f, 0, 0, 0,
+ 0},
+ {1e-36f, 1e20f, -1e20f, 2e-20f, 6e-8f, -2.9e-8f, -66505.f, -65504.f}};
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {inf_double, -1234., 4e38, 3.4e38, 0, 0},
+ {1.2e-38, 1.1e-39, -38.92f, -inf_double, 0, 0},
+ {-0., 0., 123.567e31, -765.321e33, 0, 0},
+ {-1.5e-45, 1.3e-45, 1e-42, -1e-200, 0, 0},
+ {1e-202, 1e158, -1e159, 1e14, 0, 0},
+ {1.5e-42, 1.3e-46, -123.567e31, 765.321e33, 0, 0}};
+
+ const struct ExpRes_16I exp_res_fexdo_w[] = {
+ {static_cast<int16_t>(0x0410), static_cast<int16_t>(0x0347),
+ static_cast<int16_t>(0xd00d), static_cast<int16_t>(0xfc00),
+ static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7dff),
+ static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7bff)},
+ {static_cast<int16_t>(0x8001), static_cast<int16_t>(0x0001),
+ static_cast<int16_t>(0x0002), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x57b9), static_cast<int16_t>(0xe1fb)},
+ {static_cast<int16_t>(0x0001), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0xfc00), static_cast<int16_t>(0xfbff),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7c00),
+ static_cast<int16_t>(0xfc00), static_cast<int16_t>(0x0000)}};
+
+ const struct ExpRes_32I exp_res_fexdo_d[] = {
+ {bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0x7f7fc99e),
+ bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0xc49a4000)},
+ {bit_cast<int32_t>(0xc21bae14), bit_cast<int32_t>(0xff800000),
+ bit_cast<int32_t>(0x0082ab1e), bit_cast<int32_t>(0x000bfa5a)},
+ {bit_cast<int32_t>(0x7673b164), bit_cast<int32_t>(0xfb13653d),
+ bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)},
+ {bit_cast<int32_t>(0x000002ca), bit_cast<int32_t>(0x80000000),
+ bit_cast<int32_t>(0x80000001), bit_cast<int32_t>(0x00000001)},
+ {bit_cast<int32_t>(0xff800000), bit_cast<int32_t>(0x56b5e621),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7f800000)},
+ {bit_cast<int32_t>(0xf673b164), bit_cast<int32_t>(0x7b13653d),
+ bit_cast<int32_t>(0x0000042e), bit_cast<int32_t>(0x00000000)}};
+
+#define TEST_FEXDO_H(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FEXDO_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FEXDO_H(fexdo_h, &tc_w[i], &exp_res_fexdo_w[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FEXDO_W(fexdo_w, &tc_d[i], &exp_res_fexdo_d[i])
+ }
+
+#undef TEST_FEXDO_H
+#undef TEST_FEXDO_W
+}
+
+TEST(MSA_ftq) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float nan_float = std::numeric_limits<float>::quiet_NaN();
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double nan_double = std::numeric_limits<double>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ {1.f, -0.999f, 1.5f, -31e-6, 1e-7, -0.598, 0.0023, -0.f, 0, 0, 0, 0},
+ {100.f, -102.f, -1.1f, 1.3f, 0.f, -1.f, 0.9999f, -0.000322, 0, 0, 0, 0},
+ {nan_float, inf_float, -inf_float, -nan_float, -1e-40, 3e-44, 8.3e36,
+ -0.00003, 0, 0, 0, 0}};
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ {1., -0.999, 1.5, -31e-6, 0, 0},
+ {1e-7, -0.598, 0.0023, -0.f, 0, 0},
+ {100.f, -102.f, -1.1f, 1.3f, 0, 0},
+ {0.f, -1.f, 0.9999f, -0.000322, 0, 0},
+ {nan_double, inf_double, -inf_double, -nan_double, 0, 0},
+ {-3e306, 2e-307, 9e307, 2e-307, 0, 0}};
+
+ const struct ExpRes_16I exp_res_ftq_w[] = {
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xb375),
+ static_cast<int16_t>(0x004b), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8021),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff)},
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x7ffd), static_cast<int16_t>(0xfff5),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7fff)},
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7fff),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000)}};
+
+ const struct ExpRes_32I exp_res_ftq_d[] = {
+ {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0xfffefbf4),
+ bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x8020c49c)},
+ {bit_cast<int32_t>(0x004b5dcc), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x000000d7), bit_cast<int32_t>(0xb374bc6a)},
+ {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x7fffffff),
+ bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x80000000)},
+ {bit_cast<int32_t>(0x7ffcb900), bit_cast<int32_t>(0xfff572de),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x80000000)},
+ {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7fffffff)},
+ {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)}};
+
+#define TEST_FTQ_H(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FTQ_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FTQ_H(ftq_h, &tc_w[i], &exp_res_ftq_w[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FTQ_W(ftq_w, &tc_d[i], &exp_res_ftq_d[i])
+ }
+
+#undef TEST_FTQ_H
+#undef TEST_FTQ_W
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 976bd02824..f809ea8f39 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -56,7 +56,8 @@ TEST(MIPS0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Addition.
__ addu(v0, a0, a1);
@@ -79,7 +80,8 @@ TEST(MIPS1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ mov(a1, a0);
@@ -115,7 +117,8 @@ TEST(MIPS2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label exit, error;
@@ -285,7 +288,8 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Double precision floating point instructions.
@@ -409,7 +413,8 @@ TEST(MIPS4) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -476,7 +481,8 @@ TEST(MIPS5) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Load all structure elements to registers.
@@ -545,7 +551,8 @@ TEST(MIPS6) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Basic word load/store.
@@ -624,7 +631,8 @@ TEST(MIPS7) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -711,7 +719,7 @@ TEST(MIPS8) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
// Basic word load.
@@ -796,7 +804,8 @@ TEST(MIPS9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(zero_reg));
@@ -837,7 +846,8 @@ TEST(MIPS10) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
if (kArchVariant == kMips64r2) {
@@ -938,7 +948,7 @@ TEST(MIPS11) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
// Test all combinations of LWL and vAddr.
@@ -1091,7 +1101,8 @@ TEST(MIPS12) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ mov(t2, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
@@ -1180,7 +1191,8 @@ TEST(MIPS13) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ Sw(a4, MemOperand(a0, offsetof(T, cvt_small_in)));
__ Cvt_d_uw(f10, a4);
@@ -1258,7 +1270,8 @@ TEST(MIPS14) {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Save FCSR.
__ cfc1(a1, FCSR);
@@ -1363,7 +1376,7 @@ TEST(MIPS15) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label target;
__ beq(v0, v1, &target);
@@ -1401,7 +1414,8 @@ TEST(MIPS16) {
};
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Basic 32-bit word load/store, with un-signed data.
@@ -1535,7 +1549,7 @@ TEST(seleqz_selnez) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test {
@@ -1723,7 +1737,7 @@ TEST(rint_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -1827,7 +1841,7 @@ TEST(sel) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test {
@@ -1902,7 +1916,7 @@ TEST(rint_s) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -2106,7 +2120,7 @@ TEST(trunc_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -2180,7 +2194,7 @@ TEST(movz_movn) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -2309,7 +2323,7 @@ TEST(movt_movd) {
test.fcsr = 1 << (24+condition_flags[j]);
}
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
__ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)));
@@ -2364,7 +2378,8 @@ TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2441,7 +2456,8 @@ TEST(trunc_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2511,7 +2527,8 @@ TEST(round_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2580,7 +2597,7 @@ TEST(round_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -2653,7 +2670,8 @@ TEST(sub) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2726,7 +2744,8 @@ TEST(sqrt_rsqrt_recip) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2817,7 +2836,8 @@ TEST(neg) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2871,7 +2891,8 @@ TEST(mul) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2930,7 +2951,8 @@ TEST(mov) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2983,7 +3005,8 @@ TEST(floor_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3053,7 +3076,7 @@ TEST(floor_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -3125,7 +3148,8 @@ TEST(ceil_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3195,7 +3219,7 @@ TEST(ceil_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
@@ -3268,7 +3292,8 @@ TEST(jump_tables1) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -3336,7 +3361,8 @@ TEST(jump_tables2) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -3406,7 +3432,8 @@ TEST(jump_tables3) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
Handle<Object> values[kNumCases];
@@ -3502,7 +3529,7 @@ TEST(BITSWAP) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
__ Ld(a4, MemOperand(a0, offsetof(T, r1)));
@@ -3594,7 +3621,7 @@ TEST(class_fmt) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
@@ -3744,7 +3771,8 @@ TEST(ABS) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t fir;
@@ -3842,7 +3870,8 @@ TEST(ADD_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -3912,7 +3941,7 @@ TEST(C_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -4124,7 +4153,7 @@ TEST(CMP_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
@@ -4341,7 +4370,8 @@ TEST(CVT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float cvt_d_s_in;
@@ -4538,7 +4568,8 @@ TEST(DIV_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dOp1;
@@ -4658,7 +4689,8 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ align(v0, a0, a1, bp);
__ jr(ra);
@@ -4711,7 +4743,8 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ dalign(v0, a0, a1, bp);
__ jr(ra);
@@ -4769,7 +4802,8 @@ uint64_t run_aluipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ aluipc(v0, offset);
__ jr(ra);
@@ -4823,7 +4857,8 @@ uint64_t run_auipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ auipc(v0, offset);
__ jr(ra);
@@ -4877,7 +4912,8 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(t0, rs);
__ aui(v0, t0, offset);
@@ -4903,7 +4939,8 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(t0, rs);
__ daui(v0, t0, offset);
@@ -4929,7 +4966,8 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, rs);
__ dahi(v0, offset);
@@ -4955,7 +4993,8 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, rs);
__ dati(v0, offset);
@@ -5056,7 +5095,8 @@ TEST(r6_aui_family) {
uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -5252,7 +5292,8 @@ uint64_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t3, a4, 0xffff; (0x250fffff)
@@ -5328,7 +5369,8 @@ uint64_t run_lwupc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t3, a4, 0xffff; (0x250fffff)
@@ -5404,7 +5446,8 @@ uint64_t run_jic(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label get_program_counter, stop_execution;
__ push(ra);
@@ -5485,7 +5528,8 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
__ li(v0, 0);
@@ -5553,12 +5597,196 @@ TEST(r6_beqzc) {
}
}
+void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
+ MSARegister w, Register t0, Register t1) {
+ __ li(t0, static_cast<uint32_t>(elements[0] & 0xffffffff));
+ __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xffffffff));
+ __ insert_w(w, 0, t0);
+ __ insert_w(w, 1, t1);
+ __ li(t0, static_cast<uint32_t>(elements[1] & 0xffffffff));
+ __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xffffffff));
+ __ insert_w(w, 2, t0);
+ __ insert_w(w, 3, t1);
+}
-uint64_t run_jialc(int16_t offset) {
+inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
+ Register a) {
+ __ st_d(w, MemOperand(a, 0));
+}
+
+typedef union {
+ uint8_t b[16];
+ uint16_t h[8];
+ uint32_t w[4];
+ uint64_t d[2];
+} msa_reg_t;
+
+struct TestCaseMsaBranch {
+ uint64_t wt_lo;
+ uint64_t wt_hi;
+};
+
+template <typename Branch>
+void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
+ bool branched) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+
+ typedef struct {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+ } T;
+ T t = {0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x0000000000000000,
+ 0x0000000000000000};
+ msa_reg_t res;
+ Label do_not_move_w0_to_w2;
+
+ load_elements_of_vector(assm, &t.ws_lo, w0, t0, t1);
+ load_elements_of_vector(assm, &t.wd_lo, w2, t0, t1);
+ load_elements_of_vector(assm, &input->wt_lo, w1, t0, t1);
+ GenerateBranch(assm, do_not_move_w0_to_w2);
+ __ nop();
+ __ move_v(w2, w0);
+
+ __ bind(&do_not_move_w0_to_w2);
+ store_elements_of_vector(assm, w2, a0);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ if (branched) {
+ CHECK_EQ(t.wd_lo, res.d[0]);
+ CHECK_EQ(t.wd_hi, res.d[1]);
+ } else {
+ CHECK_EQ(t.ws_lo, res.d[0]);
+ CHECK_EQ(t.ws_hi, res.d[1]);
+ }
+}
+
+TEST(MSA_bz_bnz) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ TestCaseMsaBranch tz_v[] = {
+ {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ for (unsigned i = 0; i < arraysize(tz_v); ++i) {
+ run_bz_bnz(
+ &tz_v[i],
+ [](MacroAssembler& assm, Label& br_target) { __ bz_v(w1, &br_target); },
+ tz_v[i].wt_lo == 0 && tz_v[i].wt_hi == 0);
+ }
+
+#define TEST_BZ_DF(input_array, lanes, instruction, int_type) \
+ for (unsigned i = 0; i < arraysize(input_array); ++i) { \
+ int j; \
+ int_type* element = reinterpret_cast<int_type*>(&input_array[i]); \
+ for (j = 0; j < lanes; ++j) { \
+ if (element[j] == 0) { \
+ break; \
+ } \
+ } \
+ run_bz_bnz(&input_array[i], \
+ [](MacroAssembler& assm, Label& br_target) { \
+ __ instruction(w1, &br_target); \
+ }, \
+ j != lanes); \
+ }
+ TestCaseMsaBranch tz_b[] = {{0x0, 0x0},
+ {0xbc0000, 0x0},
+ {0x0, 0xab000000000000cd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_b, kMSALanesByte, bz_b, int8_t)
+
+ TestCaseMsaBranch tz_h[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_h, kMSALanesHalf, bz_h, int16_t)
+
+ TestCaseMsaBranch tz_w[] = {{0x0, 0x0},
+ {0xbcde123400000000, 0x0},
+ {0x0, 0x000000001234abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_w, kMSALanesWord, bz_w, int32_t)
+
+ TestCaseMsaBranch tz_d[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BZ_DF(tz_d, kMSALanesDword, bz_d, int64_t)
+#undef TEST_BZ_DF
+
+ TestCaseMsaBranch tnz_v[] = {
+ {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ for (unsigned i = 0; i < arraysize(tnz_v); ++i) {
+ run_bz_bnz(&tnz_v[i],
+ [](MacroAssembler& assm, Label& br_target) {
+ __ bnz_v(w1, &br_target);
+ },
+ tnz_v[i].wt_lo != 0 || tnz_v[i].wt_hi != 0);
+ }
+
+#define TEST_BNZ_DF(input_array, lanes, instruction, int_type) \
+ for (unsigned i = 0; i < arraysize(input_array); ++i) { \
+ int j; \
+ int_type* element = reinterpret_cast<int_type*>(&input_array[i]); \
+ for (j = 0; j < lanes; ++j) { \
+ if (element[j] == 0) { \
+ break; \
+ } \
+ } \
+ run_bz_bnz(&input_array[i], \
+ [](MacroAssembler& assm, Label& br_target) { \
+ __ instruction(w1, &br_target); \
+ }, \
+ j == lanes); \
+ }
+ TestCaseMsaBranch tnz_b[] = {{0x0, 0x0},
+ {0xbc0000, 0x0},
+ {0x0, 0xab000000000000cd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_b, 16, bnz_b, int8_t)
+
+ TestCaseMsaBranch tnz_h[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_h, 8, bnz_h, int16_t)
+
+ TestCaseMsaBranch tnz_w[] = {{0x0, 0x0},
+ {0xbcde123400000000, 0x0},
+ {0x0, 0x000000001234abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_w, 4, bnz_w, int32_t)
+
+ TestCaseMsaBranch tnz_d[] = {{0x0, 0x0},
+ {0xbcde0000, 0x0},
+ {0x0, 0xabcd00000000abcd},
+ {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ TEST_BNZ_DF(tnz_d, 2, bnz_d, int64_t)
+#undef TEST_BNZ_DF
+}
+
+uint64_t run_jialc(int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label main_block, get_program_counter;
__ push(ra);
@@ -5652,7 +5880,8 @@ uint64_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ addiupc(v0, imm19);
__ jr(ra);
@@ -5706,7 +5935,8 @@ uint64_t run_ldpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2 * 2^7k = 2^8k
// addiu t3, a4, 0xffff; (0x250fffff)
@@ -5788,7 +6018,8 @@ int64_t run_bc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5868,7 +6099,8 @@ int64_t run_balc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5949,7 +6181,8 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ dsll(v0, a0, sa_value);
__ jr(ra);
@@ -5997,7 +6230,8 @@ uint64_t run_bal(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ mov(t0, ra);
__ bal(offset); // Equivalent for "BGEZAL zero_reg, offset".
@@ -6086,7 +6320,8 @@ void helper_madd_msub_maddf_msubf(F func) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
T x = std::sqrt(static_cast<T>(2.0));
T y = std::sqrt(static_cast<T>(3.0));
@@ -6206,7 +6441,8 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -6288,7 +6524,8 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -6384,7 +6621,8 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, imm);
__ li(t0, source);
@@ -6443,7 +6681,8 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, imm);
__ li(t0, source);
@@ -6512,7 +6751,8 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ li(v0, 0xffffffffffffffff);
__ li(t0, source);
@@ -6555,63 +6795,6 @@ TEST(Ext) {
CHECK_EQ(run_Ext(0x0000000040000000, 31, 1), 0x0000000000000000);
}
-// Load elements in w0 MSA vector register
-void load_uint64_elements_of_vector(MacroAssembler& assm,
- const uint64_t elements[], MSARegister w,
- Register t0, Register t1) {
- __ li(t0, elements[0]);
- __ li(t1, elements[1]);
- __ insert_d(w, 0, t0);
- __ insert_d(w, 1, t1);
-}
-
-void load_uint32_elements_of_vector(MacroAssembler& assm,
- const uint64_t elements[], MSARegister w,
- Register t0, Register t1) {
- const uint32_t* const element = reinterpret_cast<const uint32_t*>(elements);
- __ li(t0, element[0]);
- __ li(t1, element[1]);
- __ insert_w(w, 0, t0);
- __ insert_w(w, 1, t1);
- __ li(t0, element[2]);
- __ li(t1, element[3]);
- __ insert_w(w, 2, t0);
- __ insert_w(w, 3, t1);
-}
-
-void load_uint16_elements_of_vector(MacroAssembler& assm,
- const uint64_t elements[], MSARegister w,
- Register t0, Register t1) {
- const uint16_t* const element = reinterpret_cast<const uint16_t*>(elements);
- __ li(t0, element[0]);
- __ li(t1, element[1]);
- __ insert_h(w, 0, t0);
- __ insert_h(w, 1, t1);
- __ li(t0, element[2]);
- __ li(t1, element[3]);
- __ insert_h(w, 2, t0);
- __ insert_h(w, 3, t1);
- __ li(t0, element[4]);
- __ li(t1, element[5]);
- __ insert_h(w, 4, t0);
- __ insert_h(w, 5, t1);
- __ li(t0, element[6]);
- __ li(t1, element[7]);
- __ insert_h(w, 6, t0);
- __ insert_h(w, 7, t1);
-}
-
-// Store vector elements from w2 to the memory pointed by a0
-void store_uint64_elements_of_vector(MacroAssembler& assm, MSARegister w,
- Register a) {
- __ st_d(w, MemOperand(a, 0));
-}
-
-void store_uint32_elements_of_vector(MacroAssembler& assm, MSARegister w,
- Register a) {
- __ st_w(w, MemOperand(a, 0));
-}
-
TEST(MSA_fill_copy) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -6628,7 +6811,8 @@ TEST(MSA_fill_copy) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6695,7 +6879,8 @@ TEST(MSA_fill_copy_2) {
} T;
T t[2];
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6756,7 +6941,8 @@ TEST(MSA_fill_copy_3) {
} T;
T t[2];
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6795,19 +6981,14 @@ TEST(MSA_fill_copy_3) {
CHECK_EQ(0x5555555555555555, t[1].d0);
}
-typedef union {
- uint8_t b[16];
- uint16_t h[8];
- uint32_t w[4];
- uint64_t d[2];
-} msa_reg_t;
template <typename T>
void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
__ li(t0, -1);
@@ -6815,22 +6996,22 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
__ fill_w(w0, t0);
if (std::is_same<T, int8_t>::value) {
- DCHECK(n < 16);
+ DCHECK_LT(n, 16);
__ insert_b(w0, n, t1);
} else if (std::is_same<T, int16_t>::value) {
- DCHECK(n < 8);
+ DCHECK_LT(n, 8);
__ insert_h(w0, n, t1);
} else if (std::is_same<T, int32_t>::value) {
- DCHECK(n < 4);
+ DCHECK_LT(n, 4);
__ insert_w(w0, n, t1);
} else if (std::is_same<T, int64_t>::value) {
- DCHECK(n < 2);
+ DCHECK_LT(n, 2);
__ insert_d(w0, n, t1);
} else {
UNREACHABLE();
}
- store_uint64_elements_of_vector(assm, w0, a0);
+ store_elements_of_vector(assm, w0, a0);
__ jr(ra);
__ nop();
@@ -6919,7 +7100,8 @@ void run_msa_ctc_cfc(uint64_t value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
MSAControlRegister msareg = {kMSACSRRegister};
@@ -6950,6 +7132,152 @@ void run_msa_ctc_cfc(uint64_t value) {
res);
}
+TEST(MSA_move_v) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+ } T;
+ T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
+ 0x706e51290ac76fb9},
+ {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
+ 0x2686b73484c2e843},
+ {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
+ 0xa9913868fb819c59}};
+
+ for (unsigned i = 0; i < arraysize(t); ++i) {
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+
+ load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ __ move_v(w2, w0);
+ store_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ (CALL_GENERATED_CODE(isolate, f, &t[i].wd_lo, 0, 0, 0, 0));
+ CHECK_EQ(t[i].ws_lo, t[i].wd_lo);
+ CHECK_EQ(t[i].ws_hi, t[i].wd_hi);
+ }
+}
+
+template <typename ExpectFunc, typename OperFunc>
+void run_msa_sldi(OperFunc GenerateOperation,
+ ExpectFunc GenerateExpectedResult) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+ } T;
+ T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
+ 0x706e51290ac76fb9},
+ {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
+ 0x2686b73484c2e843},
+ {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
+ 0xa9913868fb819c59}};
+ uint64_t res[2];
+
+ for (unsigned i = 0; i < arraysize(t); ++i) {
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+ load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
+ load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
+ GenerateOperation(assm);
+ store_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ (CALL_GENERATED_CODE(isolate, f, &res[0], 0, 0, 0, 0));
+ GenerateExpectedResult(reinterpret_cast<uint8_t*>(&t[i].ws_lo),
+ reinterpret_cast<uint8_t*>(&t[i].wd_lo));
+ CHECK_EQ(res[0], t[i].wd_lo);
+ CHECK_EQ(res[1], t[i].wd_hi);
+ }
+}
+
+TEST(MSA_sldi) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+ CcTest::InitializeVM();
+
+#define SLDI_DF(s, k) \
+ uint8_t v[32]; \
+ for (unsigned i = 0; i < s; i++) { \
+ v[i] = ws[s * k + i]; \
+ v[i + s] = wd[s * k + i]; \
+ } \
+ for (unsigned i = 0; i < s; i++) { \
+ wd[s * k + i] = v[i + n]; \
+ }
+
+ for (int n = 0; n < 16; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_b(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0)
+ });
+ }
+
+ for (int n = 0; n < 8; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_h(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ for (int k = 0; k < 2; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k)
+ }
+ });
+ }
+
+ for (int n = 0; n < 4; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_w(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ for (int k = 0; k < 4; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k)
+ }
+ });
+ }
+
+ for (int n = 0; n < 2; ++n) {
+ run_msa_sldi([n](MacroAssembler& assm) { __ sldi_d(w2, w0, n); },
+ [n](uint8_t* ws, uint8_t* wd) {
+ for (int k = 0; k < 8; ++k) {
+ SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k)
+ }
+ });
+ }
+#undef SLDI_DF
+}
+
TEST(MSA_cfc_ctc) {
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6984,7 +7312,8 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
uint64_t wd_lo = 0xf35862e13e38f8b0;
@@ -7036,7 +7365,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
UNREACHABLE();
}
- store_uint64_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -7222,17 +7551,18 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
int32_t i5 =
i5_sign_ext ? static_cast<int32_t>(input->i5 << 27) >> 27 : input->i5;
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
GenerateI5InstructionFunc(assm, i5);
- store_uint64_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -7639,15 +7969,14 @@ struct TestCaseMsa2R {
uint64_t exp_res_hi;
};
-template <typename Func, typename FuncLoad, typename FuncStore>
+template <typename Func>
void run_msa_2r(const struct TestCaseMsa2R* input,
- Func Generate2RInstructionFunc,
- FuncLoad load_elements_of_vector,
- FuncStore store_elements_of_vector) {
+ Func Generate2RInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -7670,17 +7999,8 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
(CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
- if (store_elements_of_vector == store_uint64_elements_of_vector) {
- CHECK_EQ(input->exp_res_lo, res.d[0]);
- CHECK_EQ(input->exp_res_hi, res.d[1]);
- } else if (store_elements_of_vector == store_uint32_elements_of_vector) {
- const uint32_t* exp_res =
- reinterpret_cast<const uint32_t*>(&input->exp_res_lo);
- CHECK_EQ(exp_res[0], res.w[0]);
- CHECK_EQ(exp_res[1], res.w[1]);
- CHECK_EQ(exp_res[2], res.w[2]);
- CHECK_EQ(exp_res[3], res.w[3]);
- }
+ CHECK_EQ(input->exp_res_lo, res.d[0]);
+ CHECK_EQ(input->exp_res_hi, res.d[1]);
}
TEST(MSA_pcnt) {
@@ -7731,14 +8051,10 @@ TEST(MSA_pcnt) {
{0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x20, 0x2a}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); });
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); });
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); });
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); });
}
}
@@ -7790,14 +8106,10 @@ TEST(MSA_nlzc) {
{0x00000000e338f8b0, 0x0754534acab32654, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); });
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); });
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); });
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); });
}
}
@@ -7849,14 +8161,10 @@ TEST(MSA_nloc) {
{0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); });
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); });
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); });
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); });
}
}
@@ -7917,13 +8225,11 @@ TEST(MSA_fclass) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fclass_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fclass_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fclass_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fclass_d(w2, w0); });
}
#undef BIT
@@ -7989,13 +8295,11 @@ TEST(MSA_ftrunc_s) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_I); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_I); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); });
}
}
@@ -8028,13 +8332,11 @@ TEST(MSA_ftrunc_u) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); });
}
}
@@ -8073,13 +8375,11 @@ TEST(MSA_fsqrt) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); });
}
}
@@ -8103,13 +8403,11 @@ TEST(MSA_frsqrt) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); });
}
}
@@ -8135,13 +8433,11 @@ TEST(MSA_frcp) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ frcp_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ frcp_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ frcp_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ frcp_d(w2, w0); });
}
}
@@ -8156,8 +8452,7 @@ void test_frint_s(size_t data_size, TestCaseMsa2RF_F_F tc_d[],
__ ctcmsa(msareg, t0);
__ frint_w(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ });
}
}
@@ -8172,8 +8467,7 @@ void test_frint_d(size_t data_size, TestCaseMsa2RF_D_D tc_d[],
__ ctcmsa(msareg, t0);
__ frint_d(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ });
}
}
@@ -8255,14 +8549,12 @@ TEST(MSA_flog2) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ flog2_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ flog2_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ flog2_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ flog2_d(w2, w0); });
}
}
@@ -8277,8 +8569,7 @@ void test_ftint_s_s(size_t data_size, TestCaseMsa2RF_F_I tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_s_w(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ });
}
}
@@ -8293,8 +8584,7 @@ void test_ftint_s_d(size_t data_size, TestCaseMsa2RF_D_I tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_s_d(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ });
}
}
@@ -8391,8 +8681,7 @@ void test_ftint_u_s(size_t data_size, TestCaseMsa2RF_F_U tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_u_w(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ });
}
}
@@ -8407,8 +8696,7 @@ void test_ftint_u_d(size_t data_size, TestCaseMsa2RF_D_U tc_d[],
__ ctcmsa(msareg, t0);
__ ftint_u_d(w2, w0);
__ ctcmsa(msareg, t1);
- },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ });
}
}
@@ -8524,13 +8812,11 @@ TEST(MSA_ffint_u) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); });
}
}
@@ -8566,13 +8852,11 @@ TEST(MSA_ffint_s) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_I_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); },
- load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_I_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); },
- load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); });
}
}
@@ -8625,13 +8909,11 @@ TEST(MSA_fexupl) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fexupl_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupl_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fexupl_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupl_d(w2, w0); });
}
}
@@ -8660,13 +8942,11 @@ TEST(MSA_fexupr) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ fexupr_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupr_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ fexupr_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ fexupr_d(w2, w0); });
}
}
@@ -8695,13 +8975,11 @@ TEST(MSA_ffql) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffql_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffql_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffql_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffql_d(w2, w0); });
}
}
@@ -8721,13 +8999,11 @@ TEST(MSA_ffqr) {
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
- [](MacroAssembler& assm) { __ ffqr_w(w2, w0); },
- load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffqr_w(w2, w0); });
}
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
- [](MacroAssembler& assm) { __ ffqr_d(w2, w0); },
- load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ [](MacroAssembler& assm) { __ ffqr_d(w2, w0); });
}
}
@@ -8747,17 +9023,18 @@ void run_msa_vector(struct TestCaseMsaVector* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
+ load_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
GenerateVectorInstructionFunc(assm);
- store_uint64_elements_of_vector(assm, w4, a0);
+ store_elements_of_vector(assm, w4, a0);
__ jr(ra);
__ nop();
@@ -8836,16 +9113,17 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
GenerateInstructionFunc(assm, input->m);
- store_uint64_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -9312,13 +9590,14 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
GenerateVectorInstructionFunc(assm, input);
- store_uint64_elements_of_vector(assm, w0, a0);
+ store_elements_of_vector(assm, w0, a0);
__ jr(ra);
__ nop();
@@ -9382,7 +9661,8 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
T in_test_vector[1024];
T out_test_vector[1024];
@@ -9446,7 +9726,6 @@ TEST(MSA_load_store_vector) {
__ st_d(w0, MemOperand(a1, i));
}
});
-#undef LDI_DF
}
struct TestCaseMsa3R {
@@ -9466,18 +9745,18 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- uint64_t expected;
- load_uint64_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
- load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+ load_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
+ load_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
+ load_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
GenerateI5InstructionFunc(assm);
- store_uint64_elements_of_vector(assm, w2, a0);
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -9493,14 +9772,12 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
(CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
- expected = GenerateOperationFunc(input->ws_lo, input->wt_lo, input->wd_lo);
- if (expected != Unpredictable) {
- CHECK_EQ(expected, res.d[0]);
+ GenerateOperationFunc(&input->ws_lo, &input->wt_lo, &input->wd_lo);
+ if (input->wd_lo != Unpredictable) {
+ CHECK_EQ(input->wd_lo, res.d[0]);
}
-
- expected = GenerateOperationFunc(input->ws_hi, input->wt_hi, input->wd_hi);
- if (expected != Unpredictable) {
- CHECK_EQ(expected, res.d[1]);
+ if (input->wd_hi != Unpredictable) {
+ CHECK_EQ(input->wd_hi, res.d[1]);
}
}
@@ -9537,479 +9814,630 @@ TEST(MSA_3R_instructions) {
{0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff,
0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff}};
-#define SLL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>((wt >> shift) & mask) % size_in_bits; \
- res |= (static_cast<uint64_t>(src_op << shift_op) & mask) << shift; \
- } \
- return res
-
-#define SRA_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- int shift_op = ((wt >> shift) & mask) % size_in_bits; \
- res |= \
- (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) & mask)) \
- << shift; \
- } \
- return res
-
-#define SRL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- res |= (static_cast<uint64_t>(src_op >> shift_op) & mask) << shift; \
- } \
- return res
-
-#define BCRL_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- T r = (static_cast<T>(~(1ull << shift_op)) & src_op) & mask; \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BSET_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- T r = (static_cast<T>(1ull << shift_op) | src_op) & mask; \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BNEG_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
- T r = (static_cast<T>(1ull << shift_op) ^ src_op) & mask; \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define BINSL_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define SLL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>((wt[i] >> shift) & mask) % size_in_bits; \
+ res |= (static_cast<uint64_t>(src_op << shift_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SRA_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ int shift_op = ((wt[i] >> shift) & mask) % size_in_bits; \
+ res |= (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) & \
+ mask)) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SRL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ res |= (static_cast<uint64_t>(src_op >> shift_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BCRL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(~(1ull << shift_op)) & src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BSET_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) | src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BNEG_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) ^ src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BINSL_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ int64_t bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define BINSR_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt[i] >> shift) & mask) % size_in_bits); \
+ int64_t bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = (1ull << bits) - 1; \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADDV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op + wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op - wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MAX_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- int shift_op = static_cast<int>(((wt >> shift) & mask) % size_in_bits); \
- int bits = shift_op + 1; \
- T r; \
- if (bits == size_in_bits) { \
- r = static_cast<T>(ws_op); \
- } else { \
- uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \
- r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
- (static_cast<T>(~mask2) & wd_op)); \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Max<T>(ws_op, wt_op)) & mask) << shift; \
} \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
+ wd[i] = res; \
+ }
-#define BINSR_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define MIN_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- int shift_op = static_cast<int>(((wt >> shift) & mask) % size_in_bits); \
- int bits = shift_op + 1; \
- T r; \
- if (bits == size_in_bits) { \
- r = static_cast<T>(ws_op); \
- } else { \
- uint64_t mask2 = (1ull << bits) - 1; \
- r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
- (static_cast<T>(~mask2) & wd_op)); \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Min<T>(ws_op, wt_op)) & mask) << shift; \
} \
- res |= static_cast<uint64_t>(r) << shift; \
- } \
- return res
-
-#define ADDV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op + wt_op) & mask) << shift; \
- } \
- return res
-
-#define SUBV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op - wt_op) & mask) << shift; \
- } \
- return res
-
-#define MAX_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Max<T>(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define MIN_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Min<T>(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define MAXA_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \
- mask) \
- << shift; \
- } \
- return res
-
-#define MINA_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Nabs(ws_op) > Nabs(wt_op) ? ws_op : wt_op) & \
- mask) \
- << shift; \
- } \
- return res
+ wd[i] = res; \
+ }
-#define CEQ_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define MAXA_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= \
- (static_cast<uint64_t>(!Compare(ws_op, wt_op) ? -1ull : 0ull) & mask) \
- << shift; \
- } \
- return res
-
-#define CLT_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= \
- (static_cast<uint64_t>((Compare(ws_op, wt_op) == -1) ? -1ull : 0ull) & \
- mask) \
- << shift; \
- } \
- return res
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
-#define CLE_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define MINA_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= \
- (static_cast<uint64_t>((Compare(ws_op, wt_op) != 1) ? -1ull : 0ull) & \
- mask) \
- << shift; \
- } \
- return res
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>(Nabs(ws_op) > Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
-#define ADD_A_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define CEQ_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \
- } \
- return res
-
-#define ADDS_A_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = Nabs(static_cast<T>((ws >> shift) & mask)); \
- T wt_op = Nabs(static_cast<T>((wt >> shift) & mask)); \
- T r; \
- if (ws_op < -std::numeric_limits<T>::max() - wt_op) { \
- r = std::numeric_limits<T>::max(); \
- } else { \
- r = -(ws_op + wt_op); \
- } \
- res |= (static_cast<uint64_t>(r) & mask) << shift; \
- } \
- return res
-
-#define ADDS_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(SaturateAdd(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define AVE_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & \
- mask)) \
- << shift; \
- } \
- return res
-
-#define AVER_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & \
- mask)) \
- << shift; \
- } \
- return res
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(!Compare(ws_op, wt_op) ? -1ull : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define CLT_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>((Compare(ws_op, wt_op) == -1) ? -1ull \
+ : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
-#define SUBS_DF(T, lanes, mask) \
- uint64_t res = 0; \
+#define CLE_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>((Compare(ws_op, wt_op) != 1) ? -1ull \
+ : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADD_A_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(SaturateSub(ws_op, wt_op)) & mask) << shift; \
- } \
- return res
-
-#define SUBSUS_U_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- uT ws_op = static_cast<uT>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- T r; \
- if (wt_op > 0) { \
- uT wtu = static_cast<uT>(wt_op); \
- if (wtu > ws_op) { \
- r = 0; \
- } else { \
- r = static_cast<T>(ws_op - wtu); \
- } \
- } else { \
- if (ws_op > std::numeric_limits<uT>::max() + wt_op) { \
- r = static_cast<T>(std::numeric_limits<uT>::max()); \
- } else { \
- r = static_cast<T>(ws_op - wt_op); \
- } \
- } \
- res |= (static_cast<uint64_t>(r) & mask) << shift; \
- } \
- return res
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADDS_A_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = Nabs(static_cast<T>((ws[i] >> shift) & mask)); \
+ T wt_op = Nabs(static_cast<T>((wt[i] >> shift) & mask)); \
+ T r; \
+ if (ws_op < -std::numeric_limits<T>::max() - wt_op) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = -(ws_op + wt_op); \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ADDS_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateAdd(ws_op, wt_op)) & mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define AVE_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>( \
+ ((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & mask)) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define AVER_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>( \
+ ((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & mask)) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBS_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateSub(ws_op, wt_op)) & mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBSUS_U_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ uT ws_op = static_cast<uT>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ T r; \
+ if (wt_op > 0) { \
+ uT wtu = static_cast<uT>(wt_op); \
+ if (wtu > ws_op) { \
+ r = 0; \
+ } else { \
+ r = static_cast<T>(ws_op - wtu); \
+ } \
+ } else { \
+ if (ws_op > std::numeric_limits<uT>::max() + wt_op) { \
+ r = static_cast<T>(std::numeric_limits<uT>::max()); \
+ } else { \
+ r = static_cast<T>(ws_op - wt_op); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define SUBSUU_S_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ uT ws_op = static_cast<uT>((ws[i] >> shift) & mask); \
+ uT wt_op = static_cast<uT>((wt[i] >> shift) & mask); \
+ uT wdu; \
+ T r; \
+ if (ws_op > wt_op) { \
+ wdu = ws_op - wt_op; \
+ if (wdu > std::numeric_limits<T>::max()) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = static_cast<T>(wdu); \
+ } \
+ } else { \
+ wdu = wt_op - ws_op; \
+ CHECK(-std::numeric_limits<T>::max() == \
+ std::numeric_limits<T>::min() + 1); \
+ if (wdu <= std::numeric_limits<T>::max()) { \
+ r = -static_cast<T>(wdu); \
+ } else { \
+ r = std::numeric_limits<T>::min(); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ASUB_S_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op - wt_op)) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define ASUB_U_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op > wt_op ? ws_op - wt_op \
+ : wt_op - ws_op) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MULV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op * wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MADDV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op + ws_op * wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MSUBV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ T wd_op = static_cast<T>((wd[i] >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op - ws_op * wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define DIV_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+ res |= (static_cast<uint64_t>(ws_op / wt_op) & mask) << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define MOD_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T ws_op = static_cast<T>((ws[i] >> shift) & mask); \
+ T wt_op = static_cast<T>((wt[i] >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+ res |= (static_cast<uint64_t>(wt_op != 0 ? ws_op % wt_op : 0) & mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
-#define SUBSUU_S_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- uT ws_op = static_cast<uT>((ws >> shift) & mask); \
- uT wt_op = static_cast<uT>((wt >> shift) & mask); \
- uT wdu; \
- T r; \
- if (ws_op > wt_op) { \
- wdu = ws_op - wt_op; \
- if (wdu > std::numeric_limits<T>::max()) { \
- r = std::numeric_limits<T>::max(); \
- } else { \
- r = static_cast<T>(wdu); \
- } \
+#define SRAR_DF(T, lanes, mask) \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < 2; i++) { \
+ uint64_t res = 0; \
+ for (int j = 0; j < lanes / 2; ++j) { \
+ uint64_t shift = size_in_bits * j; \
+ T src_op = static_cast<T>((ws[i] >> shift) & mask); \
+ int shift_op = ((wt[i] >> shift) & mask) % size_in_bits; \
+ uint32_t bit = shift_op == 0 ? 0 : src_op >> (shift_op - 1) & 1; \
+ res |= (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) + \
+ bit) & \
+ mask) \
+ << shift; \
+ } \
+ wd[i] = res; \
+ }
+
+#define PCKEV_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[i] = wt_p[2 * i]; \
+ wd_p[i + lanes / 2] = ws_p[2 * i]; \
+ }
+
+#define PCKOD_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[i] = wt_p[2 * i + 1]; \
+ wd_p[i + lanes / 2] = ws_p[2 * i + 1]; \
+ }
+
+#define ILVL_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[i + lanes / 2]; \
+ wd_p[2 * i + 1] = ws_p[i + lanes / 2]; \
+ }
+
+#define ILVR_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[i]; \
+ wd_p[2 * i + 1] = ws_p[i]; \
+ }
+
+#define ILVEV_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[2 * i]; \
+ wd_p[2 * i + 1] = ws_p[2 * i]; \
+ }
+
+#define ILVOD_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ wd_p[2 * i] = wt_p[2 * i + 1]; \
+ wd_p[2 * i + 1] = ws_p[2 * i + 1]; \
+ }
+
+#define VSHF_DF(T, lanes, mask) \
+ T* ws_p = reinterpret_cast<T*>(ws); \
+ T* wt_p = reinterpret_cast<T*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ const int mask_not_valid = 0xc0; \
+ const int mask_6bits = 0x3f; \
+ for (int i = 0; i < lanes; ++i) { \
+ if ((wd_p[i] & mask_not_valid)) { \
+ wd_p[i] = 0; \
} else { \
- wdu = wt_op - ws_op; \
- CHECK(-std::numeric_limits<T>::max() == \
- std::numeric_limits<T>::min() + 1); \
- if (wdu <= std::numeric_limits<T>::max()) { \
- r = -static_cast<T>(wdu); \
- } else { \
- r = std::numeric_limits<T>::min(); \
- } \
+ int k = (wd_p[i] & mask_6bits) % (lanes * 2); \
+ wd_p[i] = k > lanes ? ws_p[k - lanes] : wt_p[k]; \
} \
- res |= (static_cast<uint64_t>(r) & mask) << shift; \
- } \
- return res
-
-#define ASUB_S_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(Abs(ws_op - wt_op)) & mask) << shift; \
- } \
- return res
-
-#define ASUB_U_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op > wt_op ? ws_op - wt_op \
- : wt_op - ws_op) & \
- mask) \
- << shift; \
- } \
- return res
-
-#define MULV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- res |= (static_cast<uint64_t>(ws_op * wt_op) & mask) << shift; \
- } \
- return res
-
-#define MADDV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- res |= (static_cast<uint64_t>(wd_op + ws_op * wt_op) & mask) << shift; \
- } \
- return res
-
-#define MSUBV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- T wd_op = static_cast<T>((wd >> shift) & mask); \
- res |= (static_cast<uint64_t>(wd_op - ws_op * wt_op) & mask) << shift; \
- } \
- return res
-
-#define DIV_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- if (wt_op == 0) { \
- res = Unpredictable; \
- break; \
- } \
- res |= (static_cast<uint64_t>(ws_op / wt_op) & mask) << shift; \
- } \
- return res
+ }
-#define MOD_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T ws_op = static_cast<T>((ws >> shift) & mask); \
- T wt_op = static_cast<T>((wt >> shift) & mask); \
- if (wt_op == 0) { \
- res = Unpredictable; \
- break; \
- } \
- res |= (static_cast<uint64_t>(wt_op != 0 ? ws_op % wt_op : 0) & mask) \
- << shift; \
- } \
- return res
+#define HADD_DF(T, T_small, lanes) \
+ T_small* ws_p = reinterpret_cast<T_small*>(ws); \
+ T_small* wt_p = reinterpret_cast<T_small*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes; ++i) { \
+ wd_p[i] = static_cast<T>(ws_p[2 * i + 1]) + static_cast<T>(wt_p[2 * i]); \
+ }
-#define SRAR_DF(T, lanes, mask) \
- uint64_t res = 0; \
- int size_in_bits = kMSARegSize / lanes; \
- for (int i = 0; i < lanes / 2; ++i) { \
- uint64_t shift = size_in_bits * i; \
- T src_op = static_cast<T>((ws >> shift) & mask); \
- int shift_op = ((wt >> shift) & mask) % size_in_bits; \
- uint32_t bit = shift_op == 0 ? 0 : src_op >> (shift_op - 1) & 1; \
- res |= \
- (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) + bit) & \
- mask) \
- << shift; \
- } \
- return res
+#define HSUB_DF(T, T_small, lanes) \
+ T_small* ws_p = reinterpret_cast<T_small*>(ws); \
+ T_small* wt_p = reinterpret_cast<T_small*>(wt); \
+ T* wd_p = reinterpret_cast<T*>(wd); \
+ for (int i = 0; i < lanes; ++i) { \
+ wd_p[i] = static_cast<T>(ws_p[2 * i + 1]) - static_cast<T>(wt_p[2 * i]); \
+ }
#define TEST_CASE(V) \
V(sll_b, SLL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
V(sll_h, SLL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(sll_w, SLL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
V(sll_d, SLL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
- V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
- V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
- V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
- V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
V(srl_b, SRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
V(srl_h, SRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(srl_w, SRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
@@ -10170,18 +10598,54 @@ TEST(MSA_3R_instructions) {
V(mod_u_h, MOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(mod_u_w, MOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
V(mod_u_d, MOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
- V(srar_b, SRAR_DF, int8_t, kMSALanesByte, UINT8_MAX) \
- V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
- V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \
- V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX) \
V(srlr_b, SRAR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
V(srlr_h, SRAR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
V(srlr_w, SRAR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
- V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX)
+ V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(pckev_b, PCKEV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(pckev_h, PCKEV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(pckev_w, PCKEV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(pckev_d, PCKEV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(pckod_b, PCKOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(pckod_h, PCKOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(pckod_w, PCKOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(pckod_d, PCKOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvl_b, ILVL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvl_h, ILVL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvl_w, ILVL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvl_d, ILVL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvr_b, ILVR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvr_h, ILVR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvr_w, ILVR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvr_d, ILVR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvev_b, ILVEV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvev_h, ILVEV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvev_w, ILVEV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvev_d, ILVEV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ilvod_b, ILVOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ilvod_h, ILVOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ilvod_w, ILVOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ilvod_d, ILVOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(vshf_b, VSHF_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(vshf_h, VSHF_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(vshf_w, VSHF_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(vshf_d, VSHF_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(hadd_s_h, HADD_DF, int16_t, int8_t, kMSALanesHalf) \
+ V(hadd_s_w, HADD_DF, int32_t, int16_t, kMSALanesWord) \
+ V(hadd_s_d, HADD_DF, int64_t, int32_t, kMSALanesDword) \
+ V(hadd_u_h, HADD_DF, uint16_t, uint8_t, kMSALanesHalf) \
+ V(hadd_u_w, HADD_DF, uint32_t, uint16_t, kMSALanesWord) \
+ V(hadd_u_d, HADD_DF, uint64_t, uint32_t, kMSALanesDword) \
+ V(hsub_s_h, HSUB_DF, int16_t, int8_t, kMSALanesHalf) \
+ V(hsub_s_w, HSUB_DF, int32_t, int16_t, kMSALanesWord) \
+ V(hsub_s_d, HSUB_DF, int64_t, int32_t, kMSALanesDword) \
+ V(hsub_u_h, HSUB_DF, uint16_t, uint8_t, kMSALanesHalf) \
+ V(hsub_u_w, HSUB_DF, uint32_t, uint16_t, kMSALanesWord) \
+ V(hsub_u_d, HSUB_DF, uint64_t, uint32_t, kMSALanesDword)
#define RUN_TEST(instr, verify, type, lanes, mask) \
run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \
- [](uint64_t ws, uint64_t wt, uint64_t wd) { \
+ [](uint64_t* ws, uint64_t* wt, uint64_t* wd) { \
verify(type, lanes, mask); \
});
@@ -10189,9 +10653,41 @@ TEST(MSA_3R_instructions) {
TEST_CASE(RUN_TEST)
}
+#define RUN_TEST2(instr, verify, type, lanes, mask) \
+ for (unsigned i = 0; i < arraysize(tc); i++) { \
+ for (unsigned j = 0; j < 3; j++) { \
+ for (unsigned k = 0; k < lanes; k++) { \
+ type* element = reinterpret_cast<type*>(&tc[i]); \
+ element[k + j * lanes] &= std::numeric_limits<type>::max(); \
+ } \
+ } \
+ } \
+ run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \
+ [](uint64_t* ws, uint64_t* wt, uint64_t* wd) { \
+ verify(type, lanes, mask); \
+ });
+
+#define TEST_CASE2(V) \
+ V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(srar_b, SRAR_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX)
+
+ for (size_t i = 0; i < arraysize(tc); ++i) {
+ TEST_CASE2(RUN_TEST2)
+ }
+
+#undef TEST_CASE
+#undef TEST_CASE2
#undef RUN_TEST
+#undef RUN_TEST2
#undef SLL_DF
#undef SRL_DF
+#undef SRA_DF
#undef BCRL_DF
#undef BSET_DF
#undef BNEG_DF
@@ -10222,6 +10718,689 @@ TEST(MSA_3R_instructions) {
#undef DIV_DF
#undef MOD_DF
#undef SRAR_DF
+#undef PCKEV_DF
+#undef PCKOD_DF
+#undef ILVL_DF
+#undef ILVR_DF
+#undef ILVEV_DF
+#undef ILVOD_DF
+#undef VSHF_DF
+#undef HADD_DF
+#undef HSUB_DF
+}
+
+struct TestCaseMsa3RF {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wt_lo;
+ uint64_t wt_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+};
+
+struct ExpectedResult_MSA3RF {
+ uint64_t exp_res_lo;
+ uint64_t exp_res_hi;
+};
+
+template <typename Func>
+void run_msa_3rf(const struct TestCaseMsa3RF* input,
+ const struct ExpectedResult_MSA3RF* output,
+ Func Generate2RInstructionFunc) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+ msa_reg_t res;
+
+ load_elements_of_vector(
+ assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1);
+ load_elements_of_vector(
+ assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1);
+ load_elements_of_vector(
+ assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1);
+ Generate2RInstructionFunc(assm);
+ store_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+
+ CHECK_EQ(output->exp_res_lo, res.d[0]);
+ CHECK_EQ(output->exp_res_hi, res.d[1]);
+}
+
+struct TestCaseMsa3RF_F {
+ float ws_1, ws_2, ws_3, ws_4;
+ float wt_1, wt_2, wt_3, wt_4;
+ float wd_1, wd_2, wd_3, wd_4;
+};
+struct ExpRes_32I {
+ int32_t exp_res_1;
+ int32_t exp_res_2;
+ int32_t exp_res_3;
+ int32_t exp_res_4;
+};
+
+struct TestCaseMsa3RF_D {
+ double ws_lo, ws_hi;
+ double wt_lo, wt_hi;
+ double wd_lo, wd_hi;
+};
+struct ExpRes_64I {
+ int64_t exp_res_lo;
+ int64_t exp_res_hi;
+};
+
+TEST(MSA_floating_point_quiet_compare) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float qnan_f = std::numeric_limits<float>::quiet_NaN();
+ const double qnan_d = std::numeric_limits<double>::quiet_NaN();
+ const float inf_f = std::numeric_limits<float>::infinity();
+ const double inf_d = std::numeric_limits<double>::infinity();
+ const int32_t ones = -1;
+
+ const struct TestCaseMsa3RF_F tc_w[]{
+ {qnan_f, -qnan_f, inf_f, 2.14e9f, // ws
+ qnan_f, 0.f, qnan_f, -2.14e9f, // wt
+ 0, 0, 0, 0}, // wd
+ {inf_f, -inf_f, -3.4e38f, 1.5e-45f, -inf_f, -inf_f, -inf_f, inf_f, 0, 0,
+ 0, 0},
+ {0.f, 19.871e24f, -1.5e-45f, -1.5e-45f, -19.871e24f, 19.871e24f, 1.5e-45f,
+ -1.5e-45f, 0, 0, 0, 0}};
+
+ const struct TestCaseMsa3RF_D tc_d[]{
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {qnan_d, -qnan_d, qnan_f, 0., 0, 0},
+ {inf_d, 9.22e18, qnan_d, -9.22e18, 0, 0},
+ {inf_d, inf_d, -inf_d, inf_d, 0, 0},
+ {-2.3e-308, 5e-324, -inf_d, inf_d, 0, 0},
+ {0., 24.1e87, -1.6e308, 24.1e87, 0, 0},
+ {-5e-324, -5e-324, 5e-324, -5e-324, 0, 0}};
+
+ const struct ExpectedResult_MSA3RF exp_res_fcaf = {0, 0};
+ const struct ExpRes_32I exp_res_fcun_w[] = {
+ {ones, ones, ones, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}};
+ const struct ExpRes_64I exp_res_fcun_d[] = {{ones, ones}, {ones, 0}, {0, 0},
+ {0, 0}, {0, 0}, {0, 0}};
+ const struct ExpRes_32I exp_res_fceq_w[] = {
+ {0, 0, 0, 0}, {0, ones, 0, 0}, {0, ones, 0, ones}};
+ const struct ExpRes_64I exp_res_fceq_d[] = {{0, 0}, {0, 0}, {0, ones},
+ {0, 0}, {0, ones}, {0, ones}};
+ const struct ExpRes_32I exp_res_fcueq_w[] = {
+ {ones, ones, ones, 0}, {0, ones, 0, 0}, {0, ones, 0, ones}};
+ const struct ExpRes_64I exp_res_fcueq_d[] = {
+ {ones, ones}, {ones, 0}, {0, ones}, {0, 0}, {0, ones}, {0, ones}};
+ const struct ExpRes_32I exp_res_fclt_w[] = {
+ {0, 0, 0, 0}, {0, 0, 0, ones}, {0, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fclt_d[] = {{0, 0}, {0, 0}, {0, 0},
+ {0, ones}, {0, 0}, {ones, 0}};
+ const struct ExpRes_32I exp_res_fcult_w[] = {
+ {ones, ones, ones, 0}, {0, 0, 0, ones}, {0, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fcult_d[] = {
+ {ones, ones}, {ones, 0}, {0, 0}, {0, ones}, {0, 0}, {ones, 0}};
+ const struct ExpRes_32I exp_res_fcle_w[] = {
+ {0, 0, 0, 0}, {0, ones, 0, ones}, {0, ones, ones, ones}};
+ const struct ExpRes_64I exp_res_fcle_d[] = {
+ {0, 0}, {0, 0}, {0, ones}, {0, ones}, {0, ones}, {ones, ones}};
+ const struct ExpRes_32I exp_res_fcule_w[] = {
+ {ones, ones, ones, 0}, {0, ones, 0, ones}, {0, ones, ones, ones}};
+ const struct ExpRes_64I exp_res_fcule_d[] = {
+ {ones, ones}, {ones, 0}, {0, ones}, {0, ones}, {0, ones}, {ones, ones}};
+ const struct ExpRes_32I exp_res_fcor_w[] = {
+ {0, 0, 0, ones}, {ones, ones, ones, ones}, {ones, ones, ones, ones}};
+ const struct ExpRes_64I exp_res_fcor_d[] = {{0, 0}, {0, ones},
+ {ones, ones}, {ones, ones},
+ {ones, ones}, {ones, ones}};
+ const struct ExpRes_32I exp_res_fcune_w[] = {
+ {ones, ones, ones, ones}, {ones, 0, ones, ones}, {ones, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fcune_d[] = {{ones, ones}, {ones, ones},
+ {ones, 0}, {ones, ones},
+ {ones, 0}, {ones, 0}};
+ const struct ExpRes_32I exp_res_fcne_w[] = {
+ {0, 0, 0, ones}, {ones, 0, ones, ones}, {ones, 0, ones, 0}};
+ const struct ExpRes_64I exp_res_fcne_d[] = {
+ {0, 0}, {0, ones}, {ones, 0}, {ones, ones}, {ones, 0}, {ones, 0}};
+
+#define TEST_FP_QUIET_COMPARE_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FP_QUIET_COMPARE_D(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FP_QUIET_COMPARE_W(fcaf_w, &tc_w[i], &exp_res_fcaf)
+ TEST_FP_QUIET_COMPARE_W(fcun_w, &tc_w[i], &exp_res_fcun_w[i])
+ TEST_FP_QUIET_COMPARE_W(fceq_w, &tc_w[i], &exp_res_fceq_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcueq_w, &tc_w[i], &exp_res_fcueq_w[i])
+ TEST_FP_QUIET_COMPARE_W(fclt_w, &tc_w[i], &exp_res_fclt_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcult_w, &tc_w[i], &exp_res_fcult_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcle_w, &tc_w[i], &exp_res_fcle_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcule_w, &tc_w[i], &exp_res_fcule_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcor_w, &tc_w[i], &exp_res_fcor_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcune_w, &tc_w[i], &exp_res_fcune_w[i])
+ TEST_FP_QUIET_COMPARE_W(fcne_w, &tc_w[i], &exp_res_fcne_w[i])
+ }
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FP_QUIET_COMPARE_D(fcaf_d, &tc_d[i], &exp_res_fcaf)
+ TEST_FP_QUIET_COMPARE_D(fcun_d, &tc_d[i], &exp_res_fcun_d[i])
+ TEST_FP_QUIET_COMPARE_D(fceq_d, &tc_d[i], &exp_res_fceq_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcueq_d, &tc_d[i], &exp_res_fcueq_d[i])
+ TEST_FP_QUIET_COMPARE_D(fclt_d, &tc_d[i], &exp_res_fclt_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcult_d, &tc_d[i], &exp_res_fcult_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcle_d, &tc_d[i], &exp_res_fcle_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcule_d, &tc_d[i], &exp_res_fcule_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcor_d, &tc_d[i], &exp_res_fcor_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcune_d, &tc_d[i], &exp_res_fcune_d[i])
+ TEST_FP_QUIET_COMPARE_D(fcne_d, &tc_d[i], &exp_res_fcne_d[i])
+ }
+#undef TEST_FP_QUIET_COMPARE_W
+#undef TEST_FP_QUIET_COMPARE_D
+}
+
+template <typename T>
+inline const T* fadd_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] + src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fsub_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] - src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fmul_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] * src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fdiv_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = src1[i] / src2[i];
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fmadd_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = std::fma(src1[i], src2[i], src3[i]);
+ }
+ return dst;
+}
+template <typename T>
+inline const T* fmsub_function(const T* src1, const T* src2, const T* src3,
+ T* dst) {
+ for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) {
+ dst[i] = std::fma(src1[i], -src2[i], src3[i]);
+ }
+ return dst;
+}
+
+TEST(MSA_floating_point_arithmetic) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_f = std::numeric_limits<float>::infinity();
+ const double inf_d = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ {0.3, -2.14e13f, inf_f, 0.f, // ws
+ -inf_f, std::sqrt(8.e-26f), -23.e34, -2.14e9f, // wt
+ -1e30f, 4.6e12f, 0, 2.14e9f}, // wd
+ {3.4e38f, -1.2e-38f, 1e19f, -1e19f, 3.4e38f, 1.2e-38f, -1e19f, -1e-19f,
+ 3.4e38f, 1.2e-38f * 3, 3.4e38f, -4e19f},
+ {-3e-31f, 3e10f, 1e25f, 123.f, 1e-14f, 1e-34f, 4e25f, 321.f, 3e-17f,
+ 2e-24f, 2.f, -123456.f}};
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {0.3, -2.14e103, -inf_d, std::sqrt(8.e-206), -1e30, 4.6e102},
+ {inf_d, 0., -23.e304, -2.104e9, 0, 2.104e9},
+ {3.4e307, -1.2e-307, 3.4e307, 1.2e-307, 3.4e307, 1.2e-307 * 3},
+ {1e154, -1e154, -1e154, -1e-154, 2.9e38, -4e19},
+ {-3e-301, 3e100, 1e-104, 1e-304, 3e-107, 2e-204},
+ {1e205, 123., 4e205, 321., 2., -123456.}};
+
+ struct ExpectedResult_MSA3RF dst_container;
+
+#define FP_ARITHMETIC_DF_W(instr, function, src1, src2, src3) \
+ run_msa_3rf( \
+ reinterpret_cast<const struct TestCaseMsa3RF*>(src1), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(function( \
+ src1, src2, src3, reinterpret_cast<float*>(&dst_container))), \
+ [](MacroAssembler& assm) { __ instr(w2, w0, w1); });
+
+#define FP_ARITHMETIC_DF_D(instr, function, src1, src2, src3) \
+ run_msa_3rf( \
+ reinterpret_cast<const struct TestCaseMsa3RF*>(src1), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(function( \
+ src1, src2, src3, reinterpret_cast<double*>(&dst_container))), \
+ [](MacroAssembler& assm) { __ instr(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ FP_ARITHMETIC_DF_W(fadd_w, fadd_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fsub_w, fsub_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fmul_w, fmul_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fdiv_w, fdiv_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fmadd_w, fmadd_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ FP_ARITHMETIC_DF_W(fmsub_w, fmsub_function, &tc_w[i].ws_1, &tc_w[i].wt_1,
+ &tc_w[i].wd_1)
+ }
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ FP_ARITHMETIC_DF_D(fadd_d, fadd_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fsub_d, fsub_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fmul_d, fmul_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fdiv_d, fdiv_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fmadd_d, fmadd_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ FP_ARITHMETIC_DF_D(fmsub_d, fmsub_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo,
+ &tc_d[i].wd_lo)
+ }
+#undef FP_ARITHMETIC_DF_W
+#undef FP_ARITHMETIC_DF_D
+}
+
+struct ExpRes_F {
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct ExpRes_D {
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_fmin_fmin_a_fmax_fmax_a) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_f = std::numeric_limits<float>::infinity();
+ const double inf_d = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ {0.3f, -2.14e13f, inf_f, -0.f, // ws
+ -inf_f, -std::sqrt(8.e26f), -23.e34f, -2.14e9f, // wt
+ 0, 0, 0, 0}, // wd
+ {3.4e38f, 1.2e-41f, 1e19f, 1e19f, // ws
+ 3.4e38f, -1.1e-41f, -1e-42f, -1e29f, // wt
+ 0, 0, 0, 0}}; // wd
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {0.3, -2.14e103, -inf_d, -std::sqrt(8e206), 0, 0},
+ {inf_d, -0., -23e304, -2.14e90, 0, 0},
+ {3.4e307, 1.2e-320, 3.4e307, -1.1e-320, 0, 0},
+ {1e154, 1e154, -1e-321, -1e174, 0, 0}};
+
+ const struct ExpRes_F exp_res_fmax_w[] = {{0.3f, -2.14e13f, inf_f, -0.f},
+ {3.4e38f, 1.2e-41f, 1e19f, 1e19f}};
+ const struct ExpRes_F exp_res_fmax_a_w[] = {
+ {-inf_f, -std::sqrt(8e26f), inf_f, -2.14e9f},
+ {3.4e38f, 1.2e-41f, 1e19f, -1e29f}};
+ const struct ExpRes_F exp_res_fmin_w[] = {
+ {-inf_f, -std::sqrt(8.e26f), -23e34f, -2.14e9f},
+ {3.4e38f, -1.1e-41f, -1e-42f, -1e29f}};
+ const struct ExpRes_F exp_res_fmin_a_w[] = {
+ {0.3, -2.14e13f, -23.e34f, -0.f}, {3.4e38f, -1.1e-41f, -1e-42f, 1e19f}};
+
+ const struct ExpRes_D exp_res_fmax_d[] = {
+ {0.3, -2.14e103}, {inf_d, -0.}, {3.4e307, 1.2e-320}, {1e154, 1e154}};
+ const struct ExpRes_D exp_res_fmax_a_d[] = {{-inf_d, -std::sqrt(8e206)},
+ {inf_d, -2.14e90},
+ {3.4e307, 1.2e-320},
+ {1e154, -1e174}};
+ const struct ExpRes_D exp_res_fmin_d[] = {{-inf_d, -std::sqrt(8e206)},
+ {-23e304, -2.14e90},
+ {3.4e307, -1.1e-320},
+ {-1e-321, -1e174}};
+ const struct ExpRes_D exp_res_fmin_a_d[] = {
+ {0.3, -2.14e103}, {-23e304, -0.}, {3.4e307, -1.1e-320}, {-1e-321, 1e154}};
+
+#define TEST_FP_MIN_MAX_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FP_MIN_MAX_D(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FP_MIN_MAX_W(fmax_w, &tc_w[i], &exp_res_fmax_w[i])
+ TEST_FP_MIN_MAX_W(fmax_a_w, &tc_w[i], &exp_res_fmax_a_w[i])
+ TEST_FP_MIN_MAX_W(fmin_w, &tc_w[i], &exp_res_fmin_w[i])
+ TEST_FP_MIN_MAX_W(fmin_a_w, &tc_w[i], &exp_res_fmin_a_w[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FP_MIN_MAX_D(fmax_d, &tc_d[i], &exp_res_fmax_d[i])
+ TEST_FP_MIN_MAX_D(fmax_a_d, &tc_d[i], &exp_res_fmax_a_d[i])
+ TEST_FP_MIN_MAX_D(fmin_d, &tc_d[i], &exp_res_fmin_d[i])
+ TEST_FP_MIN_MAX_D(fmin_a_d, &tc_d[i], &exp_res_fmin_a_d[i])
+ }
+#undef TEST_FP_MIN_MAX_W
+#undef TEST_FP_MIN_MAX_D
+}
+
+struct TestCaseMsa3RF_16I {
+ int16_t ws_1, ws_2, ws_3, ws_4, ws_5, ws_6, ws_7, ws_8;
+ int16_t wt_1, wt_2, wt_3, wt_4, wt_5, wt_6, wt_7, wt_8;
+ int16_t wd_1, wd_2, wd_3, wd_4, wd_5, wd_6, wd_7, wd_8;
+};
+struct ExpRes_16I {
+ int16_t exp_res_1;
+ int16_t exp_res_2;
+ int16_t exp_res_3;
+ int16_t exp_res_4;
+ int16_t exp_res_5;
+ int16_t exp_res_6;
+ int16_t exp_res_7;
+ int16_t exp_res_8;
+};
+
+struct TestCaseMsa3RF_32I {
+ int32_t ws_1, ws_2, ws_3, ws_4;
+ int32_t wt_1, wt_2, wt_3, wt_4;
+ int32_t wd_1, wd_2, wd_3, wd_4;
+};
+
+TEST(MSA_fixed_point_arithmetic) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const struct TestCaseMsa3RF tc_h[]{
+ {0x800080007fff7fff, 0xe1ed8000fad3863a, 0x80007fff00af7fff,
+ 0x800015a77fffa0eb, 0x7fff800080007fff, 0x80007fff1f207364},
+ {0x800080007fff006a, 0x002affc4329ad87b, 0x80007fff7fff00f3,
+ 0xffecffb4d0d7f429, 0x80007fff80007c33, 0x54ac6bbce53b8c91}};
+
+ const struct TestCaseMsa3RF tc_w[]{
+ {0x8000000080000000, 0x7fffffff7fffffff, 0x800000007fffffff,
+ 0x00001ff37fffffff, 0x7fffffff80000000, 0x800000007fffffff},
+ {0xe1ed035580000000, 0xfad3863aed462c0b, 0x8000000015a70aec,
+ 0x7fffffffa0ebd354, 0x800000007fffffff, 0xd0d7f4291f207364},
+ {0x8000000080000000, 0x7fffffff0000da1f, 0x800000007fffffff,
+ 0x7fffffff00f39c3b, 0x800000007fffffff, 0x800000007c33f2fd},
+ {0x0000ac33ffff329a, 0x54ac6bbce53bd87b, 0xffffe2b4d0d7f429,
+ 0x0355ed462c0b1ff3, 0xb5deb625939dd3f9, 0xe642adfa69519596}};
+
+ const struct ExpectedResult_MSA3RF exp_res_mul_q_h[] = {
+ {0x7fff800100ae7ffe, 0x1e13ea59fad35a74},
+ {0x7fff80017ffe0000, 0xffff0000ed5b03a7}};
+ const struct ExpectedResult_MSA3RF exp_res_madd_q_h[] = {
+ {0x7fff800080ae7fff, 0x9e136a5819f37fff},
+ {0x00000000fffe7c33, 0x54ab6bbcd2969038}};
+ const struct ExpectedResult_MSA3RF exp_res_msub_q_h[] = {
+ {0xffffffff80000000, 0x80007fff244c18ef},
+ {0x80007fff80007c32, 0x54ac6bbbf7df88e9}};
+ const struct ExpectedResult_MSA3RF exp_res_mulr_q_h[] = {
+ {0x7fff800100af7ffe, 0x1e13ea59fad35a75},
+ {0x7fff80017ffe0001, 0x00000000ed5b03a8}};
+ const struct ExpectedResult_MSA3RF exp_res_maddr_q_h[] = {
+ {0x7fff800080af7fff, 0x9e136a5819f37fff},
+ {0x00000000fffe7c34, 0x54ac6bbcd2969039}};
+ const struct ExpectedResult_MSA3RF exp_res_msubr_q_h[] = {
+ {0xffffffff80000001, 0x80007fff244d18ef},
+ {0x80007fff80007c32, 0x54ac6bbcf7e088e9}};
+
+ const struct ExpectedResult_MSA3RF exp_res_mul_q_w[] = {
+ {0x7fffffff80000001, 0x00001ff27ffffffe},
+ {0x1e12fcabea58f514, 0xfad3863a0de8dee1},
+ {0x7fffffff80000001, 0x7ffffffe0000019f},
+ {0xffffffff00004bab, 0x0234e1fbf6ca3ee0}};
+ const struct ExpectedResult_MSA3RF exp_res_madd_q_w[] = {
+ {0x7fffffff80000000, 0x80001ff27fffffff},
+ {0x9e12fcab6a58f513, 0xcbab7a632d095245},
+ {0x0000000000000000, 0xfffffffe7c33f49c},
+ {0xb5deb624939e1fa4, 0xe8778ff5601bd476}};
+ const struct ExpectedResult_MSA3RF exp_res_msub_q_w[] = {
+ {0xffffffffffffffff, 0x8000000000000000},
+ {0x800000007fffffff, 0xd6046dee11379482},
+ {0x800000007fffffff, 0x800000007c33f15d},
+ {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+ const struct ExpectedResult_MSA3RF exp_res_mulr_q_w[] = {
+ {0x7fffffff80000001, 0x00001ff37ffffffe},
+ {0x1e12fcabea58f514, 0xfad3863a0de8dee2},
+ {0x7fffffff80000001, 0x7ffffffe0000019f},
+ {0x0000000000004bac, 0x0234e1fcf6ca3ee1}};
+ const struct ExpectedResult_MSA3RF exp_res_maddr_q_w[] = {
+ {0x7fffffff80000000, 0x80001ff37fffffff},
+ {0x9e12fcab6a58f513, 0xcbab7a632d095246},
+ {0x0000000000000000, 0xfffffffe7c33f49c},
+ {0xb5deb625939e1fa5, 0xe8778ff6601bd477}};
+ const struct ExpectedResult_MSA3RF exp_res_msubr_q_w[] = {
+ {0xffffffffffffffff, 0x8000000000000001},
+ {0x800000007fffffff, 0xd6046def11379482},
+ {0x800000007fffffff, 0x800000007c33f15e},
+ {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+
+#define TEST_FIXED_POINT_DF_H(instruction, src, exp_res) \
+ run_msa_3rf((src), (exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FIXED_POINT_DF_W(instruction, src, exp_res) \
+ run_msa_3rf((src), (exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_h); i++) {
+ TEST_FIXED_POINT_DF_H(mul_q_h, &tc_h[i], &exp_res_mul_q_h[i])
+ TEST_FIXED_POINT_DF_H(madd_q_h, &tc_h[i], &exp_res_madd_q_h[i])
+ TEST_FIXED_POINT_DF_H(msub_q_h, &tc_h[i], &exp_res_msub_q_h[i])
+ TEST_FIXED_POINT_DF_H(mulr_q_h, &tc_h[i], &exp_res_mulr_q_h[i])
+ TEST_FIXED_POINT_DF_H(maddr_q_h, &tc_h[i], &exp_res_maddr_q_h[i])
+ TEST_FIXED_POINT_DF_H(msubr_q_h, &tc_h[i], &exp_res_msubr_q_h[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FIXED_POINT_DF_W(mul_q_w, &tc_w[i], &exp_res_mul_q_w[i])
+ TEST_FIXED_POINT_DF_W(madd_q_w, &tc_w[i], &exp_res_madd_q_w[i])
+ TEST_FIXED_POINT_DF_W(msub_q_w, &tc_w[i], &exp_res_msub_q_w[i])
+ TEST_FIXED_POINT_DF_W(mulr_q_w, &tc_w[i], &exp_res_mulr_q_w[i])
+ TEST_FIXED_POINT_DF_W(maddr_q_w, &tc_w[i], &exp_res_maddr_q_w[i])
+ TEST_FIXED_POINT_DF_W(msubr_q_w, &tc_w[i], &exp_res_msubr_q_w[i])
+ }
+#undef TEST_FIXED_POINT_DF_H
+#undef TEST_FIXED_POINT_DF_W
+}
+
+TEST(MSA_fexdo) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const float nan_float = std::numeric_limits<float>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ // ws_1, ws_2, ws_3, ws_4, wt_1, wt_2, wt_3, wt_4, wd_1, wd_2, wd_3, wd_4
+ {inf_float, nan_float, 66505.f, 65504.f, 6.2e-5f, 5e-5f, -32.42f,
+ -inf_float, 0, 0, 0, 0},
+ {-0.f, 0.f, 123.567f, -765.321f, -6e-8f, 5.9e-8f, 1e-7f, -1e-20f, 0, 0, 0,
+ 0},
+ {1e-36f, 1e20f, -1e20f, 2e-20f, 6e-8f, -2.9e-8f, -66505.f, -65504.f}};
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi
+ {inf_double, -1234., 4e38, 3.4e38, 0, 0},
+ {1.2e-38, 1.1e-39, -38.92f, -inf_double, 0, 0},
+ {-0., 0., 123.567e31, -765.321e33, 0, 0},
+ {-1.5e-45, 1.3e-45, 1e-42, -1e-200, 0, 0},
+ {1e-202, 1e158, -1e159, 1e14, 0, 0},
+ {1.5e-42, 1.3e-46, -123.567e31, 765.321e33, 0, 0}};
+
+ const struct ExpRes_16I exp_res_fexdo_w[] = {
+ {static_cast<int16_t>(0x0410), static_cast<int16_t>(0x0347),
+ static_cast<int16_t>(0xd00d), static_cast<int16_t>(0xfc00),
+ static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7dff),
+ static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7bff)},
+ {static_cast<int16_t>(0x8001), static_cast<int16_t>(0x0001),
+ static_cast<int16_t>(0x0002), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x57b9), static_cast<int16_t>(0xe1fb)},
+ {static_cast<int16_t>(0x0001), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0xfc00), static_cast<int16_t>(0xfbff),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7c00),
+ static_cast<int16_t>(0xfc00), static_cast<int16_t>(0x0000)}};
+
+ const struct ExpRes_32I exp_res_fexdo_d[] = {
+ {bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0x7f7fc99e),
+ bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0xc49a4000)},
+ {bit_cast<int32_t>(0xc21bae14), bit_cast<int32_t>(0xff800000),
+ bit_cast<int32_t>(0x0082ab1e), bit_cast<int32_t>(0x000bfa5a)},
+ {bit_cast<int32_t>(0x7673b164), bit_cast<int32_t>(0xfb13653d),
+ bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)},
+ {bit_cast<int32_t>(0x000002ca), bit_cast<int32_t>(0x80000000),
+ bit_cast<int32_t>(0x80000001), bit_cast<int32_t>(0x00000001)},
+ {bit_cast<int32_t>(0xff800000), bit_cast<int32_t>(0x56b5e621),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7f800000)},
+ {bit_cast<int32_t>(0xf673b164), bit_cast<int32_t>(0x7b13653d),
+ bit_cast<int32_t>(0x0000042e), bit_cast<int32_t>(0x00000000)}};
+
+#define TEST_FEXDO_H(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FEXDO_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FEXDO_H(fexdo_h, &tc_w[i], &exp_res_fexdo_w[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FEXDO_W(fexdo_w, &tc_d[i], &exp_res_fexdo_d[i])
+ }
+
+#undef TEST_FEXDO_H
+#undef TEST_FEXDO_W
+}
+
+TEST(MSA_ftq) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float nan_float = std::numeric_limits<float>::quiet_NaN();
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double nan_double = std::numeric_limits<double>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa3RF_F tc_w[] = {
+ {1.f, -0.999f, 1.5f, -31e-6, 1e-7, -0.598, 0.0023, -0.f, 0, 0, 0, 0},
+ {100.f, -102.f, -1.1f, 1.3f, 0.f, -1.f, 0.9999f, -0.000322, 0, 0, 0, 0},
+ {nan_float, inf_float, -inf_float, -nan_float, -1e-40, 3e-44, 8.3e36,
+ -0.00003, 0, 0, 0, 0}};
+
+ const struct TestCaseMsa3RF_D tc_d[] = {
+ {1., -0.999, 1.5, -31e-6, 0, 0},
+ {1e-7, -0.598, 0.0023, -0.f, 0, 0},
+ {100.f, -102.f, -1.1f, 1.3f, 0, 0},
+ {0.f, -1.f, 0.9999f, -0.000322, 0, 0},
+ {nan_double, inf_double, -inf_double, -nan_double, 0, 0},
+ {-3e306, 2e-307, 9e307, 2e-307, 0, 0}};
+
+ const struct ExpRes_16I exp_res_ftq_w[] = {
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xb375),
+ static_cast<int16_t>(0x004b), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8021),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff)},
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x7ffd), static_cast<int16_t>(0xfff5),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7fff)},
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7fff),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000)}};
+
+ const struct ExpRes_32I exp_res_ftq_d[] = {
+ {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0xfffefbf4),
+ bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x8020c49c)},
+ {bit_cast<int32_t>(0x004b5dcc), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x000000d7), bit_cast<int32_t>(0xb374bc6a)},
+ {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x7fffffff),
+ bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x80000000)},
+ {bit_cast<int32_t>(0x7ffcb900), bit_cast<int32_t>(0xfff572de),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x80000000)},
+ {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7fffffff)},
+ {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)}};
+
+#define TEST_FTQ_H(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+#define TEST_FTQ_W(instruction, src, exp_res) \
+ run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
+ reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \
+ [](MacroAssembler& assm) { __ instruction(w2, w0, w1); });
+
+ for (uint64_t i = 0; i < arraysize(tc_w); i++) {
+ TEST_FTQ_H(ftq_h, &tc_w[i], &exp_res_ftq_w[i])
+ }
+
+ for (uint64_t i = 0; i < arraysize(tc_d); i++) {
+ TEST_FTQ_W(ftq_w, &tc_d[i], &exp_res_ftq_d[i])
+ }
+
+#undef TEST_FTQ_H
+#undef TEST_FTQ_W
}
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 9a11523605..1e150a0cb5 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -51,7 +51,7 @@ TEST(0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ function_descriptor();
@@ -79,7 +79,7 @@ TEST(1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
__ function_descriptor();
@@ -117,7 +117,7 @@ TEST(2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
__ function_descriptor();
@@ -175,7 +175,7 @@ TEST(3) {
} T;
T t;
- Assembler assm(CcTest::i_isolate(), NULL, 0);
+ Assembler assm(CcTest::i_isolate(), nullptr, 0);
Label L, C;
__ function_descriptor();
@@ -267,7 +267,7 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(CcTest::i_isolate(), NULL, 0);
+ Assembler assm(CcTest::i_isolate(), nullptr, 0);
Label L, C;
if (CpuFeatures::IsSupported(VFP3)) {
@@ -383,7 +383,7 @@ TEST(5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
@@ -420,7 +420,7 @@ TEST(6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
@@ -463,7 +463,7 @@ static void TestRoundingMode(VCVTTypes types,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -668,7 +668,7 @@ TEST(8) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -780,7 +780,7 @@ TEST(9) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -896,7 +896,7 @@ TEST(10) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -993,7 +993,7 @@ TEST(11) {
i.a = 0xabcd0001;
i.b = 0xabcd0000;
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
// Test HeapObject untagging.
__ ldr(r1, MemOperand(r0, offsetof(I, a)));
@@ -1048,7 +1048,7 @@ TEST(12) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label target;
__ b(eq, &target);
__ b(ne, &target);
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index da2727402e..df33b96752 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -51,7 +51,7 @@ TEST(0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ lhi(r1, Operand(3)); // test 4-byte instr
__ llilf(r2, Operand(4)); // test 6-byte instr
@@ -79,7 +79,7 @@ TEST(1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L, C;
#if defined(_AIX)
@@ -120,7 +120,7 @@ TEST(2) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(CcTest::i_isolate(), NULL, 0);
+ Assembler assm(CcTest::i_isolate(), nullptr, 0);
Label L, C;
#if defined(_AIX)
@@ -170,7 +170,7 @@ TEST(3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
__ ar(r14, r13);
__ sr(r14, r13);
@@ -224,7 +224,7 @@ TEST(4) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label L2, L3, L4;
__ chi(r2, Operand(10));
@@ -269,7 +269,7 @@ TEST(5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, nullptr, 0);
__ mov(r2, Operand(0x12345678));
__ ExtractBitRange(r3, r2, 3, 2);
@@ -297,7 +297,7 @@ TEST(6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, nullptr, 0);
Label yes;
@@ -331,7 +331,7 @@ TEST(7) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, nullptr, 0);
Label yes;
@@ -363,7 +363,7 @@ TEST(8) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, nullptr, 0);
// Zero upper bits of r3/r4
__ llihf(r3, Operand::Zero());
@@ -395,7 +395,7 @@ TEST(9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, nullptr, 0);
__ lzdr(d4);
__ b(r14);
@@ -426,7 +426,7 @@ TEST(10) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
+ Assembler assm(isolate, nullptr, 0);
Label ok, failed;
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 8e7af93b2e..e356fb2d82 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -67,17 +67,13 @@ static const Register arg1 = rdi;
static const Register arg2 = rsi;
#endif
-#define __ assm.
-
+#define __ masm.
TEST(AssemblerX64ReturnOperation) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@@ -85,7 +81,7 @@ TEST(AssemblerX64ReturnOperation) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@@ -94,12 +90,9 @@ TEST(AssemblerX64ReturnOperation) {
TEST(AssemblerX64StackOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@@ -117,7 +110,7 @@ TEST(AssemblerX64StackOperations) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@@ -126,12 +119,9 @@ TEST(AssemblerX64StackOperations) {
TEST(AssemblerX64ArithmeticOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@@ -139,7 +129,7 @@ TEST(AssemblerX64ArithmeticOperations) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(5, result);
@@ -148,12 +138,9 @@ TEST(AssemblerX64ArithmeticOperations) {
TEST(AssemblerX64CmpbOperation) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a function that compare argument byte returing 1 if equal else 0.
// On Windows, it compares rcx with rdx which does not require REX prefix;
@@ -168,7 +155,7 @@ TEST(AssemblerX64CmpbOperation) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0x1002, 0x2002);
CHECK_EQ(1, result);
@@ -178,16 +165,14 @@ TEST(AssemblerX64CmpbOperation) {
TEST(Regression684407) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
- Address before = assm.pc();
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
+
+ Address before = masm.pc();
__ cmpl(Operand(arg1, 0),
Immediate(0, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
- Address after = assm.pc();
+ Address after = masm.pc();
size_t instruction_size = static_cast<size_t>(after - before);
// Check that the immediate is not encoded as uint8.
CHECK_LT(sizeof(uint32_t), instruction_size);
@@ -195,12 +180,9 @@ TEST(Regression684407) {
TEST(AssemblerX64ImulOperation) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that multiplies arguments returning the high
// word.
@@ -210,7 +192,7 @@ TEST(AssemblerX64ImulOperation) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(0, result);
@@ -223,12 +205,9 @@ TEST(AssemblerX64ImulOperation) {
TEST(AssemblerX64testbwqOperation) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ pushq(rbx);
__ pushq(rdi);
@@ -382,7 +361,7 @@ TEST(AssemblerX64testbwqOperation) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0, 0);
CHECK_EQ(1, result);
@@ -390,12 +369,9 @@ TEST(AssemblerX64testbwqOperation) {
TEST(AssemblerX64XchglOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg1, 0));
__ movq(r11, Operand(arg2, 0));
@@ -405,7 +381,7 @@ TEST(AssemblerX64XchglOperations) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@@ -418,19 +394,16 @@ TEST(AssemblerX64XchglOperations) {
TEST(AssemblerX64OrlOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg2, 0));
__ orl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@@ -442,19 +415,16 @@ TEST(AssemblerX64OrlOperations) {
TEST(AssemblerX64RollOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, arg1);
__ roll(rax, Immediate(1));
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
uint64_t result = FUNCTION_CAST<F5>(buffer)(src);
@@ -464,19 +434,16 @@ TEST(AssemblerX64RollOperations) {
TEST(AssemblerX64SublOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg2, 0));
__ subl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@@ -488,12 +455,9 @@ TEST(AssemblerX64SublOperations) {
TEST(AssemblerX64TestlOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Set rax with the ZF flag of the testl instruction.
Label done;
@@ -506,7 +470,7 @@ TEST(AssemblerX64TestlOperations) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
@@ -517,12 +481,9 @@ TEST(AssemblerX64TestlOperations) {
TEST(AssemblerX64TestwOperations) {
typedef uint16_t (*F)(uint16_t * x);
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Set rax with the ZF flag of the testl instruction.
Label done;
@@ -534,7 +495,7 @@ TEST(AssemblerX64TestwOperations) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint16_t operand = 0x8000;
uint16_t result = FUNCTION_CAST<F>(buffer)(&operand);
@@ -543,19 +504,16 @@ TEST(AssemblerX64TestwOperations) {
TEST(AssemblerX64XorlOperations) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(rax, Operand(arg2, 0));
__ xorl(Operand(arg1, 0), rax);
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
@@ -567,12 +525,9 @@ TEST(AssemblerX64XorlOperations) {
TEST(AssemblerX64MemoryOperands) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 2 and returns it.
__ pushq(rbp);
@@ -592,7 +547,7 @@ TEST(AssemblerX64MemoryOperands) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@@ -601,12 +556,9 @@ TEST(AssemblerX64MemoryOperands) {
TEST(AssemblerX64ControlFlow) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
// Assemble a simple function that copies argument 1 and returns it.
__ pushq(rbp);
@@ -621,7 +573,7 @@ TEST(AssemblerX64ControlFlow) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@@ -630,12 +582,10 @@ TEST(AssemblerX64ControlFlow) {
TEST(AssemblerX64LoopImmediates) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
+
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
__ movq(rax, Immediate(-3));
@@ -671,7 +621,7 @@ TEST(AssemblerX64LoopImmediates) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(1, result);
@@ -725,7 +675,7 @@ TEST(AssemblerX64LabelChaining) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- Assembler assm(CcTest::i_isolate(), NULL, 0);
+ Assembler masm(CcTest::i_isolate(), nullptr, 0);
Label target;
__ j(equal, &target);
@@ -740,7 +690,7 @@ TEST(AssemblerMultiByteNop) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[1024];
Isolate* isolate = CcTest::i_isolate();
- Assembler assm(isolate, buffer, sizeof(buffer));
+ Assembler masm(isolate, buffer, sizeof(buffer));
__ pushq(rbx);
__ pushq(rcx);
__ pushq(rdx);
@@ -753,9 +703,9 @@ TEST(AssemblerMultiByteNop) {
__ movq(rdi, Immediate(5));
__ movq(rsi, Immediate(6));
for (int i = 0; i < 16; i++) {
- int before = assm.pc_offset();
+ int before = masm.pc_offset();
__ Nop(i);
- CHECK_EQ(assm.pc_offset() - before, i);
+ CHECK_EQ(masm.pc_offset() - before, i);
}
Label fail;
@@ -788,7 +738,7 @@ TEST(AssemblerMultiByteNop) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
@@ -811,7 +761,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(ELEMENT_COUNT, vec->Length());
Isolate* isolate = CcTest::i_isolate();
- Assembler assm(isolate, buffer, sizeof(buffer));
+ Assembler masm(isolate, buffer, sizeof(buffer));
// Remove return address from the stack for fix stack frame alignment.
__ popq(rcx);
@@ -844,7 +794,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
@@ -865,7 +815,7 @@ TEST(StackAlignmentForSSE2) {
global_template->Set(v8_str("do_sse2"),
v8::FunctionTemplate::New(isolate, DoSSE2));
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
CompileRun(
"function foo(vec) {"
" return do_sse2(vec);"
@@ -900,14 +850,15 @@ TEST(AssemblerX64Extractps) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[256];
Isolate* isolate = CcTest::i_isolate();
- Assembler assm(isolate, buffer, sizeof(buffer));
- { CpuFeatureScope fscope2(&assm, SSE4_1);
+ Assembler masm(isolate, buffer, sizeof(buffer));
+ {
+ CpuFeatureScope fscope2(&masm, SSE4_1);
__ extractps(rax, xmm0, 0x1);
__ ret(0);
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -930,7 +881,7 @@ TEST(AssemblerX64SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
@@ -945,7 +896,7 @@ TEST(AssemblerX64SSE) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -966,10 +917,10 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope fscope(&assm, FMA3);
+ CpuFeatureScope fscope(&masm, FMA3);
Label exit;
// argument in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
@@ -1171,7 +1122,7 @@ TEST(AssemblerX64FMA_sd) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -1192,10 +1143,10 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope fscope(&assm, FMA3);
+ CpuFeatureScope fscope(&masm, FMA3);
Label exit;
// arguments in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
@@ -1397,7 +1348,7 @@ TEST(AssemblerX64FMA_ss) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -1416,7 +1367,7 @@ TEST(AssemblerX64SSE_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler assm(isolate, buffer, sizeof(buffer));
+ Assembler masm(isolate, buffer, sizeof(buffer));
{
Label exit;
// arguments in xmm0, xmm1 and xmm2
@@ -1472,7 +1423,7 @@ TEST(AssemblerX64SSE_ss) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -1494,9 +1445,9 @@ TEST(AssemblerX64AVX_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler assm(isolate, buffer, sizeof(buffer));
+ Assembler masm(isolate, buffer, sizeof(buffer));
{
- CpuFeatureScope avx_scope(&assm, AVX);
+ CpuFeatureScope avx_scope(&masm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
@@ -1557,7 +1508,7 @@ TEST(AssemblerX64AVX_ss) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -1579,9 +1530,9 @@ TEST(AssemblerX64AVX_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler assm(isolate, buffer, sizeof(buffer));
+ Assembler masm(isolate, buffer, sizeof(buffer));
{
- CpuFeatureScope avx_scope(&assm, AVX);
+ CpuFeatureScope avx_scope(&masm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
@@ -1796,7 +1747,7 @@ TEST(AssemblerX64AVX_sd) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -1818,10 +1769,10 @@ TEST(AssemblerX64BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope fscope(&assm, BMI1);
+ CpuFeatureScope fscope(&masm, BMI1);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@@ -1988,7 +1939,7 @@ TEST(AssemblerX64BMI1) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -2008,10 +1959,10 @@ TEST(AssemblerX64LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope fscope(&assm, LZCNT);
+ CpuFeatureScope fscope(&masm, LZCNT);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@@ -2048,7 +1999,7 @@ TEST(AssemblerX64LZCNT) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -2068,10 +2019,10 @@ TEST(AssemblerX64POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope fscope(&assm, POPCNT);
+ CpuFeatureScope fscope(&masm, POPCNT);
Label exit;
__ movq(rcx, V8_UINT64_C(0x1111111111111100)); // source operand
@@ -2108,7 +2059,7 @@ TEST(AssemblerX64POPCNT) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -2128,10 +2079,10 @@ TEST(AssemblerX64BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope fscope(&assm, BMI2);
+ CpuFeatureScope fscope(&masm, BMI2);
Label exit;
__ pushq(rbx); // save rbx
__ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
@@ -2371,7 +2322,7 @@ TEST(AssemblerX64BMI2) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -2389,7 +2340,7 @@ TEST(AssemblerX64JumpTables1) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
+ MacroAssembler masm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
@@ -2416,7 +2367,7 @@ TEST(AssemblerX64JumpTables1) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -2437,7 +2388,7 @@ TEST(AssemblerX64JumpTables2) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
+ MacroAssembler masm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
@@ -2465,7 +2416,7 @@ TEST(AssemblerX64JumpTables2) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
@@ -2482,12 +2433,9 @@ TEST(AssemblerX64JumpTables2) {
TEST(AssemblerX64PslldWithXmm15) {
CcTest::InitializeVM();
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(CcTest::i_isolate(), buffer, static_cast<int>(allocated));
__ movq(xmm15, arg1);
__ pslld(xmm15, 1);
@@ -2495,7 +2443,7 @@ TEST(AssemblerX64PslldWithXmm15) {
__ ret(0);
CodeDesc desc;
- assm.GetCode(CcTest::i_isolate(), &desc);
+ masm.GetCode(CcTest::i_isolate(), &desc);
uint64_t result = FUNCTION_CAST<F5>(buffer)(V8_UINT64_C(0x1122334455667788));
CHECK_EQ(V8_UINT64_C(0x22446688aaccef10), result);
}
@@ -2508,10 +2456,10 @@ TEST(AssemblerX64vmovups) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
{
- CpuFeatureScope avx_scope(&assm, AVX);
+ CpuFeatureScope avx_scope(&masm, AVX);
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
__ shufps(xmm1, xmm1, 0x0); // brocast second argument
// copy xmm1 to xmm0 through the stack to test the "vmovups reg, mem".
@@ -2524,7 +2472,7 @@ TEST(AssemblerX64vmovups) {
}
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 1c48225a14..7a94d3a511 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -212,8 +212,8 @@ TEST(LoadHeapNumberValue) {
CodeAssemblerTester asm_tester(isolate);
CodeStubAssembler m(asm_tester.state());
Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(1234);
- m.Return(m.SmiFromWord32(
- m.ChangeFloat64ToUint32(m.LoadHeapNumberValue(m.HeapConstant(number)))));
+ m.Return(m.SmiFromWord32(m.Signed(
+ m.ChangeFloat64ToUint32(m.LoadHeapNumberValue(m.HeapConstant(number))))));
FunctionTester ft(asm_tester.GenerateCode());
MaybeHandle<Object> result = ft.Call();
CHECK_EQ(1234, Handle<Smi>::cast(result.ToHandleChecked())->value());
@@ -657,10 +657,7 @@ TEST(NameDictionaryLookup) { TestNameDictionaryLookup<NameDictionary>(); }
TEST(GlobalDictionaryLookup) { TestNameDictionaryLookup<GlobalDictionary>(); }
-namespace {
-
-template <typename Dictionary>
-void TestNumberDictionaryLookup() {
+TEST(NumberDictionaryLookup) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 4;
@@ -678,8 +675,8 @@ void TestNumberDictionaryLookup() {
Label if_found(&m), if_not_found(&m);
Variable var_entry(&m, MachineType::PointerRepresentation());
- m.NumberDictionaryLookup<Dictionary>(dictionary, key, &if_found, &var_entry,
- &if_not_found);
+ m.NumberDictionaryLookup(dictionary, key, &if_found, &var_entry,
+ &if_not_found);
m.BIND(&if_found);
m.GotoIfNot(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
@@ -705,7 +702,8 @@ void TestNumberDictionaryLookup() {
Handle<Object> expect_not_found(Smi::FromInt(kNotFound), isolate);
const int kKeysCount = 1000;
- Handle<Dictionary> dictionary = Dictionary::New(isolate, kKeysCount);
+ Handle<NumberDictionary> dictionary =
+ NumberDictionary::New(isolate, kKeysCount);
uint32_t keys[kKeysCount];
Handle<Object> fake_value(Smi::FromInt(42), isolate);
@@ -716,15 +714,16 @@ void TestNumberDictionaryLookup() {
for (int i = 0; i < kKeysCount; i++) {
int random_key = rand_gen.NextInt(Smi::kMaxValue);
keys[i] = static_cast<uint32_t>(random_key);
- if (dictionary->FindEntry(keys[i]) != Dictionary::kNotFound) continue;
+ if (dictionary->FindEntry(keys[i]) != NumberDictionary::kNotFound) continue;
- dictionary = Dictionary::Add(dictionary, keys[i], fake_value, fake_details);
+ dictionary =
+ NumberDictionary::Add(dictionary, keys[i], fake_value, fake_details);
}
// Now try querying existing keys.
for (int i = 0; i < kKeysCount; i++) {
int entry = dictionary->FindEntry(keys[i]);
- CHECK_NE(Dictionary::kNotFound, entry);
+ CHECK_NE(NumberDictionary::kNotFound, entry);
Handle<Object> key(Smi::FromInt(keys[i]), isolate);
Handle<Object> expected_entry(Smi::FromInt(entry), isolate);
@@ -735,7 +734,7 @@ void TestNumberDictionaryLookup() {
for (int i = 0; i < kKeysCount;) {
int random_key = rand_gen.NextInt(Smi::kMaxValue);
int entry = dictionary->FindEntry(random_key);
- if (entry != Dictionary::kNotFound) continue;
+ if (entry != NumberDictionary::kNotFound) continue;
i++;
Handle<Object> key(Smi::FromInt(random_key), isolate);
@@ -743,16 +742,6 @@ void TestNumberDictionaryLookup() {
}
}
-} // namespace
-
-TEST(SeededNumberDictionaryLookup) {
- TestNumberDictionaryLookup<SeededNumberDictionary>();
-}
-
-TEST(UnseededNumberDictionaryLookup) {
- TestNumberDictionaryLookup<UnseededNumberDictionary>();
-}
-
namespace {
void AddProperties(Handle<JSObject> object, Handle<Name> names[],
@@ -902,13 +891,15 @@ TEST(TryHasOwnProperty) {
{
// Dictionary mode object.
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<JSObject> object = factory->NewJSObject(function);
AddProperties(object, names, arraysize(names));
JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "test");
JSObject::AddProperty(object, deleted_property_name, object, NONE);
- CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name,
+ LanguageMode::kSloppy)
.FromJust());
CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
@@ -918,7 +909,8 @@ TEST(TryHasOwnProperty) {
{
// Global object.
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
JSFunction::EnsureHasInitialMap(function);
function->initial_map()->set_instance_type(JS_GLOBAL_OBJECT_TYPE);
function->initial_map()->set_is_prototype_map(true);
@@ -928,7 +920,8 @@ TEST(TryHasOwnProperty) {
AddProperties(object, names, arraysize(names));
JSObject::AddProperty(object, deleted_property_name, object, NONE);
- CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name,
+ LanguageMode::kSloppy)
.FromJust());
CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
@@ -967,7 +960,8 @@ TEST(TryHasOwnProperty) {
}
{
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<JSProxy> object = factory->NewJSProxy(function, objects[0]);
CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
ft.CheckTrue(object, names[0], expect_bailout);
@@ -1040,11 +1034,11 @@ TEST(TryGetOwnProperty) {
factory->NewPrivateSymbol(),
};
Handle<Object> values[] = {
- factory->NewFunction(factory->empty_string()),
+ factory->NewFunctionForTest(factory->empty_string()),
factory->NewSymbol(),
factory->InternalizeUtf8String("a"),
CreateAccessorPair(&ft, "() => 188;", "() => 199;"),
- factory->NewFunction(factory->InternalizeUtf8String("bb")),
+ factory->NewFunctionForTest(factory->InternalizeUtf8String("bb")),
factory->InternalizeUtf8String("ccc"),
CreateAccessorPair(&ft, "() => 88;", nullptr),
handle(Smi::FromInt(1), isolate),
@@ -1052,7 +1046,8 @@ TEST(TryGetOwnProperty) {
CreateAccessorPair(&ft, nullptr, "() => 99;"),
factory->NewHeapNumber(4.2),
handle(Smi::FromInt(153), isolate),
- factory->NewJSObject(factory->NewFunction(factory->empty_string())),
+ factory->NewJSObject(
+ factory->NewFunctionForTest(factory->empty_string())),
factory->NewPrivateSymbol(),
};
STATIC_ASSERT(arraysize(values) < arraysize(names));
@@ -1102,14 +1097,16 @@ TEST(TryGetOwnProperty) {
{
// Dictionary mode object.
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<JSObject> object = factory->NewJSObject(function);
AddProperties(object, names, arraysize(names), values, arraysize(values),
rand_gen.NextInt());
JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "test");
JSObject::AddProperty(object, deleted_property_name, object, NONE);
- CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name,
+ LanguageMode::kSloppy)
.FromJust());
CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
@@ -1124,7 +1121,8 @@ TEST(TryGetOwnProperty) {
rand_gen.NextInt());
JSObject::AddProperty(object, deleted_property_name, object, NONE);
- CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name,
+ LanguageMode::kSloppy)
.FromJust());
CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
@@ -1170,7 +1168,8 @@ TEST(TryGetOwnProperty) {
}
{
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<JSProxy> object = factory->NewJSProxy(function, objects[0]);
CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
Handle<Object> value = ft.Call(object, names[0]).ToHandleChecked();
@@ -1402,7 +1401,8 @@ TEST(TryLookupElement) {
{
Handle<JSArray> handler = factory->NewJSArray(0);
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<JSProxy> object = factory->NewJSProxy(function, handler);
CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
ft.CheckTrue(object, smi0, expect_bailout);
@@ -1573,11 +1573,9 @@ TEST(OneToTwoByteStringCopy) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.CopyStringCharacters(
- m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
- m.SmiConstant(Smi::FromInt(0)), m.SmiConstant(Smi::FromInt(5)),
- String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
+ m.IntPtrConstant(0), m.IntPtrConstant(5),
+ String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING);
m.Return(m.SmiConstant(Smi::FromInt(0)));
Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
@@ -1606,11 +1604,9 @@ TEST(OneToOneByteStringCopy) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.CopyStringCharacters(
- m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
- m.SmiConstant(Smi::FromInt(0)), m.SmiConstant(Smi::FromInt(5)),
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
+ m.IntPtrConstant(0), m.IntPtrConstant(5),
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
m.Return(m.SmiConstant(Smi::FromInt(0)));
Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
@@ -1639,11 +1635,9 @@ TEST(OneToOneByteStringCopyNonZeroStart) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.CopyStringCharacters(
- m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
- m.SmiConstant(Smi::FromInt(3)), m.SmiConstant(Smi::FromInt(2)),
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
+ m.IntPtrConstant(3), m.IntPtrConstant(2),
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
m.Return(m.SmiConstant(Smi::FromInt(0)));
Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
@@ -1669,11 +1663,9 @@ TEST(TwoToTwoByteStringCopy) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.CopyStringCharacters(
- m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
- m.SmiConstant(Smi::FromInt(0)), m.SmiConstant(Smi::FromInt(5)),
- String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ m.CopyStringCharacters(m.Parameter(0), m.Parameter(1), m.IntPtrConstant(0),
+ m.IntPtrConstant(0), m.IntPtrConstant(5),
+ String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING);
m.Return(m.SmiConstant(Smi::FromInt(0)));
uc16 array1[] = {2000, 2001, 2002, 2003, 2004};
@@ -1799,10 +1791,12 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
Handle<JSArray> array = isolate->factory()->NewJSArray(
kind_, 2, initial_size, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
JSObject::SetElement(isolate, array, 0,
- Handle<Smi>(Smi::FromInt(1), isolate), SLOPPY)
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ LanguageMode::kSloppy)
.Check();
JSObject::SetElement(isolate, array, 1,
- Handle<Smi>(Smi::FromInt(2), isolate), SLOPPY)
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ LanguageMode::kSloppy)
.Check();
CodeStubArguments args(this, IntPtrConstant(kNumParams));
TVariable<IntPtrT> arg_index(this);
@@ -2138,8 +2132,6 @@ TEST(CreatePromiseResolvingFunctionsContext) {
CHECK_EQ(isolate->native_context()->closure(), context_js->closure());
CHECK_EQ(isolate->heap()->the_hole_value(), context_js->extension());
CHECK_EQ(*isolate->native_context(), context_js->native_context());
- CHECK_EQ(Smi::FromInt(0),
- context_js->get(PromiseBuiltinsAssembler::kAlreadyVisitedSlot));
CHECK(context_js->get(PromiseBuiltinsAssembler::kPromiseSlot)->IsJSPromise());
CHECK_EQ(isolate->heap()->false_value(),
context_js->get(PromiseBuiltinsAssembler::kDebugEventSlot));
@@ -2263,7 +2255,7 @@ TEST(AllocateFunctionWithMapAndContext) {
CHECK_EQ(isolate->heap()->empty_property_array(), fun->property_array());
CHECK_EQ(isolate->heap()->empty_fixed_array(), fun->elements());
CHECK_EQ(isolate->heap()->undefined_cell(), fun->feedback_vector_cell());
- CHECK_EQ(isolate->heap()->the_hole_value(), fun->prototype_or_initial_map());
+ CHECK(!fun->has_prototype_slot());
CHECK_EQ(*isolate->promise_resolve_shared_fun(), fun->shared());
CHECK_EQ(isolate->promise_resolve_shared_fun()->code(), fun->code());
}
@@ -2278,7 +2270,7 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
Node* const context = m.Parameter(kNumParams + 2);
Node* const native_context = m.LoadNativeContext(context);
- Node* const map = m.LoadRoot(Heap::kPromiseCapabilityMapRootIndex);
+ Node* const map = m.LoadRoot(Heap::kTuple3MapRootIndex);
Node* const capability = m.AllocateStruct(map);
m.StoreObjectFieldNoWriteBarrier(
capability, PromiseCapability::kPromiseOffset, m.UndefinedConstant());
@@ -2625,7 +2617,7 @@ TEST(AllocateStruct) {
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Map> maps[] = {
- handle(isolate->heap()->promise_capability_map(), isolate),
+ handle(isolate->heap()->tuple3_map(), isolate),
handle(isolate->heap()->tuple2_map(), isolate),
};
@@ -2679,9 +2671,9 @@ TEST(BranchIfNumericRelationalComparison) {
{
CodeStubAssembler m(asm_tester.state());
Label return_true(&m), return_false(&m);
- m.BranchIfNumericRelationalComparison(
- CodeStubAssembler::kGreaterThanOrEqual, m.Parameter(0), m.Parameter(1),
- &return_true, &return_false);
+ m.BranchIfNumericRelationalComparison(Operation::kGreaterThanOrEqual,
+ m.Parameter(0), m.Parameter(1),
+ &return_true, &return_false);
m.BIND(&return_true);
m.Return(m.BooleanConstant(true));
m.BIND(&return_false);
@@ -2848,6 +2840,228 @@ TEST(NumberAddSub) {
CHECK_EQ(ft_sub.CallChecked<HeapNumber>(double_a, smi_1)->value(), 1.5);
}
+TEST(CloneEmptyFixedArray) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.Return(m.CloneFixedArray(m.Parameter(0)));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->empty_fixed_array());
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(0, result->length());
+ CHECK_EQ(*(isolate->factory()->empty_fixed_array()), result);
+}
+
+TEST(CloneFixedArray) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.Return(m.CloneFixedArray(m.Parameter(0)));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(5, result->length());
+ CHECK(result->get(0)->IsTheHole(isolate));
+ CHECK_EQ(Smi::cast(result->get(1))->value(), 1234);
+ CHECK(result->get(2)->IsTheHole(isolate));
+ CHECK(result->get(3)->IsTheHole(isolate));
+ CHECK(result->get(4)->IsTheHole(isolate));
+}
+
+TEST(CloneFixedArrayCOW) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.Return(m.CloneFixedArray(m.Parameter(0)));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ source->set_map(isolate->heap()->fixed_cow_array_map());
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(*source, result);
+}
+
+TEST(ExtractFixedArrayCOWForceCopy) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
+ m.Return(m.ExtractFixedArray(m.Parameter(0), m.SmiConstant(0), nullptr,
+ nullptr, flags,
+ CodeStubAssembler::SMI_PARAMETERS));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ source->set_map(isolate->heap()->fixed_cow_array_map());
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_NE(*source, result);
+ CHECK_EQ(5, result->length());
+ CHECK(result->get(0)->IsTheHole(isolate));
+ CHECK_EQ(Smi::cast(result->get(1))->value(), 1234);
+ CHECK(result->get(2)->IsTheHole(isolate));
+ CHECK(result->get(3)->IsTheHole(isolate));
+ CHECK(result->get(4)->IsTheHole(isolate));
+}
+
+TEST(ExtractFixedArraySimple) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 3;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
+ m.Return(m.ExtractFixedArray(m.Parameter(0), m.Parameter(1), m.Parameter(2),
+ nullptr, flags,
+ CodeStubAssembler::SMI_PARAMETERS));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ Handle<Object> result_raw =
+ ft.Call(source, Handle<Smi>(Smi::FromInt(1), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate))
+ .ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(2, result->length());
+ CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
+ CHECK(result->get(1)->IsTheHole(isolate));
+}
+
+TEST(ExtractFixedArraySimpleSmiConstant) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
+ m.Return(m.ExtractFixedArray(m.Parameter(0), m.SmiConstant(1),
+ m.SmiConstant(2), nullptr, flags,
+ CodeStubAssembler::SMI_PARAMETERS));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(2, result->length());
+ CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
+ CHECK(result->get(1)->IsTheHole(isolate));
+}
+
+TEST(ExtractFixedArraySimpleIntPtrConstant) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
+ m.Return(m.ExtractFixedArray(m.Parameter(0), m.IntPtrConstant(1),
+ m.IntPtrConstant(2), nullptr, flags,
+ CodeStubAssembler::INTPTR_PARAMETERS));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(2, result->length());
+ CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
+ CHECK(result->get(1)->IsTheHole(isolate));
+}
+
+TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.Return(m.ExtractFixedArray(
+ m.Parameter(0), m.IntPtrConstant(1), m.IntPtrConstant(2), nullptr,
+ CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays,
+ CodeStubAssembler::INTPTR_PARAMETERS));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(2, result->length());
+ CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
+ CHECK(result->get(1)->IsTheHole(isolate));
+}
+
+TEST(ExtractFixedArraySimpleIntPtrParameters) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 3;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ Node* p1_untagged = m.SmiUntag(m.Parameter(1));
+ Node* p2_untagged = m.SmiUntag(m.Parameter(2));
+ m.Return(m.ExtractFixedArray(m.Parameter(0), p1_untagged, p2_untagged));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
+ source->set(1, Smi::FromInt(1234));
+ Handle<Object> result_raw =
+ ft.Call(source, Handle<Smi>(Smi::FromInt(1), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate))
+ .ToHandleChecked();
+ FixedArray* result(FixedArray::cast(*result_raw));
+ CHECK_EQ(2, result->length());
+ CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
+ CHECK(result->get(1)->IsTheHole(isolate));
+
+ Handle<FixedDoubleArray> source_double(Handle<FixedDoubleArray>::cast(
+ isolate->factory()->NewFixedDoubleArray(5)));
+ source_double->set(0, 10);
+ source_double->set(1, 11);
+ source_double->set(2, 12);
+ source_double->set(3, 13);
+ source_double->set(4, 14);
+ Handle<Object> double_result_raw =
+ ft.Call(source_double, Handle<Smi>(Smi::FromInt(1), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate))
+ .ToHandleChecked();
+ FixedDoubleArray* double_result(FixedDoubleArray::cast(*double_result_raw));
+ CHECK_EQ(2, double_result->length());
+ CHECK_EQ(double_result->get_scalar(0), 11);
+ CHECK_EQ(double_result->get_scalar(1), 12);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index cb80382901..076c918906 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -44,22 +44,17 @@ namespace internal {
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register source_reg,
- Register destination_reg,
- bool inline_fastpath) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
+ Register destination_reg) {
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
- DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
- inline_fastpath);
+
+ DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
- Label done;
// Save callee save registers.
__ Push(r7, r6, r5, r4);
@@ -72,9 +67,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Push the double argument.
__ sub(sp, sp, Operand(kDoubleSize));
__ vstr(d0, sp, 0);
- if (source_reg != sp) {
- __ mov(source_reg, sp);
- }
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
@@ -94,16 +86,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ vstr(d0, sp, 0);
// Call through to the actual stub
- if (inline_fastpath) {
- __ vldr(d0, MemOperand(source_reg));
- __ TryInlineTruncateDoubleToI(destination_reg, d0, &done);
- if (destination_reg == source_reg && source_reg != sp) {
- // Restore clobbered source_reg.
- __ add(source_reg, sp, Operand(source_reg_offset));
- }
- }
__ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ bind(&done);
__ add(sp, sp, Operand(kDoubleSize));
@@ -132,7 +115,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
@@ -168,24 +151,12 @@ TEST(ConvertDToI) {
RunAllTruncationTests(&ConvertDToICVersion);
#endif
- Register source_registers[] = {sp, r0, r1, r2, r3, r4, r5, r6, r7};
Register dest_registers[] = {r0, r1, r2, r3, r4, r5, r6, r7};
- for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- false));
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- true));
- }
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
}
}
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index 64435703c9..db175de5ec 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -44,22 +44,18 @@ namespace internal {
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register source_reg,
- Register destination_reg,
- bool inline_fastpath) {
- // Allocate an executable page of memory.
- size_t actual_size = 4 * Assembler::kMinimalBufferSize;
- byte* buffer = static_cast<byte*>(
- v8::base::OS::Allocate(actual_size, &actual_size, true));
- CHECK(buffer);
+ Register destination_reg) {
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer =
+ AllocateAssemblerBuffer(&allocated, 4 * Assembler::kMinimalBufferSize);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
- DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
- inline_fastpath);
+
+ DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
- Label done;
__ SetStackPointer(csp);
__ PushCalleeSavedRegisters();
@@ -68,7 +64,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Push the double argument.
__ Push(d0);
- __ Mov(source_reg, jssp);
MacroAssembler::PushPopQueue queue(&masm);
@@ -90,16 +85,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
queue.PushQueued();
// Call through to the actual stub
- if (inline_fastpath) {
- __ Ldr(d0, MemOperand(source_reg));
- __ TryConvertDoubleToInt64(destination_reg, d0, &done);
- if (destination_reg.is(source_reg)) {
- // Restore clobbered source_reg.
- __ add(source_reg, jssp, Operand(source_reg_offset));
- }
- }
__ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ bind(&done);
__ Drop(1, kDoubleSize);
@@ -129,7 +115,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
@@ -170,28 +156,14 @@ TEST(ConvertDToI) {
RunAllTruncationTests(&ConvertDToICVersion);
#endif
- Register source_registers[] = {jssp, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9,
- x10, x11, x12, x13, x14, x15, x18, x19, x20,
- x21, x22, x23, x24};
Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
x24};
- for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- false));
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- true));
- }
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
}
}
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 3ddf7323ad..2fe7e26ddc 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -42,21 +42,18 @@
namespace v8 {
namespace internal {
-#define __ assm.
+#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register source_reg,
Register destination_reg) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
- int offset = source_reg == esp ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
- DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
+
+ DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
__ push(ebx);
@@ -65,10 +62,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ push(esi);
__ push(edi);
- if (source_reg != esp) {
- __ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
- }
-
int param_offset = 7 * kPointerSize;
// Save registers make sure they don't get clobbered.
int reg_num = 0;
@@ -114,7 +107,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ ret(kDoubleSize);
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}
@@ -140,16 +133,11 @@ TEST(ConvertDToI) {
RunAllTruncationTests(&ConvertDToICVersion);
#endif
- Register source_registers[] = {esp, eax, ebx, ecx, edx, edi, esi};
Register dest_registers[] = {eax, ebx, ecx, edx, edi, esi};
- for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d]));
- }
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
}
}
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index 39aa88bea2..123089614b 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -46,22 +46,17 @@ namespace internal {
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register source_reg,
- Register destination_reg,
- bool inline_fastpath) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
+ Register destination_reg) {
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
- DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
- inline_fastpath);
+
+ DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
- Label done;
// Save callee save registers.
__ MultiPush(kCalleeSaved | ra.bit());
@@ -78,7 +73,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Push the double argument.
__ Subu(sp, sp, Operand(kDoubleSize));
__ Sdc1(f12, MemOperand(sp));
- __ Move(source_reg, sp);
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
@@ -98,16 +92,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ Sdc1(f12, MemOperand(sp));
// Call through to the actual stub
- if (inline_fastpath) {
- __ Ldc1(f12, MemOperand(source_reg));
- __ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
- if (destination_reg == source_reg && source_reg != sp) {
- // Restore clobbered source_reg.
- __ Addu(source_reg, sp, Operand(source_reg_offset));
- }
- }
__ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ bind(&done);
__ Addu(sp, sp, Operand(kDoubleSize));
@@ -143,7 +128,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
@@ -181,26 +166,13 @@ TEST(ConvertDToI) {
RunAllTruncationTests(&ConvertDToICVersion);
#endif
- Register source_registers[] = {
- sp, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5};
Register dest_registers[] = {
v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5};
- for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- false));
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- true));
- }
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
}
}
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index ee6388e316..ad4c49338a 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -46,22 +46,17 @@ namespace internal {
#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register source_reg,
- Register destination_reg,
- bool inline_fastpath) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
+ Register destination_reg) {
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
- DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
- inline_fastpath);
+
+ DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
- Label done;
// Save callee save registers.
__ MultiPush(kCalleeSaved | ra.bit());
@@ -78,7 +73,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
__ Sdc1(f12, MemOperand(sp));
- __ Move(source_reg, sp);
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
@@ -97,16 +91,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ Sdc1(f12, MemOperand(sp));
// Call through to the actual stub
- if (inline_fastpath) {
- __ Ldc1(f12, MemOperand(source_reg));
- __ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
- if (destination_reg == source_reg && source_reg != sp) {
- // Restore clobbered source_reg.
- __ Daddu(source_reg, sp, Operand(source_reg_offset));
- }
- }
__ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ bind(&done);
__ Daddu(sp, sp, Operand(kDoubleSize));
@@ -140,7 +125,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
@@ -179,26 +164,13 @@ TEST(ConvertDToI) {
RunAllTruncationTests(&ConvertDToICVersion);
#endif
- Register source_registers[] = {
- sp, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
Register dest_registers[] = {
v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
- for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- false));
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d],
- true));
- }
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
}
}
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index bd29e7ab5c..d69da6d0f6 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -42,21 +42,18 @@ namespace v8 {
namespace internal {
namespace test_code_stubs_x64 {
-#define __ assm.
+#define __ masm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register source_reg,
Register destination_reg) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
- int offset = source_reg == rsp ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
- DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
+
+ DoubleToIStub stub(isolate, destination_reg);
byte* start = stub.GetCode()->instruction_start();
__ pushq(rbx);
@@ -66,16 +63,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ pushq(rdi);
const RegisterConfiguration* config = RegisterConfiguration::Default();
- if (source_reg != rsp) {
- // The argument we pass to the stub is not a heap number, but instead
- // stack-allocated and offset-wise made to look like a heap number for
- // the stub. We create that "heap number" after pushing all allocatable
- // registers.
- int double_argument_slot =
- (config->num_allocatable_general_registers() - 1) * kPointerSize +
- kDoubleSize;
- __ leaq(source_reg, MemOperand(rsp, -double_argument_slot - offset));
- }
// Save registers make sure they don't get clobbered.
int reg_num = 0;
@@ -118,7 +105,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ ret(0);
CodeDesc desc;
- assm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}
@@ -144,16 +131,11 @@ TEST(ConvertDToI) {
RunAllTruncationTests(&ConvertDToICVersion);
#endif
- Register source_registers[] = {rsp, rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
Register dest_registers[] = {rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
- for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- MakeConvertDToIFuncTrampoline(isolate,
- source_registers[s],
- dest_registers[d]));
- }
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
}
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 8a50ff0f57..92ed988b06 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -55,7 +55,8 @@ static void SetGlobalProperty(const char* name, Object* value) {
isolate->factory()->InternalizeUtf8String(name);
Handle<JSObject> global(isolate->context()->global_object());
Runtime::SetObjectProperty(isolate, global, internalized_name, object,
- SLOPPY).Check();
+ LanguageMode::kSloppy)
+ .Check();
}
@@ -67,7 +68,8 @@ static Handle<JSFunction> Compile(const char* source) {
Compiler::GetSharedFunctionInfoForScript(
source_code, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
- NULL, NULL, v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE,
+ nullptr, nullptr, v8::ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
MaybeHandle<FixedArray>())
.ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -84,7 +86,7 @@ static double Inc(Isolate* isolate, int x) {
if (fun.is_null()) return -1;
Handle<JSObject> global(isolate->context()->global_object());
- Execution::Call(isolate, fun, global, 0, NULL).Check();
+ Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -103,7 +105,7 @@ static double Add(Isolate* isolate, int x, int y) {
SetGlobalProperty("x", Smi::FromInt(x));
SetGlobalProperty("y", Smi::FromInt(y));
Handle<JSObject> global(isolate->context()->global_object());
- Execution::Call(isolate, fun, global, 0, NULL).Check();
+ Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -121,7 +123,7 @@ static double Abs(Isolate* isolate, int x) {
SetGlobalProperty("x", Smi::FromInt(x));
Handle<JSObject> global(isolate->context()->global_object());
- Execution::Call(isolate, fun, global, 0, NULL).Check();
+ Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -140,7 +142,7 @@ static double Sum(Isolate* isolate, int n) {
SetGlobalProperty("n", Smi::FromInt(n));
Handle<JSObject> global(isolate->context()->global_object());
- Execution::Call(isolate, fun, global, 0, NULL).Check();
+ Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -160,7 +162,7 @@ TEST(Print) {
Handle<JSFunction> fun = Compile(source);
if (fun.is_null()) return;
Handle<JSObject> global(CcTest::i_isolate()->context()->global_object());
- Execution::Call(CcTest::i_isolate(), fun, global, 0, NULL).Check();
+ Execution::Call(CcTest::i_isolate(), fun, global, 0, nullptr).Check();
}
@@ -191,8 +193,7 @@ TEST(Stuff) {
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
Handle<JSObject> global(CcTest::i_isolate()->context()->global_object());
- Execution::Call(
- CcTest::i_isolate(), fun, global, 0, NULL).Check();
+ Execution::Call(CcTest::i_isolate(), fun, global, 0, nullptr).Check();
CHECK_EQ(511.0, GetGlobalProperty("r")->Number());
}
@@ -206,7 +207,7 @@ TEST(UncaughtThrow) {
CHECK(!fun.is_null());
Isolate* isolate = fun->GetIsolate();
Handle<JSObject> global(isolate->context()->global_object());
- CHECK(Execution::Call(isolate, fun, global, 0, NULL).is_null());
+ CHECK(Execution::Call(isolate, fun, global, 0, nullptr).is_null());
CHECK_EQ(42.0, isolate->pending_exception()->Number());
}
@@ -232,7 +233,7 @@ TEST(C2JSFrames) {
// Run the generated code to populate the global object with 'foo'.
Handle<JSObject> global(isolate->context()->global_object());
- Execution::Call(isolate, fun0, global, 0, NULL).Check();
+ Execution::Call(isolate, fun0, global, 0, nullptr).Check();
Handle<Object> fun1 =
JSReceiver::GetProperty(isolate, isolate->global_object(), "foo")
@@ -416,10 +417,10 @@ TEST(CompileFunctionInContext) {
"y = r * sin(PI / 2);"));
v8::Local<v8::Function> fun =
v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
- 0, NULL, 1, &math)
+ 0, nullptr, 1, &math)
.ToLocalChecked();
CHECK(!fun.IsEmpty());
- fun->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ fun->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK(env->Global()->Has(env.local(), v8_str("a")).FromJust());
v8::Local<v8::Value> a =
env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked();
@@ -456,10 +457,10 @@ TEST(CompileFunctionInContextComplex) {
v8::ScriptCompiler::Source script_source(v8_str("result = x + y + z"));
v8::Local<v8::Function> fun =
v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
- 0, NULL, 2, ext)
+ 0, nullptr, 2, ext)
.ToLocalChecked();
CHECK(!fun.IsEmpty());
- fun->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ fun->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
v8::Local<v8::Value> result =
env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
@@ -526,7 +527,7 @@ TEST(CompileFunctionInContextNonIdentifierArgs) {
v8::ScriptCompiler::Source script_source(v8_str("result = 1"));
v8::Local<v8::String> arg = v8_str("b }");
CHECK(v8::ScriptCompiler::CompileFunctionInContext(
- env.local(), &script_source, 1, &arg, 0, NULL)
+ env.local(), &script_source, 1, &arg, 0, nullptr)
.IsEmpty());
}
@@ -541,18 +542,18 @@ TEST(CompileFunctionInContextScriptOrigin) {
v8::ScriptCompiler::Source script_source(v8_str("throw new Error()"), origin);
v8::Local<v8::Function> fun =
v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
- 0, NULL, 0, NULL)
+ 0, nullptr, 0, nullptr)
.ToLocalChecked();
CHECK(!fun.IsEmpty());
v8::TryCatch try_catch(CcTest::isolate());
CcTest::isolate()->SetCaptureStackTraceForUncaughtExceptions(true);
- CHECK(fun->Call(env.local(), env->Global(), 0, NULL).IsEmpty());
+ CHECK(fun->Call(env.local(), env->Global(), 0, nullptr).IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(!try_catch.Exception().IsEmpty());
v8::Local<v8::StackTrace> stack =
v8::Exception::GetStackTrace(try_catch.Exception());
CHECK(!stack.IsEmpty());
- CHECK(stack->GetFrameCount() > 0);
+ CHECK_GT(stack->GetFrameCount(), 0);
v8::Local<v8::StackFrame> frame = stack->GetFrame(0);
CHECK_EQ(23, frame->GetLineNumber());
CHECK_EQ(42 + strlen("throw "), static_cast<unsigned>(frame->GetColumn()));
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index aa2ae26da4..6dfd22e34a 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -83,17 +83,17 @@ TEST(StartStop) {
static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame1,
- i::Address frame2 = NULL,
- i::Address frame3 = NULL) {
+ i::Address frame2 = nullptr,
+ i::Address frame3 = nullptr) {
v8::TickSample* sample = proc->StartTickSample();
sample->pc = frame1;
sample->tos = frame1;
sample->frames_count = 0;
- if (frame2 != NULL) {
+ if (frame2 != nullptr) {
sample->stack[0] = frame2;
sample->frames_count = 1;
}
- if (frame3 != NULL) {
+ if (frame3 != nullptr) {
sample->stack[1] = frame3;
sample->frames_count = 2;
}
@@ -151,11 +151,8 @@ TEST(CodeEvents) {
i::AbstractCode* aaa_code = CreateCode(&env);
i::AbstractCode* comment_code = CreateCode(&env);
- i::AbstractCode* args5_code = CreateCode(&env);
i::AbstractCode* comment2_code = CreateCode(&env);
i::AbstractCode* moved_code = CreateCode(&env);
- i::AbstractCode* args3_code = CreateCode(&env);
- i::AbstractCode* args4_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfileGenerator* generator = new ProfileGenerator(profiles);
@@ -175,12 +172,9 @@ TEST(CodeEvents) {
*aaa_name);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment_code,
"comment");
- profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, args5_code, 5);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment2_code,
"comment2");
profiler_listener.CodeMoveEvent(comment2_code, moved_code->address());
- profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, args3_code, 3);
- profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, args4_code, 4);
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(processor, aaa_code->address());
@@ -199,10 +193,6 @@ TEST(CodeEvents) {
CHECK(comment);
CHECK_EQ(0, strcmp("comment", comment->name()));
- CodeEntry* args5 = generator->code_map()->FindEntry(args5_code->address());
- CHECK(args5);
- CHECK_EQ(0, strcmp("5", args5->name()));
-
CHECK(!generator->code_map()->FindEntry(comment2_code->address()));
CodeEntry* comment2 = generator->code_map()->FindEntry(moved_code->address());
@@ -238,7 +228,7 @@ TEST(TickEvents) {
profiler_listener.AddObserver(&profiler);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
- profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, 5);
+ profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, "ccc");
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
EnqueueTickSampleEvent(processor, frame1_code->instruction_start());
@@ -264,7 +254,7 @@ TEST(TickEvents) {
const std::vector<ProfileNode*>* top_down_bbb_children =
top_down_root_children->back()->children();
CHECK_EQ(1, top_down_bbb_children->size());
- CHECK_EQ(0, strcmp("5", top_down_bbb_children->back()->entry()->name()));
+ CHECK_EQ(0, strcmp("ccc", top_down_bbb_children->back()->entry()->name()));
const std::vector<ProfileNode*>* top_down_stub_children =
top_down_bbb_children->back()->children();
CHECK_EQ(1, top_down_stub_children->size());
@@ -497,7 +487,7 @@ static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
return child;
}
}
- return NULL;
+ return nullptr;
}
@@ -1282,7 +1272,7 @@ TEST(CpuProfileDeepStack) {
v8::Local<v8::Function> function = GetFunction(env, "start");
v8::Local<v8::String> profile_name = v8_str("my_profile");
- function->Call(env, env->Global(), 0, NULL).ToLocalChecked();
+ function->Call(env, env->Global(), 0, nullptr).ToLocalChecked();
v8::CpuProfile* profile = helper.profiler()->StopProfiling(profile_name);
CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
@@ -1483,7 +1473,7 @@ static const char* js_force_collect_sample_source =
"}";
static void CallCollectSample(const v8::FunctionCallbackInfo<v8::Value>& info) {
- i::ProfilerExtension::profiler()->CollectSample();
+ v8::CpuProfiler::CollectSample(info.GetIsolate());
}
TEST(CollectSampleAPI) {
@@ -1615,7 +1605,7 @@ TEST(Inlining) {
v8::Local<v8::Function> function = GetFunction(env, "start");
v8::Local<v8::String> profile_name = v8_str("my_profile");
- function->Call(env, env->Global(), 0, NULL).ToLocalChecked();
+ function->Call(env, env->Global(), 0, nullptr).ToLocalChecked();
v8::CpuProfile* profile = helper.profiler()->StopProfiling(profile_name);
CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
@@ -1839,14 +1829,14 @@ TEST(DontStopOnFinishedProfileDelete) {
CHECK(inner_profile);
CHECK_EQ(1, iprofiler->GetProfilesCount());
inner_profile->Delete();
- inner_profile = NULL;
+ inner_profile = nullptr;
CHECK_EQ(0, iprofiler->GetProfilesCount());
v8::CpuProfile* outer_profile = profiler->StopProfiling(outer);
CHECK(outer_profile);
CHECK_EQ(1, iprofiler->GetProfilesCount());
outer_profile->Delete();
- outer_profile = NULL;
+ outer_profile = nullptr;
CHECK_EQ(0, iprofiler->GetProfilesCount());
profiler->Dispose();
}
@@ -1856,7 +1846,7 @@ const char* GetBranchDeoptReason(v8::Local<v8::Context> context,
i::CpuProfile* iprofile, const char* branch[],
int length) {
v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
- const ProfileNode* iopt_function = NULL;
+ const ProfileNode* iopt_function = nullptr;
iopt_function = GetSimpleBranch(context, profile, branch, length);
CHECK_EQ(1U, iopt_function->deopt_infos().size());
return iopt_function->deopt_infos()[0].deopt_reason;
@@ -2220,17 +2210,19 @@ class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
TEST(TracingCpuProfiler) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
- i::V8::SetPlatformForTesting(default_platform);
+ std::unique_ptr<v8::Platform> default_platform =
+ v8::platform::NewDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform.get());
- v8::platform::tracing::TracingController tracing_controller;
- static_cast<v8::platform::DefaultPlatform*>(default_platform)
- ->SetTracingController(&tracing_controller);
+ auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ v8::platform::tracing::TracingController* tracing_controller = tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
+ ->SetTracingController(std::move(tracing));
CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, event_checker);
- tracing_controller.Initialize(ring_buffer);
+ tracing_controller->Initialize(ring_buffer);
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory(
TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
@@ -2238,10 +2230,10 @@ TEST(TracingCpuProfiler) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
{
- tracing_controller.StartTracing(trace_config);
+ tracing_controller->StartTracing(trace_config);
auto profiler = v8::TracingCpuProfiler::Create(env->GetIsolate());
CompileRun("function foo() { } foo();");
- tracing_controller.StopTracing();
+ tracing_controller->StopTracing();
CompileRun("function bar() { } bar();");
}
@@ -2316,6 +2308,72 @@ TEST(Issue763073) {
cpu_profiler->Dispose();
}
+static const char* js_collect_sample_api_source =
+ "%NeverOptimizeFunction(start);\n"
+ "function start() {\n"
+ " CallStaticCollectSample();\n"
+ "}";
+
+static void CallStaticCollectSample(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::CpuProfiler::CollectSample(info.GetIsolate());
+}
+
+TEST(StaticCollectSampleAPI) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(env->GetIsolate(), CallStaticCollectSample);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ func->SetName(v8_str("CallStaticCollectSample"));
+ env->Global()
+ ->Set(env.local(), v8_str("CallStaticCollectSample"), func)
+ .FromJust();
+
+ CompileRun(js_collect_sample_api_source);
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100);
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "CallStaticCollectSample");
+
+ profile->Delete();
+}
+
+TEST(CodeEntriesMemoryLeak) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+
+ std::string source = "function start() {}\n";
+ for (int i = 0; i < 1000; ++i) {
+ source += "function foo" + std::to_string(i) + "() { return " +
+ std::to_string(i) +
+ "; }\n"
+ "foo" +
+ std::to_string(i) + "();\n";
+ }
+ CompileRun(source.c_str());
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ ProfilerHelper helper(env);
+
+ for (int j = 0; j < 100; ++j) {
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0);
+ profile->Delete();
+ }
+ ProfilerListener* profiler_listener =
+ CcTest::i_isolate()->logger()->profiler_listener();
+
+ CHECK_GE(10000ul, profiler_listener->entries_count_for_test());
+}
+
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index c67d20160a..444d5a77b3 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -50,7 +50,7 @@ class DateCacheMock: public DateCache {
int year, month, day;
YearMonthDayFromDays(days, &year, &month, &day);
Rule* rule = FindRuleFor(year, month, day, time_in_day_sec);
- return rule == NULL ? 0 : rule->offset_sec * 1000;
+ return rule == nullptr ? 0 : rule->offset_sec * 1000;
}
@@ -60,7 +60,7 @@ class DateCacheMock: public DateCache {
private:
Rule* FindRuleFor(int year, int month, int day, int time_in_day_sec) {
- Rule* result = NULL;
+ Rule* result = nullptr;
for (int i = 0; i < rules_count_; i++)
if (Match(&rules_[i], year, month, day, time_in_day_sec)) {
result = &rules_[i];
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 794bc9c841..3d6130549f 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -37,6 +37,7 @@
#include "src/deoptimizer.h"
#include "src/frames.h"
#include "src/objects-inl.h"
+#include "src/snapshot/snapshot.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -389,7 +390,8 @@ void CheckDebuggerUnloaded() {
// Iterate the heap and check that there are no debugger related objects left.
HeapIterator iterator(CcTest::heap());
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
}
}
@@ -615,7 +617,7 @@ static void DebugEventCounter(
v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
event_data->Get(context, fun_name).ToLocalChecked());
v8::Local<v8::Value> result =
- fun->Call(context, event_data, 0, NULL).ToLocalChecked();
+ fun->Call(context, event_data, 0, nullptr).ToLocalChecked();
if (result->IsTrue()) {
uncaught_exception_hit_count++;
}
@@ -635,7 +637,7 @@ static void DebugEventCounter(
// Run callback from DebugEventListener and check the result.
if (!debug_event_listener_callback.IsEmpty()) {
v8::Local<v8::Value> result =
- debug_event_listener_callback->Call(context, event_data, 0, NULL)
+ debug_event_listener_callback->Call(context, event_data, 0, nullptr)
.ToLocalChecked();
CHECK(!result.IsEmpty());
CHECK_EQ(debug_event_listener_callback_result,
@@ -659,7 +661,7 @@ struct EvaluateCheck {
// Array of checks to do.
-struct EvaluateCheck* checks = NULL;
+struct EvaluateCheck* checks = nullptr;
// Source for The JavaScript function which can do the evaluation when a break
// point is hit.
const char* evaluate_check_source =
@@ -681,7 +683,7 @@ static void DebugEventEvaluate(
if (event == v8::Break) {
break_point_hit_count++;
- for (int i = 0; checks[i].expr != NULL; i++) {
+ for (int i = 0; checks[i].expr != nullptr; i++) {
const int argc = 3;
v8::Local<v8::String> string = v8_str(isolate, checks[i].expr);
v8::Local<v8::Value> argv[argc] = {exec_state, string,
@@ -742,7 +744,7 @@ static void DebugEventStep(
// String containing the expected function call sequence. Note: this only works
// if functions have name length of one.
-const char* expected_step_sequence = NULL;
+const char* expected_step_sequence = nullptr;
// The actual debug event described by the longer comment above.
static void DebugEventStepSequence(
@@ -928,19 +930,19 @@ TEST(BreakPointICStore) {
CompileFunction(&env, "function foo(){bar=0;}", "foo");
// Run without breakpoints.
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
int bp = SetBreakPoint(foo, 0);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -960,19 +962,19 @@ TEST(BreakPointICLoad) {
CompileFunction(&env, "function foo(){var x=bar;}", "foo");
// Run without breakpoints.
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -991,19 +993,19 @@ TEST(BreakPointICCall) {
CompileFunction(&env, "function foo(){bar();}", "foo");
// Run without breakpoints.
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
int bp = SetBreakPoint(foo, 0);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -1023,7 +1025,7 @@ TEST(BreakPointICCallWithGC) {
v8::Local<v8::Context> context = env.context();
// Run without breakpoints.
- CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
.FromJust());
@@ -1031,12 +1033,12 @@ TEST(BreakPointICCallWithGC) {
// Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
- CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
.FromJust());
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
.FromJust());
@@ -1044,7 +1046,7 @@ TEST(BreakPointICCallWithGC) {
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -1064,7 +1066,7 @@ TEST(BreakPointConstructCallWithGC) {
v8::Local<v8::Context> context = env.context();
// Run without breakpoints.
- CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
.FromJust());
@@ -1072,12 +1074,12 @@ TEST(BreakPointConstructCallWithGC) {
// Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
- CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
.FromJust());
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
.FromJust());
@@ -1085,7 +1087,7 @@ TEST(BreakPointConstructCallWithGC) {
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -1114,23 +1116,23 @@ TEST(BreakPointReturn) {
v8::Local<v8::Context> context = env.context();
// Run without breakpoints.
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
int bp = SetBreakPoint(foo, 0);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(0, last_source_line);
CHECK_EQ(15, last_source_column);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, last_source_line);
CHECK_EQ(15, last_source_column);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -1144,7 +1146,7 @@ static void CallWithBreakPoints(v8::Local<v8::Context> context,
int break_point_count, int call_count) {
break_point_hit_count = 0;
for (int i = 0; i < call_count; i++) {
- f->Call(context, recv, 0, NULL).ToLocalChecked();
+ f->Call(context, recv, 0, nullptr).ToLocalChecked();
CHECK_EQ((i + 1) * break_point_count, break_point_hit_count);
}
}
@@ -1198,17 +1200,17 @@ static void CallAndGC(v8::Local<v8::Context> context,
for (int i = 0; i < 3; i++) {
// Call function.
- f->Call(context, recv, 0, NULL).ToLocalChecked();
+ f->Call(context, recv, 0, nullptr).ToLocalChecked();
CHECK_EQ(1 + i * 3, break_point_hit_count);
// Scavenge and call function.
CcTest::CollectGarbage(v8::internal::NEW_SPACE);
- f->Call(context, recv, 0, NULL).ToLocalChecked();
+ f->Call(context, recv, 0, nullptr).ToLocalChecked();
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
CcTest::CollectAllGarbage();
- f->Call(context, recv, 0, NULL).ToLocalChecked();
+ f->Call(context, recv, 0, nullptr).ToLocalChecked();
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
}
@@ -1371,33 +1373,33 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
// Call f and g without break points.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 12.
int sbp1 = SetScriptBreakPointByNameFromJS(isolate, "test", 12, 0);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Remove the break point again.
break_point_hit_count = 0;
ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 2.
int sbp2 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 2, 0);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Call f and g with break point on line 2, 4, 12, 14 and 15.
@@ -1406,9 +1408,9 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
int sbp5 = SetScriptBreakPointByNameFromJS(isolate, "test", 14, 0);
int sbp6 = SetScriptBreakPointByNameFromJS(isolate, "test", 15, 0);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(7, break_point_hit_count);
// Remove all the break points again.
@@ -1418,9 +1420,9 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
ClearBreakPointFromJS(isolate, sbp4);
ClearBreakPointFromJS(isolate, sbp5);
ClearBreakPointFromJS(isolate, sbp6);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
SetDebugEventListener(isolate, nullptr);
@@ -1480,33 +1482,33 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
// Call f and g without break points.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 12.
int sbp1 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 12, 0);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Remove the break point again.
break_point_hit_count = 0;
ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 2.
int sbp2 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 2, 0);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Call f and g with break point on line 2, 4, 12, 14 and 15.
@@ -1515,9 +1517,9 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
int sbp5 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 14, 0);
int sbp6 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 15, 0);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(7, break_point_hit_count);
// Remove all the break points again.
@@ -1527,9 +1529,9 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
ClearBreakPointFromJS(env->GetIsolate(), sbp4);
ClearBreakPointFromJS(env->GetIsolate(), sbp5);
ClearBreakPointFromJS(env->GetIsolate(), sbp6);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
SetDebugEventListener(isolate, nullptr);
@@ -1575,19 +1577,19 @@ TEST(EnableDisableScriptBreakPoint) {
// Call f while enabeling and disabling the script break point.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
DisableScriptBreakPointFromJS(isolate, sbp);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
EnableScriptBreakPointFromJS(isolate, sbp);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
DisableScriptBreakPointFromJS(isolate, sbp);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(isolate, nullptr);
@@ -1631,18 +1633,18 @@ TEST(ConditionalScriptBreakPoint) {
// Call f with different conditions on the script break point.
break_point_hit_count = 0;
ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "false");
- f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "true");
break_point_hit_count = 0;
- f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "x % 2 == 0");
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
- f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
}
CHECK_EQ(5, break_point_hit_count);
@@ -1697,9 +1699,9 @@ TEST(ScriptBreakPointMultiple) {
// Call f and g and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Clear the script break point.
@@ -1707,9 +1709,9 @@ TEST(ScriptBreakPointMultiple) {
// Call f and g and check that the script break point is no longer active.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Set script break point with the scripts loaded.
@@ -1717,9 +1719,9 @@ TEST(ScriptBreakPointMultiple) {
// Call f and g and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -1767,7 +1769,7 @@ TEST(ScriptBreakPointLineOffset) {
// Call f and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Clear the script break points.
@@ -1776,7 +1778,7 @@ TEST(ScriptBreakPointLineOffset) {
// Call f and check that no script break points are active.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Set a script break point with the script loaded.
@@ -1784,7 +1786,7 @@ TEST(ScriptBreakPointLineOffset) {
// Call f and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -1854,12 +1856,12 @@ TEST(ScriptBreakPointLine) {
CHECK_EQ(0, StrLength(last_function_hit));
// Call f and check that the script break point.
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, strcmp("f", last_function_hit));
// Call g and check that the script break point.
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ(0, strcmp("g", last_function_hit));
@@ -1869,7 +1871,7 @@ TEST(ScriptBreakPointLine) {
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 6, -1);
// Call g and check that the script break point in h is hit.
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
CHECK_EQ(0, strcmp("h", last_function_hit));
@@ -1881,8 +1883,8 @@ TEST(ScriptBreakPointLine) {
int sbp5 =
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 4, -1);
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Set a break point in the code after the last function decleration.
@@ -1937,7 +1939,7 @@ TEST(ScriptBreakPointLineTopLevel) {
// Call f and check that there was no break points.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Recompile and run script and check that break point was not reapplied.
@@ -1998,11 +2000,11 @@ TEST(RemoveBreakPointInBreak) {
debug_event_remove_break_point = SetBreakPoint(foo, 0);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -2037,11 +2039,11 @@ TEST(DebuggerStatement) {
.ToLocalChecked());
// Run function with debugger statement
- bar->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ bar->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Run function with two debugger statement
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -2067,13 +2069,13 @@ TEST(DebuggerStatementBreakpoint) {
.ToLocalChecked());
// The debugger statement triggers breakpoint hit
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
int bp = SetBreakPoint(foo, 0);
// Set breakpoint does not duplicate hits
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
ClearBreakPoint(bp);
@@ -2101,15 +2103,15 @@ TEST(DebugEvaluate) {
// d = Hello, world!).
struct EvaluateCheck checks_uu[] = {{"x", v8::Undefined(isolate)},
{"a", v8::Undefined(isolate)},
- {NULL, v8::Local<v8::Value>()}};
+ {nullptr, v8::Local<v8::Value>()}};
struct EvaluateCheck checks_hu[] = {
{"x", v8_str(env->GetIsolate(), "Hello, world!")},
{"a", v8::Undefined(isolate)},
- {NULL, v8::Local<v8::Value>()}};
+ {nullptr, v8::Local<v8::Value>()}};
struct EvaluateCheck checks_hh[] = {
{"x", v8_str(env->GetIsolate(), "Hello, world!")},
{"a", v8_str(env->GetIsolate(), "Hello, world!")},
- {NULL, v8::Local<v8::Value>()}};
+ {nullptr, v8::Local<v8::Value>()}};
// Simple test function. The "y=0" is in the function foo to provide a break
// location. For "y=0" the "y" is at position 15 in the foo function
@@ -2134,7 +2136,7 @@ TEST(DebugEvaluate) {
// Call foo with breakpoint set before a=x and undefined as parameter.
int bp = SetBreakPoint(foo, foo_break_position_1);
checks = checks_uu;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Call foo with breakpoint set before a=x and parameter "Hello, world!".
checks = checks_hu;
@@ -2250,7 +2252,7 @@ TEST(ConditionalBreakpointWithCodeGenerationDisallowed) {
debugEventCount = 0;
env->AllowCodeGenerationFromStrings(false);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, debugEventCount);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -2314,7 +2316,7 @@ TEST(DebugEvaluateWithCodeGenerationDisallowed) {
"checkFrameEval");
debugEventCount = 0;
env->AllowCodeGenerationFromStrings(false);
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, debugEventCount);
checkGlobalEvalFunction.Clear();
@@ -2345,7 +2347,7 @@ TEST(DebugStepLinear) {
step_action = StepIn;
break_point_hit_count = 0;
v8::Local<v8::Context> context = env.context();
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(4, break_point_hit_count);
@@ -2358,7 +2360,7 @@ TEST(DebugStepLinear) {
SetBreakPoint(foo, 3);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Without stepping only active break points are hit.
CHECK_EQ(1, break_point_hit_count);
@@ -2498,13 +2500,13 @@ TEST(DebugStepNamedLoadLoop) {
"foo");
// Call function without any break points to ensure inlining is in place.
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Set up break point and step through the function.
SetBreakPoint(foo, 4);
step_action = StepNext;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(65, break_point_hit_count);
@@ -2534,13 +2536,13 @@ static void DoDebugStepNamedStoreLoop(int expected) {
"foo");
// Call function without any break points to ensure inlining is in place.
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Set up break point and step through the function.
SetBreakPoint(foo, 3);
step_action = StepNext;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// With stepping all expected break locations are hit.
CHECK_EQ(expected, break_point_hit_count);
@@ -2578,7 +2580,7 @@ TEST(DebugStepLinearMixedICs) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(10, break_point_hit_count);
@@ -2591,7 +2593,7 @@ TEST(DebugStepLinearMixedICs) {
SetBreakPoint(foo, 0);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Without stepping only active break points are hit.
CHECK_EQ(1, break_point_hit_count);
@@ -2626,7 +2628,7 @@ TEST(DebugStepDeclarations) {
// Stepping through the declarations.
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(5, break_point_hit_count);
// Get rid of the debug event listener.
@@ -2660,7 +2662,7 @@ TEST(DebugStepLocals) {
// Stepping through the declarations.
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(5, break_point_hit_count);
// Get rid of the debug event listener.
@@ -3050,7 +3052,7 @@ TEST(DebugStepForIn) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(8, break_point_hit_count);
// Create a function for testing stepping. Run it to allow it to get
@@ -3067,7 +3069,7 @@ TEST(DebugStepForIn) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(10, break_point_hit_count);
// Get rid of the debug event listener.
@@ -3102,7 +3104,7 @@ TEST(DebugStepWith) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
// Get rid of the debug event listener.
@@ -3132,7 +3134,7 @@ TEST(DebugConditional) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
step_action = StepIn;
@@ -3174,7 +3176,7 @@ TEST(StepInOutSimple) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "abcbaca";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3182,7 +3184,7 @@ TEST(StepInOutSimple) {
step_action = StepNext;
break_point_hit_count = 0;
expected_step_sequence = "aaa";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3190,7 +3192,7 @@ TEST(StepInOutSimple) {
step_action = StepOut;
break_point_hit_count = 0;
expected_step_sequence = "a";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3227,7 +3229,7 @@ TEST(StepInOutTree) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "adacadabcbadacada";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3235,7 +3237,7 @@ TEST(StepInOutTree) {
step_action = StepNext;
break_point_hit_count = 0;
expected_step_sequence = "aaaa";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3243,7 +3245,7 @@ TEST(StepInOutTree) {
step_action = StepOut;
break_point_hit_count = 0;
expected_step_sequence = "a";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3279,7 +3281,7 @@ TEST(StepInOutBranch) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "abbaca";
- a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ a->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3306,7 +3308,7 @@ TEST(DebugStepNatives) {
v8::Local<v8::Context> context = env.context();
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(3, break_point_hit_count);
@@ -3318,7 +3320,7 @@ TEST(DebugStepNatives) {
SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Without stepping only active break points are hit.
CHECK_EQ(1, break_point_hit_count);
@@ -3346,7 +3348,7 @@ TEST(DebugStepFunctionApply) {
v8::Local<v8::Context> context = env.context();
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(7, break_point_hit_count);
@@ -3358,7 +3360,7 @@ TEST(DebugStepFunctionApply) {
SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Without stepping only the debugger statement is hit.
CHECK_EQ(1, break_point_hit_count);
@@ -3394,7 +3396,7 @@ TEST(DebugStepFunctionCall) {
// Check stepping where the if condition in bar is false.
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
// Check stepping where the if condition in bar is true.
@@ -3411,7 +3413,7 @@ TEST(DebugStepFunctionCall) {
SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Without stepping only the debugger statement is hit.
CHECK_EQ(1, break_point_hit_count);
@@ -3444,7 +3446,7 @@ TEST(DebugStepFunctionCallApply) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
SetDebugEventListener(isolate, nullptr);
@@ -3454,7 +3456,7 @@ TEST(DebugStepFunctionCallApply) {
SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Without stepping only the debugger statement is hit.
CHECK_EQ(1, break_point_hit_count);
@@ -3542,117 +3544,117 @@ TEST(BreakOnException) {
// Initial state should be no break on exceptions.
DebugEventCounterClear();
MessageCallbackCountClear();
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// No break on exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(false, false);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// Break on uncaught exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(false, true);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(1, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(3, 3, 2);
// Break on exception and uncaught exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(true, true);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// Break on exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(true, false);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// No break on exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), false, false);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// Break on uncaught exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), false, true);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(1, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(3, 3, 2);
// Break on exception and uncaught exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), true, true);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// Break on exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), true, false);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// No break on exception using native API
@@ -3660,13 +3662,13 @@ TEST(BreakOnException) {
MessageCallbackCountClear();
ChangeBreakOnExceptionFromAPI(env->GetIsolate(),
v8::debug::NoBreakOnException);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// // Break on uncaught exception using native API
@@ -3674,13 +3676,13 @@ TEST(BreakOnException) {
MessageCallbackCountClear();
ChangeBreakOnExceptionFromAPI(env->GetIsolate(),
v8::debug::BreakOnUncaughtException);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(1, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(3, 3, 2);
// // Break on exception and uncaught exception using native API
@@ -3688,13 +3690,13 @@ TEST(BreakOnException) {
MessageCallbackCountClear();
ChangeBreakOnExceptionFromAPI(env->GetIsolate(),
v8::debug::BreakOnAnyException);
- caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ caught->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaught->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, nullptr).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ edgeCaseFinally->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -3832,7 +3834,7 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "aa";
- CHECK(a->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(a->Call(context, env->Global(), 0, nullptr).IsEmpty());
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3843,7 +3845,7 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "bcc";
- CHECK(b->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(b->Call(context, env->Global(), 0, nullptr).IsEmpty());
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3855,7 +3857,7 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ddedd";
- d->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ d->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3864,7 +3866,7 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ddeedd";
- d->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ d->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3876,7 +3878,7 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ffghhff";
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3885,7 +3887,7 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ffghhhff";
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3924,10 +3926,10 @@ TEST(DebugBreak) {
v8::Number::New(isolate, 1), v8::Number::New(isolate, 1)};
// Call all functions to make sure that they are compiled.
- f0->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- f1->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- f2->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- f3->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f0->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
+ f1->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
+ f2->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
+ f3->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// Set the debug break flag.
v8::debug::DebugBreak(env->GetIsolate());
@@ -3942,13 +3944,43 @@ TEST(DebugBreak) {
}
// One break for each function called.
- CHECK(4 * arraysize(argv) == break_point_hit_count);
+ CHECK_EQ(4 * arraysize(argv), break_point_hit_count);
// Get rid of the debug event listener.
SetDebugEventListener(isolate, nullptr);
CheckDebuggerUnloaded();
}
+TEST(DebugBreakWithoutJS) {
+ i::FLAG_stress_compaction = false;
+#ifdef VERIFY_HEAP
+ i::FLAG_verify_heap = true;
+#endif
+ DebugLocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::Local<v8::Context> context = env.context();
+ v8::HandleScope scope(isolate);
+
+ // Register a debug event listener which sets the break flag and counts.
+ SetDebugEventListener(isolate, DebugEventBreak);
+
+ // Set the debug break flag.
+ v8::debug::DebugBreak(env->GetIsolate());
+
+ v8::Local<v8::String> json = v8_str("[1]");
+ v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ CHECK_EQ(0, break_point_hit_count);
+ CompileRun("");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded();
+}
// Test to ensure that JavaScript code keeps running while the debug break
// through the stack limit flag is set but breaks are disabled.
@@ -3973,18 +4005,18 @@ TEST(DisableBreak) {
// Call all functions with different argument count.
break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
{
v8::debug::DebugBreak(env->GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::internal::DisableBreak disable_break(isolate->debug());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
}
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Get rid of the debug event listener.
@@ -4112,7 +4144,7 @@ TEST(InterceptorPropertyMirror) {
// Create object with named interceptor.
v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
named->SetHandler(v8::NamedPropertyHandlerConfiguration(
- NamedGetter, NULL, NULL, NULL, NamedEnum));
+ NamedGetter, nullptr, nullptr, nullptr, NamedEnum));
CHECK(env->Global()
->Set(context, v8_str(isolate, "intercepted_named"),
named->NewInstance(context).ToLocalChecked())
@@ -4121,7 +4153,7 @@ TEST(InterceptorPropertyMirror) {
// Create object with indexed interceptor.
v8::Local<v8::ObjectTemplate> indexed = v8::ObjectTemplate::New(isolate);
indexed->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- IndexedGetter, NULL, NULL, NULL, IndexedEnum));
+ IndexedGetter, nullptr, nullptr, nullptr, IndexedEnum));
CHECK(env->Global()
->Set(context, v8_str(isolate, "intercepted_indexed"),
indexed->NewInstance(context).ToLocalChecked())
@@ -4130,9 +4162,9 @@ TEST(InterceptorPropertyMirror) {
// Create object with both named and indexed interceptor.
v8::Local<v8::ObjectTemplate> both = v8::ObjectTemplate::New(isolate);
both->SetHandler(v8::NamedPropertyHandlerConfiguration(
- NamedGetter, NULL, NULL, NULL, NamedEnum));
+ NamedGetter, nullptr, nullptr, nullptr, NamedEnum));
both->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- IndexedGetter, NULL, NULL, NULL, IndexedEnum));
+ IndexedGetter, nullptr, nullptr, nullptr, IndexedEnum));
CHECK(env->Global()
->Set(context, v8_str(isolate, "intercepted_both"),
both->NewInstance(context).ToLocalChecked())
@@ -4388,8 +4420,8 @@ TEST(NativeGetterPropertyMirror) {
v8::Local<v8::String> name = v8_str(isolate, "x");
// Create object with named accessor.
v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
- named->SetAccessor(name, &ProtperyXNativeGetter, NULL, v8::Local<v8::Value>(),
- v8::DEFAULT, v8::None);
+ named->SetAccessor(name, &ProtperyXNativeGetter, nullptr,
+ v8::Local<v8::Value>(), v8::DEFAULT, v8::None);
// Create object with named property getter.
CHECK(env->Global()
@@ -4434,7 +4466,7 @@ TEST(NativeGetterThrowingErrorPropertyMirror) {
v8::Local<v8::String> name = v8_str(isolate, "x");
// Create object with named accessor.
v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
- named->SetAccessor(name, &ProtperyXNativeGetterThrowingError, NULL,
+ named->SetAccessor(name, &ProtperyXNativeGetterThrowingError, nullptr,
v8::Local<v8::Value>(), v8::DEFAULT, v8::None);
// Create object with named property getter.
@@ -4672,7 +4704,7 @@ TEST(CallFunctionInDebugger) {
global_template->Set(v8_str(isolate, "CheckClosure"),
v8::FunctionTemplate::New(isolate, CheckClosure));
v8::Local<v8::Context> context =
- v8::Context::New(isolate, NULL, global_template);
+ v8::Context::New(isolate, nullptr, global_template);
v8::Context::Scope context_scope(context);
// Compile a function for checking the number of JavaScript frames.
@@ -4808,9 +4840,9 @@ TEST(DebuggerUnload) {
// Make sure that the break points are there.
break_point_hit_count = 0;
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- bar->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ bar->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
}
@@ -4942,7 +4974,7 @@ TEST(ScriptNameAndData) {
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(0, strcmp("name", last_script_name_hit));
@@ -4956,7 +4988,7 @@ TEST(ScriptNameAndData) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, strcmp("name", last_script_name_hit));
@@ -4979,7 +5011,7 @@ TEST(ScriptNameAndData) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ(0, strcmp("new name", last_script_name_hit));
@@ -4990,7 +5022,7 @@ TEST(ScriptNameAndData) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
}
@@ -5022,8 +5054,10 @@ TEST(ContextData) {
v8::Local<v8::ObjectTemplate> global_template =
v8::Local<v8::ObjectTemplate>();
v8::Local<v8::Value> global_object = v8::Local<v8::Value>();
- context_1 = v8::Context::New(isolate, NULL, global_template, global_object);
- context_2 = v8::Context::New(isolate, NULL, global_template, global_object);
+ context_1 =
+ v8::Context::New(isolate, nullptr, global_template, global_object);
+ context_2 =
+ v8::Context::New(isolate, nullptr, global_template, global_object);
SetDebugEventListener(isolate, ContextCheckEventListener);
@@ -5048,7 +5082,7 @@ TEST(ContextData) {
expected_context = context_1;
expected_context_data = data_1;
v8::Local<v8::Function> f = CompileFunction(isolate, source, "f");
- f->Call(context_1, context_1->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context_1, context_1->Global(), 0, nullptr).ToLocalChecked();
}
@@ -5058,7 +5092,7 @@ TEST(ContextData) {
expected_context = context_2;
expected_context_data = data_2;
v8::Local<v8::Function> f = CompileFunction(isolate, source, "f");
- f->Call(context_2, context_2->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context_2, context_2->Global(), 0, nullptr).ToLocalChecked();
}
// Two times compile event and two times break event.
@@ -5103,10 +5137,10 @@ TEST(DebugBreakInEventListener) {
// Call f then g. The debugger statement in f will cause a break which will
// cause another break.
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, event_listener_break_hit_count);
// Calling g will not cause any additional breaks.
- g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(2, event_listener_break_hit_count);
}
@@ -5194,8 +5228,7 @@ TEST(EvalContextData) {
v8::Local<v8::Context> context_1;
v8::Local<v8::ObjectTemplate> global_template =
v8::Local<v8::ObjectTemplate>();
- context_1 =
- v8::Context::New(CcTest::isolate(), NULL, global_template);
+ context_1 = v8::Context::New(CcTest::isolate(), nullptr, global_template);
SetDebugEventListener(CcTest::isolate(), ContextCheckEventListener);
@@ -5216,7 +5249,7 @@ TEST(EvalContextData) {
expected_context = context_1;
expected_context_data = data_1;
v8::Local<v8::Function> f = CompileFunction(CcTest::isolate(), source, "f");
- f->Call(context_1, context_1->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context_1, context_1->Global(), 0, nullptr).ToLocalChecked();
}
SetDebugEventListener(CcTest::isolate(), nullptr);
@@ -5259,7 +5292,7 @@ TEST(AfterCompileEventWhenEventListenerIsReset) {
->Run(context)
.ToLocalChecked();
- // Setting listener to NULL should cause debugger unload.
+ // Setting listener to nullptr should cause debugger unload.
SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
@@ -5349,9 +5382,9 @@ TEST(BreakEventWhenEventListenerIsReset) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- // Setting event listener to NULL should cause debugger unload.
+ // Setting event listener to nullptr should cause debugger unload.
SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
@@ -5389,9 +5422,9 @@ TEST(ExceptionEventWhenEventListenerIsReset) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- CHECK(f->Call(context, env->Global(), 0, NULL).IsEmpty());
+ CHECK(f->Call(context, env->Global(), 0, nullptr).IsEmpty());
- // Setting event listener to NULL should cause debugger unload.
+ // Setting event listener to nullptr should cause debugger unload.
SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
@@ -5468,7 +5501,7 @@ TEST(NoDebugBreakInAfterCompileEventListener) {
// Set the debug break flag again.
v8::debug::DebugBreak(env->GetIsolate());
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// There should be one more break event when the script is evaluated in 'f'.
CHECK_EQ(2, break_point_hit_count);
@@ -5502,7 +5535,7 @@ TEST(DebugBreakFunctionApply) {
// where this test would enter an infinite loop.
break_point_hit_count = 0;
max_break_point_hit_count = 10000; // 10000 => infinite loop.
- foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// When keeping the debug break several break will happen.
CHECK_GT(break_point_hit_count, 1);
@@ -5585,7 +5618,7 @@ TEST(CallingContextIsNotDebugContext) {
"foo");
break_point_hit_count = 0;
- foo->Call(debugee_context, env->Global(), 0, NULL).ToLocalChecked();
+ foo->Call(debugee_context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
SetDebugEventListener(isolate, nullptr);
@@ -5681,11 +5714,13 @@ TEST(DeoptimizeDuringDebugBreak) {
// Compile and run function bar which will optimize it for some flag settings.
v8::Local<v8::Function> f = CompileFunction(&env, "function bar(){}", "bar");
- f->Call(context, v8::Undefined(env->GetIsolate()), 0, NULL).ToLocalChecked();
+ f->Call(context, v8::Undefined(env->GetIsolate()), 0, nullptr)
+ .ToLocalChecked();
// Set debug break and call bar again.
v8::debug::DebugBreak(env->GetIsolate());
- f->Call(context, v8::Undefined(env->GetIsolate()), 0, NULL).ToLocalChecked();
+ f->Call(context, v8::Undefined(env->GetIsolate()), 0, nullptr)
+ .ToLocalChecked();
CHECK(debug_event_break_deoptimize_done);
@@ -5806,7 +5841,7 @@ static void TestDebugBreakInLoop(const char* loop_head,
// Receive 10 breaks for each test and then terminate JavaScript execution.
static const int kBreaksPerTest = 10;
- for (int i = 0; loop_bodies[i] != NULL; i++) {
+ for (int i = 0; loop_bodies[i] != nullptr; i++) {
// Perform a lazy deoptimization after various numbers of breaks
// have been hit.
@@ -5841,15 +5876,13 @@ static void TestDebugBreakInLoop(const char* loop_head,
}
}
-
static const char* loop_bodies_1[] = {"",
"g()",
"if (a == 0) { g() }",
"if (a == 1) { g() }",
"if (a == 0) { g() } else { h() }",
"if (a == 0) { continue }",
- NULL};
-
+ nullptr};
static const char* loop_bodies_2[] = {
"if (a == 1) { continue }",
@@ -5857,8 +5890,7 @@ static const char* loop_bodies_2[] = {
"switch (a) { case 1: continue; }",
"switch (a) { case 1: g(); break; default: h() }",
"switch (a) { case 1: continue; break; default: h() }",
- NULL};
-
+ nullptr};
void DebugBreakLoop(const char* loop_header, const char** loop_bodies,
const char* loop_footer) {
@@ -6354,7 +6386,7 @@ static void NoInterruptsOnDebugEvent(
if (event_details.GetEvent() != v8::AfterCompile) return;
++after_compile_handler_depth;
// Do not allow nested AfterCompile events.
- CHECK(after_compile_handler_depth <= 1);
+ CHECK_LE(after_compile_handler_depth, 1);
v8::Isolate* isolate = event_details.GetEventContext()->GetIsolate();
v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate);
isolate->RequestInterrupt(&HandleInterrupt, nullptr);
@@ -6618,15 +6650,20 @@ TEST(DebugCoverageWithScriptDataOutOfScope) {
TEST(BuiltinsExceptionPrediction) {
v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* iisolate = CcTest::i_isolate();
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate);
- i::Builtins* builtins = CcTest::i_isolate()->builtins();
+ i::Builtins* builtins = iisolate->builtins();
bool fail = false;
for (int i = 0; i < i::Builtins::builtin_count; i++) {
Code* builtin = builtins->builtin(i);
if (builtin->kind() != Code::BUILTIN) continue;
+ if (builtin->builtin_index() == i::Builtins::kDeserializeLazy &&
+ i::Builtins::IsLazy(i)) {
+ builtin = i::Snapshot::DeserializeBuiltin(iisolate, i);
+ }
auto prediction = builtin->GetBuiltinCatchPrediction();
USE(prediction);
@@ -6648,7 +6685,7 @@ TEST(DebugGetPossibleBreakpointsReturnLocations) {
CompileRun(source);
v8::PersistentValueVector<v8::debug::Script> scripts(isolate);
v8::debug::GetLoadedScripts(isolate, scripts);
- CHECK(scripts.Size() == 1);
+ CHECK_EQ(scripts.Size(), 1);
std::vector<v8::debug::BreakLocation> locations;
CHECK(scripts.Get(0)->GetPossibleBreakpoints(
v8::debug::Location(0, 17), v8::debug::Location(), true, &locations));
@@ -6660,7 +6697,7 @@ TEST(DebugGetPossibleBreakpointsReturnLocations) {
}
// With Ignition we generate one return location per return statement,
// each has line = 5, column = 0 as statement position.
- CHECK(returns_count == 4);
+ CHECK_EQ(returns_count, 4);
}
TEST(DebugEvaluateNoSideEffect) {
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 88d0ee3dcd..f0e8080275 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -595,15 +595,17 @@ TEST(CrossScriptReferencesHarmony) {
HandleScope scope(isolate);
// Check that simple cross-script global scope access works.
- const char* decs[] = {
- "'use strict'; var x = 1; x", "x",
- "'use strict'; function x() { return 1 }; x()", "x()",
- "'use strict'; let x = 1; x", "x",
- "'use strict'; const x = 1; x", "x",
- NULL
- };
-
- for (int i = 0; decs[i] != NULL; i += 2) {
+ const char* decs[] = {"'use strict'; var x = 1; x",
+ "x",
+ "'use strict'; function x() { return 1 }; x()",
+ "x()",
+ "'use strict'; let x = 1; x",
+ "x",
+ "'use strict'; const x = 1; x",
+ "x",
+ nullptr};
+
+ for (int i = 0; decs[i] != nullptr; i += 2) {
SimpleContext context;
context.Check(decs[i], EXPECT_RESULT, Number::New(isolate, 1));
context.Check(decs[i+1], EXPECT_RESULT, Number::New(isolate, 1));
@@ -779,23 +781,13 @@ TEST(CrossScriptConflicts) {
HandleScope scope(CcTest::isolate());
- const char* firsts[] = {
- "var x = 1; x",
- "function x() { return 1 }; x()",
- "let x = 1; x",
- "const x = 1; x",
- NULL
- };
- const char* seconds[] = {
- "var x = 2; x",
- "function x() { return 2 }; x()",
- "let x = 2; x",
- "const x = 2; x",
- NULL
- };
+ const char* firsts[] = {"var x = 1; x", "function x() { return 1 }; x()",
+ "let x = 1; x", "const x = 1; x", nullptr};
+ const char* seconds[] = {"var x = 2; x", "function x() { return 2 }; x()",
+ "let x = 2; x", "const x = 2; x", nullptr};
- for (int i = 0; firsts[i] != NULL; ++i) {
- for (int j = 0; seconds[j] != NULL; ++j) {
+ for (int i = 0; firsts[i] != nullptr; ++i) {
+ for (int j = 0; seconds[j] != nullptr; ++j) {
SimpleContext context;
context.Check(firsts[i], EXPECT_RESULT,
Number::New(CcTest::isolate(), 1));
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index c8c9daa0e2..253daefa6c 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -73,6 +73,12 @@ bool DisassembleAndCompare(byte* begin, S... expected_strings) {
}
}
+ // Fail after printing expected disassembly if we expected a different number
+ // of instructions.
+ if (disassembly.size() != expected_disassembly.size()) {
+ return false;
+ }
+
return test_passed;
}
@@ -1579,5 +1585,35 @@ TEST(LoadStoreExclusive) {
VERIFY_RUN();
}
+TEST(SplitAddImmediate) {
+ SET_UP();
+
+ // Re-use the destination as a scratch.
+ COMPARE(add(r0, r1, Operand(0x12345678)),
+ "e3050678 movw r0, #22136",
+ "e3410234 movt r0, #4660",
+ "e0810000 add r0, r1, r0");
+
+ // Use ip as a scratch.
+ COMPARE(add(r0, r0, Operand(0x12345678)),
+ "e305c678 movw ip, #22136",
+ "e341c234 movt ip, #4660",
+ "e080000c add r0, r0, ip");
+
+ // If ip is not available, split the operation into multiple additions.
+ {
+ UseScratchRegisterScope temps(&assm);
+ Register reserved = temps.Acquire();
+ USE(reserved);
+ COMPARE(add(r2, r2, Operand(0x12345678)),
+ "e2822f9e add r2, r2, #632",
+ "e2822b15 add r2, r2, #21504",
+ "e282278d add r2, r2, #36962304",
+ "e2822201 add r2, r2, #268435456");
+ }
+
+ VERIFY_RUN();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index efebac208c..a3823518fc 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -77,7 +77,7 @@ namespace internal {
#define COMPARE(ASM, EXP) \
assm->Reset(); \
assm->ASM; \
- assm->GetCode(isolate, NULL); \
+ assm->GetCode(isolate, nullptr); \
decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
encoding = *reinterpret_cast<uint32_t*>(buf); \
if (strcmp(disasm->GetOutput(), EXP) != 0) { \
@@ -89,7 +89,7 @@ namespace internal {
#define COMPARE_PREFIX(ASM, EXP) \
assm->Reset(); \
assm->ASM; \
- assm->GetCode(isolate, NULL); \
+ assm->GetCode(isolate, nullptr); \
decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
encoding = *reinterpret_cast<uint32_t*>(buf); \
if (strncmp(disasm->GetOutput(), EXP, strlen(EXP)) != 0) { \
@@ -1490,9 +1490,9 @@ TEST_(load_store_acquire_release) {
COMPARE(ldaxrb(wzr, csp), "ldaxrb wzr, [csp]");
COMPARE(ldaxrh(wzr, csp), "ldaxrh wzr, [csp]");
COMPARE(ldaxr(wzr, csp), "ldaxr wzr, [csp]");
- COMPARE(stlxrb(wzr, wzr, csp), "stlxrb wzr, wzr, [csp]");
- COMPARE(stlxrh(wzr, wzr, csp), "stlxrh wzr, wzr, [csp]");
- COMPARE(stlxr(wzr, wzr, csp), "stlxr wzr, wzr, [csp]");
+ COMPARE(stlxrb(w0, wzr, csp), "stlxrb w0, wzr, [csp]");
+ COMPARE(stlxrh(wzr, w1, csp), "stlxrh wzr, w1, [csp]");
+ COMPARE(stlxr(w2, wzr, csp), "stlxr w2, wzr, [csp]");
CLEANUP();
}
@@ -1879,7 +1879,7 @@ TEST_(system_nop) {
TEST_(debug) {
SET_UP_ASM();
- CHECK(kImmExceptionIsDebug == 0xdeb0);
+ CHECK_EQ(kImmExceptionIsDebug, 0xdeb0);
// All debug codes should produce the same instruction, and the debug code
// can be any uint32_t.
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 84940c51b7..66716e9d44 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -53,7 +53,7 @@ TEST(DisasmIa320) {
HandleScope scope(isolate);
v8::internal::byte buffer[8192];
Assembler assm(isolate, buffer, sizeof buffer);
- DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
+ DummyStaticFunction(nullptr); // just bloody use it (DELETE; debugging)
// Short immediate instructions
__ adc(eax, 12345678);
__ add(eax, Immediate(12345678));
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 0c7ebbc1a7..10608f1877 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -54,7 +54,7 @@ TEST(DisasmX64) {
HandleScope scope(isolate);
v8::internal::byte buffer[8192];
Assembler assm(isolate, buffer, sizeof buffer);
- DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
+ DummyStaticFunction(nullptr); // just bloody use it (DELETE; debugging)
// Short immediate instructions
__ addq(rax, Immediate(12345678));
diff --git a/deps/v8/test/cctest/test-diy-fp.cc b/deps/v8/test/cctest/test-diy-fp.cc
index 50c2621a56..4c597883f1 100644
--- a/deps/v8/test/cctest/test-diy-fp.cc
+++ b/deps/v8/test/cctest/test-diy-fp.cc
@@ -41,10 +41,10 @@ TEST(Subtract) {
DiyFp diy_fp2 = DiyFp(1, 0);
DiyFp diff = DiyFp::Minus(diy_fp1, diy_fp2);
- CHECK(2 == diff.f()); // NOLINT
+ CHECK_EQ(2, diff.f());
CHECK_EQ(0, diff.e());
diy_fp1.Subtract(diy_fp2);
- CHECK(2 == diy_fp1.f()); // NOLINT
+ CHECK_EQ(2, diy_fp1.f());
CHECK_EQ(0, diy_fp1.e());
}
@@ -54,29 +54,29 @@ TEST(Multiply) {
DiyFp diy_fp2 = DiyFp(2, 0);
DiyFp product = DiyFp::Times(diy_fp1, diy_fp2);
- CHECK(0 == product.f()); // NOLINT
+ CHECK_EQ(0, product.f());
CHECK_EQ(64, product.e());
diy_fp1.Multiply(diy_fp2);
- CHECK(0 == diy_fp1.f()); // NOLINT
+ CHECK_EQ(0, diy_fp1.f());
CHECK_EQ(64, diy_fp1.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000000), 11);
diy_fp2 = DiyFp(2, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
- CHECK(1 == product.f()); // NOLINT
+ CHECK_EQ(1, product.f());
CHECK_EQ(11 + 13 + 64, product.e());
// Test rounding.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000001), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
- CHECK(1 == product.f()); // NOLINT
+ CHECK_EQ(1, product.f());
CHECK_EQ(11 + 13 + 64, product.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x7fffffff, ffffffff), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
- CHECK(0 == product.f()); // NOLINT
+ CHECK_EQ(0, product.f());
CHECK_EQ(11 + 13 + 64, product.e());
// Halfway cases are allowed to round either way. So don't check for it.
diff --git a/deps/v8/test/cctest/test-double.cc b/deps/v8/test/cctest/test-double.cc
index d0d8bf1ef2..81a06bf997 100644
--- a/deps/v8/test/cctest/test-double.cc
+++ b/deps/v8/test/cctest/test-double.cc
@@ -61,7 +61,7 @@ TEST(AsDiyFp) {
diy_fp = Double(min_double64).AsDiyFp();
CHECK_EQ(-0x3FF - 52 + 1, diy_fp.e());
// This is a denormal; so no hidden bit.
- CHECK(1 == diy_fp.f()); // NOLINT
+ CHECK_EQ(1, diy_fp.f());
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsDiyFp();
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index b1d86e6367..ae06f6db47 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -75,7 +75,8 @@ TEST(JSObjectAddingProperties) {
Handle<FixedArray> empty_fixed_array(factory->empty_fixed_array());
Handle<PropertyArray> empty_property_array(factory->empty_property_array());
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<Object> value(Smi::FromInt(42), isolate);
Handle<JSObject> object = factory->NewJSObject(function);
@@ -104,7 +105,8 @@ TEST(JSObjectInObjectAddingProperties) {
Handle<FixedArray> empty_fixed_array(factory->empty_fixed_array());
Handle<PropertyArray> empty_property_array(factory->empty_property_array());
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
int nof_inobject_properties = 10;
// force in object properties by changing the expected_nof_properties
function->shared()->set_expected_nof_properties(nof_inobject_properties);
@@ -151,7 +153,8 @@ TEST(JSObjectAddingElements) {
Handle<String> name;
Handle<FixedArray> empty_fixed_array(factory->empty_fixed_array());
Handle<PropertyArray> empty_property_array(factory->empty_property_array());
- Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->empty_string());
Handle<Object> value(Smi::FromInt(42), isolate);
Handle<JSObject> object = factory->NewJSObject(function);
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 119ca4c150..1dd99c5362 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -574,9 +574,9 @@ TEST(ReferenceContextAllocatesNoSlots) {
CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreNamedStrict);
CHECK_SLOT_KIND(helper, 2, FeedbackSlotKind::kStoreNamedStrict);
CHECK_SLOT_KIND(helper, 3, FeedbackSlotKind::kStoreNamedStrict);
- CHECK_SLOT_KIND(helper, 4, FeedbackSlotKind::kLoadProperty);
+ CHECK_SLOT_KIND(helper, 4, FeedbackSlotKind::kBinaryOp);
CHECK_SLOT_KIND(helper, 5, FeedbackSlotKind::kLoadProperty);
- CHECK_SLOT_KIND(helper, 6, FeedbackSlotKind::kBinaryOp);
+ CHECK_SLOT_KIND(helper, 6, FeedbackSlotKind::kLoadProperty);
}
}
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 89845e9bf5..9622da53b8 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -66,11 +66,11 @@ static Handle<AccessorPair> CreateAccessorPair(bool with_getter,
Handle<AccessorPair> pair = factory->NewAccessorPair();
Handle<String> empty_string = factory->empty_string();
if (with_getter) {
- Handle<JSFunction> func = factory->NewFunction(empty_string);
+ Handle<JSFunction> func = factory->NewFunctionForTest(empty_string);
pair->set_getter(*func);
}
if (with_setter) {
- Handle<JSFunction> func = factory->NewFunction(empty_string);
+ Handle<JSFunction> func = factory->NewFunctionForTest(empty_string);
pair->set_setter(*func);
}
return pair;
@@ -401,7 +401,7 @@ class Expectations {
Handle<String> name = MakeName("prop", property_index);
Map* target =
TransitionsAccessor(map).SearchTransition(*name, kData, attributes);
- CHECK(target != NULL);
+ CHECK_NOT_NULL(target);
return handle(target);
}
@@ -609,7 +609,7 @@ static void TestGeneralizeField(int detach_property_at_index,
CHECK(detach_property_at_index >= -1 &&
detach_property_at_index < kPropCount);
- CHECK(property_index < kPropCount);
+ CHECK_LT(property_index, kPropCount);
CHECK_NE(detach_property_at_index, property_index);
const bool is_detached_map = detach_property_at_index >= 0;
@@ -1405,7 +1405,7 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
Expectations expectations(isolate);
const int kSplitProp = 2;
- CHECK(kSplitProp < kCustomPropIndex);
+ CHECK_LT(kSplitProp, kCustomPropIndex);
const PropertyConstness constness = kMutable;
const Representation representation = Representation::Smi();
@@ -1484,7 +1484,7 @@ TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
TestConfig() {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- js_func_ = factory->NewFunction(factory->empty_string());
+ js_func_ = factory->NewFunctionForTest(factory->empty_string());
}
Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
@@ -1570,7 +1570,7 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
TestConfig() {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- js_func_ = factory->NewFunction(factory->empty_string());
+ js_func_ = factory->NewFunctionForTest(factory->empty_string());
pair_ = CreateAccessorPair(true, true);
}
@@ -2121,7 +2121,7 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
Handle<String> name = MakeName("prop", i);
Map* target =
TransitionsAccessor(map2).SearchTransition(*name, kData, NONE);
- CHECK(target != NULL);
+ CHECK_NOT_NULL(target);
map2 = handle(target);
}
@@ -2643,7 +2643,8 @@ TEST(TransitionDataConstantToSameDataConstant) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Handle<JSFunction> js_func = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> js_func =
+ factory->NewFunctionForTest(factory->empty_string());
TransitionToDataConstantOperator transition_op(js_func);
SameMapChecker checker;
@@ -2690,7 +2691,8 @@ TEST(TransitionDataConstantToDataField) {
Factory* factory = isolate->factory();
Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<JSFunction> js_func1 = factory->NewFunction(factory->empty_string());
+ Handle<JSFunction> js_func1 =
+ factory->NewFunctionForTest(factory->empty_string());
TransitionToDataConstantOperator transition_op1(js_func1);
Handle<Object> value2 = isolate->factory()->NewHeapNumber(0);
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index d3e229530f..f4a3072763 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -36,6 +36,102 @@
namespace v8 {
namespace internal {
+namespace {
+
+void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_num(0));
+}
+
+struct FlagAndPersistent {
+ bool flag;
+ v8::Global<v8::Object> handle;
+};
+
+void ResetHandleAndSetFlag(
+ const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+ data.GetParameter()->handle.Reset();
+ data.GetParameter()->flag = true;
+}
+
+using ConstructFunction = void (*)(v8::Isolate* isolate,
+ v8::Local<v8::Context> context,
+ FlagAndPersistent* flag_and_persistent);
+
+void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ FlagAndPersistent* flag_and_persistent) {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ CHECK(!object.IsEmpty());
+ flag_and_persistent->handle.Reset(isolate, object);
+ CHECK(!flag_and_persistent->handle.IsEmpty());
+}
+
+void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ FlagAndPersistent* flag_and_persistent) {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::FunctionTemplate> fun =
+ v8::FunctionTemplate::New(isolate, SimpleCallback);
+ v8::Local<v8::Object> object = fun->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
+ CHECK(!object.IsEmpty());
+ flag_and_persistent->handle.Reset(isolate, object);
+ CHECK(!flag_and_persistent->handle.IsEmpty());
+}
+
+enum class SurvivalMode { kSurvives, kDies };
+
+template <typename ModifierFunction, typename GCFunction>
+void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
+ ModifierFunction modifier_function, GCFunction gc_function,
+ SurvivalMode survives) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ FlagAndPersistent fp;
+ construct_function(isolate, context, &fp);
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> tmp = v8::Local<v8::Object>::New(isolate, fp.handle);
+ CHECK(
+ CcTest::i_isolate()->heap()->InNewSpace(*v8::Utils::OpenHandle(*tmp)));
+ }
+
+ fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
+ v8::WeakCallbackType::kParameter);
+ fp.flag = false;
+ modifier_function(&fp);
+ gc_function();
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp.flag);
+ CHECK_IMPLIES(survives == SurvivalMode::kDies, fp.flag);
+}
+
+void ResurrectingFinalizer(
+ const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->ClearWeak();
+}
+
+void ResettingFinalizer(
+ const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->Reset();
+}
+
+void EmptyWeakCallback(const v8::WeakCallbackInfo<void>& data) {}
+
+void ResurrectingFinalizerSettingProperty(
+ const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->ClearWeak();
+ v8::Local<v8::Object> o =
+ v8::Local<v8::Object>::New(data.GetIsolate(), *data.GetParameter());
+ o->Set(data.GetIsolate()->GetCurrentContext(), v8_str("finalizer"),
+ v8_str("was here"))
+ .FromJust();
+}
+
+} // namespace
+
TEST(EternalHandles) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -117,10 +213,6 @@ TEST(PersistentBaseGetLocal) {
CHECK(v8::Local<v8::Object>::New(isolate, g) == g.Get(isolate));
}
-
-void WeakCallback(const v8::WeakCallbackInfo<void>& data) {}
-
-
TEST(WeakPersistentSmi) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -130,16 +222,8 @@ TEST(WeakPersistentSmi) {
v8::Global<v8::Number> g(isolate, n);
// Should not crash.
- g.SetWeak<void>(nullptr, &WeakCallback, v8::WeakCallbackType::kParameter);
-}
-
-void finalizer(const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
- data.GetParameter()->ClearWeak();
- v8::Local<v8::Object> o =
- v8::Local<v8::Object>::New(data.GetIsolate(), *data.GetParameter());
- o->Set(data.GetIsolate()->GetCurrentContext(), v8_str("finalizer"),
- v8_str("was here"))
- .FromJust();
+ g.SetWeak<void>(nullptr, &EmptyWeakCallback,
+ v8::WeakCallbackType::kParameter);
}
TEST(FinalizerWeakness) {
@@ -154,7 +238,8 @@ TEST(FinalizerWeakness) {
v8::Local<v8::Object> o = v8::Object::New(isolate);
identity = o->GetIdentityHash();
g.Reset(isolate, o);
- g.SetWeak(&g, finalizer, v8::WeakCallbackType::kFinalizer);
+ g.SetWeak(&g, &ResurrectingFinalizerSettingProperty,
+ v8::WeakCallbackType::kFinalizer);
}
CcTest::CollectAllAvailableGarbage();
@@ -185,41 +270,218 @@ TEST(PhatomHandlesWithoutCallbacks) {
CHECK_EQ(0u, isolate->NumberOfPhantomHandleResetsSinceLastCall());
}
+TEST(WeakHandleToUnmodifiedJSObjectSurvivesScavenge) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSObject, [](FlagAndPersistent* fp) {},
+ []() { CcTest::CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kSurvives);
+}
+
+TEST(WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSObject, [](FlagAndPersistent* fp) {},
+ []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kDies);
+}
+
+TEST(WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSObject,
+ [](FlagAndPersistent* fp) {
+ v8::Local<v8::Object> handle =
+ v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
+ USE(handle);
+ },
+ []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kSurvives);
+}
+
+TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnScavenge) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject, [](FlagAndPersistent* fp) {},
+ []() { CcTest::CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kDies);
+}
+
+TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesScavengeWhenInHandle) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) {
+ v8::Local<v8::Object> handle =
+ v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
+ USE(handle);
+ },
+ []() { CcTest::CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kSurvives);
+}
+
+TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnMarkCompact) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject, [](FlagAndPersistent* fp) {},
+ []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kDies);
+}
+
+TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) {
+ v8::Local<v8::Object> handle =
+ v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
+ USE(handle);
+ },
+ []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kSurvives);
+}
+
+TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesScavenge) {
+ CcTest::InitializeVM();
+ WeakHandleTest(CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
+ []() { CcTest::CollectGarbage(i::NEW_SPACE); },
+ SurvivalMode::kSurvives);
+}
+
+TEST(WeakHandleToActiveUnmodifiedJSApiObjectDiesOnMarkCompact) {
+ CcTest::InitializeVM();
+ WeakHandleTest(CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
+ []() { CcTest::CollectGarbage(i::OLD_SPACE); },
+ SurvivalMode::kDies);
+}
+
+TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
+ CcTest::InitializeVM();
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) {
+ fp->handle.MarkActive();
+ v8::Local<v8::Object> handle =
+ v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
+ USE(handle);
+ },
+ []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kSurvives);
+}
+
namespace {
-void ResurrectingFinalizer(
- const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
- data.GetParameter()->ClearWeak();
+void ConstructFinalizerPointingPhantomHandle(
+ v8::Isolate* isolate, v8::Global<v8::Object>* g1,
+ v8::Global<v8::Object>* g2,
+ typename v8::WeakCallbackInfo<v8::Global<v8::Object>>::Callback
+ finalizer_for_g1) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> o1 =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ v8::Local<v8::Object> o2 =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ o1->Set(isolate->GetCurrentContext(), v8_str("link"), o2).FromJust();
+ g1->Reset(isolate, o1);
+ g2->Reset(isolate, o2);
+ // g1 will be finalized but resurrected.
+ g1->SetWeak(g1, finalizer_for_g1, v8::WeakCallbackType::kFinalizer);
+ // g2 will be a phantom handle that is dependent on the finalizer handle
+ // g1 as it is in its subgraph.
+ g2->SetWeak();
}
} // namespace
-TEST(Regress772299) {
+TEST(FinalizerResurrectsAndKeepsPhantomAliveOnMarkCompact) {
+ // See crbug.com/772299.
CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
-
v8::Global<v8::Object> g1, g2;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> o1 =
- v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- v8::Local<v8::Object> o2 =
- v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- o1->Set(isolate->GetCurrentContext(), v8_str("link"), o2).FromJust();
- g1.Reset(isolate, o1);
- g2.Reset(isolate, o2);
- // g1 will be finalized but resurrected.
- g1.SetWeak(&g1, ResurrectingFinalizer, v8::WeakCallbackType::kFinalizer);
- // g2 will be a phantom handle that should not be reset as g1 transitively
- // keeps it alive.
- g2.SetWeak();
- }
-
- CcTest::CollectAllAvailableGarbage();
+ ConstructFinalizerPointingPhantomHandle(CcTest::isolate(), &g1, &g2,
+ ResurrectingFinalizer);
+ CcTest::CollectGarbage(i::OLD_SPACE);
// Both, g1 and g2, should stay alive as the finalizer resurrects the root
// object that transitively keeps the other one alive.
CHECK(!g1.IsEmpty());
CHECK(!g2.IsEmpty());
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ // The finalizer handle is now strong, so it should keep the objects alive.
+ CHECK(!g1.IsEmpty());
+ CHECK(!g2.IsEmpty());
+}
+
+TEST(FinalizerDiesAndKeepsPhantomAliveOnMarkCompact) {
+ CcTest::InitializeVM();
+ v8::Global<v8::Object> g1, g2;
+ ConstructFinalizerPointingPhantomHandle(CcTest::isolate(), &g1, &g2,
+ ResettingFinalizer);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ // Finalizer (g1) dies but the phantom handle (g2) is kept alive for one
+ // more round as the underlying object only dies on the next GC.
+ CHECK(g1.IsEmpty());
+ CHECK(!g2.IsEmpty());
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ // Phantom handle dies after one more round.
+ CHECK(g1.IsEmpty());
+ CHECK(g2.IsEmpty());
+}
+
+namespace {
+
+void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
+
+void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
+
+void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+ data.GetParameter()->flag = true;
+ InvokeScavenge();
+}
+
+void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+ data.GetParameter()->handle.Reset();
+ data.SetSecondPassCallback(ForceScavenge2);
+}
+
+void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+ data.GetParameter()->flag = true;
+ InvokeMarkSweep();
+}
+
+void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+ data.GetParameter()->handle.Reset();
+ data.SetSecondPassCallback(ForceMarkSweep2);
+}
+
+} // namespace
+
+TEST(GCFromWeakCallbacks) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Locker locker(CcTest::isolate());
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ static const int kNumberOfGCTypes = 2;
+ typedef v8::WeakCallbackInfo<FlagAndPersistent>::Callback Callback;
+ Callback gc_forcing_callback[kNumberOfGCTypes] = {&ForceScavenge1,
+ &ForceMarkSweep1};
+
+ typedef void (*GCInvoker)();
+ GCInvoker invoke_gc[kNumberOfGCTypes] = {&InvokeScavenge, &InvokeMarkSweep};
+
+ for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
+ for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
+ FlagAndPersistent fp;
+ ConstructJSApiObject(isolate, context, &fp);
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> tmp =
+ v8::Local<v8::Object>::New(isolate, fp.handle);
+ CHECK(CcTest::i_isolate()->heap()->InNewSpace(
+ *v8::Utils::OpenHandle(*tmp)));
+ }
+ fp.flag = false;
+ fp.handle.SetWeak(&fp, gc_forcing_callback[inner_gc],
+ v8::WeakCallbackType::kParameter);
+ invoke_gc[outer_gc]();
+ EmptyMessageQueues(isolate);
+ CHECK(fp.flag);
+ }
+ }
}
} // namespace internal
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index 163bc09f19..1d72cfaf1c 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -43,26 +43,26 @@ class IntSet {
explicit IntSet(IntKeyHash hash) : hash_(hash) {}
void Insert(int x) {
- CHECK_NE(0, x); // 0 corresponds to (void*)NULL - illegal key value
+ CHECK_NE(0, x); // 0 corresponds to (void*)nullptr - illegal key value
v8::base::HashMap::Entry* p =
map_.LookupOrInsert(reinterpret_cast<void*>(x), hash_(x));
- CHECK(p != NULL); // insert is set!
+ CHECK_NOT_NULL(p); // insert is set!
CHECK_EQ(reinterpret_cast<void*>(x), p->key);
// we don't care about p->value
}
void Remove(int x) {
- CHECK_NE(0, x); // 0 corresponds to (void*)NULL - illegal key value
+ CHECK_NE(0, x); // 0 corresponds to (void*)nullptr - illegal key value
map_.Remove(reinterpret_cast<void*>(x), hash_(x));
}
bool Present(int x) {
v8::base::HashMap::Entry* p =
map_.Lookup(reinterpret_cast<void*>(x), hash_(x));
- if (p != NULL) {
+ if (p != nullptr) {
CHECK_EQ(reinterpret_cast<void*>(x), p->key);
}
- return p != NULL;
+ return p != nullptr;
}
void Clear() {
@@ -71,7 +71,7 @@ class IntSet {
uint32_t occupancy() const {
uint32_t count = 0;
- for (v8::base::HashMap::Entry* p = map_.Start(); p != NULL;
+ for (v8::base::HashMap::Entry* p = map_.Start(); p != nullptr;
p = map_.Next(p)) {
count++;
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index b089fa8521..ce015777e5 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -117,7 +117,7 @@ static const v8::HeapGraphNode* GetProperty(v8::Isolate* isolate,
if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
return prop->GetToNode();
}
- return NULL;
+ return nullptr;
}
static bool HasString(v8::Isolate* isolate, const v8::HeapGraphNode* node,
@@ -402,6 +402,7 @@ TEST(HeapSnapshotHeapNumbers) {
TEST(HeapSnapshotSlicedString) {
+ if (!i::FLAG_string_slices) return;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -438,7 +439,7 @@ TEST(HeapSnapshotConsString) {
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(1);
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
v8::Local<v8::Object> global_proxy = env->Global();
v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(1, global->InternalFieldCount());
@@ -558,7 +559,7 @@ TEST(HeapSnapshotWeakCollection) {
++weak_entries;
}
}
- CHECK_EQ(2, weak_entries);
+ CHECK_EQ(1, weak_entries); // Key is the only weak.
const v8::HeapGraphNode* wm_s =
GetProperty(env->GetIsolate(), wm, v8::HeapGraphEdge::kProperty, "str");
CHECK(wm_s);
@@ -674,7 +675,7 @@ TEST(HeapSnapshotInternalReferences) {
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(2);
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
v8::Local<v8::Object> global_proxy = env->Global();
v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(2, global->InternalFieldCount());
@@ -1101,7 +1102,7 @@ class TestStatsStream : public v8::OutputStream {
static TestStatsStream GetHeapStatsUpdate(
v8::HeapProfiler* heap_profiler,
- v8::SnapshotObjectId* object_id = NULL) {
+ v8::SnapshotObjectId* object_id = nullptr) {
TestStatsStream stream;
int64_t timestamp = -1;
v8::SnapshotObjectId last_seen_id =
@@ -1460,7 +1461,7 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
}
}
CHECK(false);
- return NULL;
+ return nullptr;
}
static std::vector<TestRetainedObjectInfo*> instances;
@@ -1490,7 +1491,7 @@ static const v8::HeapGraphNode* GetNode(const v8::HeapGraphNode* parent,
return node;
}
}
- return NULL;
+ return nullptr;
}
@@ -1635,7 +1636,7 @@ TEST(GlobalObjectName) {
NameResolver name_resolver;
const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(NULL, &name_resolver);
+ heap_profiler->TakeHeapSnapshot(nullptr, &name_resolver);
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(global);
@@ -2346,7 +2347,7 @@ static const v8::HeapGraphNode* GetNodeByPath(v8::Isolate* isolate,
break;
}
}
- if (i == count) return NULL;
+ if (i == count) return nullptr;
}
return node;
}
@@ -2431,10 +2432,10 @@ static const char* record_trace_tree_source =
static AllocationTraceNode* FindNode(
AllocationTracker* tracker, const Vector<const char*>& names) {
AllocationTraceNode* node = tracker->trace_tree()->root();
- for (int i = 0; node != NULL && i < names.length(); i++) {
+ for (int i = 0; node != nullptr && i < names.length(); i++) {
const char* name = names[i];
const std::vector<AllocationTraceNode*>& children = node->children();
- node = NULL;
+ node = nullptr;
for (AllocationTraceNode* child : children) {
unsigned index = child->function_info_index();
AllocationTracker::FunctionInfo* info =
@@ -2687,7 +2688,7 @@ TEST(ArrayBufferSharedBackingStore) {
CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
void* data = ab_contents.Data();
- CHECK(data != NULL);
+ CHECK_NOT_NULL(data);
v8::Local<v8::ArrayBuffer> ab2 =
v8::ArrayBuffer::New(isolate, data, ab_contents.ByteLength());
CHECK(ab2->IsExternal());
@@ -2907,7 +2908,7 @@ TEST(SamplingHeapProfiler) {
// Sample should be empty if requested before sampling has started.
{
v8::AllocationProfile* profile = heap_profiler->GetAllocationProfile();
- CHECK(profile == nullptr);
+ CHECK_NULL(profile);
}
int count_1024 = 0;
@@ -2935,7 +2936,7 @@ TEST(SamplingHeapProfiler) {
// Samples should get cleared once sampling is stopped.
{
v8::AllocationProfile* profile = heap_profiler->GetAllocationProfile();
- CHECK(profile == nullptr);
+ CHECK_NULL(profile);
}
// Sampling at a higher rate should give us similar numbers of objects.
@@ -3108,7 +3109,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
v8::Local<v8::Function>::Cast(CompileRun(source.start()));
// Make sure the function is producing pre-tenured objects.
- auto res = f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ auto res = f->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
i::Handle<i::JSObject> o = i::Handle<i::JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(o->elements()));
@@ -3116,8 +3117,8 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
// Call the function and profile it.
heap_profiler->StartSamplingHeapProfiler(64);
- for (int i = 0; i < 100; ++i) {
- f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ for (int i = 0; i < 80; ++i) {
+ f->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
}
std::unique_ptr<v8::AllocationProfile> profile(
@@ -3135,5 +3136,5 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
count += allocation.count;
}
- CHECK_GE(count, 9000);
+ CHECK_GE(count, 8000);
}
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 9586d72456..48ec9e18cd 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -102,7 +102,7 @@ bool IsObjectShrinkable(JSObject* obj) {
CcTest::i_isolate()->factory()->one_pointer_filler_map();
int inobject_properties = obj->map()->GetInObjectProperties();
- int unused = obj->map()->unused_property_fields();
+ int unused = obj->map()->UnusedPropertyFields();
if (unused == 0) return false;
for (int i = inobject_properties - unused; i < inobject_properties; i++) {
@@ -227,13 +227,13 @@ TEST(JSObjectComplex) {
CHECK(!IsObjectShrinkable(*obj5));
CHECK_EQ(5, obj1->map()->GetInObjectProperties());
- CHECK_EQ(4, obj1->map()->unused_property_fields());
+ CHECK_EQ(4, obj1->map()->UnusedPropertyFields());
CHECK_EQ(5, obj3->map()->GetInObjectProperties());
- CHECK_EQ(2, obj3->map()->unused_property_fields());
+ CHECK_EQ(2, obj3->map()->UnusedPropertyFields());
CHECK_EQ(5, obj5->map()->GetInObjectProperties());
- CHECK_EQ(0, obj5->map()->unused_property_fields());
+ CHECK_EQ(0, obj5->map()->UnusedPropertyFields());
// Since slack tracking is complete, the new objects should not be shrinkable.
obj1 = CompileRunI<JSObject>("new A(1);");
@@ -596,6 +596,10 @@ static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
Handle<JSObject> tmp = RunI<JSObject>(new_script);
CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
+ if (!initial_map->IsInobjectSlackTrackingInProgress()) {
+ // Turbofan can force completion of in-object slack tracking.
+ break;
+ }
CHECK_EQ(Map::kSlackTrackingCounterStart - i - 1,
initial_map->construction_counter());
}
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index ff4dae3a52..809ef81b0a 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -61,10 +61,13 @@ class StringCompareInput : public Comparator::Input {
class DiffChunkStruct : public ZoneObject {
public:
- DiffChunkStruct(int pos1_param, int pos2_param,
- int len1_param, int len2_param)
- : pos1(pos1_param), pos2(pos2_param),
- len1(len1_param), len2(len2_param), next(NULL) {}
+ DiffChunkStruct(int pos1_param, int pos2_param, int len1_param,
+ int len2_param)
+ : pos1(pos1_param),
+ pos2(pos2_param),
+ len1(len1_param),
+ len2(len2_param),
+ next(nullptr) {}
int pos1;
int pos2;
int len1;
@@ -78,7 +81,7 @@ class ListDiffOutputWriter : public Comparator::Output {
explicit ListDiffOutputWriter(DiffChunkStruct** next_chunk_pointer,
Zone* zone)
: next_chunk_pointer_(next_chunk_pointer), zone_(zone) {
- (*next_chunk_pointer_) = NULL;
+ (*next_chunk_pointer_) = nullptr;
}
void AddChunk(int pos1, int pos2, int len1, int len2) {
current_chunk_ = new(zone_) DiffChunkStruct(pos1, pos2, len1, len2);
@@ -112,9 +115,8 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int diff_parameter = 0;
- for (DiffChunkStruct* chunk = first_chunk;
- chunk != NULL;
- chunk = chunk->next) {
+ for (DiffChunkStruct* chunk = first_chunk; chunk != nullptr;
+ chunk = chunk->next) {
int diff_pos1 = chunk->pos1;
int similar_part_length = diff_pos1 - pos1;
int diff_pos2 = pos2 + similar_part_length;
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 23622499b7..9c25f3ce3e 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -31,7 +31,7 @@
#include "include/v8-profiler.h"
#include "src/api.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/disassembler.h"
#include "src/isolate.h"
#include "src/log.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 03d90b5012..05ae2e8fcd 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -34,6 +34,7 @@
#include <cmath>
#endif // __linux__
+#include <unordered_set>
#include "src/api.h"
#include "src/log-utils.h"
#include "src/log.h"
@@ -63,13 +64,43 @@ namespace {
i::FLAG_logfile = i::Log::kLogToTemporaryFile; \
i::FLAG_logfile_per_isolate = false
+static const char* StrNStr(const char* s1, const char* s2, size_t n) {
+ CHECK_EQ(s1[n], '\0');
+ return strstr(s1, s2);
+}
+
+// Look for a log line which starts with {prefix} and ends with {suffix}.
+static const char* FindLogLine(const char* start, const char* end,
+ const char* prefix,
+ const char* suffix = nullptr) {
+ CHECK_LT(start, end);
+ CHECK_EQ(end[0], '\0');
+ size_t prefixLength = strlen(prefix);
+ // Loop through the input until we find /{prefix}[^\n]+{suffix}/.
+ while (start < end) {
+ const char* prefixResult = strstr(start, prefix);
+ if (!prefixResult) return NULL;
+ if (suffix == nullptr) return prefixResult;
+ const char* suffixResult =
+ StrNStr(prefixResult, suffix, (end - prefixResult));
+ if (!suffixResult) return NULL;
+ // Check that there are no newlines in between the {prefix} and the {suffix}
+ // results.
+ const char* newlineResult =
+ StrNStr(prefixResult, "\n", (end - prefixResult));
+ if (!newlineResult) return prefixResult;
+ if (newlineResult > suffixResult) return prefixResult;
+ start = prefixResult + prefixLength;
+ }
+ return NULL;
+}
class ScopedLoggerInitializer {
public:
ScopedLoggerInitializer(bool saved_log, bool saved_prof, v8::Isolate* isolate)
: saved_log_(saved_log),
saved_prof_(saved_prof),
- temp_file_(NULL),
+ temp_file_(nullptr),
isolate_(isolate),
isolate_scope_(isolate),
scope_(isolate),
@@ -81,9 +112,10 @@ class ScopedLoggerInitializer {
~ScopedLoggerInitializer() {
env_->Exit();
logger_->TearDown();
- if (temp_file_ != NULL) fclose(temp_file_);
+ if (temp_file_ != nullptr) fclose(temp_file_);
i::FLAG_prof = saved_prof_;
i::FLAG_log = saved_log_;
+ log_.Dispose();
}
v8::Local<v8::Context>& env() { return env_; }
@@ -92,6 +124,112 @@ class ScopedLoggerInitializer {
Logger* logger() { return logger_; }
+ void PrintLog(int nofLines = 0) {
+ if (nofLines <= 0) {
+ printf("%s", log_.start());
+ return;
+ }
+ // Try to print the last {nofLines} of the log.
+ const char* start = log_.start();
+ const char* current = log_.end();
+ while (current > start && nofLines > 0) {
+ current--;
+ if (*current == '\n') nofLines--;
+ }
+ printf(
+ "======================================================\n"
+ "Last log lines:\n...%s\n"
+ "======================================================\n",
+ current);
+ }
+
+ v8::Local<v8::String> GetLogString() {
+ return v8::String::NewFromUtf8(isolate_, log_.start(),
+ v8::NewStringType::kNormal, log_.length())
+ .ToLocalChecked();
+ }
+
+ void StopLogging() {
+ bool exists = false;
+ log_ = i::ReadFile(StopLoggingGetTempFile(), &exists, true);
+ CHECK(exists);
+ }
+
+ const char* FindLine(const char* prefix, const char* suffix = nullptr,
+ const char* start = nullptr) {
+ // Make sure that StopLogging() has been called before.
+ CHECK(log_.size());
+ if (start == nullptr) start = log_.start();
+ const char* end = log_.start() + log_.length();
+ return FindLogLine(start, end, prefix, suffix);
+ }
+
+ // Find all log lines specified by the {prefix, suffix} pairs and ensure they
+// occur in the specified order.
+ void FindLogLines(const char* pairs[][2], size_t limit,
+ const char* start = nullptr) {
+ const char* prefix = pairs[0][0];
+ const char* suffix = pairs[0][1];
+ const char* last_position = FindLine(prefix, suffix, start);
+ if (last_position == nullptr) {
+ PrintLog(50);
+ V8_Fatal(__FILE__, __LINE__, "Could not find log line: %s ... %s", prefix,
+ suffix);
+ }
+ CHECK(last_position);
+ for (size_t i = 1; i < limit; i++) {
+ prefix = pairs[i][0];
+ suffix = pairs[i][1];
+ const char* position = FindLine(prefix, suffix, start);
+ if (position == nullptr) {
+ PrintLog(50);
+ V8_Fatal(__FILE__, __LINE__, "Could not find log line: %s ... %s",
+ prefix, suffix);
+ }
+ // Check that all string positions are in order.
+ if (position <= last_position) {
+ PrintLog(50);
+ V8_Fatal(__FILE__, __LINE__,
+ "Log statements not in expected order (prev=%p, current=%p): "
+ "%s ... %s",
+ reinterpret_cast<const void*>(last_position),
+ reinterpret_cast<const void*>(position), prefix, suffix);
+ }
+ last_position = position;
+ }
+ }
+
+ void LogCompiledFunctions() { logger_->LogCompiledFunctions(); }
+
+ void StringEvent(const char* name, const char* value) {
+ logger_->StringEvent(name, value);
+ }
+
+ void ExtractAllAddresses(std::unordered_set<uintptr_t>* map,
+ const char* prefix, int field_index) {
+ // Make sure that StopLogging() has been called before.
+ CHECK(log_.size());
+ const char* current = log_.start();
+ while (current != nullptr) {
+ current = FindLine(prefix, nullptr, current);
+ if (current == nullptr) return;
+      // Find token number {field_index}.
+ const char* previous;
+ for (int i = 0; i <= field_index; i++) {
+ previous = current;
+ current = strchr(current + 1, ',');
+ if (current == nullptr) break;
+ // Skip the comma.
+ current++;
+ }
+ if (current == nullptr) break;
+ uintptr_t address = strtoll(previous, nullptr, 16);
+ CHECK_LT(0, address);
+ map->insert(address);
+ }
+ }
+
+ private:
FILE* StopLoggingGetTempFile() {
temp_file_ = logger_->TearDown();
CHECK(temp_file_);
@@ -100,7 +238,6 @@ class ScopedLoggerInitializer {
return temp_file_;
}
- private:
const bool saved_log_;
const bool saved_prof_;
FILE* temp_file_;
@@ -109,23 +246,37 @@ class ScopedLoggerInitializer {
v8::HandleScope scope_;
v8::Local<v8::Context> env_;
Logger* logger_;
+ i::Vector<const char> log_;
DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
};
} // namespace
-
-static const char* StrNStr(const char* s1, const char* s2, int n) {
- if (s1[n] == '\0') return strstr(s1, s2);
- i::ScopedVector<char> str(n + 1);
- i::StrNCpy(str, s1, static_cast<size_t>(n));
- str[n] = '\0';
- char* found = strstr(str.start(), s2);
- return found != NULL ? s1 + (found - str.start()) : NULL;
+TEST(FindLogLine) {
+ const char* string =
+ "prefix1, stuff, suffix1\n"
+ "prefix2, stuff\n, suffix2\n"
+ "prefix3suffix3\n"
+ "prefix4 suffix4";
+ const char* end = string + strlen(string);
+ // Make sure the vector contains the terminating \0 character.
+ CHECK(FindLogLine(string, end, "prefix1, stuff, suffix1"));
+ CHECK(FindLogLine(string, end, "prefix1, stuff"));
+ CHECK(FindLogLine(string, end, "prefix1"));
+ CHECK(FindLogLine(string, end, "prefix1", "suffix1"));
+ CHECK(FindLogLine(string, end, "prefix1", "suffix1"));
+ CHECK(!FindLogLine(string, end, "prefix2", "suffix2"));
+ CHECK(!FindLogLine(string, end, "prefix1", "suffix2"));
+ CHECK(!FindLogLine(string, end, "prefix1", "suffix3"));
+ CHECK(FindLogLine(string, end, "prefix3", "suffix3"));
+ CHECK(FindLogLine(string, end, "prefix4", "suffix4"));
+ CHECK(!FindLogLine(string, end, "prefix4", "suffix4XXXXXXXXXXXX"));
+ CHECK(
+ !FindLogLine(string, end, "prefix4XXXXXXXXXXXXXXXXXXXXXXxxx", "suffix4"));
+ CHECK(!FindLogLine(string, end, "suffix", "suffix5XXXXXXXXXXXXXXXXXXXX"));
}
-
// BUG(913). Need to implement support for profiling multiple VM threads.
#if 0
@@ -177,7 +328,7 @@ class LoopingJsThread : public LoopingThread {
: LoopingThread(isolate) { }
void RunLoop() {
v8::Locker locker;
- CHECK(CcTest::i_isolate() != NULL);
+ CHECK_NOT_NULL(CcTest::i_isolate());
CHECK_GT(CcTest::i_isolate()->thread_manager()->CurrentId(), 0);
SetV8ThreadId();
while (IsRunning()) {
@@ -205,7 +356,7 @@ class LoopingNonJsThread : public LoopingThread {
v8::Locker locker;
v8::Unlocker unlocker;
// Now thread has V8's id, but will not run VM code.
- CHECK(CcTest::i_isolate() != NULL);
+ CHECK_NOT_NULL(CcTest::i_isolate());
CHECK_GT(CcTest::i_isolate()->thread_manager()->CurrentId(), 0);
double i = 10;
SignalRunning();
@@ -248,7 +399,7 @@ class TestSampler : public v8::internal::Sampler {
} // namespace
TEST(ProfMultipleThreads) {
- TestSampler* sampler = NULL;
+ TestSampler* sampler = nullptr;
{
v8::Locker locker;
sampler = new TestSampler(CcTest::i_isolate());
@@ -327,7 +478,7 @@ TEST(Issue23768) {
i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
// This situation can happen if source was an external string disposed
// by its owner.
- i_source->set_resource(NULL);
+ i_source->set_resource(nullptr);
// Must not crash.
CcTest::i_isolate()->logger()->LogCompiledFunctions();
@@ -344,8 +495,7 @@ TEST(LogCallbacks) {
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- ScopedLoggerInitializer initialize_logger(saved_log, saved_prof, isolate);
- Logger* logger = initialize_logger.logger();
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
isolate, v8::FunctionTemplate::New(isolate));
@@ -357,31 +507,25 @@ TEST(LogCallbacks) {
v8::Local<v8::Value>(), signature),
static_cast<v8::PropertyAttribute>(v8::DontDelete));
- initialize_logger.env()
+ logger.env()
->Global()
- ->Set(initialize_logger.env(), v8_str("Obj"),
- obj->GetFunction(initialize_logger.env()).ToLocalChecked())
+ ->Set(logger.env(), v8_str("Obj"),
+ obj->GetFunction(logger.env()).ToLocalChecked())
.FromJust();
CompileRun("Obj.prototype.method1.toString();");
- logger->LogCompiledFunctions();
+ logger.LogCompiledFunctions();
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
+ logger.StopLogging();
Address ObjMethod1_entry = reinterpret_cast<Address>(ObjMethod1);
#if USES_FUNCTION_DESCRIPTORS
ObjMethod1_entry = *FUNCTION_ENTRYPOINT_ADDRESS(ObjMethod1_entry);
#endif
i::EmbeddedVector<char, 100> ref_data;
- i::SNPrintF(ref_data,
- "code-creation,Callback,-2,-1,0x%" V8PRIxPTR ",1,\"method1\"",
+ i::SNPrintF(ref_data, ",0x%" V8PRIxPTR ",1,method1",
reinterpret_cast<intptr_t>(ObjMethod1_entry));
-
- CHECK(StrNStr(log.start(), ref_data.start(), log.length()));
- log.Dispose();
+ CHECK(logger.FindLine("code-creation,Callback,-2,", ref_data.start()));
}
isolate->Dispose();
}
@@ -407,8 +551,7 @@ TEST(LogAccessorCallbacks) {
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- ScopedLoggerInitializer initialize_logger(saved_log, saved_prof, isolate);
- Logger* logger = initialize_logger.logger();
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
isolate, v8::FunctionTemplate::New(isolate));
@@ -417,51 +560,43 @@ TEST(LogAccessorCallbacks) {
inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
inst->SetAccessor(v8_str("prop2"), Prop2Getter);
- logger->LogAccessorCallbacks();
+ logger.logger()->LogAccessorCallbacks();
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
+ logger.StopLogging();
Address Prop1Getter_entry = reinterpret_cast<Address>(Prop1Getter);
#if USES_FUNCTION_DESCRIPTORS
Prop1Getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(Prop1Getter_entry);
#endif
EmbeddedVector<char, 100> prop1_getter_record;
- i::SNPrintF(prop1_getter_record,
- "code-creation,Callback,-2,-1,0x%" V8PRIxPTR ",1,\"get prop1\"",
+ i::SNPrintF(prop1_getter_record, ",0x%" V8PRIxPTR ",1,get prop1",
reinterpret_cast<intptr_t>(Prop1Getter_entry));
- CHECK(StrNStr(log.start(), prop1_getter_record.start(), log.length()));
+ CHECK(logger.FindLine("code-creation,Callback,-2,",
+ prop1_getter_record.start()));
Address Prop1Setter_entry = reinterpret_cast<Address>(Prop1Setter);
#if USES_FUNCTION_DESCRIPTORS
Prop1Setter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(Prop1Setter_entry);
#endif
EmbeddedVector<char, 100> prop1_setter_record;
- i::SNPrintF(prop1_setter_record,
- "code-creation,Callback,-2,-1,0x%" V8PRIxPTR ",1,\"set prop1\"",
+ i::SNPrintF(prop1_setter_record, ",0x%" V8PRIxPTR ",1,set prop1",
reinterpret_cast<intptr_t>(Prop1Setter_entry));
- CHECK(StrNStr(log.start(), prop1_setter_record.start(), log.length()));
+ CHECK(logger.FindLine("code-creation,Callback,-2,",
+ prop1_setter_record.start()));
Address Prop2Getter_entry = reinterpret_cast<Address>(Prop2Getter);
#if USES_FUNCTION_DESCRIPTORS
Prop2Getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(Prop2Getter_entry);
#endif
EmbeddedVector<char, 100> prop2_getter_record;
- i::SNPrintF(prop2_getter_record,
- "code-creation,Callback,-2,-1,0x%" V8PRIxPTR ",1,\"get prop2\"",
+ i::SNPrintF(prop2_getter_record, ",0x%" V8PRIxPTR ",1,get prop2",
reinterpret_cast<intptr_t>(Prop2Getter_entry));
- CHECK(StrNStr(log.start(), prop2_getter_record.start(), log.length()));
- log.Dispose();
+ CHECK(logger.FindLine("code-creation,Callback,-2,",
+ prop2_getter_record.start()));
}
isolate->Dispose();
}
-
-typedef i::NativesCollection<i::TEST> TestSources;
-
-
// Test that logging of code create / move events is equivalent to traversal of
// a resulting heap.
TEST(EquivalenceOfLoggingAndTraversal) {
@@ -477,8 +612,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- ScopedLoggerInitializer initialize_logger(saved_log, saved_prof, isolate);
- Logger* logger = initialize_logger.logger();
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
// Compile and run a function that creates other functions.
CompileRun(
@@ -486,29 +620,26 @@ TEST(EquivalenceOfLoggingAndTraversal) {
" obj.test =\n"
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
- logger->StopProfiler();
+ logger.logger()->StopProfiler();
reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
i::Heap::kMakeHeapIterableMask, i::GarbageCollectionReason::kTesting);
- logger->StringEvent("test-logging-done", "");
+ logger.StringEvent("test-logging-done", "");
// Iterate heap to find compiled functions, will write to log.
- logger->LogCompiledFunctions();
- logger->StringEvent("test-traversal-done", "");
+ logger.LogCompiledFunctions();
+ logger.StringEvent("test-traversal-done", "");
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
- v8::Local<v8::String> log_str =
- v8::String::NewFromUtf8(isolate, log.start(),
- v8::NewStringType::kNormal, log.length())
- .ToLocalChecked();
- initialize_logger.env()
+ logger.StopLogging();
+
+ v8::Local<v8::String> log_str = logger.GetLogString();
+ logger.env()
->Global()
- ->Set(initialize_logger.env(), v8_str("_log"), log_str)
+ ->Set(logger.env(), v8_str("_log"), log_str)
.FromJust();
- i::Vector<const char> source = TestSources::GetScriptsSource();
+ // Load the Test snapshot's sources, see log-eq-of-logging-and-traversal.js
+ i::Vector<const char> source =
+ i::NativesCollection<i::TEST>::GetScriptsSource();
v8::Local<v8::String> source_str =
v8::String::NewFromUtf8(isolate, source.start(),
v8::NewStringType::kNormal, source.length())
@@ -521,15 +652,14 @@ TEST(EquivalenceOfLoggingAndTraversal) {
CHECK(false);
}
v8::Local<v8::Value> result;
- if (!script->Run(initialize_logger.env()).ToLocal(&result)) {
+ if (!script->Run(logger.env()).ToLocal(&result)) {
v8::String::Utf8Value exception(isolate, try_catch.Exception());
printf("run: %s\n", *exception);
CHECK(false);
}
- // The result either be a "true" literal or problem description.
+    // The result will either be the "true" literal or a problem description.
if (!result->IsTrue()) {
- v8::Local<v8::String> s =
- result->ToString(initialize_logger.env()).ToLocalChecked();
+ v8::Local<v8::String> s = result->ToString(logger.env()).ToLocalChecked();
i::ScopedVector<char> data(s->Utf8Length() + 1);
CHECK(data.start());
s->WriteUtf8(data.start());
@@ -549,17 +679,14 @@ TEST(LogVersion) {
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- ScopedLoggerInitializer initialize_logger(saved_log, saved_prof, isolate);
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ logger.StopLogging();
+
i::EmbeddedVector<char, 100> ref_data;
- i::SNPrintF(ref_data, "v8-version,%d,%d,%d,%d,%d", i::Version::GetMajor(),
+ i::SNPrintF(ref_data, "%d,%d,%d,%d,%d", i::Version::GetMajor(),
i::Version::GetMinor(), i::Version::GetBuild(),
i::Version::GetPatch(), i::Version::IsCandidate());
- CHECK(StrNStr(log.start(), ref_data.start(), log.length()));
- log.Dispose();
+ CHECK(logger.FindLine("v8-version,", ref_data.start()));
}
isolate->Dispose();
}
@@ -584,9 +711,8 @@ TEST(Issue539892) {
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- ScopedLoggerInitializer initialize_logger(saved_log, saved_prof, isolate);
- Logger* logger = initialize_logger.logger();
- logger->addCodeEventListener(&code_event_logger);
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ logger.logger()->addCodeEventListener(&code_event_logger);
// Function with a really large name.
const char* source_text =
@@ -613,7 +739,214 @@ TEST(Issue539892) {
CompileRun(source_text);
// Must not crash.
- logger->LogCompiledFunctions();
+ logger.LogCompiledFunctions();
+ }
+ isolate->Dispose();
+}
+
+TEST(LogAll) {
+ SETUP_FLAGS();
+ i::FLAG_log_all = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+
+    // Function that will first be optimized and then deoptimized (soft deopt).
+ const char* source_text =
+ "function testAddFn(a,b) { return a + b };"
+ "let result;"
+ "for (let i = 0; i < 100000; i++) { result = testAddFn(i, i); };"
+ "testAddFn('1', 1);"
+ "for (let i = 0; i < 100000; i++) { result = testAddFn('1', i); }";
+ CompileRun(source_text);
+
+ logger.StopLogging();
+
+    // We should find at least one code-creation event for testAddFn().
+ CHECK(logger.FindLine("api,v8::Context::New"));
+ CHECK(logger.FindLine("timer-event-start", "V8.CompileCode"));
+ CHECK(logger.FindLine("timer-event-end", "V8.CompileCode"));
+ CHECK(logger.FindLine("code-creation,Script", ":1:1"));
+ CHECK(logger.FindLine("api,v8::Script::Run"));
+ CHECK(logger.FindLine("code-creation,LazyCompile,", "testAddFn"));
+ if (i::FLAG_opt && !i::FLAG_always_opt) {
+ CHECK(logger.FindLine("code-deopt,", "soft"));
+ CHECK(logger.FindLine("timer-event-start", "V8.DeoptimizeCode"));
+ CHECK(logger.FindLine("timer-event-end", "V8.DeoptimizeCode"));
+ }
+ }
+ isolate->Dispose();
+}
+
+TEST(TraceMaps) {
+ SETUP_FLAGS();
+ i::FLAG_trace_maps = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ // Try to create many different kind of maps to make sure the logging won't
+ // crash. More detailed tests are implemented separately.
+ const char* source_text =
+ "let a = {};"
+ "for (let i = 0; i < 500; i++) { a['p'+i] = i };"
+ "class Test { constructor(i) { this.a = 1; this['p'+i] = 1; }};"
+ "let t = new Test();"
+ "t.b = 1; t.c = 1; t.d = 3;"
+ "for (let i = 0; i < 100; i++) { t = new Test(i) };"
+ "t.b = {};";
+ CompileRun(source_text);
+
+ logger.StopLogging();
+
+ // Mostly superficial checks.
+ CHECK(logger.FindLine("map,InitialMap", ",0x"));
+ CHECK(logger.FindLine("map,Transition", ",0x"));
+ CHECK(logger.FindLine("map-details", ",0x"));
+ }
+ i::FLAG_trace_maps = false;
+ isolate->Dispose();
+}
+
+TEST(LogMaps) {
+ // Test that all Map details from Maps in the snapshot are logged properly.
+ SETUP_FLAGS();
+ i::FLAG_trace_maps = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ logger.StopLogging();
+ // Extract all the map-detail entry addresses from the log.
+ std::unordered_set<uintptr_t> map_addresses;
+ logger.ExtractAllAddresses(&map_addresses, "map-details", 2);
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ i::HeapIterator iterator(heap);
+ i::DisallowHeapAllocation no_gc;
+
+ // Iterate over all maps on the heap.
+ size_t i = 0;
+ for (i::HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
+ i++;
+ if (!obj->IsMap()) continue;
+ uintptr_t address = reinterpret_cast<uintptr_t>(obj);
+ if (map_addresses.find(address) != map_addresses.end()) continue;
+ logger.PrintLog(200);
+ i::Map::cast(obj)->Print();
+ V8_Fatal(__FILE__, __LINE__,
+ "Map (%p, #%zu) was not logged during startup with --trace-maps!"
+ "\n# Expected Log Line: map_details, ... %p"
+ "\n# Use logger::PrintLog() for more details.",
+ reinterpret_cast<void*>(obj), i, reinterpret_cast<void*>(obj));
+ }
+ logger.PrintLog(200);
+ }
+ i::FLAG_log_function_events = false;
+ isolate->Dispose();
+}
+
+TEST(ConsoleTimeEvents) {
+ SETUP_FLAGS();
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ // Test that console time events are properly logged
+ const char* source_text =
+ "console.time();"
+ "console.timeEnd();"
+ "console.timeStamp();"
+ "console.time('timerEvent1');"
+ "console.timeEnd('timerEvent1');"
+ "console.timeStamp('timerEvent2');"
+ "console.timeStamp('timerEvent3');";
+ CompileRun(source_text);
+
+ logger.StopLogging();
+
+ const char* pairs[][2] = {{"timer-event-start,default,", nullptr},
+ {"timer-event-end,default,", nullptr},
+ {"timer-event,default,", nullptr},
+ {"timer-event-start,timerEvent1,", nullptr},
+ {"timer-event-end,timerEvent1,", nullptr},
+ {"timer-event,timerEvent2,", nullptr},
+ {"timer-event,timerEvent3,", nullptr}};
+ logger.FindLogLines(pairs, arraysize(pairs));
+ }
+
+ isolate->Dispose();
+}
+
+TEST(LogFunctionEvents) {
+ SETUP_FLAGS();
+ i::FLAG_log_function_events = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ const char* source_text =
+ "function lazyNotExecutedFunction() { return 'lazy' };"
+ "function lazyFunction() { "
+ " function lazyInnerFunction() { return 'lazy' };"
+ " return lazyInnerFunction;"
+ "};"
+ "let innerFn = lazyFunction();"
+ "innerFn();"
+ "(function eagerFunction(){ return 'eager' })();"
+ "function Foo() { this.foo = function(){}; };"
+ "let i = new Foo(); i.foo();";
+ CompileRun(source_text);
+
+ logger.StopLogging();
+
+ // TODO(cbruni): Extend with first-execution log statements.
+ CHECK_NULL(
+ logger.FindLine("function,compile-lazy,", ",lazyNotExecutedFunction"));
+ // Only consider the log starting from the first preparse statement on.
+ const char* start =
+ logger.FindLine("function,preparse-", ",lazyNotExecutedFunction");
+ const char* pairs[][2] = {
+ // Step 1: parsing top-level script, preparsing functions
+ {"function,preparse-", ",lazyNotExecutedFunction"},
+ // Missing name for preparsing lazyInnerFunction
+ // {"function,preparse-", nullptr},
+ {"function,preparse-", ",lazyFunction"},
+ {"function,full-parse,", ",eagerFunction"},
+ {"function,preparse-", ",Foo"},
+ // Missing name for inner preparsing of Foo.foo
+ // {"function,preparse-", nullptr},
+ // Missing name for top-level script.
+ {"function,parse-script,", nullptr},
+
+ // Step 2: compiling top-level script and eager functions
+ // - Compiling script without name.
+ {"function,compile,,", nullptr},
+ {"function,compile,", ",eagerFunction"},
+
+ // Step 3: start executing script
+ // Step 4. - lazy parse, lazy compiling and execute skipped functions
+ // - execute eager functions.
+ {"function,parse-function,", ",lazyFunction"},
+ {"function,compile-lazy,", ",lazyFunction"},
+
+ {"function,parse-function,", ",lazyInnerFunction"},
+ {"function,compile-lazy,", ",lazyInnerFunction"},
+
+ {"function,parse-function,", ",Foo"},
+ {"function,compile-lazy,", ",Foo"},
+ {"function,parse-function,", ",Foo.foo"},
+ {"function,compile-lazy,", ",Foo.foo"},
+ };
+ logger.FindLogLines(pairs, arraysize(pairs), start);
}
+ i::FLAG_log_function_events = false;
isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 97579674c0..0becfa52ab 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -45,18 +45,16 @@ typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef int (*F5)(void*, void*, void*, void*, void*);
-
TEST(LoadAndStoreWithRepresentation) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
+
__ sub(sp, sp, Operand(1 * kPointerSize));
Label exit;
@@ -138,14 +136,12 @@ TEST(LoadAndStoreWithRepresentation) {
TEST(ExtractLane) {
if (!CpuFeatures::IsSupported(NEON)) return;
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@@ -281,14 +277,12 @@ TEST(ExtractLane) {
TEST(ReplaceLane) {
if (!CpuFeatures::IsSupported(NEON)) return;
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index f1a0b2a1a6..7879ff2622 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -60,7 +60,7 @@ TEST(BYTESWAP) {
};
T t;
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -823,12 +823,12 @@ TEST(BranchOverflowInt32LeftLabel) {
__ li(rc.right, valRight);
switch (branchType) {
case kAddBranchOverflow:
- __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
- rc.scratch);
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ nullptr, rc.scratch);
break;
case kSubBranchOverflow:
- __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
- rc.scratch);
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ nullptr, rc.scratch);
break;
}
__ li(v0, 0);
@@ -846,11 +846,11 @@ TEST(BranchOverflowInt32LeftLabel) {
switch (branchType) {
case kAddBranchOverflow:
__ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
- &overflow, NULL, rc.scratch);
+ &overflow, nullptr, rc.scratch);
break;
case kSubBranchOverflow:
__ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
- &overflow, NULL, rc.scratch);
+ &overflow, nullptr, rc.scratch);
break;
}
__ li(v0, 0);
@@ -905,11 +905,11 @@ TEST(BranchOverflowInt32RightLabel) {
__ li(rc.right, valRight);
switch (branchType) {
case kAddBranchOverflow:
- __ AddBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, nullptr,
&no_overflow, rc.scratch);
break;
case kSubBranchOverflow:
- __ SubBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, nullptr,
&no_overflow, rc.scratch);
break;
}
@@ -927,11 +927,11 @@ TEST(BranchOverflowInt32RightLabel) {
__ li(rc.left, valLeft);
switch (branchType) {
case kAddBranchOverflow:
- __ AddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight), nullptr,
&no_overflow, rc.scratch);
break;
case kSubBranchOverflow:
- __ SubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight), nullptr,
&no_overflow, rc.scratch);
break;
}
@@ -1446,7 +1446,7 @@ TEST(macro_float_minmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -1589,7 +1589,7 @@ TEST(macro_float_minmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 58e5b32dd6..c695d29203 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -64,7 +64,7 @@ TEST(BYTESWAP) {
};
T t;
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -144,7 +144,7 @@ TEST(LoadConstants) {
refConstants[i] = ~(mask << i);
}
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -179,7 +179,7 @@ TEST(LoadAddress) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
Label to_jump, skip;
@@ -981,12 +981,12 @@ TEST(BranchOverflowInt64LeftLabel) {
__ li(rc.right, valRight);
switch (branchType) {
case kAddBranchOverflow:
- __ DaddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
- rc.scratch);
+ __ DaddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ nullptr, rc.scratch);
break;
case kSubBranchOverflow:
- __ DsubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
- rc.scratch);
+ __ DsubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ nullptr, rc.scratch);
break;
}
__ li(v0, 0);
@@ -1004,11 +1004,11 @@ TEST(BranchOverflowInt64LeftLabel) {
switch (branchType) {
case kAddBranchOverflow:
__ DaddBranchOvf(rc.dst, rc.left, Operand(valRight),
- &overflow, NULL, rc.scratch);
+ &overflow, nullptr, rc.scratch);
break;
case kSubBranchOverflow:
__ DsubBranchOvf(rc.dst, rc.left, Operand(valRight),
- &overflow, NULL, rc.scratch);
+ &overflow, nullptr, rc.scratch);
break;
}
__ li(v0, 0);
@@ -1063,11 +1063,11 @@ TEST(BranchOverflowInt64RightLabel) {
__ li(rc.right, valRight);
switch (branchType) {
case kAddBranchOverflow:
- __ DaddBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ __ DaddBranchOvf(rc.dst, rc.left, rc.right, nullptr,
&no_overflow, rc.scratch);
break;
case kSubBranchOverflow:
- __ DsubBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ __ DsubBranchOvf(rc.dst, rc.left, rc.right, nullptr,
&no_overflow, rc.scratch);
break;
}
@@ -1085,12 +1085,12 @@ TEST(BranchOverflowInt64RightLabel) {
__ li(rc.left, valLeft);
switch (branchType) {
case kAddBranchOverflow:
- __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
- &no_overflow, rc.scratch);
+ __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ nullptr, &no_overflow, rc.scratch);
break;
case kSubBranchOverflow:
- __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
- &no_overflow, rc.scratch);
+ __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ nullptr, &no_overflow, rc.scratch);
break;
}
__ li(v0, 1);
@@ -1721,7 +1721,7 @@ TEST(macro_float_minmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -1864,7 +1864,7 @@ TEST(macro_float_minmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 6da2ee492f..5d94412d9b 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -52,7 +52,6 @@ typedef int (*F0)();
#define __ masm->
-
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
@@ -98,14 +97,11 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
// Test that we can move a Smi value literally into a register.
TEST(SmiMove) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
@@ -184,14 +180,11 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
// Test that we can compare smis for equality (and more).
TEST(SmiCompare) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 2, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -233,14 +226,11 @@ TEST(SmiCompare) {
TEST(Integer32ToSmi) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -333,14 +323,11 @@ TEST(Integer32ToSmi) {
}
TEST(SmiCheck) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -409,443 +396,6 @@ TEST(SmiCheck) {
CHECK_EQ(0, result);
}
-static void SmiAddTest(MacroAssembler* masm,
- Label* exit,
- int id,
- int first,
- int second) {
- __ movl(rcx, Immediate(first));
- __ Integer32ToSmi(rcx, rcx);
- __ movl(rdx, Immediate(second));
- __ Integer32ToSmi(rdx, rdx);
- __ movl(r8, Immediate(first + second));
- __ Integer32ToSmi(r8, r8);
-
- __ movl(rax, Immediate(id)); // Test number.
- __ SmiAdd(r9, rcx, rdx, exit);
- __ cmpq(r9, r8);
- __ j(not_equal, exit);
-
- __ incq(rax);
- __ SmiAdd(rcx, rcx, rdx, exit);
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-
- __ movl(rcx, Immediate(first));
- __ Integer32ToSmi(rcx, rcx);
-
- __ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(second));
- __ cmpq(r9, r8);
- __ j(not_equal, exit);
-
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(second));
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-
- __ movl(rcx, Immediate(first));
- __ Integer32ToSmi(rcx, rcx);
-
- SmiOperationConstraints constraints =
- SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnOverflow;
- __ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(second), constraints, exit);
- __ cmpq(r9, r8);
- __ j(not_equal, exit);
-
- __ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), constraints, exit);
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-
- __ movl(rcx, Immediate(first));
- __ Integer32ToSmi(rcx, rcx);
-
- constraints = SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnNoOverflow;
- Label done;
- __ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), constraints, &done);
- __ jmp(exit);
- __ bind(&done);
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-}
-
-
-static void SmiAddOverflowTest(MacroAssembler* masm,
- Label* exit,
- int id,
- int x) {
- // Adds a Smi to x so that the addition overflows.
- CHECK(x != 0); // Can't overflow by adding a Smi.
- int y_max = (x > 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue - x - 1);
- int y_min = (x > 0) ? (Smi::kMaxValue - x + 1) : (Smi::kMinValue + 0);
-
- __ movl(rax, Immediate(id));
- __ Move(rcx, Smi::FromInt(x));
- __ movq(r11, rcx); // Store original Smi value of x in r11.
- __ Move(rdx, Smi::FromInt(y_min));
- {
- Label overflow_ok;
- __ SmiAdd(r9, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAdd(rcx, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- SmiOperationConstraints constraints =
- SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnOverflow;
- __ movq(rcx, r11);
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- __ Move(rdx, Smi::FromInt(y_max));
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAdd(r9, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAdd(rcx, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- __ movq(rcx, r11);
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- constraints = SmiOperationConstraint::kBailoutOnOverflow;
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(equal, exit);
- }
-}
-
-
-TEST(SmiAdd) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 3, &actual_size, true));
- CHECK(buffer);
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
- v8::internal::CodeObjectRequired::kYes);
-
- MacroAssembler* masm = &assembler;
- EntryCode(masm);
- Label exit;
-
- // No-overflow tests.
- SmiAddTest(masm, &exit, 0x10, 1, 2);
- SmiAddTest(masm, &exit, 0x20, 1, -2);
- SmiAddTest(masm, &exit, 0x30, -1, 2);
- SmiAddTest(masm, &exit, 0x40, -1, -2);
- SmiAddTest(masm, &exit, 0x50, 0x1000, 0x2000);
- SmiAddTest(masm, &exit, 0x60, Smi::kMinValue, 5);
- SmiAddTest(masm, &exit, 0x70, Smi::kMaxValue, -5);
- SmiAddTest(masm, &exit, 0x80, Smi::kMaxValue, Smi::kMinValue);
-
- SmiAddOverflowTest(masm, &exit, 0x90, -1);
- SmiAddOverflowTest(masm, &exit, 0xA0, 1);
- SmiAddOverflowTest(masm, &exit, 0xB0, 1024);
- SmiAddOverflowTest(masm, &exit, 0xC0, Smi::kMaxValue);
- SmiAddOverflowTest(masm, &exit, 0xD0, -2);
- SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
- SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
-
- __ xorq(rax, rax); // Success.
- __ bind(&exit);
- ExitCode(masm);
- __ ret(0);
-
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- // Call the function from C++.
- int result = FUNCTION_CAST<F0>(buffer)();
- CHECK_EQ(0, result);
-}
-
-
-static void SmiSubTest(MacroAssembler* masm,
- Label* exit,
- int id,
- int first,
- int second) {
- __ Move(rcx, Smi::FromInt(first));
- __ Move(rdx, Smi::FromInt(second));
- __ Move(r8, Smi::FromInt(first - second));
-
- __ movl(rax, Immediate(id)); // Test 0.
- __ SmiSub(r9, rcx, rdx, exit);
- __ cmpq(r9, r8);
- __ j(not_equal, exit);
-
- __ incq(rax); // Test 1.
- __ SmiSub(rcx, rcx, rdx, exit);
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-
- __ Move(rcx, Smi::FromInt(first));
-
- __ incq(rax); // Test 2.
- __ SmiSubConstant(r9, rcx, Smi::FromInt(second));
- __ cmpq(r9, r8);
- __ j(not_equal, exit);
-
- __ incq(rax); // Test 3.
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(second));
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-
- SmiOperationConstraints constraints =
- SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnOverflow;
- __ Move(rcx, Smi::FromInt(first));
- __ incq(rax); // Test 4.
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), constraints, exit);
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-
- __ Move(rcx, Smi::FromInt(first));
- __ incq(rax); // Test 5.
- __ SmiSubConstant(r9, rcx, Smi::FromInt(second), constraints, exit);
- __ cmpq(r9, r8);
- __ j(not_equal, exit);
-
- constraints = SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnNoOverflow;
- __ Move(rcx, Smi::FromInt(first));
- Label done;
- __ incq(rax); // Test 6.
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), constraints, &done);
- __ jmp(exit);
- __ bind(&done);
- __ cmpq(rcx, r8);
- __ j(not_equal, exit);
-}
-
-
-static void SmiSubOverflowTest(MacroAssembler* masm,
- Label* exit,
- int id,
- int x) {
- // Subtracts a Smi from x so that the subtraction overflows.
- CHECK(x != -1); // Can't overflow by subtracting a Smi.
- int y_max = (x < 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue + 0);
- int y_min = (x < 0) ? (Smi::kMaxValue + x + 2) : (Smi::kMinValue + x);
-
- __ movl(rax, Immediate(id));
- __ Move(rcx, Smi::FromInt(x));
- __ movq(r11, rcx); // Store original Smi value of x in r11.
- __ Move(rdx, Smi::FromInt(y_min));
- {
- Label overflow_ok;
- __ SmiSub(r9, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSub(rcx, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- SmiOperationConstraints constraints =
- SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnOverflow;
-
- __ movq(rcx, r11);
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- __ Move(rdx, Smi::FromInt(y_max));
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSub(r9, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSub(rcx, rcx, rdx, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- __ movq(rcx, r11);
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(not_equal, exit);
- }
-
- constraints = SmiOperationConstraint::kBailoutOnOverflow;
- __ movq(rcx, r11);
- {
- Label overflow_ok;
- __ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
- __ jmp(exit);
- __ bind(&overflow_ok);
- __ incq(rax);
- __ cmpq(rcx, r11);
- __ j(equal, exit);
- }
-}
-
-
-TEST(SmiSub) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 4, &actual_size, true));
- CHECK(buffer);
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
- v8::internal::CodeObjectRequired::kYes);
-
- MacroAssembler* masm = &assembler;
- EntryCode(masm);
- Label exit;
-
- SmiSubTest(masm, &exit, 0x10, 1, 2);
- SmiSubTest(masm, &exit, 0x20, 1, -2);
- SmiSubTest(masm, &exit, 0x30, -1, 2);
- SmiSubTest(masm, &exit, 0x40, -1, -2);
- SmiSubTest(masm, &exit, 0x50, 0x1000, 0x2000);
- SmiSubTest(masm, &exit, 0x60, Smi::kMinValue, -5);
- SmiSubTest(masm, &exit, 0x70, Smi::kMaxValue, 5);
- SmiSubTest(masm, &exit, 0x80, -Smi::kMaxValue, Smi::kMinValue);
- SmiSubTest(masm, &exit, 0x90, 0, Smi::kMaxValue);
-
- SmiSubOverflowTest(masm, &exit, 0xA0, 1);
- SmiSubOverflowTest(masm, &exit, 0xB0, 1024);
- SmiSubOverflowTest(masm, &exit, 0xC0, Smi::kMaxValue);
- SmiSubOverflowTest(masm, &exit, 0xD0, -2);
- SmiSubOverflowTest(masm, &exit, 0xE0, -42000);
- SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
- SmiSubOverflowTest(masm, &exit, 0x100, 0);
-
- __ xorq(rax, rax); // Success.
- __ bind(&exit);
- ExitCode(masm);
- __ ret(0);
-
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- // Call the function from C++.
- int result = FUNCTION_CAST<F0>(buffer)();
- CHECK_EQ(0, result);
-}
-
void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
__ movl(rax, Immediate(id));
@@ -870,14 +420,11 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
}
TEST(SmiIndex) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 5, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -902,82 +449,15 @@ TEST(SmiIndex) {
CHECK_EQ(0, result);
}
-void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
- CHECK(x >= 0);
- int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
- int power_count = 8;
- __ movl(rax, Immediate(id));
- for (int i = 0; i < power_count; i++) {
- int power = powers[i];
- intptr_t result = static_cast<intptr_t>(x) << power;
- __ Set(r8, result);
- __ Move(rcx, Smi::FromInt(x));
- __ movq(r11, rcx);
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rcx, power);
- __ cmpq(rdx, r8);
- __ j(not_equal, exit);
- __ incq(rax);
- __ cmpq(r11, rcx); // rcx unchanged.
- __ j(not_equal, exit);
- __ incq(rax);
- __ PositiveSmiTimesPowerOfTwoToInteger64(rcx, rcx, power);
- __ cmpq(rdx, r8);
- __ j(not_equal, exit);
- __ incq(rax);
- }
-}
-
-
-TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 4, &actual_size, true));
- CHECK(buffer);
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
- v8::internal::CodeObjectRequired::kYes);
-
- MacroAssembler* masm = &assembler;
- EntryCode(masm);
- Label exit;
-
- TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
- TestPositiveSmiPowerUp(masm, &exit, 0x40, 1);
- TestPositiveSmiPowerUp(masm, &exit, 0x60, 127);
- TestPositiveSmiPowerUp(masm, &exit, 0x80, 128);
- TestPositiveSmiPowerUp(masm, &exit, 0xA0, 255);
- TestPositiveSmiPowerUp(masm, &exit, 0xC0, 256);
- TestPositiveSmiPowerUp(masm, &exit, 0x100, 65535);
- TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
- TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
-
- __ xorq(rax, rax); // Success.
- __ bind(&exit);
- ExitCode(masm);
- __ ret(0);
-
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- // Call the function from C++.
- int result = FUNCTION_CAST<F0>(buffer)();
- CHECK_EQ(0, result);
-}
-
-
TEST(OperandOffset) {
uint32_t data[256];
for (uint32_t i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 2, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -1321,15 +801,13 @@ TEST(OperandOffset) {
TEST(LoadAndStoreWithRepresentation) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
+
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
__ subq(rsp, Immediate(1 * kPointerSize));
@@ -1590,14 +1068,11 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
}
TEST(SIMDMacros) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 2, &actual_size, true));
- CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index b61b10bcea..6de2efc6ca 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -281,115 +281,4 @@ TEST(ModuleEvaluationCompletion2) {
CHECK(!try_catch.HasCaught());
}
-TEST(ModuleNamespace) {
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
- v8::TryCatch try_catch(isolate);
-
- Local<v8::Object> ReferenceError =
- CompileRun("ReferenceError")->ToObject(env.local()).ToLocalChecked();
-
- Local<String> source_text = v8_str(
- "import {a, b} from 'export var a = 1; export let b = 2';"
- "export function geta() {return a};"
- "export function getb() {return b};"
- "export let radio = 3;"
- "export var gaga = 4;");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- Local<Module> module =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- CHECK(module
- ->InstantiateModule(env.local(),
- CompileSpecifierAsModuleResolveCallback)
- .FromJust());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- Local<Value> ns = module->GetModuleNamespace();
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- Local<v8::Object> nsobj = ns->ToObject(env.local()).ToLocalChecked();
-
- // a, b
- CHECK(nsobj->Get(env.local(), v8_str("a")).ToLocalChecked()->IsUndefined());
- CHECK(nsobj->Get(env.local(), v8_str("b")).ToLocalChecked()->IsUndefined());
-
- // geta
- {
- auto geta = nsobj->Get(env.local(), v8_str("geta")).ToLocalChecked();
- auto a = geta.As<v8::Function>()
- ->Call(env.local(), geta, 0, nullptr)
- .ToLocalChecked();
- CHECK(a->IsUndefined());
- }
-
- // getb
- {
- v8::TryCatch inner_try_catch(isolate);
- auto getb = nsobj->Get(env.local(), v8_str("getb")).ToLocalChecked();
- CHECK(
- getb.As<v8::Function>()->Call(env.local(), getb, 0, nullptr).IsEmpty());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()
- ->InstanceOf(env.local(), ReferenceError)
- .FromJust());
- }
-
- // radio
- {
- v8::TryCatch inner_try_catch(isolate);
- // https://bugs.chromium.org/p/v8/issues/detail?id=7235
- // CHECK(nsobj->Get(env.local(), v8_str("radio")).IsEmpty());
- CHECK(nsobj->Get(env.local(), v8_str("radio"))
- .ToLocalChecked()
- ->IsUndefined());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()
- ->InstanceOf(env.local(), ReferenceError)
- .FromJust());
- }
-
- // gaga
- {
- auto gaga = nsobj->Get(env.local(), v8_str("gaga")).ToLocalChecked();
- CHECK(gaga->IsUndefined());
- }
-
- CHECK(!try_catch.HasCaught());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
- module->Evaluate(env.local()).ToLocalChecked();
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
-
- // geta
- {
- auto geta = nsobj->Get(env.local(), v8_str("geta")).ToLocalChecked();
- auto a = geta.As<v8::Function>()
- ->Call(env.local(), geta, 0, nullptr)
- .ToLocalChecked();
- CHECK_EQ(1, a->Int32Value(env.local()).FromJust());
- }
-
- // getb
- {
- auto getb = nsobj->Get(env.local(), v8_str("getb")).ToLocalChecked();
- auto b = getb.As<v8::Function>()
- ->Call(env.local(), getb, 0, nullptr)
- .ToLocalChecked();
- CHECK_EQ(2, b->Int32Value(env.local()).FromJust());
- }
-
- // radio
- {
- auto radio = nsobj->Get(env.local(), v8_str("radio")).ToLocalChecked();
- CHECK_EQ(3, radio->Int32Value(env.local()).FromJust());
- }
-
- // gaga
- {
- auto gaga = nsobj->Get(env.local(), v8_str("gaga")).ToLocalChecked();
- CHECK_EQ(4, gaga->Int32Value(env.local()).FromJust());
- }
-
- CHECK(!try_catch.HasCaught());
-}
} // anonymous namespace
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 088803ec36..42c0691d83 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -57,6 +57,9 @@ TEST(NoSideEffectsToString) {
CheckBoolean(isolate, true, "true");
CheckBoolean(isolate, false, "false");
CheckBoolean(isolate, false, "false");
+ Handle<Object> smi_42 = handle(Smi::FromInt(42), isolate);
+ CheckObject(isolate, BigInt::FromNumber(isolate, smi_42).ToHandleChecked(),
+ "42");
CheckObject(isolate, factory->undefined_value(), "undefined");
CheckObject(isolate, factory->null_value(), "null");
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index e604040ffd..0ecdbf2dd6 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -63,7 +63,7 @@ namespace test_parsing {
namespace {
-int* global_use_counts = NULL;
+int* global_use_counts = nullptr;
void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
@@ -82,12 +82,12 @@ TEST(ScanKeywords) {
#define KEYWORD(t, s, d) { s, i::Token::t },
TOKEN_LIST(IGNORE_TOKEN, KEYWORD, IGNORE_TOKEN)
#undef KEYWORD
- {NULL, i::Token::IDENTIFIER}};
+ {nullptr, i::Token::IDENTIFIER}};
KeywordToken key_token;
i::UnicodeCache unicode_cache;
char buffer[32];
- for (int i = 0; (key_token = keywords[i]).keyword != NULL; i++) {
+ for (int i = 0; (key_token = keywords[i]).keyword != nullptr; i++) {
const char* keyword = key_token.keyword;
size_t length = strlen(key_token.keyword);
CHECK(static_cast<int>(sizeof(buffer)) >= length);
@@ -134,6 +134,7 @@ TEST(ScanKeywords) {
TEST(ScanHTMLEndComments) {
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope handles(isolate);
// Regression test. See:
@@ -169,7 +170,7 @@ TEST(ScanHTMLEndComments) {
"/* SLDC */ /* MLC \n */ --> is eol-comment\nvar y = 37;\n",
"/* MLC1 \n */ /* SLDC1 */ /* MLC2 \n */ /* SLDC2 */ --> is eol-comment\n"
"var y = 37;\n",
- NULL
+ nullptr
};
const char* fail_tests[] = {
@@ -177,28 +178,28 @@ TEST(ScanHTMLEndComments) {
"\"\\n\" --> is eol-comment\nvar y = 37;\n",
"x/* precomment */ --> is eol-comment\nvar y = 37;\n",
"var x = 42; --> is eol-comment\nvar y = 37;\n",
- NULL
+ nullptr
};
// clang-format on
// Parser/Scanner needs a stack limit.
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
- uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
+ i_isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+ uintptr_t stack_limit = i_isolate->stack_guard()->real_climit();
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
+ i::Scanner scanner(i_isolate->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone,
+ i_isolate->ast_string_constants(),
+ i_isolate->heap()->HashSeed());
i::PendingCompilationErrorHandler pending_error_handler;
- i::PreParser preparser(
- &zone, &scanner, stack_limit, &ast_value_factory,
- &pending_error_handler,
- CcTest::i_isolate()->counters()->runtime_call_stats());
+ i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
+ &pending_error_handler,
+ i_isolate->counters()->runtime_call_stats(),
+ i_isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
CHECK(!pending_error_handler.has_pending_error());
@@ -207,17 +208,17 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
+ i::Scanner scanner(i_isolate->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone,
+ i_isolate->ast_string_constants(),
+ i_isolate->heap()->HashSeed());
i::PendingCompilationErrorHandler pending_error_handler;
- i::PreParser preparser(
- &zone, &scanner, stack_limit, &ast_value_factory,
- &pending_error_handler,
- CcTest::i_isolate()->counters()->runtime_call_stats());
+ i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
+ &pending_error_handler,
+ i_isolate->counters()->runtime_call_stats(),
+ i_isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -363,7 +364,7 @@ TEST(PreparseFunctionDataIsUsed) {
const v8::ScriptCompiler::CachedData* cached_data =
good_source.GetCachedData();
- CHECK(cached_data->data != NULL);
+ CHECK_NOT_NULL(cached_data->data);
CHECK_GT(cached_data->length, 0);
// Now compile the erroneous code with the good preparse data. If the
@@ -383,35 +384,34 @@ TEST(PreparseFunctionDataIsUsed) {
TEST(StandAlonePreParser) {
v8::V8::Initialize();
+ i::Isolate* i_isolate = CcTest::i_isolate();
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
+ i_isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
- const char* programs[] = {
- "{label: 42}",
- "var x = 42;",
- "function foo(x, y) { return x + y; }",
- "%ArgleBargle(glop);",
- "var x = new new Function('this.x = 42');",
- "var f = (x, y) => x + y;",
- NULL
- };
+ const char* programs[] = {"{label: 42}",
+ "var x = 42;",
+ "function foo(x, y) { return x + y; }",
+ "%ArgleBargle(glop);",
+ "var x = new new Function('this.x = 42');",
+ "var f = (x, y) => x + y;",
+ nullptr};
- uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
+ uintptr_t stack_limit = i_isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
+ i::Scanner scanner(i_isolate->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone,
+ i_isolate->ast_string_constants(),
+ i_isolate->heap()->HashSeed());
i::PendingCompilationErrorHandler pending_error_handler;
- i::PreParser preparser(
- &zone, &scanner, stack_limit, &ast_value_factory,
- &pending_error_handler,
- CcTest::i_isolate()->counters()->runtime_call_stats());
+ i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
+ &pending_error_handler,
+ i_isolate->counters()->runtime_call_stats(),
+ i_isolate->logger());
preparser.set_allow_natives(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -427,11 +427,8 @@ TEST(StandAlonePreParserNoNatives) {
CcTest::i_isolate()->stack_guard()->SetStackLimit(
i::GetCurrentStackPosition() - 128 * 1024);
- const char* programs[] = {
- "%ArgleBargle(glop);",
- "var x = %_IsSmi(42);",
- NULL
- };
+ const char* programs[] = {"%ArgleBargle(glop);", "var x = %_IsSmi(42);",
+ nullptr};
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
@@ -447,7 +444,8 @@ TEST(StandAlonePreParserNoNatives) {
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
- isolate->counters()->runtime_call_stats());
+ isolate->counters()->runtime_call_stats(),
+ isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
CHECK(pending_error_handler.has_pending_error());
@@ -513,10 +511,10 @@ TEST(RegressChromium62639) {
&zone, CcTest::i_isolate()->ast_string_constants(),
CcTest::i_isolate()->heap()->HashSeed());
i::PendingCompilationErrorHandler pending_error_handler;
- i::PreParser preparser(&zone, &scanner,
- CcTest::i_isolate()->stack_guard()->real_climit(),
- &ast_value_factory, &pending_error_handler,
- isolate->counters()->runtime_call_stats());
+ i::PreParser preparser(
+ &zone, &scanner, CcTest::i_isolate()->stack_guard()->real_climit(),
+ &ast_value_factory, &pending_error_handler,
+ isolate->counters()->runtime_call_stats(), isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -589,9 +587,9 @@ TEST(PreParseOverflow) {
&zone, CcTest::i_isolate()->ast_string_constants(),
CcTest::i_isolate()->heap()->HashSeed());
i::PendingCompilationErrorHandler pending_error_handler;
- i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
- &pending_error_handler,
- isolate->counters()->runtime_call_stats());
+ i::PreParser preparser(
+ &zone, &scanner, stack_limit, &ast_value_factory, &pending_error_handler,
+ isolate->counters()->runtime_call_stats(), isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
}
@@ -850,7 +848,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
i::DeclarationScope::Analyze(&info);
i::DeclarationScope::AllocateScopeInfos(&info, isolate,
i::AnalyzeMode::kRegular);
- CHECK(info.literal() != NULL);
+ CHECK_NOT_NULL(info.literal());
i::DeclarationScope* script_scope = info.literal()->scope();
CHECK(script_scope->is_script_scope());
@@ -917,7 +915,7 @@ static void CheckParsesToNumber(const char* source) {
CHECK_EQ(1, info.scope()->declarations()->LengthForTest());
i::Declaration* decl = info.scope()->declarations()->AtForTest(0);
i::FunctionLiteral* fun = decl->AsFunctionDeclaration()->fun();
- CHECK(fun->body()->length() == 1);
+ CHECK_EQ(fun->body()->length(), 1);
CHECK(fun->body()->at(0)->IsReturnStatement());
i::ReturnStatement* ret = fun->body()->at(0)->AsReturnStatement();
i::Literal* lit = ret->expression()->AsLiteral();
@@ -961,45 +959,48 @@ TEST(ScopePositions) {
};
const SourceData source_data[] = {
- {" with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::SLOPPY},
- {" with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::SLOPPY},
+ {" with ({}) ", "{ block; }", " more;", i::WITH_SCOPE,
+ i::LanguageMode::kSloppy},
+ {" with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE,
+ i::LanguageMode::kSloppy},
{" with ({}) ",
"{\n"
" block;\n"
" }",
"\n"
" more;",
- i::WITH_SCOPE, i::SLOPPY},
- {" with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::SLOPPY},
+ i::WITH_SCOPE, i::LanguageMode::kSloppy},
+ {" with ({}) ", "statement;", " more;", i::WITH_SCOPE,
+ i::LanguageMode::kSloppy},
{" with ({}) ", "statement",
"\n"
" more;",
- i::WITH_SCOPE, i::SLOPPY},
+ i::WITH_SCOPE, i::LanguageMode::kSloppy},
{" with ({})\n"
" ",
"statement;",
"\n"
" more;",
- i::WITH_SCOPE, i::SLOPPY},
+ i::WITH_SCOPE, i::LanguageMode::kSloppy},
{" try {} catch ", "(e) { block; }", " more;", i::CATCH_SCOPE,
- i::SLOPPY},
+ i::LanguageMode::kSloppy},
{" try {} catch ", "(e) { block; }", "; more;", i::CATCH_SCOPE,
- i::SLOPPY},
+ i::LanguageMode::kSloppy},
{" try {} catch ",
"(e) {\n"
" block;\n"
" }",
"\n"
" more;",
- i::CATCH_SCOPE, i::SLOPPY},
+ i::CATCH_SCOPE, i::LanguageMode::kSloppy},
{" try {} catch ", "(e) { block; }", " finally { block; } more;",
- i::CATCH_SCOPE, i::SLOPPY},
+ i::CATCH_SCOPE, i::LanguageMode::kSloppy},
{" start;\n"
" ",
- "{ let block; }", " more;", i::BLOCK_SCOPE, i::STRICT},
+ "{ let block; }", " more;", i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" start;\n"
" ",
- "{ let block; }", "; more;", i::BLOCK_SCOPE, i::STRICT},
+ "{ let block; }", "; more;", i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" start;\n"
" ",
"{\n"
@@ -1007,10 +1008,11 @@ TEST(ScopePositions) {
" }",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" start;\n"
" function fun",
- "(a,b) { infunction; }", " more;", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", " more;", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
{" start;\n"
" function fun",
"(a,b) {\n"
@@ -1018,149 +1020,170 @@ TEST(ScopePositions) {
" }",
"\n"
" more;",
- i::FUNCTION_SCOPE, i::SLOPPY},
- {" start;\n", "(a,b) => a + b", "; more;", i::FUNCTION_SCOPE, i::SLOPPY},
+ i::FUNCTION_SCOPE, i::LanguageMode::kSloppy},
+ {" start;\n", "(a,b) => a + b", "; more;", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
{" start;\n", "(a,b) => { return a+b; }", "\nmore;", i::FUNCTION_SCOPE,
- i::SLOPPY},
+ i::LanguageMode::kSloppy},
{" start;\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
{" for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ",
"(let x = 1 ; x < 10; ++ x) {\n"
" block;\n"
" }",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ", "(let x = 1 ; x < 10; ++ x) statement",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ",
"(let x = 1 ; x < 10; ++ x)\n"
" statement;",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ", "(let x in {}) { block; }", " more;", i::BLOCK_SCOPE,
- i::STRICT},
+ i::LanguageMode::kStrict},
{" for ", "(let x in {}) { block; }", "; more;", i::BLOCK_SCOPE,
- i::STRICT},
+ i::LanguageMode::kStrict},
{" for ",
"(let x in {}) {\n"
" block;\n"
" }",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ", "(let x in {}) statement;", " more;", i::BLOCK_SCOPE,
- i::STRICT},
+ i::LanguageMode::kStrict},
{" for ", "(let x in {}) statement",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
{" for ",
"(let x in {})\n"
" statement;",
"\n"
" more;",
- i::BLOCK_SCOPE, i::STRICT},
+ i::BLOCK_SCOPE, i::LanguageMode::kStrict},
// Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
// the preparser off in terms of byte offsets.
// 2 surrogates, encode a character that doesn't need a surrogate.
{" 'foo\355\240\201\355\260\211';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// 4 byte encoding.
{" 'foo\360\220\220\212';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// 3 byte encoding of \u0fff.
{" 'foo\340\277\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// 3 byte surrogate, followed by broken 2-byte surrogate w/ impossible 2nd
// byte and last byte missing.
{" 'foo\355\240\201\355\211';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Broken 3 byte encoding of \u0fff with missing last byte.
{" 'foo\340\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Broken 3 byte encoding of \u0fff with missing 2 last bytes.
{" 'foo\340';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
{" 'foo\340\203\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Broken 3 byte encoding of \u007f should be a 2 byte encoding.
{" 'foo\340\201\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Unpaired lead surrogate.
{" 'foo\355\240\201';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Unpaired lead surrogate where following code point is a 3 byte
// sequence.
{" 'foo\355\240\201\340\277\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Unpaired lead surrogate where following code point is a 4 byte encoding
// of a trail surrogate.
{" 'foo\355\240\201\360\215\260\211';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Unpaired trail surrogate.
{" 'foo\355\260\211';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// 2 byte encoding of \u00ff.
{" 'foo\303\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Broken 2 byte encoding of \u00ff with missing last byte.
{" 'foo\303';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Broken 2 byte encoding of \u007f should be a 1 byte encoding.
{" 'foo\301\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Illegal 5 byte encoding.
{" 'foo\370\277\277\277\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Illegal 6 byte encoding.
{" 'foo\374\277\277\277\277\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Illegal 0xfe byte
{" 'foo\376\277\277\277\277\277\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
// Illegal 0xff byte
{" 'foo\377\277\277\277\277\277\277\277';\n"
" (function fun",
- "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
+ i::LanguageMode::kSloppy},
{" 'foo';\n"
" (function fun",
"(a,b) { 'bar\355\240\201\355\260\213'; }", ")();", i::FUNCTION_SCOPE,
- i::SLOPPY},
+ i::LanguageMode::kSloppy},
{" 'foo';\n"
" (function fun",
"(a,b) { 'bar\360\220\220\214'; }", ")();", i::FUNCTION_SCOPE,
- i::SLOPPY},
- {NULL, NULL, NULL, i::EVAL_SCOPE, i::SLOPPY}};
+ i::LanguageMode::kSloppy},
+ {nullptr, nullptr, nullptr, i::EVAL_SCOPE, i::LanguageMode::kSloppy}};
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1226,7 +1249,7 @@ TEST(DiscardFunctionBody) {
"(function f() { 0, function g() { var a; } })();",
"(function f() { 0, { g() { var a; } } })();",
"(function f() { 0, class c { g() { var a; } } })();", */
- NULL};
+ nullptr};
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1291,12 +1314,9 @@ enum ParserFlag {
kAllowLazy,
kAllowNatives,
kAllowHarmonyFunctionSent,
- kAllowHarmonyRestrictiveGenerators,
- kAllowHarmonyClassFields,
- kAllowHarmonyObjectRestSpread,
+ kAllowHarmonyPublicFields,
kAllowHarmonyDynamicImport,
kAllowHarmonyAsyncIteration,
- kAllowHarmonyTemplateEscapes,
kAllowHarmonyImportMeta,
};
@@ -1309,36 +1329,24 @@ enum ParserSyncTestResult {
void SetGlobalFlags(i::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.Contains(kAllowNatives);
i::FLAG_harmony_function_sent = flags.Contains(kAllowHarmonyFunctionSent);
- i::FLAG_harmony_restrictive_generators =
- flags.Contains(kAllowHarmonyRestrictiveGenerators);
- i::FLAG_harmony_class_fields = flags.Contains(kAllowHarmonyClassFields);
- i::FLAG_harmony_object_rest_spread =
- flags.Contains(kAllowHarmonyObjectRestSpread);
+ i::FLAG_harmony_public_fields = flags.Contains(kAllowHarmonyPublicFields);
i::FLAG_harmony_dynamic_import = flags.Contains(kAllowHarmonyDynamicImport);
i::FLAG_harmony_import_meta = flags.Contains(kAllowHarmonyImportMeta);
i::FLAG_harmony_async_iteration = flags.Contains(kAllowHarmonyAsyncIteration);
- i::FLAG_harmony_template_escapes =
- flags.Contains(kAllowHarmonyTemplateEscapes);
}
void SetParserFlags(i::PreParser* parser, i::EnumSet<ParserFlag> flags) {
parser->set_allow_natives(flags.Contains(kAllowNatives));
parser->set_allow_harmony_function_sent(
flags.Contains(kAllowHarmonyFunctionSent));
- parser->set_allow_harmony_restrictive_generators(
- flags.Contains(kAllowHarmonyRestrictiveGenerators));
- parser->set_allow_harmony_class_fields(
- flags.Contains(kAllowHarmonyClassFields));
- parser->set_allow_harmony_object_rest_spread(
- flags.Contains(kAllowHarmonyObjectRestSpread));
+ parser->set_allow_harmony_public_fields(
+ flags.Contains(kAllowHarmonyPublicFields));
parser->set_allow_harmony_dynamic_import(
flags.Contains(kAllowHarmonyDynamicImport));
parser->set_allow_harmony_import_meta(
flags.Contains(kAllowHarmonyImportMeta));
parser->set_allow_harmony_async_iteration(
flags.Contains(kAllowHarmonyAsyncIteration));
- parser->set_allow_harmony_template_escapes(
- flags.Contains(kAllowHarmonyTemplateEscapes));
}
void TestParserSyncWithFlags(i::Handle<i::String> source,
@@ -1363,10 +1371,11 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
- isolate->counters()->runtime_call_stats());
+ isolate->counters()->runtime_call_stats(),
+ isolate->logger(), -1, is_module);
SetParserFlags(&preparser, flags);
scanner.Initialize(stream.get(), is_module);
- i::PreParser::PreParseResult result = preparser.PreParseProgram(is_module);
+ i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
@@ -1383,7 +1392,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
}
// Check that preparsing fails iff parsing fails.
- if (function == NULL) {
+ if (function == nullptr) {
// Extract exception from the parser.
CHECK(isolate->has_pending_exception());
i::Handle<i::JSObject> exception_handle(
@@ -1417,7 +1426,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Check that preparser and parser produce the same error.
if (test_preparser && !ignore_error_msg) {
i::Handle<i::String> preparser_message =
- pending_error_handler.FormatMessage(CcTest::i_isolate());
+ pending_error_handler.FormatErrorMessageForTest(CcTest::i_isolate());
if (!i::String::Equals(message_string, preparser_message)) {
v8::base::OS::Print(
"Expected parser and preparser to produce the same error on:\n"
@@ -1438,7 +1447,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"\t%s\n"
"However, the parser succeeded",
source->ToCString().get(),
- pending_error_handler.FormatMessage(CcTest::i_isolate())
+ pending_error_handler.FormatErrorMessageForTest(CcTest::i_isolate())
->ToCString()
.get());
CHECK(false);
@@ -1455,9 +1464,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
void TestParserSync(const char* source, const ParserFlag* varying_flags,
size_t varying_flags_length,
ParserSyncTestResult result = kSuccessOrError,
- const ParserFlag* always_true_flags = NULL,
+ const ParserFlag* always_true_flags = nullptr,
size_t always_true_flags_length = 0,
- const ParserFlag* always_false_flags = NULL,
+ const ParserFlag* always_false_flags = nullptr,
size_t always_false_flags_length = 0,
bool is_module = false, bool test_preparser = true,
bool ignore_error_msg = false) {
@@ -1484,74 +1493,37 @@ void TestParserSync(const char* source, const ParserFlag* varying_flags,
TEST(ParserSync) {
- const char* context_data[][2] = {
- { "", "" },
- { "{", "}" },
- { "if (true) ", " else {}" },
- { "if (true) {} else ", "" },
- { "if (true) ", "" },
- { "do ", " while (false)" },
- { "while (false) ", "" },
- { "for (;;) ", "" },
- { "with ({})", "" },
- { "switch (12) { case 12: ", "}" },
- { "switch (12) { default: ", "}" },
- { "switch (12) { ", "case 12: }" },
- { "label2: ", "" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"{", "}"},
+ {"if (true) ", " else {}"},
+ {"if (true) {} else ", ""},
+ {"if (true) ", ""},
+ {"do ", " while (false)"},
+ {"while (false) ", ""},
+ {"for (;;) ", ""},
+ {"with ({})", ""},
+ {"switch (12) { case 12: ", "}"},
+ {"switch (12) { default: ", "}"},
+ {"switch (12) { ", "case 12: }"},
+ {"label2: ", ""},
+ {nullptr, nullptr}};
const char* statement_data[] = {
- "{}",
- "var x",
- "var x = 1",
- "const x",
- "const x = 1",
- ";",
- "12",
- "if (false) {} else ;",
- "if (false) {} else {}",
- "if (false) {} else 12",
- "if (false) ;",
- "if (false) {}",
- "if (false) 12",
- "do {} while (false)",
- "for (;;) ;",
- "for (;;) {}",
- "for (;;) 12",
- "continue",
- "continue label",
- "continue\nlabel",
- "break",
- "break label",
- "break\nlabel",
- // TODO(marja): activate once parsing 'return' is merged into ParserBase.
- // "return",
- // "return 12",
- // "return\n12",
- "with ({}) ;",
- "with ({}) {}",
- "with ({}) 12",
- "switch ({}) { default: }",
- "label3: ",
- "throw",
- "throw 12",
- "throw\n12",
- "try {} catch(e) {}",
- "try {} finally {}",
- "try {} catch(e) {} finally {}",
- "debugger",
- NULL
- };
-
- const char* termination_data[] = {
- "",
- ";",
- "\n",
- ";\n",
- "\n;",
- NULL
- };
+ "{}", "var x", "var x = 1", "const x", "const x = 1", ";", "12",
+ "if (false) {} else ;", "if (false) {} else {}", "if (false) {} else 12",
+ "if (false) ;", "if (false) {}", "if (false) 12", "do {} while (false)",
+ "for (;;) ;", "for (;;) {}", "for (;;) 12", "continue", "continue label",
+ "continue\nlabel", "break", "break label", "break\nlabel",
+ // TODO(marja): activate once parsing 'return' is merged into ParserBase.
+ // "return",
+ // "return 12",
+ // "return\n12",
+ "with ({}) ;", "with ({}) {}", "with ({}) 12", "switch ({}) { default: }",
+ "label3: ", "throw", "throw 12", "throw\n12", "try {} catch(e) {}",
+ "try {} finally {}", "try {} catch(e) {} finally {}", "debugger",
+ nullptr};
+
+ const char* termination_data[] = {"", ";", "\n", ";\n", "\n;", nullptr};
v8::HandleScope handles(CcTest::isolate());
v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
@@ -1560,9 +1532,9 @@ TEST(ParserSync) {
CcTest::i_isolate()->stack_guard()->SetStackLimit(
i::GetCurrentStackPosition() - 128 * 1024);
- for (int i = 0; context_data[i][0] != NULL; ++i) {
- for (int j = 0; statement_data[j] != NULL; ++j) {
- for (int k = 0; termination_data[k] != NULL; ++k) {
+ for (int i = 0; context_data[i][0] != nullptr; ++i) {
+ for (int j = 0; statement_data[j] != nullptr; ++j) {
+ for (int k = 0; termination_data[k] != nullptr; ++k) {
int kPrefixLen = i::StrLength(context_data[i][0]);
int kStatementLen = i::StrLength(statement_data[j]);
int kTerminationLen = i::StrLength(termination_data[k]);
@@ -1578,8 +1550,8 @@ TEST(ParserSync) {
statement_data[j],
termination_data[k],
context_data[i][1]);
- CHECK(length == kProgramSize);
- TestParserSync(program.start(), NULL, 0);
+ CHECK_EQ(length, kProgramSize);
+ TestParserSync(program.start(), nullptr, 0);
}
}
}
@@ -1587,8 +1559,8 @@ TEST(ParserSync) {
// Neither Harmony numeric literals nor our natives syntax have any
// interaction with the flags above, so test these separately to reduce
// the combinatorial explosion.
- TestParserSync("0o1234", NULL, 0);
- TestParserSync("0b1011", NULL, 0);
+ TestParserSync("0o1234", nullptr, 0);
+ TestParserSync("0b1011", nullptr, 0);
static const ParserFlag flags3[] = { kAllowNatives };
TestParserSync("%DebugPrint(123)", flags3, arraysize(flags3));
@@ -1621,9 +1593,9 @@ TEST(StrictOctal) {
void RunParserSyncTest(
const char* context_data[][2], const char* statement_data[],
- ParserSyncTestResult result, const ParserFlag* flags = NULL,
- int flags_len = 0, const ParserFlag* always_true_flags = NULL,
- int always_true_len = 0, const ParserFlag* always_false_flags = NULL,
+ ParserSyncTestResult result, const ParserFlag* flags = nullptr,
+ int flags_len = 0, const ParserFlag* always_true_flags = nullptr,
+ int always_true_len = 0, const ParserFlag* always_false_flags = nullptr,
int always_false_len = 0, bool is_module = false,
bool test_preparser = true, bool ignore_error_msg = false) {
v8::HandleScope handles(CcTest::isolate());
@@ -1639,14 +1611,14 @@ void RunParserSyncTest(
kAllowLazy,
kAllowNatives,
};
- ParserFlag* generated_flags = NULL;
- if (flags == NULL) {
+ ParserFlag* generated_flags = nullptr;
+ if (flags == nullptr) {
flags = default_flags;
flags_len = arraysize(default_flags);
- if (always_true_flags != NULL || always_false_flags != NULL) {
+ if (always_true_flags != nullptr || always_false_flags != nullptr) {
// Remove always_true/false_flags from default_flags (if present).
- CHECK((always_true_flags != NULL) == (always_true_len > 0));
- CHECK((always_false_flags != NULL) == (always_false_len > 0));
+ CHECK((always_true_flags != nullptr) == (always_true_len > 0));
+ CHECK((always_false_flags != nullptr) == (always_false_len > 0));
generated_flags = new ParserFlag[flags_len + always_true_len];
int flag_index = 0;
for (int i = 0; i < flags_len; ++i) {
@@ -1663,8 +1635,8 @@ void RunParserSyncTest(
flags = generated_flags;
}
}
- for (int i = 0; context_data[i][0] != NULL; ++i) {
- for (int j = 0; statement_data[j] != NULL; ++j) {
+ for (int i = 0; context_data[i][0] != nullptr; ++i) {
+ for (int j = 0; statement_data[j] != nullptr; ++j) {
int kPrefixLen = i::StrLength(context_data[i][0]);
int kStatementLen = i::StrLength(statement_data[j]);
int kSuffixLen = i::StrLength(context_data[i][1]);
@@ -1677,7 +1649,7 @@ void RunParserSyncTest(
context_data[i][0],
statement_data[j],
context_data[i][1]);
- CHECK(length == kProgramSize);
+ CHECK_EQ(length, kProgramSize);
TestParserSync(program.start(), flags, flags_len, result,
always_true_flags, always_true_len, always_false_flags,
always_false_len, is_module, test_preparser,
@@ -1689,9 +1661,9 @@ void RunParserSyncTest(
void RunModuleParserSyncTest(
const char* context_data[][2], const char* statement_data[],
- ParserSyncTestResult result, const ParserFlag* flags = NULL,
- int flags_len = 0, const ParserFlag* always_true_flags = NULL,
- int always_true_len = 0, const ParserFlag* always_false_flags = NULL,
+ ParserSyncTestResult result, const ParserFlag* flags = nullptr,
+ int flags_len = 0, const ParserFlag* always_true_flags = nullptr,
+ int always_true_len = 0, const ParserFlag* always_false_flags = nullptr,
int always_false_len = 0, bool test_preparser = true,
bool ignore_error_msg = false) {
RunParserSyncTest(context_data, statement_data, result, flags, flags_len,
@@ -1708,35 +1680,33 @@ TEST(ErrorsEvalAndArguments) {
const char* context_data[][2] = {
{"\"use strict\";", ""},
{"var eval; function test_func() {\"use strict\"; ", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "var eval;",
- "var arguments",
- "var foo, eval;",
- "var foo, arguments;",
- "try { } catch (eval) { }",
- "try { } catch (arguments) { }",
- "function eval() { }",
- "function arguments() { }",
- "function foo(eval) { }",
- "function foo(arguments) { }",
- "function foo(bar, eval) { }",
- "function foo(bar, arguments) { }",
- "(eval) => { }",
- "(arguments) => { }",
- "(foo, eval) => { }",
- "(foo, arguments) => { }",
- "eval = 1;",
- "arguments = 1;",
- "var foo = eval = 1;",
- "var foo = arguments = 1;",
- "++eval;",
- "++arguments;",
- "eval++;",
- "arguments++;",
- NULL
- };
+ const char* statement_data[] = {"var eval;",
+ "var arguments",
+ "var foo, eval;",
+ "var foo, arguments;",
+ "try { } catch (eval) { }",
+ "try { } catch (arguments) { }",
+ "function eval() { }",
+ "function arguments() { }",
+ "function foo(eval) { }",
+ "function foo(arguments) { }",
+ "function foo(bar, eval) { }",
+ "function foo(bar, arguments) { }",
+ "(eval) => { }",
+ "(arguments) => { }",
+ "(foo, eval) => { }",
+ "(foo, arguments) => { }",
+ "eval = 1;",
+ "arguments = 1;",
+ "var foo = eval = 1;",
+ "var foo = arguments = 1;",
+ "++eval;",
+ "++arguments;",
+ "eval++;",
+ "arguments++;",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -1746,34 +1716,29 @@ TEST(NoErrorsEvalAndArgumentsSloppy) {
// Tests that both preparsing and parsing accept "eval" and "arguments" as
// identifiers when needed.
const char* context_data[][2] = {
- { "", "" },
- { "function test_func() {", "}"},
- { NULL, NULL }
- };
-
- const char* statement_data[] = {
- "var eval;",
- "var arguments",
- "var foo, eval;",
- "var foo, arguments;",
- "try { } catch (eval) { }",
- "try { } catch (arguments) { }",
- "function eval() { }",
- "function arguments() { }",
- "function foo(eval) { }",
- "function foo(arguments) { }",
- "function foo(bar, eval) { }",
- "function foo(bar, arguments) { }",
- "eval = 1;",
- "arguments = 1;",
- "var foo = eval = 1;",
- "var foo = arguments = 1;",
- "++eval;",
- "++arguments;",
- "eval++;",
- "arguments++;",
- NULL
- };
+ {"", ""}, {"function test_func() {", "}"}, {nullptr, nullptr}};
+
+ const char* statement_data[] = {"var eval;",
+ "var arguments",
+ "var foo, eval;",
+ "var foo, arguments;",
+ "try { } catch (eval) { }",
+ "try { } catch (arguments) { }",
+ "function eval() { }",
+ "function arguments() { }",
+ "function foo(eval) { }",
+ "function foo(arguments) { }",
+ "function foo(bar, eval) { }",
+ "function foo(bar, arguments) { }",
+ "eval = 1;",
+ "arguments = 1;",
+ "var foo = eval = 1;",
+ "var foo = arguments = 1;",
+ "++eval;",
+ "++arguments;",
+ "eval++;",
+ "arguments++;",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -1781,23 +1746,20 @@ TEST(NoErrorsEvalAndArgumentsSloppy) {
TEST(NoErrorsEvalAndArgumentsStrict) {
const char* context_data[][2] = {
- { "\"use strict\";", "" },
- { "function test_func() { \"use strict\";", "}" },
- { "() => { \"use strict\"; ", "}" },
- { NULL, NULL }
- };
+ {"\"use strict\";", ""},
+ {"function test_func() { \"use strict\";", "}"},
+ {"() => { \"use strict\"; ", "}"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "eval;",
- "arguments;",
- "var foo = eval;",
- "var foo = arguments;",
- "var foo = { eval: 1 };",
- "var foo = { arguments: 1 };",
- "var foo = { }; foo.eval = {};",
- "var foo = { }; foo.arguments = {};",
- NULL
- };
+ const char* statement_data[] = {"eval;",
+ "arguments;",
+ "var foo = eval;",
+ "var foo = arguments;",
+ "var foo = { eval: 1 };",
+ "var foo = { arguments: 1 };",
+ "var foo = { }; foo.eval = {};",
+ "var foo = { }; foo.arguments = {};",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -1859,13 +1821,13 @@ TEST(ErrorsFutureStrictReservedWords) {
const char* strict_contexts[][2] = {
{"function test_func() {\"use strict\"; ", "}"},
{"() => { \"use strict\"; ", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* statement_data[] {
LIMITED_FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
LIMITED_FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_LEX_BINDINGS)
- NULL
+ nullptr
};
// clang-format on
@@ -1879,9 +1841,9 @@ TEST(ErrorsFutureStrictReservedWords) {
const char* non_strict_contexts[][2] = {{"", ""},
{"function test_func() {", "}"},
{"() => {", "}"},
- {NULL, NULL}};
- const char* invalid_statements[] = {FUTURE_STRICT_RESERVED_LEX_BINDINGS("let")
- NULL};
+ {nullptr, nullptr}};
+ const char* invalid_statements[] = {
+ FUTURE_STRICT_RESERVED_LEX_BINDINGS("let") nullptr};
RunParserSyncTest(non_strict_contexts, invalid_statements, kError);
}
@@ -1890,18 +1852,16 @@ TEST(ErrorsFutureStrictReservedWords) {
TEST(NoErrorsFutureStrictReservedWords) {
- const char* context_data[][2] = {
- { "", "" },
- { "function test_func() {", "}"},
- { "() => {", "}" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function test_func() {", "}"},
+ {"() => {", "}"},
+ {nullptr, nullptr}};
// clang-format off
const char* statement_data[] = {
FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
FUTURE_STRICT_RESERVED_WORDS_NO_LET(FUTURE_STRICT_RESERVED_LEX_BINDINGS)
- NULL
+ nullptr
};
// clang-format on
@@ -1914,31 +1874,28 @@ TEST(ErrorsReservedWords) {
// using future reserved words as identifiers. These tests don't depend on the
// strict mode.
const char* context_data[][2] = {
- { "", "" },
- { "\"use strict\";", "" },
- { "var eval; function test_func() {", "}"},
- { "var eval; function test_func() {\"use strict\"; ", "}"},
- { "var eval; () => {", "}"},
- { "var eval; () => {\"use strict\"; ", "}"},
- { NULL, NULL }
- };
+ {"", ""},
+ {"\"use strict\";", ""},
+ {"var eval; function test_func() {", "}"},
+ {"var eval; function test_func() {\"use strict\"; ", "}"},
+ {"var eval; () => {", "}"},
+ {"var eval; () => {\"use strict\"; ", "}"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "var super;",
- "var foo, super;",
- "try { } catch (super) { }",
- "function super() { }",
- "function foo(super) { }",
- "function foo(bar, super) { }",
- "(super) => { }",
- "(bar, super) => { }",
- "super = 1;",
- "var foo = super = 1;",
- "++super;",
- "super++;",
- "function foo super",
- NULL
- };
+ const char* statement_data[] = {"var super;",
+ "var foo, super;",
+ "try { } catch (super) { }",
+ "function super() { }",
+ "function foo(super) { }",
+ "function foo(bar, super) { }",
+ "(super) => { }",
+ "(bar, super) => { }",
+ "super = 1;",
+ "var foo = super = 1;",
+ "++super;",
+ "super++;",
+ "function foo super",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -1946,12 +1903,10 @@ TEST(ErrorsReservedWords) {
TEST(NoErrorsLetSloppyAllModes) {
// In sloppy mode, it's okay to use "let" as identifier.
- const char* context_data[][2] = {
- { "", "" },
- { "function f() {", "}" },
- { "(function f() {", "})" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function f() {", "}"},
+ {"(function f() {", "})"},
+ {nullptr, nullptr}};
const char* statement_data[] = {
"var let;",
@@ -1973,7 +1928,7 @@ TEST(NoErrorsLetSloppyAllModes) {
"let(100)",
"L: let\nx",
"L: let\n{x}",
- NULL};
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -1982,34 +1937,31 @@ TEST(NoErrorsLetSloppyAllModes) {
TEST(NoErrorsYieldSloppyAllModes) {
// In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
// generator (see other test).
- const char* context_data[][2] = {
- { "", "" },
- { "function not_gen() {", "}" },
- { "(function not_gen() {", "})" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function not_gen() {", "}"},
+ {"(function not_gen() {", "})"},
+ {nullptr, nullptr}};
const char* statement_data[] = {
- "var yield;",
- "var foo, yield;",
- "try { } catch (yield) { }",
- "function yield() { }",
- "(function yield() { })",
- "function foo(yield) { }",
- "function foo(bar, yield) { }",
- "yield = 1;",
- "var foo = yield = 1;",
- "yield * 2;",
- "++yield;",
- "yield++;",
- "yield: 34",
- "function yield(yield) { yield: yield (yield + yield(0)); }",
- "({ yield: 1 })",
- "({ get yield() { 1 } })",
- "yield(100)",
- "yield[100]",
- NULL
- };
+ "var yield;",
+ "var foo, yield;",
+ "try { } catch (yield) { }",
+ "function yield() { }",
+ "(function yield() { })",
+ "function foo(yield) { }",
+ "function foo(bar, yield) { }",
+ "yield = 1;",
+ "var foo = yield = 1;",
+ "yield * 2;",
+ "++yield;",
+ "yield++;",
+ "yield: 34",
+ "function yield(yield) { yield: yield (yield + yield(0)); }",
+ "({ yield: 1 })",
+ "({ get yield() { 1 } })",
+ "yield(100)",
+ "yield[100]",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2019,36 +1971,34 @@ TEST(NoErrorsYieldSloppyGeneratorsEnabled) {
// In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
// generator (see next test).
const char* context_data[][2] = {
- { "", "" },
- { "function not_gen() {", "}" },
- { "function * gen() { function not_gen() {", "} }" },
- { "(function not_gen() {", "})" },
- { "(function * gen() { (function not_gen() {", "}) })" },
- { NULL, NULL }
- };
+ {"", ""},
+ {"function not_gen() {", "}"},
+ {"function * gen() { function not_gen() {", "} }"},
+ {"(function not_gen() {", "})"},
+ {"(function * gen() { (function not_gen() {", "}) })"},
+ {nullptr, nullptr}};
const char* statement_data[] = {
- "var yield;",
- "var foo, yield;",
- "try { } catch (yield) { }",
- "function yield() { }",
- "(function yield() { })",
- "function foo(yield) { }",
- "function foo(bar, yield) { }",
- "function * yield() { }",
- "yield = 1;",
- "var foo = yield = 1;",
- "yield * 2;",
- "++yield;",
- "yield++;",
- "yield: 34",
- "function yield(yield) { yield: yield (yield + yield(0)); }",
- "({ yield: 1 })",
- "({ get yield() { 1 } })",
- "yield(100)",
- "yield[100]",
- NULL
- };
+ "var yield;",
+ "var foo, yield;",
+ "try { } catch (yield) { }",
+ "function yield() { }",
+ "(function yield() { })",
+ "function foo(yield) { }",
+ "function foo(bar, yield) { }",
+ "function * yield() { }",
+ "yield = 1;",
+ "var foo = yield = 1;",
+ "yield * 2;",
+ "++yield;",
+ "yield++;",
+ "yield: 34",
+ "function yield(yield) { yield: yield (yield + yield(0)); }",
+ "({ yield: 1 })",
+ "({ get yield() { 1 } })",
+ "yield(100)",
+ "yield[100]",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2063,42 +2013,35 @@ TEST(ErrorsYieldStrict) {
{"\"use strict\"; (function not_gen() {", "})"},
{"\"use strict\"; (function * gen() { (function not_gen() {", "}) })"},
{"() => {\"use strict\"; ", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "var yield;",
- "var foo, yield;",
- "try { } catch (yield) { }",
- "function yield() { }",
- "(function yield() { })",
- "function foo(yield) { }",
- "function foo(bar, yield) { }",
- "function * yield() { }",
- "(function * yield() { })",
- "yield = 1;",
- "var foo = yield = 1;",
- "++yield;",
- "yield++;",
- "yield: 34;",
- NULL
- };
+ const char* statement_data[] = {"var yield;",
+ "var foo, yield;",
+ "try { } catch (yield) { }",
+ "function yield() { }",
+ "(function yield() { })",
+ "function foo(yield) { }",
+ "function foo(bar, yield) { }",
+ "function * yield() { }",
+ "(function * yield() { })",
+ "yield = 1;",
+ "var foo = yield = 1;",
+ "++yield;",
+ "yield++;",
+ "yield: 34;",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
TEST(ErrorsYieldSloppy) {
- const char* context_data[][2] = {
- { "", "" },
- { "function not_gen() {", "}" },
- { "(function not_gen() {", "})" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function not_gen() {", "}"},
+ {"(function not_gen() {", "})"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "(function * yield() { })",
- NULL
- };
+ const char* statement_data[] = {"(function * yield() { })", nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2110,7 +2053,7 @@ TEST(NoErrorsGenerator) {
{ "function * gen() {", "}" },
{ "(function * gen() {", "})" },
{ "(function * () {", "})" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -2164,7 +2107,7 @@ TEST(NoErrorsGenerator) {
"x = class extends f(yield) {}",
"x = class extends (null, yield) { }",
"x = class extends (a ? null : yield) { }",
- NULL
+ nullptr
};
// clang-format on
@@ -2177,7 +2120,7 @@ TEST(ErrorsYieldGenerator) {
const char* context_data[][2] = {
{ "function * gen() {", "}" },
{ "\"use strict\"; function * gen() {", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -2229,7 +2172,7 @@ TEST(ErrorsYieldGenerator) {
"for (yield 'x' in {} in {});",
"for (yield 'x' in {} of {});",
"class C extends yield { }",
- NULL
+ nullptr
};
// clang-format on
@@ -2240,60 +2183,37 @@ TEST(ErrorsYieldGenerator) {
TEST(ErrorsNameOfStrictFunction) {
// Tests that illegal tokens as names of a strict function produce the correct
// errors.
- const char* context_data[][2] = {
- { "function ", ""},
- { "\"use strict\"; function", ""},
- { "function * ", ""},
- { "\"use strict\"; function * ", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"function ", ""},
+ {"\"use strict\"; function", ""},
+ {"function * ", ""},
+ {"\"use strict\"; function * ", ""},
+ {nullptr, nullptr}};
const char* statement_data[] = {
- "eval() {\"use strict\";}",
- "arguments() {\"use strict\";}",
- "interface() {\"use strict\";}",
- "yield() {\"use strict\";}",
- // Future reserved words are always illegal
- "super() { }",
- "super() {\"use strict\";}",
- NULL
- };
+ "eval() {\"use strict\";}", "arguments() {\"use strict\";}",
+ "interface() {\"use strict\";}", "yield() {\"use strict\";}",
+ // Future reserved words are always illegal
+ "super() { }", "super() {\"use strict\";}", nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
TEST(NoErrorsNameOfStrictFunction) {
- const char* context_data[][2] = {
- { "function ", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"function ", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "eval() { }",
- "arguments() { }",
- "interface() { }",
- "yield() { }",
- NULL
- };
+ const char* statement_data[] = {"eval() { }", "arguments() { }",
+ "interface() { }", "yield() { }", nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(NoErrorsNameOfStrictGenerator) {
- const char* context_data[][2] = {
- { "function * ", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"function * ", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "eval() { }",
- "arguments() { }",
- "interface() { }",
- "yield() { }",
- NULL
- };
+ const char* statement_data[] = {"eval() { }", "arguments() { }",
+ "interface() { }", "yield() { }", nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2301,17 +2221,13 @@ TEST(NoErrorsNameOfStrictGenerator) {
TEST(ErrorsIllegalWordsAsLabelsSloppy) {
// Using future reserved words as labels is always an error.
- const char* context_data[][2] = {
- { "", ""},
- { "function test_func() {", "}" },
- { "() => {", "}" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function test_func() {", "}"},
+ {"() => {", "}"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "super: while(true) { break super; }",
- NULL
- };
+ const char* statement_data[] = {"super: while(true) { break super; }",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2323,14 +2239,12 @@ TEST(ErrorsIllegalWordsAsLabelsStrict) {
{"\"use strict\";", ""},
{"function test_func() {\"use strict\"; ", "}"},
{"() => {\"use strict\"; ", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
#define LABELLED_WHILE(NAME) #NAME ": while (true) { break " #NAME "; }",
const char* statement_data[] = {
- "super: while(true) { break super; }",
- FUTURE_STRICT_RESERVED_WORDS(LABELLED_WHILE)
- NULL
- };
+ "super: while(true) { break super; }",
+ FUTURE_STRICT_RESERVED_WORDS(LABELLED_WHILE) nullptr};
#undef LABELLED_WHILE
RunParserSyncTest(context_data, statement_data, kError);
@@ -2340,39 +2254,32 @@ TEST(ErrorsIllegalWordsAsLabelsStrict) {
TEST(NoErrorsIllegalWordsAsLabels) {
// Using eval and arguments as labels is legal even in strict mode.
const char* context_data[][2] = {
- { "", ""},
- { "function test_func() {", "}" },
- { "() => {", "}" },
- { "\"use strict\";", "" },
- { "\"use strict\"; function test_func() {", "}" },
- { "\"use strict\"; () => {", "}" },
- { NULL, NULL }
- };
+ {"", ""},
+ {"function test_func() {", "}"},
+ {"() => {", "}"},
+ {"\"use strict\";", ""},
+ {"\"use strict\"; function test_func() {", "}"},
+ {"\"use strict\"; () => {", "}"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "mylabel: while(true) { break mylabel; }",
- "eval: while(true) { break eval; }",
- "arguments: while(true) { break arguments; }",
- NULL
- };
+ const char* statement_data[] = {"mylabel: while(true) { break mylabel; }",
+ "eval: while(true) { break eval; }",
+ "arguments: while(true) { break arguments; }",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(NoErrorsFutureStrictReservedAsLabelsSloppy) {
- const char* context_data[][2] = {
- { "", ""},
- { "function test_func() {", "}" },
- { "() => {", "}" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function test_func() {", "}"},
+ {"() => {", "}"},
+ {nullptr, nullptr}};
#define LABELLED_WHILE(NAME) #NAME ": while (true) { break " #NAME "; }",
- const char* statement_data[] {
- FUTURE_STRICT_RESERVED_WORDS(LABELLED_WHILE)
- NULL
- };
+ const char* statement_data[]{
+ FUTURE_STRICT_RESERVED_WORDS(LABELLED_WHILE) nullptr};
#undef LABELLED_WHILE
RunParserSyncTest(context_data, statement_data, kSuccess);
@@ -2381,17 +2288,13 @@ TEST(NoErrorsFutureStrictReservedAsLabelsSloppy) {
TEST(ErrorsParenthesizedLabels) {
// Parenthesized identifiers shouldn't be recognized as labels.
- const char* context_data[][2] = {
- { "", ""},
- { "function test_func() {", "}" },
- { "() => {", "}" },
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""},
+ {"function test_func() {", "}"},
+ {"() => {", "}"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "(mylabel): while(true) { break mylabel; }",
- NULL
- };
+ const char* statement_data[] = {"(mylabel): while(true) { break mylabel; }",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2399,15 +2302,9 @@ TEST(ErrorsParenthesizedLabels) {
TEST(NoErrorsParenthesizedDirectivePrologue) {
// Parenthesized directive prologue shouldn't be recognized.
- const char* context_data[][2] = {
- { "", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "(\"use strict\"); var eval;",
- NULL
- };
+ const char* statement_data[] = {"(\"use strict\"); var eval;", nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2415,20 +2312,15 @@ TEST(NoErrorsParenthesizedDirectivePrologue) {
TEST(ErrorsNotAnIdentifierName) {
const char* context_data[][2] = {
- { "", ""},
- { "\"use strict\";", ""},
- { NULL, NULL }
- };
-
- const char* statement_data[] = {
- "var foo = {}; foo.{;",
- "var foo = {}; foo.};",
- "var foo = {}; foo.=;",
- "var foo = {}; foo.888;",
- "var foo = {}; foo.-;",
- "var foo = {}; foo.--;",
- NULL
- };
+ {"", ""}, {"\"use strict\";", ""}, {nullptr, nullptr}};
+
+ const char* statement_data[] = {"var foo = {}; foo.{;",
+ "var foo = {}; foo.};",
+ "var foo = {}; foo.=;",
+ "var foo = {}; foo.888;",
+ "var foo = {}; foo.-;",
+ "var foo = {}; foo.--;",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2437,20 +2329,15 @@ TEST(ErrorsNotAnIdentifierName) {
TEST(NoErrorsIdentifierNames) {
// Keywords etc. are valid as property names.
const char* context_data[][2] = {
- { "", ""},
- { "\"use strict\";", ""},
- { NULL, NULL }
- };
-
- const char* statement_data[] = {
- "var foo = {}; foo.if;",
- "var foo = {}; foo.yield;",
- "var foo = {}; foo.super;",
- "var foo = {}; foo.interface;",
- "var foo = {}; foo.eval;",
- "var foo = {}; foo.arguments;",
- NULL
- };
+ {"", ""}, {"\"use strict\";", ""}, {nullptr, nullptr}};
+
+ const char* statement_data[] = {"var foo = {}; foo.if;",
+ "var foo = {}; foo.yield;",
+ "var foo = {}; foo.super;",
+ "var foo = {}; foo.interface;",
+ "var foo = {}; foo.eval;",
+ "var foo = {}; foo.arguments;",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2471,19 +2358,20 @@ TEST(DontRegressPreParserDataSizes) {
const char* program;
int functions;
} test_cases[] = {
- // No functions.
- {"var x = 42;", 0},
- // Functions.
- {"function foo() {}", 1},
- {"function foo() {} function bar() {}", 2},
- // Getter / setter functions are recorded as functions if they're on the top
- // level.
- {"var x = {get foo(){} };", 1},
- // Functions insize lazy functions are not recorded.
- {"function lazy() { function a() {} function b() {} function c() {} }", 1},
- {"function lazy() { var x = {get foo(){} } }", 1},
- {NULL, 0}
- };
+ // No functions.
+ {"var x = 42;", 0},
+ // Functions.
+ {"function foo() {}", 1},
+ {"function foo() {} function bar() {}", 2},
+ // Getter / setter functions are recorded as functions if they're on the
+ // top
+ // level.
+ {"var x = {get foo(){} };", 1},
+ // Functions insize lazy functions are not recorded.
+ {"function lazy() { function a() {} function b() {} function c() {} }",
+ 1},
+ {"function lazy() { var x = {get foo(){} } }", 1},
+ {nullptr, 0}};
for (int i = 0; test_cases[i].program; i++) {
const char* program = test_cases[i].program;
@@ -2492,7 +2380,7 @@ TEST(DontRegressPreParserDataSizes) {
factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
i::ParseInfo info(script);
- i::ScriptData* sd = NULL;
+ i::ScriptData* sd = nullptr;
info.set_cached_data(&sd);
info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
i::parsing::ParseProgram(&info, CcTest::i_isolate());
@@ -2517,32 +2405,24 @@ TEST(FunctionDeclaresItselfStrict) {
// itself strict (we cannot produce there errors as soon as we see the
// offending identifiers, because we don't know at that point whether the
// function is strict or not).
- const char* context_data[][2] = {
- {"function eval() {", "}"},
- {"function arguments() {", "}"},
- {"function yield() {", "}"},
- {"function interface() {", "}"},
- {"function foo(eval) {", "}"},
- {"function foo(arguments) {", "}"},
- {"function foo(yield) {", "}"},
- {"function foo(interface) {", "}"},
- {"function foo(bar, eval) {", "}"},
- {"function foo(bar, arguments) {", "}"},
- {"function foo(bar, yield) {", "}"},
- {"function foo(bar, interface) {", "}"},
- {"function foo(bar, bar) {", "}"},
- { NULL, NULL }
- };
-
- const char* strict_statement_data[] = {
- "\"use strict\";",
- NULL
- };
-
- const char* non_strict_statement_data[] = {
- ";",
- NULL
- };
+ const char* context_data[][2] = {{"function eval() {", "}"},
+ {"function arguments() {", "}"},
+ {"function yield() {", "}"},
+ {"function interface() {", "}"},
+ {"function foo(eval) {", "}"},
+ {"function foo(arguments) {", "}"},
+ {"function foo(yield) {", "}"},
+ {"function foo(interface) {", "}"},
+ {"function foo(bar, eval) {", "}"},
+ {"function foo(bar, arguments) {", "}"},
+ {"function foo(bar, yield) {", "}"},
+ {"function foo(bar, interface) {", "}"},
+ {"function foo(bar, bar) {", "}"},
+ {nullptr, nullptr}};
+
+ const char* strict_statement_data[] = {"\"use strict\";", nullptr};
+
+ const char* non_strict_statement_data[] = {";", nullptr};
RunParserSyncTest(context_data, strict_statement_data, kError);
RunParserSyncTest(context_data, non_strict_statement_data, kSuccess);
@@ -2550,67 +2430,40 @@ TEST(FunctionDeclaresItselfStrict) {
TEST(ErrorsTryWithoutCatchOrFinally) {
- const char* context_data[][2] = {
- {"", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
const char* statement_data[] = {
- "try { }",
- "try { } foo();",
- "try { } catch (e) foo();",
- "try { } catch { }",
- "try { } finally foo();",
- NULL
- };
+ "try { }", "try { } foo();", "try { } catch (e) foo();",
+ "try { } catch { }", "try { } finally foo();", nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
TEST(NoErrorsTryCatchFinally) {
- const char* context_data[][2] = {
- {"", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "try { } catch (e) { }",
- "try { } catch (e) { } finally { }",
- "try { } finally { }",
- NULL
- };
+ const char* statement_data[] = {"try { } catch (e) { }",
+ "try { } catch (e) { } finally { }",
+ "try { } finally { }", nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(ErrorsRegexpLiteral) {
- const char* context_data[][2] = {
- {"var r = ", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"var r = ", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "/unterminated",
- NULL
- };
+ const char* statement_data[] = {"/unterminated", nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
TEST(NoErrorsRegexpLiteral) {
- const char* context_data[][2] = {
- {"var r = ", ""},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"var r = ", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "/foo/",
- "/foo/g",
- NULL
- };
+ const char* statement_data[] = {"/foo/", "/foo/g", nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2618,38 +2471,24 @@ TEST(NoErrorsRegexpLiteral) {
TEST(NoErrorsNewExpression) {
const char* context_data[][2] = {
- {"", ""},
- {"var f =", ""},
- { NULL, NULL }
- };
+ {"", ""}, {"var f =", ""}, {nullptr, nullptr}};
const char* statement_data[] = {
- "new foo",
- "new foo();",
- "new foo(1);",
- "new foo(1, 2);",
- // The first () will be processed as a part of the NewExpression and the
- // second () will be processed as part of LeftHandSideExpression.
- "new foo()();",
- // The first () will be processed as a part of the inner NewExpression and
- // the second () will be processed as a part of the outer NewExpression.
- "new new foo()();",
- "new foo.bar;",
- "new foo.bar();",
- "new foo.bar.baz;",
- "new foo.bar().baz;",
- "new foo[bar];",
- "new foo[bar]();",
- "new foo[bar][baz];",
- "new foo[bar]()[baz];",
- "new foo[bar].baz(baz)()[bar].baz;",
- "new \"foo\"", // Runtime error
- "new 1", // Runtime error
- // This even runs:
- "(new new Function(\"this.x = 1\")).x;",
- "new new Test_Two(String, 2).v(0123).length;",
- NULL
- };
+ "new foo", "new foo();", "new foo(1);", "new foo(1, 2);",
+ // The first () will be processed as a part of the NewExpression and the
+ // second () will be processed as part of LeftHandSideExpression.
+ "new foo()();",
+ // The first () will be processed as a part of the inner NewExpression and
+ // the second () will be processed as a part of the outer NewExpression.
+ "new new foo()();", "new foo.bar;", "new foo.bar();", "new foo.bar.baz;",
+ "new foo.bar().baz;", "new foo[bar];", "new foo[bar]();",
+ "new foo[bar][baz];", "new foo[bar]()[baz];",
+ "new foo[bar].baz(baz)()[bar].baz;",
+ "new \"foo\"", // Runtime error
+ "new 1", // Runtime error
+ // This even runs:
+ "(new new Function(\"this.x = 1\")).x;",
+ "new new Test_Two(String, 2).v(0123).length;", nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2657,43 +2496,29 @@ TEST(NoErrorsNewExpression) {
TEST(ErrorsNewExpression) {
const char* context_data[][2] = {
- {"", ""},
- {"var f =", ""},
- { NULL, NULL }
- };
+ {"", ""}, {"var f =", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {
- "new foo bar",
- "new ) foo",
- "new ++foo",
- "new foo ++",
- NULL
- };
+ const char* statement_data[] = {"new foo bar", "new ) foo", "new ++foo",
+ "new foo ++", nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
TEST(StrictObjectLiteralChecking) {
- const char* context_data[][2] = {
- {"\"use strict\"; var myobject = {", "};"},
- {"\"use strict\"; var myobject = {", ",};"},
- {"var myobject = {", "};"},
- {"var myobject = {", ",};"},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"\"use strict\"; var myobject = {", "};"},
+ {"\"use strict\"; var myobject = {", ",};"},
+ {"var myobject = {", "};"},
+ {"var myobject = {", ",};"},
+ {nullptr, nullptr}};
// These are only errors in strict mode.
const char* statement_data[] = {
- "foo: 1, foo: 2",
- "\"foo\": 1, \"foo\": 2",
- "foo: 1, \"foo\": 2",
- "1: 1, 1: 2",
- "1: 1, \"1\": 2",
- "get: 1, get: 2", // Not a getter for real, just a property called get.
- "set: 1, set: 2", // Not a setter for real, just a property called set.
- NULL
- };
+ "foo: 1, foo: 2", "\"foo\": 1, \"foo\": 2", "foo: 1, \"foo\": 2",
+ "1: 1, 1: 2", "1: 1, \"1\": 2",
+ "get: 1, get: 2", // Not a getter for real, just a property called get.
+ "set: 1, set: 2", // Not a setter for real, just a property called set.
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2704,7 +2529,7 @@ TEST(ErrorsObjectLiteralChecking) {
const char* context_data[][2] = {
{"\"use strict\"; var myobject = {", "};"},
{"var myobject = {", "};"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -2749,7 +2574,7 @@ TEST(ErrorsObjectLiteralChecking) {
"async get *x(){}",
"async set x(y){}",
"async get : 0",
- NULL
+ nullptr
};
// clang-format on
@@ -2764,7 +2589,7 @@ TEST(NoErrorsObjectLiteralChecking) {
{"var myobject = {", ",};"},
{"\"use strict\"; var myobject = {", "};"},
{"\"use strict\"; var myobject = {", ",};"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -2836,7 +2661,7 @@ TEST(NoErrorsObjectLiteralChecking) {
"async : 0",
"async(){}",
"*async(){}",
- NULL
+ nullptr
};
// clang-format on
@@ -2845,10 +2670,7 @@ TEST(NoErrorsObjectLiteralChecking) {
TEST(TooManyArguments) {
- const char* context_data[][2] = {
- {"foo(", "0)"},
- { NULL, NULL }
- };
+ const char* context_data[][2] = {{"foo(", "0)"}, {nullptr, nullptr}};
using v8::internal::Code;
char statement[Code::kMaxArguments * 2 + 1];
@@ -2858,10 +2680,7 @@ TEST(TooManyArguments) {
}
statement[Code::kMaxArguments * 2] = 0;
- const char* statement_data[] = {
- statement,
- NULL
- };
+ const char* statement_data[] = {statement, nullptr};
// The test is quite slow, so run it with a reduced set of flags.
static const ParserFlag empty_flags[] = {kAllowLazy};
@@ -2871,46 +2690,31 @@ TEST(TooManyArguments) {
TEST(StrictDelete) {
// "delete <Identifier>" is not allowed in strict mode.
- const char* strict_context_data[][2] = {
- {"\"use strict\"; ", ""},
- { NULL, NULL }
- };
+ const char* strict_context_data[][2] = {{"\"use strict\"; ", ""},
+ {nullptr, nullptr}};
- const char* sloppy_context_data[][2] = {
- {"", ""},
- { NULL, NULL }
- };
+ const char* sloppy_context_data[][2] = {{"", ""}, {nullptr, nullptr}};
// These are errors in the strict mode.
- const char* sloppy_statement_data[] = {
- "delete foo;",
- "delete foo + 1;",
- "delete (foo);",
- "delete eval;",
- "delete interface;",
- NULL
- };
+ const char* sloppy_statement_data[] = {"delete foo;", "delete foo + 1;",
+ "delete (foo);", "delete eval;",
+ "delete interface;", nullptr};
// These are always OK
- const char* good_statement_data[] = {
- "delete this;",
- "delete 1;",
- "delete 1 + 2;",
- "delete foo();",
- "delete foo.bar;",
- "delete foo[bar];",
- "delete foo--;",
- "delete --foo;",
- "delete new foo();",
- "delete new foo(bar);",
- NULL
- };
+ const char* good_statement_data[] = {"delete this;",
+ "delete 1;",
+ "delete 1 + 2;",
+ "delete foo();",
+ "delete foo.bar;",
+ "delete foo[bar];",
+ "delete foo--;",
+ "delete --foo;",
+ "delete new foo();",
+ "delete new foo(bar);",
+ nullptr};
// These are always errors
- const char* bad_statement_data[] = {
- "delete if;",
- NULL
- };
+ const char* bad_statement_data[] = {"delete if;", nullptr};
RunParserSyncTest(strict_context_data, sloppy_statement_data, kError);
RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess);
@@ -2952,68 +2756,53 @@ TEST(NoErrorsDeclsInCase) {
TEST(InvalidLeftHandSide) {
const char* assignment_context_data[][2] = {
- {"", " = 1;"},
- {"\"use strict\"; ", " = 1;"},
- { NULL, NULL }
- };
+ {"", " = 1;"}, {"\"use strict\"; ", " = 1;"}, {nullptr, nullptr}};
const char* prefix_context_data[][2] = {
- {"++", ";"},
- {"\"use strict\"; ++", ";"},
- {NULL, NULL},
+ {"++", ";"}, {"\"use strict\"; ++", ";"}, {nullptr, nullptr},
};
const char* postfix_context_data[][2] = {
- {"", "++;"},
- {"\"use strict\"; ", "++;"},
- { NULL, NULL }
- };
+ {"", "++;"}, {"\"use strict\"; ", "++;"}, {nullptr, nullptr}};
// Good left hand sides for assigment or prefix / postfix operations.
- const char* good_statement_data[] = {
- "foo",
- "foo.bar",
- "foo[bar]",
- "foo()[bar]",
- "foo().bar",
- "this.foo",
- "this[foo]",
- "new foo()[bar]",
- "new foo().bar",
- "foo()",
- "foo(bar)",
- "foo[bar]()",
- "foo.bar()",
- "this()",
- "this.foo()",
- "this[foo].bar()",
- "this.foo[foo].bar(this)(bar)[foo]()",
- NULL
- };
+ const char* good_statement_data[] = {"foo",
+ "foo.bar",
+ "foo[bar]",
+ "foo()[bar]",
+ "foo().bar",
+ "this.foo",
+ "this[foo]",
+ "new foo()[bar]",
+ "new foo().bar",
+ "foo()",
+ "foo(bar)",
+ "foo[bar]()",
+ "foo.bar()",
+ "this()",
+ "this.foo()",
+ "this[foo].bar()",
+ "this.foo[foo].bar(this)(bar)[foo]()",
+ nullptr};
// Bad left hand sides for assigment or prefix / postfix operations.
const char* bad_statement_data_common[] = {
- "2",
- "new foo",
- "new foo()",
- "null",
- "if", // Unexpected token
- "{x: 1}", // Unexpected token
- "this",
- "\"bar\"",
- "(foo + bar)",
- "new new foo()[bar]", // means: new (new foo()[bar])
- "new new foo().bar", // means: new (new foo()[bar])
- NULL
- };
+ "2",
+ "new foo",
+ "new foo()",
+ "null",
+ "if", // Unexpected token
+ "{x: 1}", // Unexpected token
+ "this",
+ "\"bar\"",
+ "(foo + bar)",
+ "new new foo()[bar]", // means: new (new foo()[bar])
+ "new new foo().bar", // means: new (new foo()[bar])
+ nullptr};
// These are not okay for assignment, but okay for prefix / postix.
- const char* bad_statement_data_for_assignment[] = {
- "++foo",
- "foo++",
- "foo + bar",
- NULL
- };
+ const char* bad_statement_data_for_assignment[] = {"++foo", "foo++",
+ "foo + bar", nullptr};
RunParserSyncTest(assignment_context_data, good_statement_data, kSuccess);
RunParserSyncTest(assignment_context_data, bad_statement_data_common, kError);
@@ -3189,14 +2978,14 @@ TEST(SerializationOfMaybeAssignmentFlag) {
&zone, context->scope_info(), script_scope, &avf,
i::Scope::DeserializationMode::kIncludingVariables);
CHECK(s != script_scope);
- CHECK(name != NULL);
+ CHECK_NOT_NULL(name);
// Get result from h's function context (that is f's context)
i::Variable* var = s->Lookup(name);
- CHECK(var != NULL);
+ CHECK_NOT_NULL(var);
// Maybe assigned should survive deserialization
- CHECK(var->maybe_assigned() == i::kMaybeAssigned);
+ CHECK_EQ(var->maybe_assigned(), i::kMaybeAssigned);
// TODO(sigurds) Figure out if is_used should survive context serialization.
}
@@ -3241,8 +3030,8 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
// Get result from f's function context (that is g's outer context)
i::Variable* var_x = s->Lookup(name_x);
- CHECK(var_x != NULL);
- CHECK(var_x->maybe_assigned() == i::kMaybeAssigned);
+ CHECK_NOT_NULL(var_x);
+ CHECK_EQ(var_x->maybe_assigned(), i::kMaybeAssigned);
}
@@ -3398,7 +3187,7 @@ TEST(InnerAssignment) {
CHECK(i::parsing::ParseProgram(info.get(), isolate));
}
CHECK(i::Compiler::Analyze(info.get()));
- CHECK(info->literal() != NULL);
+ CHECK_NOT_NULL(info->literal());
i::Scope* scope = info->literal()->scope();
if (!lazy) {
@@ -3411,7 +3200,7 @@ TEST(InnerAssignment) {
info->ast_value_factory()->GetOneByteString("x");
i::Variable* var = scope->Lookup(var_name);
bool expected = outers[i].assigned || inners[j].assigned;
- CHECK(var != NULL);
+ CHECK_NOT_NULL(var);
CHECK(var->is_used() || !expected);
bool is_maybe_assigned = var->maybe_assigned() == i::kMaybeAssigned;
if (i::FLAG_lazy_inner_functions) {
@@ -4143,7 +3932,7 @@ TEST(ErrorsArrowFunctions) {
{"bar[", "];"},
{"bar, ", ";"},
{"", ", bar;"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* statement_data[] = {
@@ -4232,7 +4021,7 @@ TEST(ErrorsArrowFunctions) {
"(...rest - a) => b",
"(a, ...b - 10) => b",
- NULL
+ nullptr
};
// clang-format on
@@ -4260,7 +4049,7 @@ TEST(NoErrorsArrowFunctions) {
{"bar ? baz : (", ");"},
{"bar, ", ";"},
{"", ", bar;"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* statement_data[] = {
@@ -4323,7 +4112,7 @@ TEST(NoErrorsArrowFunctions) {
"([x] = []) => {}",
"({a = 42}) => {}",
"([x = 0]) => {}",
- NULL
+ nullptr
};
// clang-format on
@@ -4341,44 +4130,35 @@ TEST(NoErrorsArrowFunctions) {
TEST(ArrowFunctionsSloppyParameterNames) {
- const char* strict_context_data[][2] = {
- {"'use strict'; ", ";"},
- {"'use strict'; bar ? (", ") : baz;"},
- {"'use strict'; bar ? baz : (", ");"},
- {"'use strict'; bar, ", ";"},
- {"'use strict'; ", ", bar;"},
- {NULL, NULL}
- };
+ const char* strict_context_data[][2] = {{"'use strict'; ", ";"},
+ {"'use strict'; bar ? (", ") : baz;"},
+ {"'use strict'; bar ? baz : (", ");"},
+ {"'use strict'; bar, ", ";"},
+ {"'use strict'; ", ", bar;"},
+ {nullptr, nullptr}};
const char* sloppy_context_data[][2] = {
- {"", ";"},
- {"bar ? (", ") : baz;"},
- {"bar ? baz : (", ");"},
- {"bar, ", ";"},
- {"", ", bar;"},
- {NULL, NULL}
- };
-
- const char* statement_data[] = {
- "eval => {}",
- "arguments => {}",
- "yield => {}",
- "interface => {}",
- "(eval) => {}",
- "(arguments) => {}",
- "(yield) => {}",
- "(interface) => {}",
- "(eval, bar) => {}",
- "(bar, eval) => {}",
- "(bar, arguments) => {}",
- "(bar, yield) => {}",
- "(bar, interface) => {}",
- "(interface, eval) => {}",
- "(interface, arguments) => {}",
- "(eval, interface) => {}",
- "(arguments, interface) => {}",
- NULL
- };
+ {"", ";"}, {"bar ? (", ") : baz;"}, {"bar ? baz : (", ");"},
+ {"bar, ", ";"}, {"", ", bar;"}, {nullptr, nullptr}};
+
+ const char* statement_data[] = {"eval => {}",
+ "arguments => {}",
+ "yield => {}",
+ "interface => {}",
+ "(eval) => {}",
+ "(arguments) => {}",
+ "(yield) => {}",
+ "(interface) => {}",
+ "(eval, bar) => {}",
+ "(bar, eval) => {}",
+ "(bar, arguments) => {}",
+ "(bar, yield) => {}",
+ "(bar, interface) => {}",
+ "(interface, eval) => {}",
+ "(interface, arguments) => {}",
+ "(eval, interface) => {}",
+ "(arguments, interface) => {}",
+ nullptr};
RunParserSyncTest(strict_context_data, statement_data, kError);
RunParserSyncTest(sloppy_context_data, statement_data, kSuccess);
@@ -4387,32 +4167,20 @@ TEST(ArrowFunctionsSloppyParameterNames) {
TEST(ArrowFunctionsYieldParameterNameInGenerator) {
const char* sloppy_function_context_data[][2] = {
- {"(function f() { (", "); });"},
- {NULL, NULL}
- };
+ {"(function f() { (", "); });"}, {nullptr, nullptr}};
const char* strict_function_context_data[][2] = {
- {"(function f() {'use strict'; (", "); });"},
- {NULL, NULL}
- };
+ {"(function f() {'use strict'; (", "); });"}, {nullptr, nullptr}};
const char* generator_context_data[][2] = {
- {"(function *g() {'use strict'; (", "); });"},
- {"(function *g() { (", "); });"},
- {NULL, NULL}
- };
+ {"(function *g() {'use strict'; (", "); });"},
+ {"(function *g() { (", "); });"},
+ {nullptr, nullptr}};
const char* arrow_data[] = {
- "yield => {}",
- "(yield) => {}",
- "(a, yield) => {}",
- "(yield, a) => {}",
- "(yield, ...a) => {}",
- "(a, ...yield) => {}",
- "({yield}) => {}",
- "([yield]) => {}",
- NULL
- };
+ "yield => {}", "(yield) => {}", "(a, yield) => {}",
+ "(yield, a) => {}", "(yield, ...a) => {}", "(a, ...yield) => {}",
+ "({yield}) => {}", "([yield]) => {}", nullptr};
RunParserSyncTest(sloppy_function_context_data, arrow_data, kSuccess);
RunParserSyncTest(strict_function_context_data, arrow_data, kError);
@@ -4422,51 +4190,40 @@ TEST(ArrowFunctionsYieldParameterNameInGenerator) {
TEST(SuperNoErrors) {
// Tests that parser and preparser accept 'super' keyword in right places.
- const char* context_data[][2] = {
- {"class C { m() { ", "; } }"},
- {"class C { m() { k = ", "; } }"},
- {"class C { m() { foo(", "); } }"},
- {"class C { m() { () => ", "; } }"},
- {NULL, NULL}
- };
+ const char* context_data[][2] = {{"class C { m() { ", "; } }"},
+ {"class C { m() { k = ", "; } }"},
+ {"class C { m() { foo(", "); } }"},
+ {"class C { m() { () => ", "; } }"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "super.x",
- "super[27]",
- "new super.x",
- "new super.x()",
- "new super[27]",
- "new super[27]()",
- "z.super", // Ok, property lookup.
- NULL
- };
+ const char* statement_data[] = {"super.x", "super[27]",
+ "new super.x", "new super.x()",
+ "new super[27]", "new super[27]()",
+ "z.super", // Ok, property lookup.
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(SuperErrors) {
- const char* context_data[][2] = {
- {"class C { m() { ", "; } }"},
- {"class C { m() { k = ", "; } }"},
- {"class C { m() { foo(", "); } }"},
- {"class C { m() { () => ", "; } }"},
- {NULL, NULL}
- };
-
- const char* expression_data[] = {
- "super",
- "super = x",
- "y = super",
- "f(super)",
- "new super",
- "new super()",
- "new super(12, 45)",
- "new new super",
- "new new super()",
- "new new super()()",
- NULL
- };
+ const char* context_data[][2] = {{"class C { m() { ", "; } }"},
+ {"class C { m() { k = ", "; } }"},
+ {"class C { m() { foo(", "); } }"},
+ {"class C { m() { () => ", "; } }"},
+ {nullptr, nullptr}};
+
+ const char* expression_data[] = {"super",
+ "super = x",
+ "y = super",
+ "f(super)",
+ "new super",
+ "new super()",
+ "new super(12, 45)",
+ "new new super",
+ "new new super()",
+ "new new super()()",
+ nullptr};
RunParserSyncTest(context_data, expression_data, kError);
}
@@ -4475,7 +4232,7 @@ TEST(ImportExpressionSuccess) {
// clang-format off
const char* context_data[][2] = {
{"", ""},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* data[] = {
@@ -4495,7 +4252,7 @@ TEST(ImportExpressionSuccess) {
"let x = import(x)",
"for(x of import(x)) {}",
"import(x).then()",
- NULL
+ nullptr
};
// clang-format on
@@ -4510,12 +4267,12 @@ TEST(ImportExpressionSuccess) {
// run without kAllowHarmonyDynamicImport flag), so this results in
// an "Unexpected token import" error.
RunParserSyncTest(context_data, data, kError);
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0, NULL, 0,
- true, true);
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true, true);
static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, flags,
arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
+ RunModuleParserSyncTest(context_data, data, kSuccess, nullptr, 0, flags,
arraysize(flags));
}
@@ -4527,7 +4284,7 @@ TEST(ImportExpressionErrors) {
{"var ", ""},
{"let ", ""},
{"new ", ""},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* data[] = {
@@ -4557,17 +4314,17 @@ TEST(ImportExpressionErrors) {
"import = 1",
"import.wat",
"new import(x)",
- NULL
+ nullptr
};
// clang-format on
RunParserSyncTest(context_data, data, kError);
// We ignore the error messages for the reason explained in the
// ImportExpressionSuccess test.
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0, NULL,
- 0, true, true);
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true, true);
static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags));
// We ignore test error messages because the error message from
@@ -4577,8 +4334,8 @@ TEST(ImportExpressionErrors) {
// correctly and then shows an "Unexpected end of input" error
// message because of the '{'. The preparser shows an "Unexpected
// token {" because it's not a valid token in a CallExpression.
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, flags,
- arraysize(flags), NULL, 0, true, true);
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
+ arraysize(flags), nullptr, 0, true, true);
}
{
@@ -4586,12 +4343,12 @@ TEST(ImportExpressionErrors) {
const char* context_data[][2] = {
{"var ", ""},
{"let ", ""},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* data[] = {
"import('x')",
- NULL
+ nullptr
};
// clang-format on
@@ -4599,100 +4356,83 @@ TEST(ImportExpressionErrors) {
RunModuleParserSyncTest(context_data, data, kError);
static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags));
}
}
TEST(SuperCall) {
- const char* context_data[][2] = {{"", ""},
- {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
const char* success_data[] = {
- "class C extends B { constructor() { super(); } }",
- "class C extends B { constructor() { () => super(); } }",
- NULL
- };
+ "class C extends B { constructor() { super(); } }",
+ "class C extends B { constructor() { () => super(); } }", nullptr};
RunParserSyncTest(context_data, success_data, kSuccess);
- const char* error_data[] = {
- "class C { constructor() { super(); } }",
- "class C { method() { super(); } }",
- "class C { method() { () => super(); } }",
- "class C { *method() { super(); } }",
- "class C { get x() { super(); } }",
- "class C { set x(_) { super(); } }",
- "({ method() { super(); } })",
- "({ *method() { super(); } })",
- "({ get x() { super(); } })",
- "({ set x(_) { super(); } })",
- "({ f: function() { super(); } })",
- "(function() { super(); })",
- "var f = function() { super(); }",
- "({ f: function*() { super(); } })",
- "(function*() { super(); })",
- "var f = function*() { super(); }",
- NULL
- };
+ const char* error_data[] = {"class C { constructor() { super(); } }",
+ "class C { method() { super(); } }",
+ "class C { method() { () => super(); } }",
+ "class C { *method() { super(); } }",
+ "class C { get x() { super(); } }",
+ "class C { set x(_) { super(); } }",
+ "({ method() { super(); } })",
+ "({ *method() { super(); } })",
+ "({ get x() { super(); } })",
+ "({ set x(_) { super(); } })",
+ "({ f: function() { super(); } })",
+ "(function() { super(); })",
+ "var f = function() { super(); }",
+ "({ f: function*() { super(); } })",
+ "(function*() { super(); })",
+ "var f = function*() { super(); }",
+ nullptr};
RunParserSyncTest(context_data, error_data, kError);
}
TEST(SuperNewNoErrors) {
- const char* context_data[][2] = {
- {"class C { constructor() { ", " } }"},
- {"class C { *method() { ", " } }"},
- {"class C { get x() { ", " } }"},
- {"class C { set x(_) { ", " } }"},
- {"({ method() { ", " } })"},
- {"({ *method() { ", " } })"},
- {"({ get x() { ", " } })"},
- {"({ set x(_) { ", " } })"},
- {NULL, NULL}
- };
+ const char* context_data[][2] = {{"class C { constructor() { ", " } }"},
+ {"class C { *method() { ", " } }"},
+ {"class C { get x() { ", " } }"},
+ {"class C { set x(_) { ", " } }"},
+ {"({ method() { ", " } })"},
+ {"({ *method() { ", " } })"},
+ {"({ get x() { ", " } })"},
+ {"({ set x(_) { ", " } })"},
+ {nullptr, nullptr}};
- const char* expression_data[] = {
- "new super.x;",
- "new super.x();",
- "() => new super.x;",
- "() => new super.x();",
- NULL
- };
+ const char* expression_data[] = {"new super.x;", "new super.x();",
+ "() => new super.x;", "() => new super.x();",
+ nullptr};
RunParserSyncTest(context_data, expression_data, kSuccess);
}
TEST(SuperNewErrors) {
- const char* context_data[][2] = {
- {"class C { method() { ", " } }"},
- {"class C { *method() { ", " } }"},
- {"class C { get x() { ", " } }"},
- {"class C { set x(_) { ", " } }"},
- {"({ method() { ", " } })"},
- {"({ *method() { ", " } })"},
- {"({ get x() { ", " } })"},
- {"({ set x(_) { ", " } })"},
- {"({ f: function() { ", " } })"},
- {"(function() { ", " })"},
- {"var f = function() { ", " }"},
- {"({ f: function*() { ", " } })"},
- {"(function*() { ", " })"},
- {"var f = function*() { ", " }"},
- {NULL, NULL}
- };
+ const char* context_data[][2] = {{"class C { method() { ", " } }"},
+ {"class C { *method() { ", " } }"},
+ {"class C { get x() { ", " } }"},
+ {"class C { set x(_) { ", " } }"},
+ {"({ method() { ", " } })"},
+ {"({ *method() { ", " } })"},
+ {"({ get x() { ", " } })"},
+ {"({ set x(_) { ", " } })"},
+ {"({ f: function() { ", " } })"},
+ {"(function() { ", " })"},
+ {"var f = function() { ", " }"},
+ {"({ f: function*() { ", " } })"},
+ {"(function*() { ", " })"},
+ {"var f = function*() { ", " }"},
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "new super;",
- "new super();",
- "() => new super;",
- "() => new super();",
- NULL
- };
+ const char* statement_data[] = {"new super;", "new super();",
+ "() => new super;", "() => new super();",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -4700,37 +4440,24 @@ TEST(SuperNewErrors) {
TEST(SuperErrorsNonMethods) {
// super is only allowed in methods, accessors and constructors.
- const char* context_data[][2] = {
- {"", ";"},
- {"k = ", ";"},
- {"foo(", ");"},
- {"if (", ") {}"},
- {"if (true) {", "}"},
- {"if (false) {} else {", "}"},
- {"while (true) {", "}"},
- {"function f() {", "}"},
- {"class C extends (", ") {}"},
- {"class C { m() { function f() {", "} } }"},
- {"({ m() { function f() {", "} } })"},
- {NULL, NULL}
- };
+ const char* context_data[][2] = {{"", ";"},
+ {"k = ", ";"},
+ {"foo(", ");"},
+ {"if (", ") {}"},
+ {"if (true) {", "}"},
+ {"if (false) {} else {", "}"},
+ {"while (true) {", "}"},
+ {"function f() {", "}"},
+ {"class C extends (", ") {}"},
+ {"class C { m() { function f() {", "} } }"},
+ {"({ m() { function f() {", "} } })"},
+ {nullptr, nullptr}};
const char* statement_data[] = {
- "super",
- "super = x",
- "y = super",
- "f(super)",
- "super.x",
- "super[27]",
- "super.x()",
- "super[27]()",
- "super()",
- "new super.x",
- "new super.x()",
- "new super[27]",
- "new super[27]()",
- NULL
- };
+ "super", "super = x", "y = super", "f(super)",
+ "super.x", "super[27]", "super.x()", "super[27]()",
+ "super()", "new super.x", "new super.x()", "new super[27]",
+ "new super[27]()", nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -4741,16 +4468,11 @@ TEST(NoErrorsMethodDefinition) {
{"'use strict'; ({", "});"},
{"({*", "});"},
{"'use strict'; ({*", "});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* object_literal_body_data[] = {
- "m() {}",
- "m(x) { return x; }",
- "m(x, y) {}, n() {}",
- "set(x, y) {}",
- "get(x, y) {}",
- NULL
- };
+ "m() {}", "m(x) { return x; }", "m(x, y) {}, n() {}",
+ "set(x, y) {}", "get(x, y) {}", nullptr};
RunParserSyncTest(context_data, object_literal_body_data, kSuccess);
}
@@ -4761,70 +4483,20 @@ TEST(MethodDefinitionNames) {
{"'use strict'; ({", "(x, y) {}});"},
{"({*", "(x, y) {}});"},
{"'use strict'; ({*", "(x, y) {}});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* name_data[] = {
- "m",
- "'m'",
- "\"m\"",
- "\"m n\"",
- "true",
- "false",
- "null",
- "0",
- "1.2",
- "1e1",
- "1E1",
- "1e+1",
- "1e-1",
-
- // Keywords
- "async",
- "await",
- "break",
- "case",
- "catch",
- "class",
- "const",
- "continue",
- "debugger",
- "default",
- "delete",
- "do",
- "else",
- "enum",
- "export",
- "extends",
- "finally",
- "for",
- "function",
- "if",
- "implements",
- "import",
- "in",
- "instanceof",
- "interface",
- "let",
- "new",
- "package",
- "private",
- "protected",
- "public",
- "return",
- "static",
- "super",
- "switch",
- "this",
- "throw",
- "try",
- "typeof",
- "var",
- "void",
- "while",
- "with",
- "yield",
- NULL
- };
+ "m", "'m'", "\"m\"", "\"m n\"", "true", "false", "null", "0", "1.2",
+ "1e1", "1E1", "1e+1", "1e-1",
+
+ // Keywords
+ "async", "await", "break", "case", "catch", "class", "const", "continue",
+ "debugger", "default", "delete", "do", "else", "enum", "export",
+ "extends", "finally", "for", "function", "if", "implements", "import",
+ "in", "instanceof", "interface", "let", "new", "package", "private",
+ "protected", "public", "return", "static", "super", "switch", "this",
+ "throw", "try", "typeof", "var", "void", "while", "with", "yield",
+ nullptr};
RunParserSyncTest(context_data, name_data, kSuccess);
}
@@ -4835,34 +4507,23 @@ TEST(MethodDefinitionStrictFormalParamereters) {
{"'use strict'; ({method(", "){}});"},
{"({*method(", "){}});"},
{"'use strict'; ({*method(", "){}});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* params_data[] = {
- "x, x",
- "x, y, x",
- "var",
- "const",
- NULL
- };
+ const char* params_data[] = {"x, x", "x, y, x", "var", "const", nullptr};
RunParserSyncTest(context_data, params_data, kError);
}
TEST(MethodDefinitionEvalArguments) {
- const char* strict_context_data[][2] =
- {{"'use strict'; ({method(", "){}});"},
- {"'use strict'; ({*method(", "){}});"},
- {NULL, NULL}};
- const char* sloppy_context_data[][2] =
- {{"({method(", "){}});"},
- {"({*method(", "){}});"},
- {NULL, NULL}};
+ const char* strict_context_data[][2] = {
+ {"'use strict'; ({method(", "){}});"},
+ {"'use strict'; ({*method(", "){}});"},
+ {nullptr, nullptr}};
+ const char* sloppy_context_data[][2] = {
+ {"({method(", "){}});"}, {"({*method(", "){}});"}, {nullptr, nullptr}};
- const char* data[] = {
- "eval",
- "arguments",
- NULL};
+ const char* data[] = {"eval", "arguments", nullptr};
// Fail in strict mode
RunParserSyncTest(strict_context_data, data, kError);
@@ -4873,19 +4534,14 @@ TEST(MethodDefinitionEvalArguments) {
TEST(MethodDefinitionDuplicateEvalArguments) {
- const char* context_data[][2] =
- {{"'use strict'; ({method(", "){}});"},
- {"'use strict'; ({*method(", "){}});"},
- {"({method(", "){}});"},
- {"({*method(", "){}});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {{"'use strict'; ({method(", "){}});"},
+ {"'use strict'; ({*method(", "){}});"},
+ {"({method(", "){}});"},
+ {"({*method(", "){}});"},
+ {nullptr, nullptr}};
- const char* data[] = {
- "eval, eval",
- "eval, a, eval",
- "arguments, arguments",
- "arguments, a, arguments",
- NULL};
+ const char* data[] = {"eval, eval", "eval, a, eval", "arguments, arguments",
+ "arguments, a, arguments", nullptr};
// In strict mode, the error is using "eval" or "arguments" as parameter names
// In sloppy mode, the error is that eval / arguments are duplicated
@@ -4895,55 +4551,50 @@ TEST(MethodDefinitionDuplicateEvalArguments) {
TEST(MethodDefinitionDuplicateProperty) {
const char* context_data[][2] = {{"'use strict'; ({", "});"},
- {NULL, NULL}};
-
- const char* params_data[] = {
- "x: 1, x() {}",
- "x() {}, x: 1",
- "x() {}, get x() {}",
- "x() {}, set x(_) {}",
- "x() {}, x() {}",
- "x() {}, y() {}, x() {}",
- "x() {}, \"x\"() {}",
- "x() {}, 'x'() {}",
- "0() {}, '0'() {}",
- "1.0() {}, 1: 1",
-
- "x: 1, *x() {}",
- "*x() {}, x: 1",
- "*x() {}, get x() {}",
- "*x() {}, set x(_) {}",
- "*x() {}, *x() {}",
- "*x() {}, y() {}, *x() {}",
- "*x() {}, *\"x\"() {}",
- "*x() {}, *'x'() {}",
- "*0() {}, *'0'() {}",
- "*1.0() {}, 1: 1",
-
- NULL
- };
+ {nullptr, nullptr}};
+
+ const char* params_data[] = {"x: 1, x() {}",
+ "x() {}, x: 1",
+ "x() {}, get x() {}",
+ "x() {}, set x(_) {}",
+ "x() {}, x() {}",
+ "x() {}, y() {}, x() {}",
+ "x() {}, \"x\"() {}",
+ "x() {}, 'x'() {}",
+ "0() {}, '0'() {}",
+ "1.0() {}, 1: 1",
+
+ "x: 1, *x() {}",
+ "*x() {}, x: 1",
+ "*x() {}, get x() {}",
+ "*x() {}, set x(_) {}",
+ "*x() {}, *x() {}",
+ "*x() {}, y() {}, *x() {}",
+ "*x() {}, *\"x\"() {}",
+ "*x() {}, *'x'() {}",
+ "*0() {}, *'0'() {}",
+ "*1.0() {}, 1: 1",
+
+ nullptr};
RunParserSyncTest(context_data, params_data, kSuccess);
}
TEST(ClassExpressionNoErrors) {
- const char* context_data[][2] = {{"(", ");"},
- {"var C = ", ";"},
- {"bar, ", ";"},
- {NULL, NULL}};
- const char* class_data[] = {
- "class {}",
- "class name {}",
- "class extends F {}",
- "class name extends F {}",
- "class extends (F, G) {}",
- "class name extends (F, G) {}",
- "class extends class {} {}",
- "class name extends class {} {}",
- "class extends class base {} {}",
- "class name extends class base {} {}",
- NULL};
+ const char* context_data[][2] = {
+ {"(", ");"}, {"var C = ", ";"}, {"bar, ", ";"}, {nullptr, nullptr}};
+ const char* class_data[] = {"class {}",
+ "class name {}",
+ "class extends F {}",
+ "class name extends F {}",
+ "class extends (F, G) {}",
+ "class name extends (F, G) {}",
+ "class extends class {} {}",
+ "class name extends class {} {}",
+ "class extends class base {} {}",
+ "class name extends class base {} {}",
+ nullptr};
RunParserSyncTest(context_data, class_data, kSuccess);
}
@@ -4953,14 +4604,13 @@ TEST(ClassDeclarationNoErrors) {
const char* context_data[][2] = {{"'use strict'; ", ""},
{"'use strict'; {", "}"},
{"'use strict'; if (true) {", "}"},
- {NULL, NULL}};
- const char* statement_data[] = {
- "class name {}",
- "class name extends F {}",
- "class name extends (F, G) {}",
- "class name extends class {} {}",
- "class name extends class base {} {}",
- NULL};
+ {nullptr, nullptr}};
+ const char* statement_data[] = {"class name {}",
+ "class name extends F {}",
+ "class name extends (F, G) {}",
+ "class name extends class {} {}",
+ "class name extends class base {} {}",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -4973,7 +4623,7 @@ TEST(ClassBodyNoErrors) {
{"(class extends Base {", "});"},
{"class C {", "}"},
{"class C extends Base {", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* class_body_data[] = {
";",
";;",
@@ -5032,7 +4682,7 @@ TEST(ClassBodyNoErrors) {
"async async(){}",
"async(){}",
"*async(){}",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, class_body_data, kSuccess);
@@ -5056,37 +4706,13 @@ TEST(ClassPropertyNameNoErrors) {
{"class C { static set ", "(v) {}}"},
{"class C { *", "() {}}"},
{"class C { static *", "() {}}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* name_data[] = {
- "42",
- "42.5",
- "42e2",
- "42e+2",
- "42e-2",
- "null",
- "false",
- "true",
- "'str'",
- "\"str\"",
- "static",
- "get",
- "set",
- "var",
- "const",
- "let",
- "this",
- "class",
- "function",
- "yield",
- "if",
- "else",
- "for",
- "while",
- "do",
- "try",
- "catch",
- "finally",
- NULL};
+ "42", "42.5", "42e2", "42e+2", "42e-2", "null",
+ "false", "true", "'str'", "\"str\"", "static", "get",
+ "set", "var", "const", "let", "this", "class",
+ "function", "yield", "if", "else", "for", "while",
+ "do", "try", "catch", "finally", nullptr};
RunParserSyncTest(context_data, name_data, kSuccess);
}
@@ -5098,7 +4724,7 @@ TEST(ClassFieldsNoErrors) {
{"(class extends Base {", "});"},
{"class C {", "}"},
{"class C extends Base {", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* class_body_data[] = {
// Basic syntax
"a = 0;",
@@ -5174,12 +4800,12 @@ TEST(ClassFieldsNoErrors) {
"await;",
"await = 0;",
"await\n a",
- NULL
+ nullptr
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyClassFields};
- RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+ static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
always_flags, arraysize(always_flags));
}
@@ -5190,10 +4816,13 @@ TEST(ClassFieldsErrors) {
{"(class extends Base {", "});"},
{"class C {", "}"},
{"class C extends Base {", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* class_body_data[] = {
"a : 0",
"a =",
+ "static constructor",
+ "static prototype",
+ "constructor",
"*a = 0",
"*a",
"get a",
@@ -5215,68 +4844,64 @@ TEST(ClassFieldsErrors) {
"a = 0\n *b(){}",
"a = 0\n ['b'](){}",
"get\n a",
- NULL
+ nullptr
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyClassFields};
- RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+ static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
always_flags, arraysize(always_flags));
}
TEST(ClassExpressionErrors) {
- const char* context_data[][2] = {{"(", ");"},
- {"var C = ", ";"},
- {"bar, ", ";"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"(", ");"}, {"var C = ", ";"}, {"bar, ", ";"}, {nullptr, nullptr}};
const char* class_data[] = {
- "class",
- "class name",
- "class name extends",
- "class extends",
- "class {",
- "class { m }",
- "class { m; n }",
- "class { m: 1 }",
- "class { m(); n() }",
- "class { get m }",
- "class { get m() }",
- "class { get m() { }",
- "class { set m() {} }", // Missing required parameter.
- "class { m() {}, n() {} }", // No commas allowed.
- NULL};
+ "class",
+ "class name",
+ "class name extends",
+ "class extends",
+ "class {",
+ "class { m }",
+ "class { m; n }",
+ "class { m: 1 }",
+ "class { m(); n() }",
+ "class { get m }",
+ "class { get m() }",
+ "class { get m() { }",
+ "class { set m() {} }", // Missing required parameter.
+ "class { m() {}, n() {} }", // No commas allowed.
+ nullptr};
RunParserSyncTest(context_data, class_data, kError);
}
TEST(ClassDeclarationErrors) {
- const char* context_data[][2] = {{"", ""},
- {"{", "}"},
- {"if (true) {", "}"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"", ""}, {"{", "}"}, {"if (true) {", "}"}, {nullptr, nullptr}};
const char* class_data[] = {
- "class",
- "class name",
- "class name extends",
- "class extends",
- "class name {",
- "class name { m }",
- "class name { m; n }",
- "class name { m: 1 }",
- "class name { m(); n() }",
- "class name { get x }",
- "class name { get x() }",
- "class name { set x() {) }", // missing required param
- "class {}", // Name is required for declaration
- "class extends base {}",
- "class name { *",
- "class name { * }",
- "class name { *; }",
- "class name { *get x() {} }",
- "class name { *set x(_) {} }",
- "class name { *static m() {} }",
- NULL};
+ "class",
+ "class name",
+ "class name extends",
+ "class extends",
+ "class name {",
+ "class name { m }",
+ "class name { m; n }",
+ "class name { m: 1 }",
+ "class name { m(); n() }",
+ "class name { get x }",
+ "class name { get x() }",
+ "class name { set x() {) }", // missing required param
+ "class {}", // Name is required for declaration
+ "class extends base {}",
+ "class name { *",
+ "class name { * }",
+ "class name { *; }",
+ "class name { *get x() {} }",
+ "class name { *set x(_) {} }",
+ "class name { *static m() {} }",
+ nullptr};
RunParserSyncTest(context_data, class_data, kError);
}
@@ -5287,7 +4912,7 @@ TEST(ClassAsyncErrors) {
{"(class extends Base {", "});"},
{"class C {", "}"},
{"class C extends Base {", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* async_data[] = {
"*async x(){}",
"async *(){}",
@@ -5306,7 +4931,7 @@ TEST(ClassAsyncErrors) {
"static async set x(y){}",
"static async x : 0",
"static async : 0",
- NULL
+ nullptr
};
// clang-format on
@@ -5318,21 +4943,11 @@ TEST(ClassNameErrors) {
{"(class ", "{});"},
{"'use strict'; class ", "{}"},
{"'use strict'; (class ", "{});"},
- {NULL, NULL}};
- const char* class_name[] = {
- "arguments",
- "eval",
- "implements",
- "interface",
- "let",
- "package",
- "private",
- "protected",
- "public",
- "static",
- "var",
- "yield",
- NULL};
+ {nullptr, nullptr}};
+ const char* class_name[] = {"arguments", "eval", "implements", "interface",
+ "let", "package", "private", "protected",
+ "public", "static", "var", "yield",
+ nullptr};
RunParserSyncTest(context_data, class_name, kError);
}
@@ -5340,243 +4955,163 @@ TEST(ClassNameErrors) {
TEST(ClassGetterParamNameErrors) {
const char* context_data[][2] = {
- {"class C { get name(", ") {} }"},
- {"(class { get name(", ") {} });"},
- {"'use strict'; class C { get name(", ") {} }"},
- {"'use strict'; (class { get name(", ") {} })"},
- {NULL, NULL}
- };
-
- const char* class_name[] = {
- "arguments",
- "eval",
- "implements",
- "interface",
- "let",
- "package",
- "private",
- "protected",
- "public",
- "static",
- "var",
- "yield",
- NULL};
+ {"class C { get name(", ") {} }"},
+ {"(class { get name(", ") {} });"},
+ {"'use strict'; class C { get name(", ") {} }"},
+ {"'use strict'; (class { get name(", ") {} })"},
+ {nullptr, nullptr}};
+
+ const char* class_name[] = {"arguments", "eval", "implements", "interface",
+ "let", "package", "private", "protected",
+ "public", "static", "var", "yield",
+ nullptr};
RunParserSyncTest(context_data, class_name, kError);
}
TEST(ClassStaticPrototypeErrors) {
- const char* context_data[][2] = {{"class C {", "}"},
- {"(class {", "});"},
- {NULL, NULL}};
-
- const char* class_body_data[] = {
- "static prototype() {}",
- "static get prototype() {}",
- "static set prototype(_) {}",
- "static *prototype() {}",
- "static 'prototype'() {}",
- "static *'prototype'() {}",
- "static prot\\u006ftype() {}",
- "static 'prot\\u006ftype'() {}",
- "static get 'prot\\u006ftype'() {}",
- "static set 'prot\\u006ftype'(_) {}",
- "static *'prot\\u006ftype'() {}",
- NULL};
+ const char* context_data[][2] = {
+ {"class C {", "}"}, {"(class {", "});"}, {nullptr, nullptr}};
+
+ const char* class_body_data[] = {"static prototype() {}",
+ "static get prototype() {}",
+ "static set prototype(_) {}",
+ "static *prototype() {}",
+ "static 'prototype'() {}",
+ "static *'prototype'() {}",
+ "static prot\\u006ftype() {}",
+ "static 'prot\\u006ftype'() {}",
+ "static get 'prot\\u006ftype'() {}",
+ "static set 'prot\\u006ftype'(_) {}",
+ "static *'prot\\u006ftype'() {}",
+ nullptr};
RunParserSyncTest(context_data, class_body_data, kError);
}
TEST(ClassSpecialConstructorErrors) {
- const char* context_data[][2] = {{"class C {", "}"},
- {"(class {", "});"},
- {NULL, NULL}};
-
- const char* class_body_data[] = {
- "get constructor() {}",
- "get constructor(_) {}",
- "*constructor() {}",
- "get 'constructor'() {}",
- "*'constructor'() {}",
- "get c\\u006fnstructor() {}",
- "*c\\u006fnstructor() {}",
- "get 'c\\u006fnstructor'() {}",
- "get 'c\\u006fnstructor'(_) {}",
- "*'c\\u006fnstructor'() {}",
- NULL};
+ const char* context_data[][2] = {
+ {"class C {", "}"}, {"(class {", "});"}, {nullptr, nullptr}};
+
+ const char* class_body_data[] = {"get constructor() {}",
+ "get constructor(_) {}",
+ "*constructor() {}",
+ "get 'constructor'() {}",
+ "*'constructor'() {}",
+ "get c\\u006fnstructor() {}",
+ "*c\\u006fnstructor() {}",
+ "get 'c\\u006fnstructor'() {}",
+ "get 'c\\u006fnstructor'(_) {}",
+ "*'c\\u006fnstructor'() {}",
+ nullptr};
RunParserSyncTest(context_data, class_body_data, kError);
}
TEST(ClassConstructorNoErrors) {
- const char* context_data[][2] = {{"class C {", "}"},
- {"(class {", "});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"class C {", "}"}, {"(class {", "});"}, {nullptr, nullptr}};
- const char* class_body_data[] = {
- "constructor() {}",
- "static constructor() {}",
- "static get constructor() {}",
- "static set constructor(_) {}",
- "static *constructor() {}",
- NULL};
+ const char* class_body_data[] = {"constructor() {}",
+ "static constructor() {}",
+ "static get constructor() {}",
+ "static set constructor(_) {}",
+ "static *constructor() {}",
+ nullptr};
RunParserSyncTest(context_data, class_body_data, kSuccess);
}
TEST(ClassMultipleConstructorErrors) {
- const char* context_data[][2] = {{"class C {", "}"},
- {"(class {", "});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"class C {", "}"}, {"(class {", "});"}, {nullptr, nullptr}};
- const char* class_body_data[] = {
- "constructor() {}; constructor() {}",
- NULL};
+ const char* class_body_data[] = {"constructor() {}; constructor() {}",
+ nullptr};
RunParserSyncTest(context_data, class_body_data, kError);
}
TEST(ClassMultiplePropertyNamesNoErrors) {
- const char* context_data[][2] = {{"class C {", "}"},
- {"(class {", "});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"class C {", "}"}, {"(class {", "});"}, {nullptr, nullptr}};
const char* class_body_data[] = {
- "constructor() {}; static constructor() {}",
- "m() {}; static m() {}",
- "m() {}; m() {}",
- "static m() {}; static m() {}",
- "get m() {}; set m(_) {}; get m() {}; set m(_) {};",
- NULL};
+ "constructor() {}; static constructor() {}",
+ "m() {}; static m() {}",
+ "m() {}; m() {}",
+ "static m() {}; static m() {}",
+ "get m() {}; set m(_) {}; get m() {}; set m(_) {};",
+ nullptr};
RunParserSyncTest(context_data, class_body_data, kSuccess);
}
TEST(ClassesAreStrictErrors) {
- const char* context_data[][2] = {{"", ""},
- {"(", ");"},
- {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {"(", ");"}, {nullptr, nullptr}};
const char* class_body_data[] = {
- "class C { method() { with ({}) {} } }",
- "class C extends function() { with ({}) {} } {}",
- "class C { *method() { with ({}) {} } }",
- NULL};
+ "class C { method() { with ({}) {} } }",
+ "class C extends function() { with ({}) {} } {}",
+ "class C { *method() { with ({}) {} } }", nullptr};
RunParserSyncTest(context_data, class_body_data, kError);
}
TEST(ObjectLiteralPropertyShorthandKeywordsError) {
- const char* context_data[][2] = {{"({", "});"},
- {"'use strict'; ({", "});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"({", "});"}, {"'use strict'; ({", "});"}, {nullptr, nullptr}};
const char* name_data[] = {
- "break",
- "case",
- "catch",
- "class",
- "const",
- "continue",
- "debugger",
- "default",
- "delete",
- "do",
- "else",
- "enum",
- "export",
- "extends",
- "false",
- "finally",
- "for",
- "function",
- "if",
- "import",
- "in",
- "instanceof",
- "new",
- "null",
- "return",
- "super",
- "switch",
- "this",
- "throw",
- "true",
- "try",
- "typeof",
- "var",
- "void",
- "while",
- "with",
- NULL
- };
+ "break", "case", "catch", "class", "const", "continue",
+ "debugger", "default", "delete", "do", "else", "enum",
+ "export", "extends", "false", "finally", "for", "function",
+ "if", "import", "in", "instanceof", "new", "null",
+ "return", "super", "switch", "this", "throw", "true",
+ "try", "typeof", "var", "void", "while", "with",
+ nullptr};
RunParserSyncTest(context_data, name_data, kError);
}
TEST(ObjectLiteralPropertyShorthandStrictKeywords) {
- const char* context_data[][2] = {{"({", "});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {{"({", "});"}, {nullptr, nullptr}};
- const char* name_data[] = {
- "implements",
- "interface",
- "let",
- "package",
- "private",
- "protected",
- "public",
- "static",
- "yield",
- NULL
- };
+ const char* name_data[] = {"implements", "interface", "let", "package",
+ "private", "protected", "public", "static",
+ "yield", nullptr};
RunParserSyncTest(context_data, name_data, kSuccess);
const char* context_strict_data[][2] = {{"'use strict'; ({", "});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
RunParserSyncTest(context_strict_data, name_data, kError);
}
TEST(ObjectLiteralPropertyShorthandError) {
- const char* context_data[][2] = {{"({", "});"},
- {"'use strict'; ({", "});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"({", "});"}, {"'use strict'; ({", "});"}, {nullptr, nullptr}};
- const char* name_data[] = {
- "1",
- "1.2",
- "0",
- "0.1",
- "1.0",
- "1e1",
- "0x1",
- "\"s\"",
- "'s'",
- NULL
- };
+ const char* name_data[] = {"1", "1.2", "0", "0.1", "1.0",
+ "1e1", "0x1", "\"s\"", "'s'", nullptr};
RunParserSyncTest(context_data, name_data, kError);
}
TEST(ObjectLiteralPropertyShorthandYieldInGeneratorError) {
- const char* context_data[][2] = {{"", ""},
- {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
- const char* name_data[] = {
- "function* g() { ({yield}); }",
- NULL
- };
+ const char* name_data[] = {"function* g() { ({yield}); }", nullptr};
RunParserSyncTest(context_data, name_data, kError);
}
@@ -5585,14 +5120,11 @@ TEST(ObjectLiteralPropertyShorthandYieldInGeneratorError) {
TEST(ConstParsingInForIn) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {
- "for(const x = 1; ; ) {}",
- "for(const x = 1, y = 2;;){}",
- "for(const x in [1,2,3]) {}",
- "for(const x of [1,2,3]) {}",
- NULL};
+ "for(const x = 1; ; ) {}", "for(const x = 1, y = 2;;){}",
+ "for(const x in [1,2,3]) {}", "for(const x of [1,2,3]) {}", nullptr};
RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, nullptr, 0);
}
@@ -5601,11 +5133,11 @@ TEST(StatementParsingInForIn) {
const char* context_data[][2] = {{"", ""},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {"for(x in {}, {}) {}", "for(var x in {}, {}) {}",
"for(let x in {}, {}) {}", "for(const x in {}, {}) {}",
- NULL};
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5614,19 +5146,14 @@ TEST(StatementParsingInForIn) {
TEST(ConstParsingInForInError) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {
- "for(const x,y = 1; ; ) {}",
- "for(const x = 4 in [1,2,3]) {}",
- "for(const x = 4, y in [1,2,3]) {}",
- "for(const x = 4 of [1,2,3]) {}",
- "for(const x = 4, y of [1,2,3]) {}",
- "for(const x = 1, y = 2 in []) {}",
- "for(const x,y in []) {}",
- "for(const x = 1, y = 2 of []) {}",
- "for(const x,y of []) {}",
- NULL};
+ "for(const x,y = 1; ; ) {}", "for(const x = 4 in [1,2,3]) {}",
+ "for(const x = 4, y in [1,2,3]) {}", "for(const x = 4 of [1,2,3]) {}",
+ "for(const x = 4, y of [1,2,3]) {}", "for(const x = 1, y = 2 in []) {}",
+ "for(const x,y in []) {}", "for(const x = 1, y = 2 of []) {}",
+ "for(const x,y of []) {}", nullptr};
RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0);
}
@@ -5690,24 +5217,23 @@ TEST(ForInMultipleDeclarationsError) {
{"function foo(){", "}"},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "for (var i, j in {}) {}",
- "for (var i, j in [1, 2, 3]) {}",
- "for (var i, j = 1 in {}) {}",
- "for (var i, j = void 0 in [1, 2, 3]) {}",
-
- "for (let i, j in {}) {}",
- "for (let i, j in [1, 2, 3]) {}",
- "for (let i, j = 1 in {}) {}",
- "for (let i, j = void 0 in [1, 2, 3]) {}",
-
- "for (const i, j in {}) {}",
- "for (const i, j in [1, 2, 3]) {}",
- "for (const i, j = 1 in {}) {}",
- "for (const i, j = void 0 in [1, 2, 3]) {}",
- NULL};
+ const char* data[] = {"for (var i, j in {}) {}",
+ "for (var i, j in [1, 2, 3]) {}",
+ "for (var i, j = 1 in {}) {}",
+ "for (var i, j = void 0 in [1, 2, 3]) {}",
+
+ "for (let i, j in {}) {}",
+ "for (let i, j in [1, 2, 3]) {}",
+ "for (let i, j = 1 in {}) {}",
+ "for (let i, j = void 0 in [1, 2, 3]) {}",
+
+ "for (const i, j in {}) {}",
+ "for (const i, j in [1, 2, 3]) {}",
+ "for (const i, j = 1 in {}) {}",
+ "for (const i, j = void 0 in [1, 2, 3]) {}",
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -5717,24 +5243,23 @@ TEST(ForOfMultipleDeclarationsError) {
{"function foo(){", "}"},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "for (var i, j of {}) {}",
- "for (var i, j of [1, 2, 3]) {}",
- "for (var i, j = 1 of {}) {}",
- "for (var i, j = void 0 of [1, 2, 3]) {}",
-
- "for (let i, j of {}) {}",
- "for (let i, j of [1, 2, 3]) {}",
- "for (let i, j = 1 of {}) {}",
- "for (let i, j = void 0 of [1, 2, 3]) {}",
-
- "for (const i, j of {}) {}",
- "for (const i, j of [1, 2, 3]) {}",
- "for (const i, j = 1 of {}) {}",
- "for (const i, j = void 0 of [1, 2, 3]) {}",
- NULL};
+ const char* data[] = {"for (var i, j of {}) {}",
+ "for (var i, j of [1, 2, 3]) {}",
+ "for (var i, j = 1 of {}) {}",
+ "for (var i, j = void 0 of [1, 2, 3]) {}",
+
+ "for (let i, j of {}) {}",
+ "for (let i, j of [1, 2, 3]) {}",
+ "for (let i, j = 1 of {}) {}",
+ "for (let i, j = void 0 of [1, 2, 3]) {}",
+
+ "for (const i, j of {}) {}",
+ "for (const i, j of [1, 2, 3]) {}",
+ "for (const i, j = 1 of {}) {}",
+ "for (const i, j = void 0 of [1, 2, 3]) {}",
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -5744,12 +5269,9 @@ TEST(ForInNoDeclarationsError) {
{"function foo(){", "}"},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "for (var in {}) {}",
- "for (const in {}) {}",
- NULL};
+ const char* data[] = {"for (var in {}) {}", "for (const in {}) {}", nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -5759,12 +5281,10 @@ TEST(ForOfNoDeclarationsError) {
{"function foo(){", "}"},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "for (var of [1, 2, 3]) {}",
- "for (const of [1, 2, 3]) {}",
- NULL};
+ const char* data[] = {"for (var of [1, 2, 3]) {}",
+ "for (const of [1, 2, 3]) {}", nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -5773,22 +5293,23 @@ TEST(ForOfInOperator) {
const char* context_data[][2] = {{"", ""},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "for(x of 'foo' in {}) {}", "for(var x of 'foo' in {}) {}",
- "for(let x of 'foo' in {}) {}", "for(const x of 'foo' in {}) {}", NULL};
+ const char* data[] = {"for(x of 'foo' in {}) {}",
+ "for(var x of 'foo' in {}) {}",
+ "for(let x of 'foo' in {}) {}",
+ "for(const x of 'foo' in {}) {}", nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
TEST(ForOfYieldIdentifier) {
- const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
const char* data[] = {"for(x of yield) {}", "for(var x of yield) {}",
"for(let x of yield) {}", "for(const x of yield) {}",
- NULL};
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5798,12 +5319,12 @@ TEST(ForOfYieldExpression) {
const char* context_data[][2] = {{"", ""},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {"function* g() { for(x of yield) {} }",
"function* g() { for(var x of yield) {} }",
"function* g() { for(let x of yield) {} }",
- "function* g() { for(const x of yield) {} }", NULL};
+ "function* g() { for(const x of yield) {} }", nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5813,7 +5334,7 @@ TEST(ForOfExpressionError) {
const char* context_data[][2] = {{"", ""},
{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {
"for(x of [], []) {}", "for(var x of [], []) {}",
@@ -5821,77 +5342,61 @@ TEST(ForOfExpressionError) {
// AssignmentExpression should be validated statically:
"for(x of { y = 23 }) {}", "for(var x of { y = 23 }) {}",
- "for(let x of { y = 23 }) {}", "for(const x of { y = 23 }) {}", NULL};
+ "for(let x of { y = 23 }) {}", "for(const x of { y = 23 }) {}", nullptr};
RunParserSyncTest(context_data, data, kError);
}
TEST(InvalidUnicodeEscapes) {
- const char* context_data[][2] = {{"", ""},
- {"'use strict';", ""},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
const char* data[] = {
- "var foob\\u123r = 0;",
- "var \\u123roo = 0;",
- "\"foob\\u123rr\"",
- // No escapes allowed in regexp flags
- "/regex/\\u0069g",
- "/regex/\\u006g",
- // Braces gone wrong
- "var foob\\u{c481r = 0;",
- "var foob\\uc481}r = 0;",
- "var \\u{0052oo = 0;",
- "var \\u0052}oo = 0;",
- "\"foob\\u{c481r\"",
- "var foob\\u{}ar = 0;",
- // Too high value for the unicode escape
- "\"\\u{110000}\"",
- // Not an unicode escape
- "var foob\\v1234r = 0;",
- "var foob\\U1234r = 0;",
- "var foob\\v{1234}r = 0;",
- "var foob\\U{1234}r = 0;",
- NULL};
+ "var foob\\u123r = 0;", "var \\u123roo = 0;", "\"foob\\u123rr\"",
+ // No escapes allowed in regexp flags
+ "/regex/\\u0069g", "/regex/\\u006g",
+ // Braces gone wrong
+ "var foob\\u{c481r = 0;", "var foob\\uc481}r = 0;", "var \\u{0052oo = 0;",
+ "var \\u0052}oo = 0;", "\"foob\\u{c481r\"", "var foob\\u{}ar = 0;",
+ // Too high value for the unicode escape
+ "\"\\u{110000}\"",
+ // Not an unicode escape
+ "var foob\\v1234r = 0;", "var foob\\U1234r = 0;",
+ "var foob\\v{1234}r = 0;", "var foob\\U{1234}r = 0;", nullptr};
RunParserSyncTest(context_data, data, kError);
}
TEST(UnicodeEscapes) {
- const char* context_data[][2] = {{"", ""},
- {"'use strict';", ""},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
const char* data[] = {
- // Identifier starting with escape
- "var \\u0052oo = 0;",
- "var \\u{0052}oo = 0;",
- "var \\u{52}oo = 0;",
- "var \\u{00000000052}oo = 0;",
- // Identifier with an escape but not starting with an escape
- "var foob\\uc481r = 0;",
- "var foob\\u{c481}r = 0;",
- // String with an escape
- "\"foob\\uc481r\"",
- "\"foob\\{uc481}r\"",
- // This character is a valid unicode character, representable as a surrogate
- // pair, not representable as 4 hex digits.
- "\"foo\\u{10e6d}\"",
- // Max value for the unicode escape
- "\"\\u{10ffff}\"",
- NULL};
+ // Identifier starting with escape
+ "var \\u0052oo = 0;", "var \\u{0052}oo = 0;", "var \\u{52}oo = 0;",
+ "var \\u{00000000052}oo = 0;",
+ // Identifier with an escape but not starting with an escape
+ "var foob\\uc481r = 0;", "var foob\\u{c481}r = 0;",
+ // String with an escape
+ "\"foob\\uc481r\"", "\"foob\\{uc481}r\"",
+ // This character is a valid unicode character, representable as a
+ // surrogate
+ // pair, not representable as 4 hex digits.
+ "\"foo\\u{10e6d}\"",
+ // Max value for the unicode escape
+ "\"\\u{10ffff}\"", nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
TEST(OctalEscapes) {
const char* sloppy_context_data[][2] = {{"", ""}, // as a directive
{"0;", ""}, // as a string literal
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* strict_context_data[][2] = {
{"'use strict';", ""}, // as a directive before 'use strict'
{"", ";'use strict';"}, // as a directive after 'use strict'
{"'use strict'; 0;", ""}, // as a string literal
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -5900,7 +5405,7 @@ TEST(OctalEscapes) {
"'\\001'",
"'\\08'",
"'\\09'",
- NULL};
+ nullptr};
// clang-format on
// Permitted in sloppy mode
@@ -5913,33 +5418,33 @@ TEST(OctalEscapes) {
TEST(ScanTemplateLiterals) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';"
- " var a, b, c; return ", "}"},
- {NULL, NULL}};
+ " var a, b, c; return ",
+ "}"},
+ {nullptr, nullptr}};
- const char* data[] = {
- "``",
- "`no-subst-template`",
- "`template-head${a}`",
- "`${a}`",
- "`${a}template-tail`",
- "`template-head${a}template-tail`",
- "`${a}${b}${c}`",
- "`a${a}b${b}c${c}`",
- "`${a}a${b}b${c}c`",
- "`foo\n\nbar\r\nbaz`",
- "`foo\n\n${ bar }\r\nbaz`",
- "`foo${a /* comment */}`",
- "`foo${a // comment\n}`",
- "`foo${a \n}`",
- "`foo${a \r\n}`",
- "`foo${a \r}`",
- "`foo${/* comment */ a}`",
- "`foo${// comment\na}`",
- "`foo${\n a}`",
- "`foo${\r\n a}`",
- "`foo${\r a}`",
- "`foo${'a' in a}`",
- NULL};
+ const char* data[] = {"``",
+ "`no-subst-template`",
+ "`template-head${a}`",
+ "`${a}`",
+ "`${a}template-tail`",
+ "`template-head${a}template-tail`",
+ "`${a}${b}${c}`",
+ "`a${a}b${b}c${c}`",
+ "`${a}a${b}b${c}c`",
+ "`foo\n\nbar\r\nbaz`",
+ "`foo\n\n${ bar }\r\nbaz`",
+ "`foo${a /* comment */}`",
+ "`foo${a // comment\n}`",
+ "`foo${a \n}`",
+ "`foo${a \r\n}`",
+ "`foo${a \r}`",
+ "`foo${/* comment */ a}`",
+ "`foo${// comment\na}`",
+ "`foo${\n a}`",
+ "`foo${\r\n a}`",
+ "`foo${\r a}`",
+ "`foo${'a' in a}`",
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5948,59 +5453,48 @@ TEST(ScanTaggedTemplateLiterals) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';"
" function tag() {}"
- " var a, b, c; return ", "}"},
- {NULL, NULL}};
+ " var a, b, c; return ",
+ "}"},
+ {nullptr, nullptr}};
- const char* data[] = {
- "tag ``",
- "tag `no-subst-template`",
- "tag`template-head${a}`",
- "tag `${a}`",
- "tag `${a}template-tail`",
- "tag `template-head${a}template-tail`",
- "tag\n`${a}${b}${c}`",
- "tag\r\n`a${a}b${b}c${c}`",
- "tag `${a}a${b}b${c}c`",
- "tag\t`foo\n\nbar\r\nbaz`",
- "tag\r`foo\n\n${ bar }\r\nbaz`",
- "tag`foo${a /* comment */}`",
- "tag`foo${a // comment\n}`",
- "tag`foo${a \n}`",
- "tag`foo${a \r\n}`",
- "tag`foo${a \r}`",
- "tag`foo${/* comment */ a}`",
- "tag`foo${// comment\na}`",
- "tag`foo${\n a}`",
- "tag`foo${\r\n a}`",
- "tag`foo${\r a}`",
- "tag`foo${'a' in a}`",
- NULL};
+ const char* data[] = {"tag ``",
+ "tag `no-subst-template`",
+ "tag`template-head${a}`",
+ "tag `${a}`",
+ "tag `${a}template-tail`",
+ "tag `template-head${a}template-tail`",
+ "tag\n`${a}${b}${c}`",
+ "tag\r\n`a${a}b${b}c${c}`",
+ "tag `${a}a${b}b${c}c`",
+ "tag\t`foo\n\nbar\r\nbaz`",
+ "tag\r`foo\n\n${ bar }\r\nbaz`",
+ "tag`foo${a /* comment */}`",
+ "tag`foo${a // comment\n}`",
+ "tag`foo${a \n}`",
+ "tag`foo${a \r\n}`",
+ "tag`foo${a \r}`",
+ "tag`foo${/* comment */ a}`",
+ "tag`foo${// comment\na}`",
+ "tag`foo${\n a}`",
+ "tag`foo${\r\n a}`",
+ "tag`foo${\r a}`",
+ "tag`foo${'a' in a}`",
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
TEST(TemplateMaterializedLiterals) {
- const char* context_data[][2] = {
- {
- "'use strict';\n"
- "function tag() {}\n"
- "var a, b, c;\n"
- "(", ")"
- },
- {NULL, NULL}
- };
+ const char* context_data[][2] = {{"'use strict';\n"
+ "function tag() {}\n"
+ "var a, b, c;\n"
+ "(",
+ ")"},
+ {nullptr, nullptr}};
- const char* data[] = {
- "tag``",
- "tag`a`",
- "tag`a${1}b`",
- "tag`a${1}b${2}c`",
- "``",
- "`a`",
- "`a${1}b`",
- "`a${1}b${2}c`",
- NULL
- };
+ const char* data[] = {"tag``", "tag`a`", "tag`a${1}b`", "tag`a${1}b${2}c`",
+ "``", "`a`", "`a${1}b`", "`a${1}b${2}c`",
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -6009,33 +5503,33 @@ TEST(TemplateMaterializedLiterals) {
TEST(ScanUnterminatedTemplateLiterals) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';"
- " var a, b, c; return ", "}"},
- {NULL, NULL}};
+ " var a, b, c; return ",
+ "}"},
+ {nullptr, nullptr}};
- const char* data[] = {
- "`no-subst-template",
- "`template-head${a}",
- "`${a}template-tail",
- "`template-head${a}template-tail",
- "`${a}${b}${c}",
- "`a${a}b${b}c${c}",
- "`${a}a${b}b${c}c",
- "`foo\n\nbar\r\nbaz",
- "`foo\n\n${ bar }\r\nbaz",
- "`foo${a /* comment } */`",
- "`foo${a /* comment } `*/",
- "`foo${a // comment}`",
- "`foo${a \n`",
- "`foo${a \r\n`",
- "`foo${a \r`",
- "`foo${/* comment */ a`",
- "`foo${// commenta}`",
- "`foo${\n a`",
- "`foo${\r\n a`",
- "`foo${\r a`",
- "`foo${fn(}`",
- "`foo${1 if}`",
- NULL};
+ const char* data[] = {"`no-subst-template",
+ "`template-head${a}",
+ "`${a}template-tail",
+ "`template-head${a}template-tail",
+ "`${a}${b}${c}",
+ "`a${a}b${b}c${c}",
+ "`${a}a${b}b${c}c",
+ "`foo\n\nbar\r\nbaz",
+ "`foo\n\n${ bar }\r\nbaz",
+ "`foo${a /* comment } */`",
+ "`foo${a /* comment } `*/",
+ "`foo${a // comment}`",
+ "`foo${a \n`",
+ "`foo${a \r\n`",
+ "`foo${a \r`",
+ "`foo${/* comment */ a`",
+ "`foo${// commenta}`",
+ "`foo${\n a`",
+ "`foo${\r\n a`",
+ "`foo${\r a`",
+ "`foo${fn(}`",
+ "`foo${1 if}`",
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -6043,18 +5537,13 @@ TEST(ScanUnterminatedTemplateLiterals) {
TEST(TemplateLiteralsIllegalTokens) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';"
- " var a, b, c; return ", "}"},
- {NULL, NULL}};
+ " var a, b, c; return ",
+ "}"},
+ {nullptr, nullptr}};
const char* data[] = {
- "`hello\\x`",
- "`hello\\x${1}`",
- "`hello${1}\\x`",
- "`hello${1}\\x${2}`",
- "`hello\\x\n`",
- "`hello\\x\n${1}`",
- "`hello${1}\\x\n`",
- "`hello${1}\\x\n${2}`",
- NULL};
+ "`hello\\x`", "`hello\\x${1}`", "`hello${1}\\x`",
+ "`hello${1}\\x${2}`", "`hello\\x\n`", "`hello\\x\n${1}`",
+ "`hello${1}\\x\n`", "`hello${1}\\x\n${2}`", nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -6064,9 +5553,10 @@ TEST(ParseRestParameters) {
const char* context_data[][2] = {{"'use strict';(function(",
"){ return args;})(1, [], /regexp/, 'str',"
"function(){});"},
- {"(function(", "){ return args;})(1, [],"
+ {"(function(",
+ "){ return args;})(1, [],"
"/regexp/, 'str', function(){});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {"...args",
"a, ...args",
@@ -6085,7 +5575,7 @@ TEST(ParseRestParameters) {
"...[a, b]",
"...[]",
"...[...[a, b, ...c]]",
- NULL};
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -6094,28 +5584,28 @@ TEST(ParseRestParametersErrors) {
const char* context_data[][2] = {{"'use strict';(function(",
"){ return args;}(1, [], /regexp/, 'str',"
"function(){});"},
- {"(function(", "){ return args;}(1, [],"
+ {"(function(",
+ "){ return args;}(1, [],"
"/regexp/, 'str', function(){});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "...args, b",
- "a, ...args, b",
- "...args, b",
- "a, ...args, b",
- "...args,\tb",
- "a,...args\t,b",
- "...args\r\n, b",
- "a, ... args,\r\nb",
- "...args\r,b",
- "a, ... args,\rb",
- "...args\t\n\t\t\n, b",
- "a, ... args, \n \n b",
- "a, a, ...args",
- "a,\ta, ...args",
- "a,\ra, ...args",
- "a,\na, ...args",
- NULL};
+ const char* data[] = {"...args, b",
+ "a, ...args, b",
+ "...args, b",
+ "a, ...args, b",
+ "...args,\tb",
+ "a,...args\t,b",
+ "...args\r\n, b",
+ "a, ... args,\r\nb",
+ "...args\r,b",
+ "a, ... args,\rb",
+ "...args\t\n\t\t\n, b",
+ "a, ... args, \n \n b",
+ "a, a, ...args",
+ "a,\ta, ...args",
+ "a,\ra, ...args",
+ "a,\na, ...args",
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -6140,11 +5630,11 @@ TEST(RestParametersEvalArguments) {
const char* strict_context_data[][2] =
{{"'use strict';(function(",
"){ return;})(1, [], /regexp/, 'str',function(){});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* sloppy_context_data[][2] =
{{"(function(",
"){ return;})(1, [],/regexp/, 'str', function(){});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {
"...eval",
@@ -6152,7 +5642,7 @@ TEST(RestParametersEvalArguments) {
"...arguments",
// See https://bugs.chromium.org/p/v8/issues/detail?id=4577
// "arguments, ...args",
- NULL};
+ nullptr};
// clang-format on
// Fail in strict mode
@@ -6164,19 +5654,15 @@ TEST(RestParametersEvalArguments) {
TEST(RestParametersDuplicateEvalArguments) {
- const char* context_data[][2] =
- {{"'use strict';(function(",
- "){ return;})(1, [], /regexp/, 'str',function(){});"},
- {"(function(",
- "){ return;})(1, [],/regexp/, 'str', function(){});"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"'use strict';(function(",
+ "){ return;})(1, [], /regexp/, 'str',function(){});"},
+ {"(function(", "){ return;})(1, [],/regexp/, 'str', function(){});"},
+ {nullptr, nullptr}};
- const char* data[] = {
- "eval, ...eval",
- "eval, eval, ...args",
- "arguments, ...arguments",
- "arguments, arguments, ...args",
- NULL};
+ const char* data[] = {"eval, ...eval", "eval, eval, ...args",
+ "arguments, ...arguments",
+ "arguments, arguments, ...args", nullptr};
// In strict mode, the error is using "eval" or "arguments" as parameter names
// In sloppy mode, the error is that eval / arguments are duplicated
@@ -6187,15 +5673,19 @@ TEST(RestParametersDuplicateEvalArguments) {
TEST(SpreadCall) {
const char* context_data[][2] = {{"function fn() { 'use strict';} fn(", ");"},
{"function fn() {} fn(", ");"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {
- "...([1, 2, 3])", "...'123', ...'456'", "...new Set([1, 2, 3]), 4",
- "1, ...[2, 3], 4", "...Array(...[1,2,3,4])", "...NaN",
- "0, 1, ...[2, 3, 4], 5, 6, 7, ...'89'",
- "0, 1, ...[2, 3, 4], 5, 6, 7, ...'89', 10",
- "...[0, 1, 2], 3, 4, 5, 6, ...'7', 8, 9",
- "...[0, 1, 2], 3, 4, 5, 6, ...'7', 8, 9, ...[10]", NULL};
+ const char* data[] = {"...([1, 2, 3])",
+ "...'123', ...'456'",
+ "...new Set([1, 2, 3]), 4",
+ "1, ...[2, 3], 4",
+ "...Array(...[1,2,3,4])",
+ "...NaN",
+ "0, 1, ...[2, 3, 4], 5, 6, 7, ...'89'",
+ "0, 1, ...[2, 3, 4], 5, 6, 7, ...'89', 10",
+ "...[0, 1, 2], 3, 4, 5, 6, ...'7', 8, 9",
+ "...[0, 1, 2], 3, 4, 5, 6, ...'7', 8, 9, ...[10]",
+ nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -6204,9 +5694,9 @@ TEST(SpreadCall) {
TEST(SpreadCallErrors) {
const char* context_data[][2] = {{"function fn() { 'use strict';} fn(", ");"},
{"function fn() {} fn(", ");"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* data[] = {"(...[1, 2, 3])", "......[1,2,3]", NULL};
+ const char* data[] = {"(...[1, 2, 3])", "......[1,2,3]", nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -6215,26 +5705,22 @@ TEST(SpreadCallErrors) {
TEST(BadRestSpread) {
const char* context_data[][2] = {{"function fn() { 'use strict';", "} fn();"},
{"function fn() { ", "} fn();"},
- {NULL, NULL}};
- const char* data[] = {"return ...[1,2,3];", "var ...x = [1,2,3];",
- "var [...x,] = [1,2,3];", "var [...x, y] = [1,2,3];",
- "var {...x} = [1,2,3];", "var { x } = {x: ...[1,2,3]}",
- NULL};
+ {nullptr, nullptr}};
+ const char* data[] = {"return ...[1,2,3];",
+ "var ...x = [1,2,3];",
+ "var [...x,] = [1,2,3];",
+ "var [...x, y] = [1,2,3];",
+ "var { x } = {x: ...[1,2,3]}",
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
TEST(LexicalScopingSloppyMode) {
const char* context_data[][2] = {
- {"", ""},
- {"function f() {", "}"},
- {"{", "}"},
- {NULL, NULL}};
+ {"", ""}, {"function f() {", "}"}, {"{", "}"}, {nullptr, nullptr}};
- const char* good_data[] = {
- "let = 1;",
- "for(let = 1;;){}",
- NULL};
+ const char* good_data[] = {"let = 1;", "for(let = 1;;){}", nullptr};
RunParserSyncTest(context_data, good_data, kSuccess);
}
@@ -6249,34 +5735,21 @@ TEST(ComputedPropertyName) {
{"(class {set [", "](_) {}});"},
{"(class {[", "]() {}});"},
{"(class {*[", "]() {}});"},
- {NULL, NULL}};
- const char* error_data[] = {
- "1, 2",
- "var name",
- NULL};
+ {nullptr, nullptr}};
+ const char* error_data[] = {"1, 2", "var name", nullptr};
RunParserSyncTest(context_data, error_data, kError);
- const char* name_data[] = {
- "1",
- "1 + 2",
- "'name'",
- "\"name\"",
- "[]",
- "{}",
- NULL};
+ const char* name_data[] = {"1", "1 + 2", "'name'", "\"name\"",
+ "[]", "{}", nullptr};
RunParserSyncTest(context_data, name_data, kSuccess);
}
TEST(ComputedPropertyNameShorthandError) {
- const char* context_data[][2] = {{"({", "});"},
- {NULL, NULL}};
- const char* error_data[] = {
- "a: 1, [2]",
- "[1], a: 1",
- NULL};
+ const char* context_data[][2] = {{"({", "});"}, {nullptr, nullptr}};
+ const char* error_data[] = {"a: 1, [2]", "[1], a: 1", nullptr};
RunParserSyncTest(context_data, error_data, kError);
}
@@ -6558,17 +6031,17 @@ TEST(ModuleAwaitReserved) {
"({ set p(await) {} });",
"try {} catch (await) {}",
"try {} catch (await) {} finally {}",
- NULL
+ nullptr
};
// clang-format on
- const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
RunModuleParserSyncTest(context_data, kErrorSources, kError);
}
TEST(ModuleAwaitReservedPreParse) {
- const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
- const char* error_data[] = {"function f() { var await = 0; }", NULL};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
+ const char* error_data[] = {"function f() { var await = 0; }", nullptr};
RunModuleParserSyncTest(context_data, error_data, kError);
}
@@ -6585,10 +6058,10 @@ TEST(ModuleAwaitPermitted) {
"(class { static await() {} });",
"(class { *await() {} });",
"(class { static *await() {} });",
- NULL
+ nullptr
};
// clang-format on
- const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
RunModuleParserSyncTest(context_data, kValidSources, kSuccess);
}
@@ -6640,10 +6113,10 @@ TEST(EnumReserved) {
"({ set p(enum) {} });",
"try {} catch (enum) {}",
"try {} catch (enum) {} finally {}",
- NULL
+ nullptr
};
// clang-format on
- const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
RunModuleParserSyncTest(context_data, kErrorSources, kError);
}
@@ -6827,7 +6300,7 @@ TEST(ModuleParsingInternals) {
}
}
- CHECK_EQ(3, descriptor->special_exports().length());
+ CHECK_EQ(3, descriptor->special_exports().size());
CheckEntry(descriptor->special_exports().at(0), "b", nullptr, "a", 0);
CheckEntry(descriptor->special_exports().at(1), nullptr, nullptr, nullptr, 2);
CheckEntry(descriptor->special_exports().at(2), "bb", nullptr, "aa",
@@ -6873,7 +6346,7 @@ TEST(ModuleParsingInternals) {
CheckEntry(entry, "y", "x", nullptr, -1);
}
- CHECK_EQ(2, descriptor->namespace_imports().length());
+ CHECK_EQ(2, descriptor->namespace_imports().size());
CheckEntry(descriptor->namespace_imports().at(0), nullptr, "loo", nullptr, 4);
CheckEntry(descriptor->namespace_imports().at(1), nullptr, "foob", nullptr,
4);
@@ -6900,17 +6373,11 @@ TEST(ModuleParsingInternals) {
TEST(DuplicateProtoError) {
const char* context_data[][2] = {
- {"({", "});"},
- {"'use strict'; ({", "});"},
- {NULL, NULL}
- };
- const char* error_data[] = {
- "__proto__: {}, __proto__: {}",
- "__proto__: {}, \"__proto__\": {}",
- "__proto__: {}, \"__\x70roto__\": {}",
- "__proto__: {}, a: 1, __proto__: {}",
- NULL
- };
+ {"({", "});"}, {"'use strict'; ({", "});"}, {nullptr, nullptr}};
+ const char* error_data[] = {"__proto__: {}, __proto__: {}",
+ "__proto__: {}, \"__proto__\": {}",
+ "__proto__: {}, \"__\x70roto__\": {}",
+ "__proto__: {}, a: 1, __proto__: {}", nullptr};
RunParserSyncTest(context_data, error_data, kError);
}
@@ -6918,18 +6385,11 @@ TEST(DuplicateProtoError) {
TEST(DuplicateProtoNoError) {
const char* context_data[][2] = {
- {"({", "});"},
- {"'use strict'; ({", "});"},
- {NULL, NULL}
- };
+ {"({", "});"}, {"'use strict'; ({", "});"}, {nullptr, nullptr}};
const char* error_data[] = {
- "__proto__: {}, ['__proto__']: {}",
- "__proto__: {}, __proto__() {}",
- "__proto__: {}, get __proto__() {}",
- "__proto__: {}, set __proto__(v) {}",
- "__proto__: {}, __proto__",
- NULL
- };
+ "__proto__: {}, ['__proto__']: {}", "__proto__: {}, __proto__() {}",
+ "__proto__: {}, get __proto__() {}", "__proto__: {}, set __proto__(v) {}",
+ "__proto__: {}, __proto__", nullptr};
RunParserSyncTest(context_data, error_data, kSuccess);
}
@@ -6942,13 +6402,10 @@ TEST(DeclarationsError) {
{"'use strict'; for (;;)", ""},
{"'use strict'; for (x in y)", ""},
{"'use strict'; do ", " while (false)"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
- const char* statement_data[] = {
- "let x = 1;",
- "const x = 1;",
- "class C {}",
- NULL};
+ const char* statement_data[] = {"let x = 1;", "const x = 1;", "class C {}",
+ nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -6968,75 +6425,73 @@ void TestLanguageMode(const char* source,
factory->NewScript(factory->NewStringFromAsciiChecked(source));
i::ParseInfo info(script);
i::parsing::ParseProgram(&info, isolate);
- CHECK(info.literal() != NULL);
+ CHECK_NOT_NULL(info.literal());
CHECK_EQ(expected_language_mode, info.literal()->language_mode());
}
TEST(LanguageModeDirectives) {
- TestLanguageMode("\"use nothing\"", i::SLOPPY);
- TestLanguageMode("\"use strict\"", i::STRICT);
+ TestLanguageMode("\"use nothing\"", i::LanguageMode::kSloppy);
+ TestLanguageMode("\"use strict\"", i::LanguageMode::kStrict);
- TestLanguageMode("var x = 1; \"use strict\"", i::SLOPPY);
+ TestLanguageMode("var x = 1; \"use strict\"", i::LanguageMode::kSloppy);
- TestLanguageMode("\"use some future directive\"; \"use strict\";", i::STRICT);
+ TestLanguageMode("\"use some future directive\"; \"use strict\";",
+ i::LanguageMode::kStrict);
}
TEST(PropertyNameEvalArguments) {
- const char* context_data[][2] = {{"'use strict';", ""},
- {NULL, NULL}};
+ const char* context_data[][2] = {{"'use strict';", ""}, {nullptr, nullptr}};
+
+ const char* statement_data[] = {"({eval: 1})",
+ "({arguments: 1})",
+ "({eval() {}})",
+ "({arguments() {}})",
+ "({*eval() {}})",
+ "({*arguments() {}})",
+ "({get eval() {}})",
+ "({get arguments() {}})",
+ "({set eval(_) {}})",
+ "({set arguments(_) {}})",
+
+ "class C {eval() {}}",
+ "class C {arguments() {}}",
+ "class C {*eval() {}}",
+ "class C {*arguments() {}}",
+ "class C {get eval() {}}",
+ "class C {get arguments() {}}",
+ "class C {set eval(_) {}}",
+ "class C {set arguments(_) {}}",
+
+ "class C {static eval() {}}",
+ "class C {static arguments() {}}",
+ "class C {static *eval() {}}",
+ "class C {static *arguments() {}}",
+ "class C {static get eval() {}}",
+ "class C {static get arguments() {}}",
+ "class C {static set eval(_) {}}",
+ "class C {static set arguments(_) {}}",
- const char* statement_data[] = {
- "({eval: 1})",
- "({arguments: 1})",
- "({eval() {}})",
- "({arguments() {}})",
- "({*eval() {}})",
- "({*arguments() {}})",
- "({get eval() {}})",
- "({get arguments() {}})",
- "({set eval(_) {}})",
- "({set arguments(_) {}})",
-
- "class C {eval() {}}",
- "class C {arguments() {}}",
- "class C {*eval() {}}",
- "class C {*arguments() {}}",
- "class C {get eval() {}}",
- "class C {get arguments() {}}",
- "class C {set eval(_) {}}",
- "class C {set arguments(_) {}}",
-
- "class C {static eval() {}}",
- "class C {static arguments() {}}",
- "class C {static *eval() {}}",
- "class C {static *arguments() {}}",
- "class C {static get eval() {}}",
- "class C {static get arguments() {}}",
- "class C {static set eval(_) {}}",
- "class C {static set arguments(_) {}}",
-
- NULL};
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(FunctionLiteralDuplicateParameters) {
- const char* strict_context_data[][2] =
- {{"'use strict';(function(", "){})();"},
- {"(function(", ") { 'use strict'; })();"},
- {"'use strict'; function fn(", ") {}; fn();"},
- {"function fn(", ") { 'use strict'; }; fn();"},
- {NULL, NULL}};
+ const char* strict_context_data[][2] = {
+ {"'use strict';(function(", "){})();"},
+ {"(function(", ") { 'use strict'; })();"},
+ {"'use strict'; function fn(", ") {}; fn();"},
+ {"function fn(", ") { 'use strict'; }; fn();"},
+ {nullptr, nullptr}};
- const char* sloppy_context_data[][2] =
- {{"(function(", "){})();"},
- {"(function(", ") {})();"},
- {"function fn(", ") {}; fn();"},
- {"function fn(", ") {}; fn();"},
- {NULL, NULL}};
+ const char* sloppy_context_data[][2] = {{"(function(", "){})();"},
+ {"(function(", ") {})();"},
+ {"function fn(", ") {}; fn();"},
+ {"function fn(", ") {}; fn();"},
+ {nullptr, nullptr}};
const char* data[] = {
"a, a",
@@ -7044,7 +6499,7 @@ TEST(FunctionLiteralDuplicateParameters) {
"b, a, a",
"a, b, c, c",
"a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, w",
- NULL};
+ nullptr};
RunParserSyncTest(strict_context_data, data, kError);
RunParserSyncTest(sloppy_context_data, data, kSuccess);
@@ -7052,17 +6507,16 @@ TEST(FunctionLiteralDuplicateParameters) {
TEST(ArrowFunctionASIErrors) {
- const char* context_data[][2] = {{"'use strict';", ""}, {"", ""},
- {NULL, NULL}};
-
- const char* data[] = {
- "(a\n=> a)(1)",
- "(a/*\n*/=> a)(1)",
- "((a)\n=> a)(1)",
- "((a)/*\n*/=> a)(1)",
- "((a, b)\n=> a + b)(1, 2)",
- "((a, b)/*\n*/=> a + b)(1, 2)",
- NULL};
+ const char* context_data[][2] = {
+ {"'use strict';", ""}, {"", ""}, {nullptr, nullptr}};
+
+ const char* data[] = {"(a\n=> a)(1)",
+ "(a/*\n*/=> a)(1)",
+ "((a)\n=> a)(1)",
+ "((a)/*\n*/=> a)(1)",
+ "((a, b)\n=> a + b)(1, 2)",
+ "((a, b)/*\n*/=> a + b)(1, 2)",
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -7071,7 +6525,7 @@ TEST(ObjectSpreadPositiveTests) {
const char* context_data[][2] = {
{"x = ", ""},
{"'use strict'; x = ", ""},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7092,18 +6546,15 @@ TEST(ObjectSpreadPositiveTests) {
"{ ...async function() { }}",
"{ ...async () => { }}",
"{ ...new Foo()}",
- NULL};
+ nullptr};
// clang-format on
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
TEST(ObjectSpreadNegativeTests) {
- const char* context_data[][2] = {{"x = ", ""},
- {"'use strict'; x = ", ""},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"x = ", ""}, {"'use strict'; x = ", ""}, {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7114,11 +6565,9 @@ TEST(ObjectSpreadNegativeTests) {
"{get ...foo}",
"{set ...foo}",
"{async ...foo}",
- NULL};
+ nullptr};
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, data, kError);
}
TEST(TemplateEscapesPositiveTests) {
@@ -7126,7 +6575,7 @@ TEST(TemplateEscapesPositiveTests) {
const char* context_data[][2] = {
{"", ""},
{"'use strict';", ""},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7193,16 +6642,10 @@ TEST(TemplateEscapesPositiveTests) {
"tag`\\u``\\u`",
"` ${tag`\\u`}`",
"` ``\\u`",
- NULL};
+ nullptr};
// clang-format on
- // No error with flag
- static const ParserFlag flags[] = {kAllowHarmonyTemplateEscapes};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
- arraysize(flags));
-
- // Still an error without flag
- RunParserSyncTest(context_data, data, kError);
+ RunParserSyncTest(context_data, data, kSuccess);
}
TEST(TemplateEscapesNegativeTests) {
@@ -7210,7 +6653,7 @@ TEST(TemplateEscapesNegativeTests) {
const char* context_data[][2] = {
{"", ""},
{"'use strict';", ""},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7274,15 +6717,9 @@ TEST(TemplateEscapesNegativeTests) {
"`\\1``\\2`",
"tag` ${`\\u`}`",
"`\\u```",
- NULL};
+ nullptr};
// clang-format on
- // Error with flag
- static const ParserFlag flags[] = {kAllowHarmonyTemplateEscapes};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
- arraysize(flags));
-
- // Still an error without flag
RunParserSyncTest(context_data, data, kError);
}
@@ -7295,7 +6732,7 @@ TEST(DestructuringPositiveTests) {
{"var f = (", ") => {};"},
{"var f = (argument1,", ") => {};"},
{"try {} catch(", ") {}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7338,9 +6775,6 @@ TEST(DestructuringPositiveTests) {
"{ __proto__: x, __proto__: y}",
"{arguments: x}",
"{eval: x}",
- NULL};
-
- const char* rest_data[] = {
"{ x : y, ...z }",
"{ x : y = 1, ...z }",
"{ x : x, y : y, ...z }",
@@ -7359,17 +6793,11 @@ TEST(DestructuringPositiveTests) {
"{[1+1] : z, ...x}",
"{arguments: x, ...z}",
"{ __proto__: x, __proto__: y, ...z}",
- NULL};
+ nullptr
+ };
// clang-format on
RunParserSyncTest(context_data, data, kSuccess);
- RunParserSyncTest(context_data, rest_data, kError);
-
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
- arraysize(flags));
- RunParserSyncTest(context_data, rest_data, kSuccess, NULL, 0, flags,
- arraysize(flags));
// v8:5201
{
@@ -7381,13 +6809,7 @@ TEST(DestructuringPositiveTests) {
{"var f = (", ") => {};"},
{"var f = (argument1,", ") => {};"},
{"try {} catch(", ") {}"},
- {NULL, NULL}
- };
-
- const char* rest_data[] = {
- "{...arguments}",
- "{...eval}",
- NULL
+ {nullptr, nullptr}
};
const char* data[] = {
@@ -7397,16 +6819,12 @@ TEST(DestructuringPositiveTests) {
"{x: eval}",
"{arguments = false}",
"{eval = false}",
- NULL
+ "{...arguments}",
+ "{...eval}",
+ nullptr
};
// clang-format on
RunParserSyncTest(sloppy_context_data, data, kSuccess);
- RunParserSyncTest(sloppy_context_data, rest_data, kError);
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, flags,
- arraysize(flags));
- RunParserSyncTest(sloppy_context_data, rest_data, kSuccess, NULL, 0, flags,
- arraysize(flags));
}
}
@@ -7422,7 +6840,7 @@ TEST(DestructuringNegativeTests) {
{"var f = ", " => {};"},
{"var f = (argument1,", ") => {};"},
{"try {} catch(", ") {}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7499,70 +6917,58 @@ TEST(DestructuringNegativeTests) {
"{ set a() {} }",
"{ method() {} }",
"{ *method() {} }",
- NULL};
-
- const char* rest_data[] = {
- "...a++",
- "...++a",
- "...typeof a",
- "...[a++]",
- "...(x => y)",
- "{ ...x, }",
- "{ ...x, y }",
- "{ y, ...x, y }",
- "{ ...x, ...y }",
- "{ ...x, ...x }",
- "{ ...x, ...x = {} }",
- "{ ...x, ...x = ...x }",
- "{ ...x, ...x = ...{ x } }",
- "{ ,, ...x }",
- "{ ...get a() {} }",
- "{ ...set a() {} }",
- "{ ...method() {} }",
- "{ ...function() {} }",
- "{ ...*method() {} }",
- "{...{x} }",
- "{...[x] }",
- "{...{ x = 5 } }",
- "{...[ x = 5 ] }",
- "{...x.f }",
- "{...x[0] }",
- NULL
+ "...a++",
+ "...++a",
+ "...typeof a",
+ "...[a++]",
+ "...(x => y)",
+ "{ ...x, }",
+ "{ ...x, y }",
+ "{ y, ...x, y }",
+ "{ ...x, ...y }",
+ "{ ...x, ...x }",
+ "{ ...x, ...x = {} }",
+ "{ ...x, ...x = ...x }",
+ "{ ...x, ...x = ...{ x } }",
+ "{ ,, ...x }",
+ "{ ...get a() {} }",
+ "{ ...set a() {} }",
+ "{ ...method() {} }",
+ "{ ...function() {} }",
+ "{ ...*method() {} }",
+ "{...{x} }",
+ "{...[x] }",
+ "{...{ x = 5 } }",
+ "{...[ x = 5 ] }",
+ "{...x.f }",
+ "{...x[0] }",
+ nullptr
};
const char* async_gen_data[] = {
"async function* a() {}",
- NULL
+ nullptr
};
// clang-format on
RunParserSyncTest(context_data, data, kError);
- RunParserSyncTest(context_data, rest_data, kError);
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
- arraysize(flags));
- RunParserSyncTest(context_data, rest_data, kError, NULL, 0, flags,
- arraysize(flags));
static const ParserFlag async_gen_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, async_gen_data, kError, NULL, 0,
+ RunParserSyncTest(context_data, async_gen_data, kError, nullptr, 0,
async_gen_flags, arraysize(async_gen_flags));
}
{ // All modes.
- const char* context_data[][2] = {{"'use strict'; let ", " = {};"},
- {"var ", " = {};"},
- {"'use strict'; const ", " = {};"},
- {"function f(", ") {}"},
- {"function f(argument1, ", ") {}"},
- {"var f = (", ") => {};"},
- {"var f = (argument1,", ") => {};"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"'use strict'; let ", " = {};"}, {"var ", " = {};"},
+ {"'use strict'; const ", " = {};"}, {"function f(", ") {}"},
+ {"function f(argument1, ", ") {}"}, {"var f = (", ") => {};"},
+ {"var f = (argument1,", ") => {};"}, {nullptr, nullptr}};
// clang-format off
const char* data[] = {
"x => x",
"() => x",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, data, kError);
}
@@ -7574,7 +6980,7 @@ TEST(DestructuringNegativeTests) {
{"'use strict'; const ", " = {};"},
{"'use strict'; function f(", ") {}"},
{"'use strict'; function f(argument1, ", ") {}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7590,21 +6996,12 @@ TEST(DestructuringNegativeTests) {
"{ eval }",
"{ arguments = false }"
"{ eval = false }",
- NULL};
-
- const char* rest_data[] = {
"{ ...eval }",
"{ ...arguments }",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, data, kError);
- RunParserSyncTest(context_data, rest_data, kError);
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
- arraysize(flags));
- RunParserSyncTest(context_data, rest_data, kError, NULL, 0, flags,
- arraysize(flags));
}
{ // 'yield' in generators.
@@ -7612,14 +7009,14 @@ TEST(DestructuringNegativeTests) {
{"function*() { var ", " = {};"},
{"function*() { 'use strict'; let ", " = {};"},
{"function*() { 'use strict'; const ", " = {};"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
"yield",
"[yield]",
"{ x : yield }",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, data, kError);
}
@@ -7637,24 +7034,17 @@ TEST(DestructuringNegativeTests) {
{"for (var ", ";;) {}"},
{"for (let ", ";;) {}"},
{"for (const ", ";;) {}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
"{ a }",
"[ a ]",
- NULL};
- const char* rest_data[] = {
"{ ...a }",
- NULL};
+ nullptr
+ };
// clang-format on
RunParserSyncTest(context_data, data, kError);
- RunParserSyncTest(context_data, rest_data, kError);
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
- arraysize(flags));
- RunParserSyncTest(context_data, rest_data, kError, NULL, 0, flags,
- arraysize(flags));
}
}
@@ -7662,7 +7052,7 @@ TEST(ObjectRestNegativeTestSlow) {
// clang-format off
const char* context_data[][2] = {
{"var { ", " } = { a: 1};"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
using v8::internal::Code;
@@ -7674,13 +7064,13 @@ TEST(ObjectRestNegativeTestSlow) {
const char* statement_data[] = {
statement.c_str(),
- NULL
+ nullptr
};
// clang-format on
// The test is quite slow, so run it with a reduced set of flags.
- static const ParserFlag flags[] = {kAllowLazy, kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, flags,
+ static const ParserFlag flags[] = {kAllowLazy};
+ RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags,
arraysize(flags));
}
@@ -7700,7 +7090,7 @@ TEST(DestructuringAssignmentPositiveTests) {
{"var x, y, z; m(['b']) ? lhs : ", " = {}"},
{"'use strict'; var x, y, z; m(['a']) ? ", " = {} : rhs"},
{"'use strict'; var x, y, z; m(['b']) ? lhs : ", " = {}"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* mixed_assignments_context_data[][2] = {
{"'use strict'; let x, y, z; (", " = z = {});"},
@@ -7715,7 +7105,7 @@ TEST(DestructuringAssignmentPositiveTests) {
{"var x, y, z; for (x in x = ", " = z = {});"},
{"var x, y, z; for (x of ", " = z = {});"},
{"var x, y, z; for (x of x = ", " = z = {});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -7856,17 +7246,14 @@ TEST(DestructuringAssignmentPositiveTests) {
"[ (foo.bar) ]",
"[ (foo['bar']) ]",
- NULL};
+ nullptr};
// clang-format on
- static const ParserFlag flags[] = {kAllowHarmonyObjectRestSpread};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, data, kSuccess);
- RunParserSyncTest(mixed_assignments_context_data, data, kSuccess, NULL, 0,
- flags, arraysize(flags));
+ RunParserSyncTest(mixed_assignments_context_data, data, kSuccess);
const char* empty_context_data[][2] = {
- {"'use strict';", ""}, {"", ""}, {NULL, NULL}};
+ {"'use strict';", ""}, {"", ""}, {nullptr, nullptr}};
// CoverInitializedName ambiguity handling in various contexts
const char* ambiguity_data[] = {
@@ -7884,7 +7271,7 @@ TEST(DestructuringAssignmentPositiveTests) {
"({ __proto__: x, __proto__: y } = {})",
"var { x = 10 } = (o = { x = 20 } = {});",
"var x; (({ x = 10 } = { x = 20 } = {}) => x)({})",
- NULL,
+ nullptr,
};
RunParserSyncTest(empty_context_data, ambiguity_data, kSuccess);
}
@@ -7898,13 +7285,12 @@ TEST(DestructuringAssignmentNegativeTests) {
{"'use strict'; let x, y, z; for (x of ", " = {});"},
{"var x, y, z; for (x in ", " = {});"},
{"var x, y, z; for (x of ", " = {});"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
// clang-format off
const char* data[] = {
"{ x : ++y }",
"{ x : y * 2 }",
- "{ ...x }",
"{ get x() {} }",
"{ set x() {} }",
"{ x: y() }",
@@ -8007,32 +7393,27 @@ TEST(DestructuringAssignmentNegativeTests) {
"[ x += x ]",
"{ foo: x += x }",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, data, kError);
const char* empty_context_data[][2] = {
- {"'use strict';", ""}, {"", ""}, {NULL, NULL}};
+ {"'use strict';", ""}, {"", ""}, {nullptr, nullptr}};
// CoverInitializedName ambiguity handling in various contexts
const char* ambiguity_data[] = {
- "var foo = { x = 10 };",
- "var foo = { q } = { x = 10 };",
- "var foo; foo = { x = 10 };",
- "var foo; foo = { q } = { x = 10 };",
- "var x; ({ x = 10 });",
- "var q, x; ({ q } = { x = 10 });",
- "var x; [{ x = 10 }]",
- "var x; (true ? { x = true } : { x = false })",
- "var q, x; (q, { x = 10 });",
- "var { x = 10 } = { x = 20 };",
+ "var foo = { x = 10 };", "var foo = { q } = { x = 10 };",
+ "var foo; foo = { x = 10 };", "var foo; foo = { q } = { x = 10 };",
+ "var x; ({ x = 10 });", "var q, x; ({ q } = { x = 10 });",
+ "var x; [{ x = 10 }]", "var x; (true ? { x = true } : { x = false })",
+ "var q, x; (q, { x = 10 });", "var { x = 10 } = { x = 20 };",
"var { x = 10 } = (o = { x = 20 });",
"var x; (({ x = 10 } = { x = 20 }) => x)({})",
// Not ambiguous, but uses same context data
"switch([window %= []] = []) { default: }",
- NULL,
+ nullptr,
};
RunParserSyncTest(empty_context_data, ambiguity_data, kError);
@@ -8040,60 +7421,41 @@ TEST(DestructuringAssignmentNegativeTests) {
const char* strict_context_data[][2] = {{"'use strict'; (", " = {})"},
{"'use strict'; for (", " of {}) {}"},
{"'use strict'; for (", " in {}) {}"},
- {NULL, NULL}};
- const char* strict_data[] = {"{ eval }",
- "{ arguments }",
- "{ foo: eval }",
- "{ foo: arguments }",
- "{ eval = 0 }",
- "{ arguments = 0 }",
- "{ foo: eval = 0 }",
- "{ foo: arguments = 0 }",
- "[ eval ]",
- "[ arguments ]",
- "[ eval = 0 ]",
- "[ arguments = 0 ]",
-
- // v8:4662
- "{ x: (eval) }",
- "{ x: (arguments) }",
- "{ x: (eval = 0) }",
- "{ x: (arguments = 0) }",
- "{ x: (eval) = 0 }",
- "{ x: (arguments) = 0 }",
- "[ (eval) ]",
- "[ (arguments) ]",
- "[ (eval = 0) ]",
- "[ (arguments = 0) ]",
- "[ (eval) = 0 ]",
- "[ (arguments) = 0 ]",
- "[ ...(eval) ]",
- "[ ...(arguments) ]",
- "[ ...(eval = 0) ]",
- "[ ...(arguments = 0) ]",
- "[ ...(eval) = 0 ]",
- "[ ...(arguments) = 0 ]",
-
- NULL};
+ {nullptr, nullptr}};
+ const char* strict_data[] = {
+ "{ eval }", "{ arguments }", "{ foo: eval }", "{ foo: arguments }",
+ "{ eval = 0 }", "{ arguments = 0 }", "{ foo: eval = 0 }",
+ "{ foo: arguments = 0 }", "[ eval ]", "[ arguments ]", "[ eval = 0 ]",
+ "[ arguments = 0 ]",
+
+ // v8:4662
+ "{ x: (eval) }", "{ x: (arguments) }", "{ x: (eval = 0) }",
+ "{ x: (arguments = 0) }", "{ x: (eval) = 0 }", "{ x: (arguments) = 0 }",
+ "[ (eval) ]", "[ (arguments) ]", "[ (eval = 0) ]", "[ (arguments = 0) ]",
+ "[ (eval) = 0 ]", "[ (arguments) = 0 ]", "[ ...(eval) ]",
+ "[ ...(arguments) ]", "[ ...(eval = 0) ]", "[ ...(arguments = 0) ]",
+ "[ ...(eval) = 0 ]", "[ ...(arguments) = 0 ]",
+
+ nullptr};
RunParserSyncTest(strict_context_data, strict_data, kError);
}
TEST(DestructuringDisallowPatternsInForVarIn) {
const char* context_data[][2] = {
- {"", ""}, {"function f() {", "}"}, {NULL, NULL}};
+ {"", ""}, {"function f() {", "}"}, {nullptr, nullptr}};
// clang-format off
const char* error_data[] = {
"for (let x = {} in null);",
"for (let x = {} of null);",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, error_data, kError);
// clang-format off
const char* success_data[] = {
"for (var x = {} in null);",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, success_data, kSuccess);
}
@@ -8161,22 +7523,22 @@ TEST(DefaultParametersYieldInInitializers) {
// clang-format off
const char* sloppy_function_context_data[][2] = {
{"(function f(", ") { });"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* strict_function_context_data[][2] = {
{"'use strict'; (function f(", ") { });"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* sloppy_arrow_context_data[][2] = {
{"((", ")=>{});"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* strict_arrow_context_data[][2] = {
{"'use strict'; ((", ")=>{});"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* generator_context_data[][2] = {
@@ -8188,7 +7550,7 @@ TEST(DefaultParametersYieldInInitializers) {
// And similarly for arrow functions in the parameter list.
{"'use strict'; (function *g(z = (", ") => {}) { });"},
{"(function *g(z = (", ") => {}) { });"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* parameter_data[] = {
@@ -8215,7 +7577,7 @@ TEST(DefaultParametersYieldInInitializers) {
"{x}=f(yield)",
"[x]=f(yield)",
- NULL
+ nullptr
};
// Because classes are always in strict mode, these are always errors.
@@ -8231,7 +7593,7 @@ TEST(DefaultParametersYieldInInitializers) {
"x = class { static [yield]() { } }",
"x = class { [(yield, 1)]() { } }",
"x = class { [y = (yield, 1)]() { } }",
- NULL
+ nullptr
};
// clang-format on
@@ -8247,7 +7609,7 @@ TEST(DefaultParametersYieldInInitializers) {
TEST(SpreadArray) {
const char* context_data[][2] = {
- {"'use strict';", ""}, {"", ""}, {NULL, NULL}};
+ {"'use strict';", ""}, {"", ""}, {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -8261,7 +7623,7 @@ TEST(SpreadArray) {
"[...[...a]]",
"[, ...a]",
"[, , ...a]",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -8269,7 +7631,7 @@ TEST(SpreadArray) {
TEST(SpreadArrayError) {
const char* context_data[][2] = {
- {"'use strict';", ""}, {"", ""}, {NULL, NULL}};
+ {"'use strict';", ""}, {"", ""}, {nullptr, nullptr}};
// clang-format off
const char* data[] = {
@@ -8278,7 +7640,7 @@ TEST(SpreadArrayError) {
"[..., ]",
"[..., ...]",
"[ (...a)]",
- NULL};
+ nullptr};
// clang-format on
RunParserSyncTest(context_data, data, kError);
}
@@ -8302,13 +7664,13 @@ TEST(NewTarget) {
{"class C {m() {", "}}"},
{"class C {get x() {", "}}"},
{"class C {set x(_) {", "}}"},
- {NULL}
+ {nullptr}
};
const char* bad_context_data[][2] = {
{"", ""},
{"'use strict';", ""},
- {NULL}
+ {nullptr}
};
const char* data[] = {
@@ -8320,7 +7682,7 @@ TEST(NewTarget) {
"if (1) {} else { new.target }",
"while (0) { new.target }",
"do { new.target } while (0)",
- NULL
+ nullptr
};
// clang-format on
@@ -8349,7 +7711,7 @@ TEST(ImportMetaSuccess) {
{"class C {m() {", "}}"},
{"class C {get x() {", "}}"},
{"class C {set x(_) {", "}}"},
- {NULL}
+ {nullptr}
};
const char* data[] = {
@@ -8369,28 +7731,27 @@ TEST(ImportMetaSuccess) {
"t = [...import.meta]",
"f = {...import.meta}",
"delete import.meta",
- NULL
+ nullptr
};
// clang-format on
// Making sure the same *wouldn't* parse without the flags
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0, NULL, 0,
- true, true);
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true, true);
static const ParserFlag flags[] = {
kAllowHarmonyImportMeta, kAllowHarmonyDynamicImport,
- kAllowHarmonyObjectRestSpread,
};
// 2.1.1 Static Semantics: Early Errors
// ImportMeta
// * It is an early Syntax Error if Module is not the syntactic goal symbol.
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags));
// Making sure the same wouldn't parse without the flags either
RunParserSyncTest(context_data, data, kError);
- RunModuleParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
+ RunModuleParserSyncTest(context_data, data, kSuccess, nullptr, 0, flags,
arraysize(flags));
}
@@ -8405,28 +7766,27 @@ TEST(ImportMetaFailure) {
{"({", "} = {1})"},
{"var {", " = 1} = 1"},
{"for (var ", " of [1]) {}"},
- {NULL}
+ {nullptr}
};
const char* data[] = {
"import.meta",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag flags[] = {
kAllowHarmonyImportMeta, kAllowHarmonyDynamicImport,
- kAllowHarmonyObjectRestSpread,
};
- RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0, NULL, 0,
- true, true);
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true, true);
RunParserSyncTest(context_data, data, kError);
}
@@ -8435,7 +7795,7 @@ TEST(ConstSloppy) {
const char* context_data[][2] = {
{"", ""},
{"{", "}"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* data[] = {
@@ -8443,7 +7803,7 @@ TEST(ConstSloppy) {
"for (const x = 1; x < 1; x++) {}",
"for (const x in {}) {}",
"for (const x of []) {}",
- NULL
+ nullptr
};
// clang-format on
RunParserSyncTest(context_data, data, kSuccess);
@@ -8456,7 +7816,7 @@ TEST(LetSloppy) {
{"", ""},
{"'use strict';", ""},
{"{", "}"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* data[] = {
@@ -8465,7 +7825,7 @@ TEST(LetSloppy) {
"for (let x = 1; x < 1; x++) {}",
"for (let x in {}) {}",
"for (let x of []) {}",
- NULL
+ nullptr
};
// clang-format on
@@ -8496,7 +7856,7 @@ TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
{"'use strict'; var c = { m(", ") { 'use strict'; }"},
{"'use strict'; var c = { *gm(", ") { 'use strict'; }"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* data[] = {
// TODO(@caitp): support formal parameter initializers
@@ -8513,7 +7873,7 @@ TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
"[a, b, ...rest]",
"{ bindingPattern = {} }",
"{ initializedBindingPattern } = { initializedBindingPattern: true }",
- NULL};
+ nullptr};
RunParserSyncTest(context_data, data, kError);
}
@@ -8525,7 +7885,7 @@ TEST(LetSloppyOnly) {
{"", ""},
{"{", "}"},
{"(function() {", "})()"},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* data[] = {
@@ -8539,7 +7899,7 @@ TEST(LetSloppyOnly) {
"for (var [let] in {}) {}",
"var let",
"var [let] = []",
- NULL
+ nullptr
};
// clang-format on
@@ -8573,7 +7933,7 @@ TEST(LetSloppyOnly) {
"let [l\\u0065t] = 1",
"const [l\\u0065t] = 1",
"for (let l\\u0065t in {}) {}",
- NULL
+ nullptr
};
// clang-format on
@@ -8585,12 +7945,12 @@ TEST(EscapedKeywords) {
// clang-format off
const char* sloppy_context_data[][2] = {
{"", ""},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* strict_context_data[][2] = {
{"'use strict';", ""},
- {NULL, NULL}
+ {nullptr, nullptr}
};
const char* fail_data[] = {
@@ -8673,7 +8033,7 @@ TEST(EscapedKeywords) {
"(y\\u0069eld);",
"var y\\u0069eld = 1;",
"var { y\\u0069eld } = {};",
- NULL
+ nullptr
};
// clang-format on
@@ -8686,7 +8046,7 @@ TEST(EscapedKeywords) {
"var l\\u0065t = 1;",
"l\\u0065t = 1;",
"(l\\u0065t === 1);",
- NULL
+ nullptr
};
// clang-format on
@@ -8715,7 +8075,7 @@ TEST(EscapedKeywords) {
"(st\\u0061tic);",
"var st\\u0061tic = 1;",
"var { st\\u0061tic } = {};",
- NULL};
+ nullptr};
RunParserSyncTest(sloppy_context_data, valid_data, kSuccess);
RunParserSyncTest(strict_context_data, valid_data, kError);
RunModuleParserSyncTest(strict_context_data, valid_data, kError);
@@ -8727,14 +8087,14 @@ TEST(MiscSyntaxErrors) {
const char* context_data[][2] = {
{ "'use strict'", "" },
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
"for (();;) {}",
// crbug.com/582626
"{ NaN ,chA((evarA=new t ( l = !.0[((... co -a0([1]))=> greturnkf",
- NULL
+ nullptr
};
// clang-format on
@@ -8751,8 +8111,7 @@ TEST(EscapeSequenceErrors) {
{ "`${'", "'}`" },
{ "`${\"", "\"}`" },
{ "`${`", "`}`" },
- { "f(tag`", "`);" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
"\\uABCG",
@@ -8763,7 +8122,7 @@ TEST(EscapeSequenceErrors) {
"\\u{110000",
"\\u{FFFD }",
"\\xZF",
- NULL
+ nullptr
};
// clang-format on
@@ -8776,12 +8135,12 @@ TEST(FunctionSentErrors) {
const char* context_data[][2] = {
{ "'use strict'", "" },
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
"var x = function.sent",
"function* g() { yield function.s\\u0065nt; }",
- NULL
+ nullptr
};
// clang-format on
@@ -8795,12 +8154,12 @@ TEST(NewTargetErrors) {
const char* context_data[][2] = {
{ "'use strict'", "" },
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
"var x = new.target",
"function f() { return new.t\\u0061rget; }",
- NULL
+ nullptr
};
// clang-format on
RunParserSyncTest(context_data, error_data, kError);
@@ -8813,14 +8172,14 @@ TEST(FunctionDeclarationError) {
{ "'use strict'; { ", "}" },
{"(function() { 'use strict';", "})()"},
{"(function() { 'use strict'; {", "} })()"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* sloppy_context[][2] = {
{ "", "" },
{ "{", "}" },
{"(function() {", "})()"},
{"(function() { {", "} })()"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
// Invalid in all contexts
const char* error_data[] = {
@@ -8851,7 +8210,7 @@ TEST(FunctionDeclarationError) {
"label: function* f() { }",
"if (true) async function f() { }",
"label: async function f() { }",
- NULL
+ nullptr
};
// Valid only in sloppy mode.
const char* sloppy_data[] = {
@@ -8861,7 +8220,7 @@ TEST(FunctionDeclarationError) {
"label: if (true) function f() { }",
"label: if (true) {} else function f() { }",
"label: label2: function f() { }",
- NULL
+ nullptr
};
// clang-format on
@@ -8878,14 +8237,14 @@ TEST(FunctionDeclarationError) {
const char* async_iterator_data[] = {
"if (true) async function* f() { }",
"label: async function* f() { }",
- NULL,
+ nullptr,
};
// clang-format on
static const ParserFlag flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(sloppy_context, async_iterator_data, kError, NULL, 0, flags,
- arraysize(flags));
- RunParserSyncTest(strict_context, async_iterator_data, kError, NULL, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(sloppy_context, async_iterator_data, kError, nullptr, 0,
+ flags, arraysize(flags));
+ RunParserSyncTest(strict_context, async_iterator_data, kError, nullptr, 0,
+ flags, arraysize(flags));
}
TEST(ExponentiationOperator) {
@@ -8894,7 +8253,7 @@ TEST(ExponentiationOperator) {
{ "var O = { p: 1 }, x = 10; ; if (", ") { foo(); }" },
{ "var O = { p: 1 }, x = 10; ; (", ")" },
{ "var O = { p: 1 }, x = 10; foo(", ")" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
"(delete O.p) ** 10",
@@ -8920,7 +8279,7 @@ TEST(ExponentiationOperator) {
"x++ ** 10",
"O.p-- ** 10",
"x-- ** 10",
- NULL
+ nullptr
};
// clang-format on
@@ -8933,7 +8292,7 @@ TEST(ExponentiationOperatorErrors) {
{ "var O = { p: 1 }, x = 10; ; if (", ") { foo(); }" },
{ "var O = { p: 1 }, x = 10; ; (", ")" },
{ "var O = { p: 1 }, x = 10; foo(", ")" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
"delete O.p ** 10",
@@ -8967,7 +8326,7 @@ TEST(ExponentiationOperatorErrors) {
"{ x: x **= 2 ] = { x: 2 }",
// TODO(caitp): a Call expression as LHS should be an early ReferenceError!
// "Array() **= 10",
- NULL
+ nullptr
};
// clang-format on
@@ -8979,7 +8338,7 @@ TEST(AsyncAwait) {
const char* context_data[][2] = {
{ "'use strict';", "" },
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
@@ -8999,7 +8358,7 @@ TEST(AsyncAwait) {
"function* g() { var f = async(yield); }",
"function* g() { var f = async(x = yield); }",
- NULL
+ nullptr
};
// clang-format on
@@ -9015,7 +8374,7 @@ TEST(AsyncAwait) {
{ "'use strict'; var f = async function() {", "}" },
{ "'use strict'; var f = async() => {", "}" },
{ "'use strict'; var O = { async method() {", "} }" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* body_context_data[][2] = {
@@ -9033,7 +8392,7 @@ TEST(AsyncAwait) {
{ "'use strict'; var O = { method() {", "} }" },
{ "'use strict'; var O = { *method() {", "} }" },
{ "'use strict'; var f = () => {", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* body_data[] = {
@@ -9053,7 +8412,7 @@ TEST(AsyncAwait) {
"var O = { *method(await) { return await; } };",
"(function await() {})",
- NULL
+ nullptr
};
// clang-format on
@@ -9066,12 +8425,12 @@ TEST(AsyncAwaitErrors) {
const char* context_data[][2] = {
{ "'use strict';", "" },
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* strict_context_data[][2] = {
{ "'use strict';", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
@@ -9145,7 +8504,7 @@ TEST(AsyncAwaitErrors) {
"async(...a = b) => b",
"async(...a,) => b",
"async(...a, b) => b",
- NULL
+ nullptr
};
const char* strict_error_data[] = {
@@ -9164,7 +8523,7 @@ TEST(AsyncAwaitErrors) {
// TODO(caitp): preparser needs to report duplicate parameter errors, too.
// "var f = async(dupe, dupe) => {}",
- NULL
+ nullptr
};
RunParserSyncTest(context_data, error_data, kError);
@@ -9180,7 +8539,7 @@ TEST(AsyncAwaitErrors) {
{ "'use strict'; var f = async function() {", "}" },
{ "'use strict'; var f = async() => {", "}" },
{ "'use strict'; var O = { async method() {", "} }" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* async_body_error_data[] = {
@@ -9205,7 +8564,7 @@ TEST(AsyncAwaitErrors) {
"var e = [await];",
"var e = {await};",
- NULL
+ nullptr
};
// clang-format on
@@ -9264,6 +8623,11 @@ TEST(AsyncAwaitFormalParameters) {
"x = class await {}",
"x = 1 ? class await {} : 0",
"x = async function await() {}",
+
+ "x = y[await]",
+ "x = `${await}`",
+ "x = y()[await]",
+
nullptr
};
// clang-format on
@@ -9279,26 +8643,26 @@ TEST(AsyncAwaitModule) {
// clang-format off
const char* context_data[][2] = {
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
"export default async function() { await 1; }",
"export default async function async() { await 1; }",
"export async function async() { await 1; }",
- NULL
+ nullptr
};
// clang-format on
- RunModuleParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0, NULL,
- 0, false);
+ RunModuleParserSyncTest(context_data, data, kSuccess, nullptr, 0, nullptr, 0,
+ nullptr, 0, false);
}
TEST(AsyncAwaitModuleErrors) {
// clang-format off
const char* context_data[][2] = {
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
@@ -9308,32 +8672,32 @@ TEST(AsyncAwaitModuleErrors) {
"export async function() {}",
"export async",
"export async\nfunction async() { await 1; }",
- NULL
+ nullptr
};
// clang-format on
- RunModuleParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0,
- NULL, 0, false);
+ RunModuleParserSyncTest(context_data, error_data, kError, nullptr, 0, nullptr,
+ 0, nullptr, 0, false);
}
TEST(RestrictiveForInErrors) {
// clang-format off
const char* strict_context_data[][2] = {
{ "'use strict'", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* sloppy_context_data[][2] = {
{ "", "" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* error_data[] = {
"for (const x = 0 in {});",
"for (let x = 0 in {});",
- NULL
+ nullptr
};
const char* sloppy_data[] = {
"for (var x = 0 in {});",
- NULL
+ nullptr
};
// clang-format on
@@ -9349,24 +8713,21 @@ TEST(NoDuplicateGeneratorsInBlock) {
{"{", "}"},
{"(function() { {", "} })()"},
{"(function() {'use strict'; {", "} })()"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* top_level_context_data[][2] = {
{"'use strict';", ""},
{"", ""},
{"(function() {", "})()"},
{"(function() {'use strict';", "})()"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* error_data[] = {"function* x() {} function* x() {}",
"function x() {} function* x() {}",
- "function* x() {} function x() {}", NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestrictiveGenerators};
+ "function* x() {} function x() {}", nullptr};
// The preparser doesn't enforce the restriction, so turn it off.
bool test_preparser = false;
- RunParserSyncTest(block_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags), NULL, 0, false,
- test_preparser);
- RunParserSyncTest(top_level_context_data, error_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(block_context_data, error_data, kError, nullptr, 0, nullptr,
+ 0, nullptr, 0, false, test_preparser);
+ RunParserSyncTest(top_level_context_data, error_data, kSuccess);
}
TEST(NoDuplicateAsyncFunctionInBlock) {
@@ -9375,13 +8736,13 @@ TEST(NoDuplicateAsyncFunctionInBlock) {
{"{", "}"},
{"(function() { {", "} })()"},
{"(function() {'use strict'; {", "} })()"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* top_level_context_data[][2] = {
{"'use strict';", ""},
{"", ""},
{"(function() {", "})()"},
{"(function() {'use strict';", "})()"},
- {NULL, NULL}};
+ {nullptr, nullptr}};
const char* error_data[] = {"async function x() {} async function x() {}",
"function x() {} async function x() {}",
"async function x() {} function x() {}",
@@ -9389,11 +8750,11 @@ TEST(NoDuplicateAsyncFunctionInBlock) {
"function* x() {} async function x() {}",
"async function x() {} function* x() {}",
"function* x() {} async function x() {}",
- NULL};
+ nullptr};
// The preparser doesn't enforce the restriction, so turn it off.
bool test_preparser = false;
- RunParserSyncTest(block_context_data, error_data, kError, NULL, 0, NULL, 0,
- NULL, 0, false, test_preparser);
+ RunParserSyncTest(block_context_data, error_data, kError, nullptr, 0, nullptr,
+ 0, nullptr, 0, false, test_preparser);
RunParserSyncTest(top_level_context_data, error_data, kSuccess);
}
@@ -9404,7 +8765,7 @@ TEST(TrailingCommasInParameters) {
{ "'use strict';", "" },
{ "function foo() {", "}" },
{ "function foo() {'use strict';", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
@@ -9427,7 +8788,7 @@ TEST(TrailingCommasInParameters) {
"a(...[],);",
"a(1, 2, ...[],);",
"a(...[], 2, ...[],);",
- NULL
+ nullptr
};
// clang-format on
@@ -9441,7 +8802,7 @@ TEST(TrailingCommasInParametersErrors) {
{ "'use strict';", "" },
{ "function foo() {", "}" },
{ "function foo() {'use strict';", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
@@ -9490,7 +8851,7 @@ TEST(TrailingCommasInParametersErrors) {
"(,);",
"(a,);",
"(a,b,c,);",
- NULL
+ nullptr
};
// clang-format on
@@ -9502,14 +8863,14 @@ TEST(ArgumentsRedeclaration) {
// clang-format off
const char* context_data[][2] = {
{ "function f(", ") {}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* success_data[] = {
"{arguments}",
"{arguments = false}",
"arg1, arguments",
"arg1, ...arguments",
- NULL
+ nullptr
};
// clang-format on
RunParserSyncTest(context_data, success_data, kSuccess);
@@ -9519,13 +8880,13 @@ TEST(ArgumentsRedeclaration) {
// clang-format off
const char* context_data[][2] = {
{ "function f() {", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
"const arguments = 1",
"let arguments",
"var arguments",
- NULL
+ nullptr
};
// clang-format on
RunParserSyncTest(context_data, data, kSuccess);
@@ -9809,7 +9170,7 @@ TEST(NoPessimisticContextAllocation) {
CHECK(i::parsing::ParseProgram(&info, isolate));
CHECK(i::Compiler::Analyze(&info));
- CHECK(info.literal() != NULL);
+ CHECK_NOT_NULL(info.literal());
i::Scope* scope = info.literal()->scope()->inner_scope();
DCHECK_NOT_NULL(scope);
@@ -9827,7 +9188,7 @@ TEST(NoPessimisticContextAllocation) {
TEST(EscapedStrictReservedWord) {
// Test that identifiers which are both escaped and only reserved in the
// strict mode are accepted in non-strict mode.
- const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
const char* statement_data[] = {"if (true) l\\u0065t: ;",
"function l\\u0065t() { }",
@@ -9839,7 +9200,7 @@ TEST(EscapedStrictReservedWord) {
"function packag\\u0065() {}",
"function impl\\u0065ments() {}",
"function privat\\u0065() {}",
- NULL};
+ nullptr};
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -9865,7 +9226,7 @@ TEST(ForAwaitOf) {
{ "async function f() { 'use strict'; for await\n", " ; }" },
{ "async function f() { 'use strict'; for await\n", " { } }" },
{ "async function * f() { 'use strict'; for await\n", " { } }" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* context_data2[][2] = {
@@ -9887,7 +9248,7 @@ TEST(ForAwaitOf) {
{ "async function f() { 'use strict'; let a; for await\n", " ; }" },
{ "async function f() { 'use strict'; let a; for await\n", " { } }" },
{ "async function * f() { 'use strict'; let a; for await\n", " { } }" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* expr_data[] = {
@@ -9909,7 +9270,7 @@ TEST(ForAwaitOf) {
"({\"a\": a = 1} of [])",
"({[Symbol.iterator]: a = 1} of [])",
"({0: a = 1} of [])",
- NULL
+ nullptr
};
const char* var_data[] = {
@@ -9930,7 +9291,7 @@ TEST(ForAwaitOf) {
"(var {\"a\": a = 1} of [])",
"(var {[Symbol.iterator]: a = 1} of [])",
"(var {0: a = 1} of [])",
- NULL
+ nullptr
};
const char* lexical_data[] = {
@@ -9968,25 +9329,26 @@ TEST(ForAwaitOf) {
"(const {\"a\": a = 1} of [])",
"(const {[Symbol.iterator]: a = 1} of [])",
"(const {0: a = 1} of [])",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, expr_data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data2, expr_data, kSuccess, NULL, 0, always_flags,
+ RunParserSyncTest(context_data, expr_data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(context_data2, expr_data, kSuccess, nullptr, 0,
+ always_flags, arraysize(always_flags));
- RunParserSyncTest(context_data, var_data, kSuccess, NULL, 0, always_flags,
+ RunParserSyncTest(context_data, var_data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
// TODO(marja): PreParser doesn't report early errors.
// (https://bugs.chromium.org/p/v8/issues/detail?id=2728)
- // RunParserSyncTest(context_data2, var_data, kError, NULL, 0, always_flags,
+ // RunParserSyncTest(context_data2, var_data, kError, nullptr, 0,
+ // always_flags,
// arraysize(always_flags));
- RunParserSyncTest(context_data, lexical_data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data2, lexical_data, kSuccess, NULL, 0,
+ RunParserSyncTest(context_data, lexical_data, kSuccess, nullptr, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data2, lexical_data, kSuccess, nullptr, 0,
always_flags, arraysize(always_flags));
}
@@ -10001,7 +9363,7 @@ TEST(ForAwaitOfErrors) {
{ "async function * f() { for await ", " { } }" },
{ "async function * f() { 'use strict'; for await ", " ; }" },
{ "async function * f() { 'use strict'; for await ", " { } }" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
@@ -10145,11 +9507,11 @@ TEST(ForAwaitOfErrors) {
"(const {0: a = 1} = 1 of [])",
"(const {0: a = 1}, b of [])",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -10158,7 +9520,7 @@ TEST(ForAwaitOfFunctionDeclaration) {
const char* context_data[][2] = {
{ "async function f() {", "}" },
{ "async function f() { 'use strict'; ", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* data[] = {
@@ -10169,12 +9531,12 @@ TEST(ForAwaitOfFunctionDeclaration) {
// TODO(caitp): handle async function declarations in ParseScopedStatement.
// "for await (x of []) async function a() {};",
// "for await (x of []) async function a() {}; return a;",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -10185,7 +9547,7 @@ TEST(AsyncGenerator) {
{ "(async function * gen() {", "})" },
{ "(async function * () {", "})" },
{ "({ async * gen () {", "} })" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -10268,12 +9630,12 @@ TEST(AsyncGenerator) {
"yield await // comment\n 10",
"await (yield /* comment */)",
"await (yield // comment\n)",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
always_flags, arraysize(always_flags));
}
@@ -10282,7 +9644,7 @@ TEST(AsyncGeneratorErrors) {
const char* context_data[][2] = {
{ "async function * gen() {", "}" },
{ "\"use strict\"; async function * gen() {", "}" },
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -10358,13 +9720,13 @@ TEST(AsyncGeneratorErrors) {
"for (await 'x' in {} of {});",
"class C extends yield { }",
"class C extends await { }",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError, nullptr, 0,
+ always_flags, arraysize(always_flags));
}
TEST(LexicalLoopVariable) {
@@ -10386,7 +9748,7 @@ TEST(LexicalLoopVariable) {
i::DeclarationScope::Analyze(&info);
i::DeclarationScope::AllocateScopeInfos(&info, isolate,
i::AnalyzeMode::kRegular);
- CHECK(info.literal() != NULL);
+ CHECK_NOT_NULL(info.literal());
i::DeclarationScope* script_scope = info.literal()->scope();
CHECK(script_scope->is_script_scope());
diff --git a/deps/v8/test/cctest/test-platform-linux.cc b/deps/v8/test/cctest/test-platform-linux.cc
deleted file mode 100644
index d41222e2a5..0000000000
--- a/deps/v8/test/cctest/test-platform-linux.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "src/base/platform/platform.h"
-#include "test/cctest/cctest.h"
-
-using OS = v8::base::OS;
-
-namespace v8 {
-namespace internal {
-
-TEST(OSReserveMemory) {
- size_t mem_size = 0;
- void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
- GetRandomMmapAddr(), &mem_size);
- CHECK_NE(0, mem_size);
- CHECK_NOT_NULL(mem_addr);
- size_t block_size = 4 * KB;
- CHECK(OS::CommitRegion(mem_addr, block_size, false));
- // Check whether we can write to memory.
- int* addr = static_cast<int*>(mem_addr);
- addr[KB - 1] = 2;
- CHECK(OS::UncommitRegion(mem_addr, block_size));
- OS::ReleaseRegion(mem_addr, mem_size);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-platform-win32.cc b/deps/v8/test/cctest/test-platform-win32.cc
deleted file mode 100644
index d41222e2a5..0000000000
--- a/deps/v8/test/cctest/test-platform-win32.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "src/base/platform/platform.h"
-#include "test/cctest/cctest.h"
-
-using OS = v8::base::OS;
-
-namespace v8 {
-namespace internal {
-
-TEST(OSReserveMemory) {
- size_t mem_size = 0;
- void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
- GetRandomMmapAddr(), &mem_size);
- CHECK_NE(0, mem_size);
- CHECK_NOT_NULL(mem_addr);
- size_t block_size = 4 * KB;
- CHECK(OS::CommitRegion(mem_addr, block_size, false));
- // Check whether we can write to memory.
- int* addr = static_cast<int*>(mem_addr);
- addr[KB - 1] = 2;
- CHECK(OS::UncommitRegion(mem_addr, block_size));
- OS::ReleaseRegion(mem_addr, mem_size);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index a0fbc21f46..a50a08b35f 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -7,8 +7,51 @@
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
-#ifdef V8_CC_GNU
+using OS = v8::base::OS;
+
+namespace v8 {
+namespace internal {
+
+TEST(OSAllocateAndFree) {
+ size_t page_size = OS::AllocatePageSize();
+ CHECK_NE(0, page_size);
+
+ // A large allocation, aligned at native allocation granularity.
+ const size_t kAllocationSize = 1 * MB;
+ void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize,
+ page_size, OS::MemoryPermission::kReadWrite);
+ CHECK_NOT_NULL(mem_addr);
+ CHECK(OS::Free(mem_addr, kAllocationSize));
+
+ // A large allocation, aligned significantly beyond native granularity.
+ const size_t kBigAlignment = 64 * MB;
+ void* aligned_mem_addr =
+ OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize, kBigAlignment,
+ OS::MemoryPermission::kReadWrite);
+ CHECK_NOT_NULL(aligned_mem_addr);
+ CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
+ CHECK(OS::Free(aligned_mem_addr, kAllocationSize));
+}
+TEST(OSReserveMemory) {
+ size_t page_size = OS::AllocatePageSize();
+ const size_t kAllocationSize = 1 * MB;
+ void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize,
+ page_size, OS::MemoryPermission::kReadWrite);
+ CHECK_NE(0, page_size);
+ CHECK_NOT_NULL(mem_addr);
+ size_t commit_size = OS::CommitPageSize();
+ CHECK(OS::SetPermissions(mem_addr, commit_size,
+ OS::MemoryPermission::kReadWrite));
+ // Check whether we can write to memory.
+ int* addr = static_cast<int*>(mem_addr);
+ addr[KB - 1] = 2;
+ CHECK(OS::SetPermissions(mem_addr, commit_size,
+ OS::MemoryPermission::kNoAccess));
+ CHECK(OS::Free(mem_addr, kAllocationSize));
+}
+
+#ifdef V8_CC_GNU
static uintptr_t sp_addr = 0;
void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -40,7 +83,6 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate(), static_cast<uint32_t>(sp_addr)));
}
-
TEST(StackAlignment) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -49,7 +91,7 @@ TEST(StackAlignment) {
global_template->Set(v8_str("get_stack_pointer"),
v8::FunctionTemplate::New(isolate, GetStackPointer));
- LocalContext env(NULL, global_template);
+ LocalContext env(nullptr, global_template);
CompileRun(
"function foo() {"
" return get_stack_pointer();"
@@ -61,10 +103,12 @@ TEST(StackAlignment) {
.ToLocalChecked());
v8::Local<v8::Value> result =
- foo->Call(isolate->GetCurrentContext(), global_object, 0, NULL)
+ foo->Call(isolate->GetCurrentContext(), global_object, 0, nullptr)
.ToLocalChecked();
CHECK_EQ(0u, result->Uint32Value(isolate->GetCurrentContext()).FromJust() %
- v8::base::OS::ActivationFrameAlignment());
+ OS::ActivationFrameAlignment());
}
-
#endif // V8_CC_GNU
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 6a16cca906..46c0c4e132 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -90,17 +90,16 @@ class ProfileTreeTestHelper {
explicit ProfileTreeTestHelper(const ProfileTree* tree)
: tree_(tree) { }
- ProfileNode* Walk(CodeEntry* entry1,
- CodeEntry* entry2 = NULL,
- CodeEntry* entry3 = NULL) {
+ ProfileNode* Walk(CodeEntry* entry1, CodeEntry* entry2 = nullptr,
+ CodeEntry* entry3 = nullptr) {
ProfileNode* node = tree_->root();
node = node->FindChild(entry1);
- if (node == NULL) return NULL;
- if (entry2 != NULL) {
+ if (node == nullptr) return nullptr;
+ if (entry2 != nullptr) {
node = node->FindChild(entry2);
- if (node == NULL) return NULL;
+ if (node == nullptr) return nullptr;
}
- if (entry3 != NULL) {
+ if (entry3 != nullptr) {
node = node->FindChild(entry3);
}
return node;
@@ -124,7 +123,8 @@ TEST(ProfileTreeAddPathFromEnd) {
CHECK(!helper.Walk(&entry2));
CHECK(!helper.Walk(&entry3));
- CodeEntry* path[] = {NULL, &entry3, NULL, &entry2, NULL, NULL, &entry1, NULL};
+ CodeEntry* path[] = {nullptr, &entry3, nullptr, &entry2,
+ nullptr, nullptr, &entry1, nullptr};
std::vector<CodeEntry*> path_vec(path, path + arraysize(path));
tree.AddPathFromEnd(path_vec);
CHECK(!helper.Walk(&entry2));
@@ -500,7 +500,7 @@ static const ProfileNode* PickChild(const ProfileNode* parent,
for (const ProfileNode* child : *parent->children()) {
if (strcmp(child->entry()->name(), name) == 0) return child;
}
- return NULL;
+ return nullptr;
}
@@ -578,7 +578,7 @@ static const v8::CpuProfileNode* PickChild(const v8::CpuProfileNode* parent,
child->GetFunctionName());
if (strcmp(*function_name, name) == 0) return child;
}
- return NULL;
+ return nullptr;
}
diff --git a/deps/v8/test/cctest/test-random-number-generator.cc b/deps/v8/test/cctest/test-random-number-generator.cc
index 4fff8e906f..5b13bda3f9 100644
--- a/deps/v8/test/cctest/test-random-number-generator.cc
+++ b/deps/v8/test/cctest/test-random-number-generator.cc
@@ -126,7 +126,7 @@ void RandomBitCorrelation(int random_bit) {
// For 1 degree of freedom this corresponds to 1 in a million. We are
// running ~8000 tests, so that would be surprising.
- CHECK(chi_squared <= 24);
+ CHECK_LE(chi_squared, 24);
// If the predictor bit is a fixed 0 or 1 then it makes no sense to
// repeat the test with a different age.
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 5d28a577ff..c2e6526f40 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -116,7 +116,7 @@ static void CheckParseEq(const char* input, const char* expected,
if (unicode) flags |= JSRegExp::kUnicode;
CHECK(v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), &zone,
&reader, flags, &result));
- CHECK(result.tree != NULL);
+ CHECK_NOT_NULL(result.tree);
CHECK(result.error.is_null());
std::ostringstream os;
result.tree->Print(os, &zone);
@@ -134,7 +134,7 @@ static bool CheckSimple(const char* input) {
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
- CHECK(result.tree != NULL);
+ CHECK_NOT_NULL(result.tree);
CHECK(result.error.is_null());
return result.simple;
}
@@ -152,7 +152,7 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
- CHECK(result.tree != NULL);
+ CHECK_NOT_NULL(result.tree);
CHECK(result.error.is_null());
int min_match = result.tree->min_match();
int max_match = result.tree->max_match();
@@ -169,10 +169,7 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
CHECK_EQ(max, min_max.max_match); \
}
-
-void TestRegExpParser(bool lookbehind) {
- FLAG_harmony_regexp_lookbehind = lookbehind;
-
+TEST(RegExpParser) {
CHECK_PARSE_ERROR("?");
CheckParseEq("abc", "'abc'");
@@ -204,13 +201,8 @@ void TestRegExpParser(bool lookbehind) {
CheckParseEq("foo|(bar|baz)|quux", "(| 'foo' (^ (| 'bar' 'baz')) 'quux')");
CheckParseEq("foo(?=bar)baz", "(: 'foo' (-> + 'bar') 'baz')");
CheckParseEq("foo(?!bar)baz", "(: 'foo' (-> - 'bar') 'baz')");
- if (lookbehind) {
- CheckParseEq("foo(?<=bar)baz", "(: 'foo' (<- + 'bar') 'baz')");
- CheckParseEq("foo(?<!bar)baz", "(: 'foo' (<- - 'bar') 'baz')");
- } else {
- CHECK_PARSE_ERROR("foo(?<=bar)baz");
- CHECK_PARSE_ERROR("foo(?<!bar)baz");
- }
+ CheckParseEq("foo(?<=bar)baz", "(: 'foo' (<- + 'bar') 'baz')");
+ CheckParseEq("foo(?<!bar)baz", "(: 'foo' (<- - 'bar') 'baz')");
CheckParseEq("()", "(^ %)");
CheckParseEq("(?=)", "(-> + %)");
CheckParseEq("[]", "^[\\x00-\\u{10ffff}]"); // Doesn't compile on windows
@@ -294,10 +286,8 @@ void TestRegExpParser(bool lookbehind) {
"(: (-> - (: (<- 1) (^ 'a') (<- 1))) (<- 1))");
CheckParseEq("\\1\\2(a(?:\\1(b\\1\\2))\\2)\\1",
"(: (<- 1) (<- 2) (^ (: 'a' (?: (^ 'b')) (<- 2))) (<- 1))");
- if (lookbehind) {
- CheckParseEq("\\1\\2(a(?<=\\1(b\\1\\2))\\2)\\1",
- "(: (<- 1) (<- 2) (^ (: 'a' (<- + (^ 'b')) (<- 2))) (<- 1))");
- }
+ CheckParseEq("\\1\\2(a(?<=\\1(b\\1\\2))\\2)\\1",
+ "(: (<- 1) (<- 2) (^ (: 'a' (<- + (^ 'b')) (<- 2))) (<- 1))");
CheckParseEq("[\\0]", "[\\x00]");
CheckParseEq("[\\11]", "[\\x09]");
CheckParseEq("[\\11a]", "[\\x09 a]");
@@ -460,16 +450,6 @@ void TestRegExpParser(bool lookbehind) {
FLAG_harmony_regexp_named_captures = false;
}
-
-TEST(ParserWithLookbehind) {
- TestRegExpParser(true); // Lookbehind enabled.
-}
-
-
-TEST(ParserWithoutLookbehind) {
- TestRegExpParser(true); // Lookbehind enabled.
-}
-
TEST(ParserRegression) {
CheckParseEq("[A-Z$-][x]", "(! [A-Z $ -] [x])");
CheckParseEq("a{3,4*}", "(: 'a{3,' (# 0 - g '4') '}')");
@@ -487,7 +467,7 @@ static void ExpectError(const char* input, const char* expected,
if (unicode) flags |= JSRegExp::kUnicode;
CHECK(!v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), &zone,
&reader, flags, &result));
- CHECK(result.tree == NULL);
+ CHECK_NULL(result.tree);
CHECK(!result.error.is_null());
std::unique_ptr<char[]> str = result.error->ToCString(ALLOW_NULLS);
CHECK_EQ(0, strcmp(expected, str.get()));
@@ -604,7 +584,7 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
if (unicode) flags = JSRegExp::kUnicode;
if (!v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), zone,
&reader, flags, &compile_data))
- return NULL;
+ return nullptr;
Handle<String> pattern = isolate->factory()
->NewStringFromUtf8(CStrVector(input))
.ToHandleChecked();
@@ -883,13 +863,13 @@ TEST(MacroAssemblerNativeSimple) {
Label fail, backtrack;
m.PushBacktrack(&fail);
- m.CheckNotAtStart(0, NULL);
- m.LoadCurrentCharacter(2, NULL);
- m.CheckNotCharacter('o', NULL);
- m.LoadCurrentCharacter(1, NULL, false);
- m.CheckNotCharacter('o', NULL);
- m.LoadCurrentCharacter(0, NULL, false);
- m.CheckNotCharacter('f', NULL);
+ m.CheckNotAtStart(0, nullptr);
+ m.LoadCurrentCharacter(2, nullptr);
+ m.CheckNotCharacter('o', nullptr);
+ m.LoadCurrentCharacter(1, nullptr, false);
+ m.CheckNotCharacter('o', nullptr);
+ m.LoadCurrentCharacter(0, nullptr, false);
+ m.CheckNotCharacter('f', nullptr);
m.WriteCurrentPositionToRegister(0, 0);
m.WriteCurrentPositionToRegister(1, 3);
m.AdvanceCurrentPosition(3);
@@ -950,13 +930,13 @@ TEST(MacroAssemblerNativeSimpleUC16) {
Label fail, backtrack;
m.PushBacktrack(&fail);
- m.CheckNotAtStart(0, NULL);
- m.LoadCurrentCharacter(2, NULL);
- m.CheckNotCharacter('o', NULL);
- m.LoadCurrentCharacter(1, NULL, false);
- m.CheckNotCharacter('o', NULL);
- m.LoadCurrentCharacter(0, NULL, false);
- m.CheckNotCharacter('f', NULL);
+ m.CheckNotAtStart(0, nullptr);
+ m.LoadCurrentCharacter(2, nullptr);
+ m.CheckNotCharacter('o', nullptr);
+ m.LoadCurrentCharacter(1, nullptr, false);
+ m.CheckNotCharacter('o', nullptr);
+ m.LoadCurrentCharacter(0, nullptr, false);
+ m.CheckNotCharacter('f', nullptr);
m.WriteCurrentPositionToRegister(0, 0);
m.WriteCurrentPositionToRegister(1, 3);
m.AdvanceCurrentPosition(3);
@@ -1027,7 +1007,7 @@ TEST(MacroAssemblerNativeBacktrack) {
m.Succeed();
m.Bind(&fail);
m.PushBacktrack(&backtrack);
- m.LoadCurrentCharacter(10, NULL);
+ m.LoadCurrentCharacter(10, nullptr);
m.Succeed();
m.Bind(&backtrack);
m.Fail();
@@ -1040,13 +1020,8 @@ TEST(MacroAssemblerNativeBacktrack) {
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- NativeRegExpMacroAssembler::Result result =
- Execute(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- NULL);
+ NativeRegExpMacroAssembler::Result result = Execute(
+ *code, *input, 0, start_adr, start_adr + input->length(), nullptr);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -1194,22 +1169,13 @@ TEST(MacroAssemblernativeAtStart) {
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- NativeRegExpMacroAssembler::Result result =
- Execute(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- NULL);
+ NativeRegExpMacroAssembler::Result result = Execute(
+ *code, *input, 0, start_adr, start_adr + input->length(), nullptr);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
- result = Execute(*code,
- *input,
- 3,
- start_adr + 3,
- start_adr + input->length(),
- NULL);
+ result = Execute(*code, *input, 3, start_adr + 3, start_adr + input->length(),
+ nullptr);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
}
@@ -1401,13 +1367,8 @@ TEST(MacroAssemblerStackOverflow) {
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- NativeRegExpMacroAssembler::Result result =
- Execute(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- NULL);
+ NativeRegExpMacroAssembler::Result result = Execute(
+ *code, *input, 0, start_adr, start_adr + input->length(), nullptr);
CHECK_EQ(NativeRegExpMacroAssembler::EXCEPTION, result);
CHECK(isolate->has_pending_exception());
@@ -1481,13 +1442,13 @@ TEST(MacroAssembler) {
m.Fail();
m.Bind(&start);
m.PushBacktrack(&fail);
- m.CheckNotAtStart(0, NULL);
- m.LoadCurrentCharacter(0, NULL);
- m.CheckNotCharacter('f', NULL);
- m.LoadCurrentCharacter(1, NULL);
- m.CheckNotCharacter('o', NULL);
- m.LoadCurrentCharacter(2, NULL);
- m.CheckNotCharacter('o', NULL);
+ m.CheckNotAtStart(0, nullptr);
+ m.LoadCurrentCharacter(0, nullptr);
+ m.CheckNotCharacter('f', nullptr);
+ m.LoadCurrentCharacter(1, nullptr);
+ m.CheckNotCharacter('o', nullptr);
+ m.LoadCurrentCharacter(2, nullptr);
+ m.CheckNotCharacter('o', nullptr);
m.WriteCurrentPositionToRegister(0, 0);
m.WriteCurrentPositionToRegister(1, 3);
m.WriteCurrentPositionToRegister(2, 1);
@@ -1572,7 +1533,7 @@ TEST(AddInverseToTable) {
static uc32 canonicalize(uc32 c) {
unibrow::uchar canon[unibrow::Ecma262Canonicalize::kMaxWidth];
- int count = unibrow::Ecma262Canonicalize::Convert(c, '\0', canon, NULL);
+ int count = unibrow::Ecma262Canonicalize::Convert(c, '\0', canon, nullptr);
if (count == 0) {
return c;
} else {
@@ -1616,7 +1577,7 @@ TEST(LatinCanonicalize) {
static uc32 CanonRangeEnd(uc32 c) {
unibrow::uchar canon[unibrow::CanonicalizationRange::kMaxWidth];
- int count = unibrow::CanonicalizationRange::Convert(c, '\0', canon, NULL);
+ int count = unibrow::CanonicalizationRange::Convert(c, '\0', canon, nullptr);
if (count == 0) {
return c;
} else {
@@ -1729,8 +1690,7 @@ TEST(CharacterRangeCaseIndependence) {
static bool InClass(uc32 c, ZoneList<CharacterRange>* ranges) {
- if (ranges == NULL)
- return false;
+ if (ranges == nullptr) return false;
for (int i = 0; i < ranges->length(); i++) {
CharacterRange range = ranges->at(i);
if (range.from() <= c && c <= range.to())
@@ -1939,7 +1899,7 @@ TEST(Graph) {
namespace {
-int* global_use_counts = NULL;
+int* global_use_counts = nullptr;
void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
index fe01199f5f..d1931d87dd 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -27,7 +27,7 @@ TEST(WasmRelocationArmContextReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
- DummyStaticFunction(NULL);
+ DummyStaticFunction(nullptr);
int32_t imm = 1234567;
Assembler assm(isolate, buffer, sizeof buffer);
@@ -55,6 +55,8 @@ TEST(WasmRelocationArmContextReference) {
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
isolate, it.rinfo()->wasm_context_reference() + offset,
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
index 7448250ed6..0e2b09e43a 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -31,7 +31,7 @@ TEST(WasmRelocationArm64ContextReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
- DummyStaticFunction(NULL);
+ DummyStaticFunction(nullptr);
int64_t imm = 1234567;
MacroAssembler masm(isolate, buffer, sizeof buffer,
@@ -60,6 +60,8 @@ TEST(WasmRelocationArm64ContextReference) {
// Relocating reference by offset
int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
isolate, it.rinfo()->wasm_context_reference() + offset,
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
index a59dbfec8a..829e0685a8 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -32,7 +32,7 @@ TEST(WasmRelocationIa32ContextReference) {
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
- DummyStaticFunction(NULL);
+ DummyStaticFunction(nullptr);
int32_t imm = 1234567;
__ mov(eax, Immediate(reinterpret_cast<Address>(imm),
@@ -64,6 +64,8 @@ TEST(WasmRelocationIa32ContextReference) {
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
isolate, it.rinfo()->wasm_context_reference() + offset,
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
index b886c6fde2..d5c29604dd 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -30,7 +30,7 @@ TEST(WasmRelocationX64ContextReference) {
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
- DummyStaticFunction(NULL);
+ DummyStaticFunction(nullptr);
int64_t imm = 1234567;
__ movq(rax, imm, RelocInfo::WASM_CONTEXT_REFERENCE);
@@ -60,6 +60,8 @@ TEST(WasmRelocationX64ContextReference) {
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
isolate, it.rinfo()->wasm_context_reference() + offset,
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 9d17e8bed1..a73c9765df 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -39,7 +39,7 @@ class SimulatorHelper {
->thread_local_top()
->simulator_;
// Check if there is active simulator.
- return simulator_ != NULL;
+ return simulator_ != nullptr;
}
inline void FillRegisters(v8::RegisterState* state) {
@@ -104,26 +104,26 @@ class SamplingTestHelper {
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate_);
global->Set(v8_str("CollectSample"),
v8::FunctionTemplate::New(isolate_, CollectSample));
- LocalContext env(isolate_, NULL, global);
+ LocalContext env(isolate_, nullptr, global);
isolate_->SetJitCodeEventHandler(v8::kJitCodeEventDefault,
JitCodeEventHandler);
CompileRun(v8_str(test_function.c_str()));
}
~SamplingTestHelper() {
- isolate_->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
- instance_ = NULL;
+ isolate_->SetJitCodeEventHandler(v8::kJitCodeEventDefault, nullptr);
+ instance_ = nullptr;
}
Sample& sample() { return sample_; }
const CodeEventEntry* FindEventEntry(const void* address) {
CodeEntries::const_iterator it = code_entries_.upper_bound(address);
- if (it == code_entries_.begin()) return NULL;
+ if (it == code_entries_.begin()) return nullptr;
const CodeEventEntry& entry = (--it)->second;
const void* code_end =
static_cast<const uint8_t*>(entry.code_start) + entry.code_len;
- return address < code_end ? &entry : NULL;
+ return address < code_end ? &entry : nullptr;
}
private:
@@ -143,7 +143,7 @@ class SamplingTestHelper {
if (!simulator_helper.Init(isolate_)) return;
simulator_helper.FillRegisters(&state);
#else
- state.pc = NULL;
+ state.pc = nullptr;
state.fp = &state;
state.sp = &state;
#endif
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 6cd9110856..20aa3f008c 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -60,7 +60,6 @@ namespace internal {
void DisableLazyDeserialization() {
// UNINITIALIZED tests do not set up the isolate sufficiently for lazy
// deserialization to work.
- // TODO(jgruber): Fix this. It may just be enough to set the snapshot_blob.
FLAG_lazy_deserialization = false;
}
@@ -79,7 +78,7 @@ class TestIsolate : public Isolate {
isolate->setup_delegate_ = new SetupIsolateDelegateForTests(true);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Isolate::Scope isolate_scope(v8_isolate);
- isolate->Init(NULL);
+ isolate->Init(nullptr);
return v8_isolate;
}
// Wraps v8::Isolate::New, but with a TestIsolate under the hood.
@@ -138,7 +137,7 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
ser.SerializeStrongReferences();
i::BuiltinSerializer builtin_serializer(internal_isolate, &ser);
- builtin_serializer.SerializeBuiltins();
+ builtin_serializer.SerializeBuiltinsAndHandlers();
ser.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(&ser);
@@ -165,7 +164,7 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
}
v8::Isolate* InitializeFromBlob(StartupBlobs& blobs) {
- v8::Isolate* v8_isolate = NULL;
+ v8::Isolate* v8_isolate = nullptr;
{
SnapshotData startup_snapshot(blobs.startup);
BuiltinSnapshotData builtin_snapshot(blobs.builtin);
@@ -194,7 +193,6 @@ static void SanityCheck(v8::Isolate* v8_isolate) {
#endif
CHECK(isolate->global_object()->IsJSObject());
CHECK(isolate->native_context()->IsContext());
- CHECK(isolate->heap()->string_table()->IsStringTable());
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
}
@@ -205,7 +203,6 @@ UNINITIALIZED_TEST(StartupSerializerOnce) {
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
isolate = Deserialize(blobs);
- blobs.Dispose();
{
v8::HandleScope handle_scope(isolate);
v8::Isolate::Scope isolate_scope(isolate);
@@ -216,6 +213,7 @@ UNINITIALIZED_TEST(StartupSerializerOnce) {
SanityCheck(isolate);
}
isolate->Dispose();
+ blobs.Dispose();
}
UNINITIALIZED_TEST(StartupSerializerRootMapDependencies) {
@@ -275,7 +273,6 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
isolate->Dispose();
blobs1.Dispose();
isolate = Deserialize(blobs2);
- blobs2.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -286,6 +283,7 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
SanityCheck(isolate);
}
isolate->Dispose();
+ blobs2.Dispose();
}
UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
@@ -295,7 +293,6 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
isolate = Deserialize(blobs);
- blobs.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -312,6 +309,7 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
CHECK_EQ(4, result.FromJust());
}
isolate->Dispose();
+ blobs.Dispose();
}
UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
@@ -323,7 +321,6 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
isolate->Dispose();
blobs1.Dispose();
isolate = Deserialize(blobs2);
- blobs2.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -339,6 +336,7 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
CHECK_EQ(4, result.FromJust());
}
isolate->Dispose();
+ blobs2.Dispose();
}
static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
@@ -385,7 +383,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
partial_serializer.Serialize(&raw_context, false);
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
- builtin_serializer.SerializeBuiltins();
+ builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -411,7 +409,6 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
StartupBlobs blobs = {startup_blob, builtin_blob};
v8::Isolate* v8_isolate = InitializeFromBlob(blobs);
CHECK(v8_isolate);
- blobs.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -444,6 +441,7 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
partial_blob.Dispose();
}
v8_isolate->Dispose();
+ blobs.Dispose();
}
static void PartiallySerializeCustomContext(
@@ -510,7 +508,7 @@ static void PartiallySerializeCustomContext(
partial_serializer.Serialize(&raw_context, false);
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
- builtin_serializer.SerializeBuiltins();
+ builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -536,7 +534,6 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
StartupBlobs blobs = {startup_blob, builtin_blob};
v8::Isolate* v8_isolate = InitializeFromBlob(blobs);
CHECK(v8_isolate);
- blobs.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -618,6 +615,7 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
partial_blob.Dispose();
}
v8_isolate->Dispose();
+ blobs.Dispose();
}
TEST(CustomSnapshotDataBlob1) {
@@ -636,7 +634,6 @@ TEST(CustomSnapshotDataBlob1) {
v8::Isolate::Scope i_scope(isolate1);
v8::HandleScope h_scope(isolate1);
v8::Local<v8::Context> context = v8::Context::New(isolate1);
- delete[] data1.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
v8::Maybe<int32_t> result =
CompileRun("f()")->Int32Value(isolate1->GetCurrentContext());
@@ -644,6 +641,7 @@ TEST(CustomSnapshotDataBlob1) {
CHECK(CompileRun("this.g")->IsUndefined());
}
isolate1->Dispose();
+ delete[] data1.data; // We can dispose of the snapshot blob now.
}
struct InternalFieldData {
@@ -686,8 +684,10 @@ void TestInt32Expectations(const Int32Expectations& expectations) {
}
}
-void TypedArrayTestHelper(const char* code,
- const Int32Expectations& expectations) {
+void TypedArrayTestHelper(
+ const char* code, const Int32Expectations& expectations,
+ const char* code_to_run_after_restore = nullptr,
+ const Int32Expectations& after_restore_expectations = Int32Expectations()) {
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
v8::StartupData blob;
@@ -717,16 +717,20 @@ void TypedArrayTestHelper(const char* code,
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(
- isolate, NULL, v8::MaybeLocal<v8::ObjectTemplate>(),
+ isolate, nullptr, v8::MaybeLocal<v8::ObjectTemplate>(),
v8::MaybeLocal<v8::Value>(),
v8::DeserializeInternalFieldsCallback(DeserializeInternalFields,
reinterpret_cast<void*>(2017)));
- delete[] blob.data; // We can dispose of the snapshot blob now.
CHECK(deserialized_data.empty()); // We do not expect any embedder data.
v8::Context::Scope c_scope(context);
TestInt32Expectations(expectations);
+ if (code_to_run_after_restore) {
+ CompileRun(code_to_run_after_restore);
+ }
+ TestInt32Expectations(after_restore_expectations);
}
isolate->Dispose();
+ delete[] blob.data; // We can dispose of the snapshot blob now.
}
TEST(CustomSnapshotDataBlobWithOffHeapTypedArray) {
@@ -771,6 +775,25 @@ TEST(CustomSnapshotDataBlobSharedArrayBuffer) {
TypedArrayTestHelper(code, expectations);
}
+TEST(CustomSnapshotDataBlobArrayBufferWithOffset) {
+ const char* code =
+ "var x = new Int32Array([12, 24, 48, 96]);"
+ "var y = new Int32Array(x.buffer, 4, 2)";
+ Int32Expectations expectations = {
+ std::make_tuple("x[1]", 24), std::make_tuple("x[2]", 48),
+ std::make_tuple("y[0]", 24), std::make_tuple("y[1]", 48),
+ };
+
+ // Verify that the typed arrays use the same buffer (not independent copies).
+ const char* code_to_run_after_restore = "x[2] = 57; y[0] = 42;";
+ Int32Expectations after_restore_expectations = {
+ std::make_tuple("x[1]", 42), std::make_tuple("y[1]", 57),
+ };
+
+ TypedArrayTestHelper(code, expectations, code_to_run_after_restore,
+ after_restore_expectations);
+}
+
TEST(CustomSnapshotDataBlobDataView) {
const char* code =
"var x = new Int8Array([1, 2, 3, 4]);"
@@ -790,7 +813,6 @@ TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
Int32Expectations expectations = {std::make_tuple("x.buffer.byteLength", 0),
std::make_tuple("x.length", 0)};
- DisableLazyDeserialization();
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
v8::StartupData blob;
@@ -820,11 +842,10 @@ TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(
- isolate, NULL, v8::MaybeLocal<v8::ObjectTemplate>(),
+ isolate, nullptr, v8::MaybeLocal<v8::ObjectTemplate>(),
v8::MaybeLocal<v8::Value>(),
v8::DeserializeInternalFieldsCallback(DeserializeInternalFields,
reinterpret_cast<void*>(2017)));
- delete[] blob.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
TestInt32Expectations(expectations);
@@ -837,6 +858,7 @@ TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
FixedTypedArrayBase::cast(array->elements())->external_pointer());
}
isolate->Dispose();
+ delete[] blob.data; // We can dispose of the snapshot blob now.
}
i::Handle<i::JSArrayBuffer> GetBufferFromTypedArray(
@@ -890,11 +912,10 @@ TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(
- isolate, NULL, v8::MaybeLocal<v8::ObjectTemplate>(),
+ isolate, nullptr, v8::MaybeLocal<v8::ObjectTemplate>(),
v8::MaybeLocal<v8::Value>(),
v8::DeserializeInternalFieldsCallback(DeserializeInternalFields,
reinterpret_cast<void*>(2017)));
- delete[] blob.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
TestInt32Expectations(expectations);
@@ -911,6 +932,7 @@ TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
CHECK_NOT_NULL(buffer->backing_store());
}
isolate->Dispose();
+ delete[] blob.data; // We can dispose of the snapshot blob now.
}
TEST(CustomSnapshotDataBlob2) {
@@ -931,7 +953,6 @@ TEST(CustomSnapshotDataBlob2) {
v8::Isolate::Scope i_scope(isolate2);
v8::HandleScope h_scope(isolate2);
v8::Local<v8::Context> context = v8::Context::New(isolate2);
- delete[] data2.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
v8::Maybe<int32_t> result =
CompileRun("f()")->Int32Value(isolate2->GetCurrentContext());
@@ -940,6 +961,7 @@ TEST(CustomSnapshotDataBlob2) {
CHECK_EQ(43, result.FromJust());
}
isolate2->Dispose();
+ delete[] data2.data; // We can dispose of the snapshot blob now.
}
static void SerializationFunctionTemplate(
@@ -982,8 +1004,7 @@ TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
property->Set(isolate, "bar", function);
global->Set(isolate, "foo", property);
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
- delete[] data.data; // We can dispose of the snapshot blob now.
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope c_scope(context);
v8::Local<v8::Value> result = CompileRun(source2);
v8::Maybe<bool> compare = v8_str("42")->Equals(
@@ -991,6 +1012,7 @@ TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
CHECK(compare.FromJust());
}
isolate->Dispose();
+ delete[] data.data; // We can dispose of the snapshot blob now.
}
TEST(CustomSnapshotDataBlobWithLocker) {
@@ -1024,12 +1046,12 @@ TEST(CustomSnapshotDataBlobWithLocker) {
v8::Isolate::Scope i_scope(isolate1);
v8::HandleScope h_scope(isolate1);
v8::Local<v8::Context> context = v8::Context::New(isolate1);
- delete[] data1.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
v8::Maybe<int32_t> result = CompileRun("f()")->Int32Value(context);
CHECK_EQ(42, result.FromJust());
}
isolate1->Dispose();
+ delete[] data1.data; // We can dispose of the snapshot blob now.
}
TEST(CustomSnapshotDataBlobStackOverflow) {
@@ -1056,7 +1078,6 @@ TEST(CustomSnapshotDataBlobStackOverflow) {
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
- delete[] data.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
const char* test =
"var sum = 0;"
@@ -1070,6 +1091,7 @@ TEST(CustomSnapshotDataBlobStackOverflow) {
CHECK_EQ(9999 * 5000, result.FromJust());
}
isolate->Dispose();
+ delete[] data.data; // We can dispose of the snapshot blob now.
}
bool IsCompiled(const char* name) {
@@ -1080,7 +1102,6 @@ bool IsCompiled(const char* name) {
}
TEST(SnapshotDataBlobWithWarmup) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
const char* warmup = "Math.abs(1); Math.random = 1;";
@@ -1098,19 +1119,18 @@ TEST(SnapshotDataBlobWithWarmup) {
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
- delete[] warm.data;
v8::Context::Scope c_scope(context);
// Running the warmup script has effect on whether functions are
// pre-compiled, but does not pollute the context.
CHECK(IsCompiled("Math.abs"));
- CHECK(!IsCompiled("String.raw"));
+ CHECK(IsCompiled("String.raw"));
CHECK(CompileRun("Math.random")->IsFunction());
}
isolate->Dispose();
+ delete[] warm.data;
}
TEST(CustomSnapshotDataBlobWithWarmup) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
const char* source =
"function f() { return Math.abs(1); }\n"
@@ -1133,18 +1153,18 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
- delete[] warm.data;
v8::Context::Scope c_scope(context);
// Running the warmup script has effect on whether functions are
// pre-compiled, but does not pollute the context.
CHECK(IsCompiled("f"));
CHECK(IsCompiled("Math.abs"));
CHECK(!IsCompiled("g"));
- CHECK(!IsCompiled("String.raw"));
+ CHECK(IsCompiled("String.raw"));
CHECK(!IsCompiled("Array.prototype.sort"));
CHECK_EQ(5, CompileRun("a")->Int32Value(context).FromJust());
}
isolate->Dispose();
+ delete[] warm.data;
}
TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
@@ -1170,12 +1190,12 @@ TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
- delete[] data.data; // We can dispose of the snapshot blob now.
v8::Context::Scope c_scope(context);
CHECK_EQ(7, CompileRun("a[0]()")->Int32Value(context).FromJust());
}
isolate->Dispose();
source.Dispose();
+ delete[] data.data; // We can dispose of the snapshot blob now.
}
TEST(TestThatAlwaysSucceeds) {
@@ -1193,7 +1213,8 @@ int CountBuiltins() {
HeapIterator iterator(CcTest::heap());
DisallowHeapAllocation no_allocation;
int counter = 0;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
if (obj->IsCode() && Code::cast(obj)->kind() == Code::BUILTIN) counter++;
}
return counter;
@@ -1205,8 +1226,9 @@ static Handle<SharedFunctionInfo> CompileScript(
ScriptData** cached_data, v8::ScriptCompiler::CompileOptions options) {
return Compiler::GetSharedFunctionInfoForScript(
source, name, 0, 0, v8::ScriptOriginOptions(), Handle<Object>(),
- Handle<Context>(isolate->native_context()), NULL, cached_data,
- options, NOT_NATIVES_CODE, Handle<FixedArray>())
+ Handle<Context>(isolate->native_context()), nullptr, cached_data,
+ options, ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
+ Handle<FixedArray>())
.ToHandleChecked();
}
@@ -1228,7 +1250,7 @@ TEST(CodeSerializerOnePlusOne) {
CHECK(!orig_source.is_identical_to(copy_source));
CHECK(orig_source->Equals(*copy_source));
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, orig_source, Handle<String>(), &cache,
@@ -1251,7 +1273,7 @@ TEST(CodeSerializerOnePlusOne) {
copy, isolate->native_context());
Handle<JSObject> global(isolate->context()->global_object());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK_EQ(2, Handle<Smi>::cast(copy_result)->value());
CHECK_EQ(builtins_count, CountBuiltins());
@@ -1270,7 +1292,7 @@ TEST(CodeSerializerPromotedToCompilationCache) {
Handle<String> src = isolate->factory()
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
CompileScript(isolate, src, src, &cache,
v8::ScriptCompiler::kProduceCodeCache);
@@ -1281,7 +1303,7 @@ TEST(CodeSerializerPromotedToCompilationCache) {
InfoVectorPair pair = isolate->compilation_cache()->LookupScript(
src, src, 0, 0, v8::ScriptOriginOptions(), isolate->native_context(),
- SLOPPY);
+ LanguageMode::kSloppy);
CHECK(pair.shared() == *copy);
@@ -1307,7 +1329,7 @@ TEST(CodeSerializerInternalizedString) {
CHECK(orig_source->Equals(*copy_source));
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, orig_source, Handle<String>(), &cache,
@@ -1316,7 +1338,7 @@ TEST(CodeSerializerInternalizedString) {
isolate->factory()->NewFunctionFromSharedFunctionInfo(
orig, isolate->native_context());
Handle<Object> orig_result =
- Execution::Call(isolate, orig_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, orig_fun, global, 0, nullptr).ToHandleChecked();
CHECK(orig_result->IsInternalizedString());
int builtins_count = CountBuiltins();
@@ -1335,7 +1357,7 @@ TEST(CodeSerializerInternalizedString) {
copy, isolate->native_context());
CHECK_NE(*orig_fun, *copy_fun);
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK(orig_result.is_identical_to(copy_result));
Handle<String> expected =
isolate->factory()->NewStringFromAsciiChecked("string1");
@@ -1365,7 +1387,7 @@ TEST(CodeSerializerLargeCodeObject) {
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_str, Handle<String>(), &cache,
@@ -1386,7 +1408,7 @@ TEST(CodeSerializerLargeCodeObject) {
copy, isolate->native_context());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
int result_int;
CHECK(copy_result->ToInt32(&result_int));
@@ -1431,7 +1453,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
}
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_str, Handle<String>(), &cache,
@@ -1465,7 +1487,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
copy, isolate->native_context());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
int result_int;
CHECK(copy_result->ToInt32(&result_int));
@@ -1494,7 +1516,7 @@ TEST(CodeSerializerLargeStrings) {
.ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_str, Handle<String>(), &cache,
@@ -1513,7 +1535,7 @@ TEST(CodeSerializerLargeStrings) {
copy, isolate->native_context());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK_EQ(6 * 1999999, Handle<String>::cast(copy_result)->length());
Handle<Object> property = JSReceiver::GetDataProperty(
@@ -1562,7 +1584,7 @@ TEST(CodeSerializerThreeBigStrings) {
source_c_str).ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_str, Handle<String>(), &cache,
@@ -1580,7 +1602,7 @@ TEST(CodeSerializerThreeBigStrings) {
isolate->factory()->NewFunctionFromSharedFunctionInfo(
copy, isolate->native_context());
- USE(Execution::Call(isolate, copy_fun, global, 0, NULL));
+ USE(Execution::Call(isolate, copy_fun, global, 0, nullptr));
v8::Maybe<int32_t> result =
CompileRun("(a + b).length")
@@ -1674,7 +1696,7 @@ TEST(CodeSerializerExternalString) {
.ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_string, Handle<String>(), &cache,
@@ -1693,7 +1715,7 @@ TEST(CodeSerializerExternalString) {
copy, isolate->native_context());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK_EQ(15.0, copy_result->Number());
@@ -1731,7 +1753,7 @@ TEST(CodeSerializerLargeExternalString) {
.ToHandleChecked()).ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_str, Handle<String>(), &cache,
@@ -1749,7 +1771,7 @@ TEST(CodeSerializerLargeExternalString) {
f->NewFunctionFromSharedFunctionInfo(copy, isolate->native_context());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK_EQ(42.0, copy_result->Number());
@@ -1780,7 +1802,7 @@ TEST(CodeSerializerExternalScriptName) {
CHECK(!name->IsInternalizedString());
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = NULL;
+ ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
CompileScript(isolate, source_string, name, &cache,
@@ -1798,7 +1820,7 @@ TEST(CodeSerializerExternalScriptName) {
f->NewFunctionFromSharedFunctionInfo(copy, isolate->native_context());
Handle<Object> copy_result =
- Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+ Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK_EQ(10.0, copy_result->Number());
@@ -2093,13 +2115,14 @@ TEST(Regress503552) {
HandleScope scope(isolate);
Handle<String> source = isolate->factory()->NewStringFromAsciiChecked(
"function f() {} function g() {}");
- ScriptData* script_data = NULL;
+ ScriptData* script_data = nullptr;
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
source, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
- NULL, &script_data, v8::ScriptCompiler::kProduceCodeCache,
- NOT_NATIVES_CODE, MaybeHandle<FixedArray>())
+ nullptr, &script_data, v8::ScriptCompiler::kProduceCodeCache,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
+ MaybeHandle<FixedArray>())
.ToHandleChecked();
delete script_data;
@@ -2341,6 +2364,114 @@ TEST(SnapshotCreatorShortExternalReferences) {
delete[] blob.data;
}
+v8::StartupData CreateSnapshotWithDefaultAndCustom() {
+ v8::SnapshotCreator creator(original_external_references);
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ {
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ CompileRun("function f() { return 41; }");
+ creator.SetDefaultContext(context);
+ ExpectInt32("f()", 41);
+ }
+ {
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ v8::Local<v8::FunctionTemplate> function_template =
+ v8::FunctionTemplate::New(isolate, SerializedCallback);
+ v8::Local<v8::Value> function =
+ function_template->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()->Set(context, v8_str("f"), function).FromJust());
+ v8::Local<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetAccessor(v8_str("x"), AccessorForSerialization);
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(context).ToLocalChecked();
+ CHECK(context->Global()->Set(context, v8_str("o"), object).FromJust());
+ ExpectInt32("f()", 42);
+ ExpectInt32("o.x", 2017);
+ creator.AddContext(context);
+ }
+ }
+ return creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+}
+
+TEST(SnapshotCreatorNoExternalReferencesDefault) {
+ DisableAlwaysOpt();
+ v8::StartupData blob = CreateSnapshotWithDefaultAndCustom();
+
+ // Deserialize with an incomplete list of external references.
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ params.external_references = nullptr;
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate = TestIsolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 41);
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+}
+
+TEST(SnapshotCreatorNoExternalReferencesCustomFail1) {
+ DisableAlwaysOpt();
+ v8::StartupData blob = CreateSnapshotWithDefaultAndCustom();
+
+ // Deserialize with an incomplete list of external references.
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ params.external_references = nullptr;
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate = TestIsolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 42);
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+}
+
+TEST(SnapshotCreatorNoExternalReferencesCustomFail2) {
+ DisableAlwaysOpt();
+ v8::StartupData blob = CreateSnapshotWithDefaultAndCustom();
+
+ // Deserialize with an incomplete list of external references.
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ params.external_references = nullptr;
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate = TestIsolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("o.x", 2017);
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+}
+
TEST(SnapshotCreatorUnknownExternalReferences) {
DisableAlwaysOpt();
v8::SnapshotCreator creator;
@@ -2718,7 +2849,7 @@ TEST(SnapshotCreatorIncludeGlobalProxy) {
delete[] blob.data;
}
-UNINITIALIZED_TEST(ReinitializeStringHashSeedNotRehashable) {
+UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
DisableAlwaysOpt();
i::FLAG_rehash_snapshot = true;
i::FLAG_hash_seed = 42;
@@ -2731,13 +2862,12 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeedNotRehashable) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- // Create dictionary mode object.
+ // Create an object with an ordered hash table.
CompileRun(
- "var a = {};"
- "a.b = 1;"
- "a.c = 2;"
- "delete a.b;");
- ExpectInt32("a.c", 2);
+ "var m = new Map();"
+ "m.set('a', 1);"
+ "m.set('b', 2);");
+ ExpectInt32("m.get('b')", 2);
creator.SetDefaultContext(context);
}
blob =
@@ -2757,7 +2887,76 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeedNotRehashable) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
CHECK(!context.IsEmpty());
v8::Context::Scope context_scope(context);
- ExpectInt32("a.c", 2);
+ ExpectInt32("m.get('b')", 2);
+ }
+ isolate->Dispose();
+ delete[] blob.data;
+}
+
+UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
+ DisableAlwaysOpt();
+ i::FLAG_rehash_snapshot = true;
+ i::FLAG_hash_seed = 42;
+ i::FLAG_allow_natives_syntax = true;
+ v8::StartupData blob;
+ {
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ // Create dictionary mode object.
+ CompileRun(
+ "var a = new Array(10000);"
+ "%NormalizeElements(a);"
+ "a[133] = 1;"
+ "a[177] = 2;"
+ "a[971] = 3;"
+ "a[7997] = 4;"
+ "a[2111] = 5;"
+ "var o = {};"
+ "%OptimizeObjectForAddingMultipleProperties(o, 3);"
+ "o.a = 1;"
+ "o.b = 2;"
+ "o.c = 3;"
+ "var p = { foo: 1 };" // Test rehashing of transition arrays.
+ "p = JSON.parse('{\"foo\": {\"x\": 1}}');");
+ i::Handle<i::Object> i_a = v8::Utils::OpenHandle(*CompileRun("a"));
+ i::Handle<i::Object> i_o = v8::Utils::OpenHandle(*CompileRun("o"));
+ CHECK(i_a->IsJSArray());
+ CHECK(i_a->IsJSObject());
+ CHECK(!i::Handle<i::JSArray>::cast(i_a)->HasFastElements());
+ CHECK(!i::Handle<i::JSObject>::cast(i_o)->HasFastProperties());
+ ExpectInt32("a[2111]", 5);
+ ExpectInt32("o.c", 3);
+ creator.SetDefaultContext(context);
+ }
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+
+ i::FLAG_hash_seed = 1337;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ create_params.snapshot_blob = &blob;
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ // Check that rehashing has been performed.
+ CHECK_EQ(1337, reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ CHECK(!context.IsEmpty());
+ v8::Context::Scope context_scope(context);
+ i::Handle<i::Object> i_a = v8::Utils::OpenHandle(*CompileRun("a"));
+ i::Handle<i::Object> i_o = v8::Utils::OpenHandle(*CompileRun("o"));
+ CHECK(i_a->IsJSArray());
+ CHECK(i_a->IsJSObject());
+ CHECK(!i::Handle<i::JSArray>::cast(i_a)->HasFastElements());
+ CHECK(!i::Handle<i::JSObject>::cast(i_o)->HasFastProperties());
+ ExpectInt32("a[2111]", 5);
+ ExpectInt32("o.c", 3);
}
isolate->Dispose();
delete[] blob.data;
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index ec6b659406..66a221f948 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -364,7 +364,7 @@ void AccumulateStatsWithOperator(
ConsStringIterator iter(cons_string);
String* string;
int offset;
- while (NULL != (string = iter.Next(&offset))) {
+ while (nullptr != (string = iter.Next(&offset))) {
// Accumulate stats.
CHECK_EQ(0, offset);
stats->leaves_++;
@@ -1070,6 +1070,7 @@ TEST(ExternalShortStringAdd) {
TEST(JSONStringifySliceMadeExternal) {
+ if (!FLAG_string_slices) return;
CcTest::InitializeVM();
// Create a sliced string from a one-byte string. The latter is turned
// into a two-byte external string. Check that JSON.stringify works.
@@ -1155,6 +1156,7 @@ TEST(CachedHashOverflow) {
TEST(SliceFromCons) {
+ if (!FLAG_string_slices) return;
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1221,6 +1223,7 @@ TEST(InternalizeExternal) {
}
TEST(SliceFromExternal) {
+ if (!FLAG_string_slices) return;
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1241,6 +1244,7 @@ TEST(SliceFromExternal) {
TEST(TrivialSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
+ if (!FLAG_string_slices) return;
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1270,6 +1274,7 @@ TEST(TrivialSlice) {
TEST(SliceFromSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
+ if (!FLAG_string_slices) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
@@ -1299,7 +1304,11 @@ UNINITIALIZED_TEST(OneByteArrayJoin) {
v8::Isolate::CreateParams create_params;
// Set heap limits.
create_params.constraints.set_max_semi_space_size_in_kb(1024);
+#ifdef DEBUG
+ create_params.constraints.set_max_old_space_size(20);
+#else
create_params.constraints.set_max_old_space_size(7);
+#endif
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
@@ -1421,7 +1430,7 @@ TEST(RobustSubStringStubExternalStrings) {
namespace {
-int* global_use_counts = NULL;
+int* global_use_counts = nullptr;
void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
@@ -1466,7 +1475,7 @@ TEST(StringReplaceAtomTwoByteResult) {
"subject.replace(/~/g, replace); ");
CHECK(result->IsString());
Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK(string->IsSeqTwoByteString());
+ CHECK(string->IsTwoByteRepresentation());
v8::Local<v8::String> expected = v8_str("one_byte\x80only\x80string\x80");
CHECK(expected->Equals(context.local(), result).FromJust());
@@ -1474,8 +1483,8 @@ TEST(StringReplaceAtomTwoByteResult) {
TEST(IsAscii) {
- CHECK(String::IsAscii(static_cast<char*>(NULL), 0));
- CHECK(String::IsOneByte(static_cast<uc16*>(NULL), 0));
+ CHECK(String::IsAscii(static_cast<char*>(nullptr), 0));
+ CHECK(String::IsOneByte(static_cast<uc16*>(nullptr), 0));
}
@@ -1484,7 +1493,7 @@ template<typename Op, bool return_first>
static uint16_t ConvertLatin1(uint16_t c) {
uint32_t result[Op::kMaxWidth];
int chars;
- chars = Op::Convert(c, 0, result, NULL);
+ chars = Op::Convert(c, 0, result, nullptr);
if (chars == 0) return 0;
CHECK_LE(chars, static_cast<int>(sizeof(result)));
if (!return_first && chars > 1) {
@@ -1538,14 +1547,14 @@ TEST(Latin1IgnoreCase) {
class DummyResource: public v8::String::ExternalStringResource {
public:
- virtual const uint16_t* data() const { return NULL; }
+ virtual const uint16_t* data() const { return nullptr; }
virtual size_t length() const { return 1 << 30; }
};
class DummyOneByteResource: public v8::String::ExternalOneByteStringResource {
public:
- virtual const char* data() const { return NULL; }
+ virtual const char* data() const { return nullptr; }
virtual size_t length() const { return 1 << 30; }
};
diff --git a/deps/v8/test/cctest/test-simulator-arm.cc b/deps/v8/test/cctest/test-sync-primitives-arm.cc
index eabc43df31..403d41ffe9 100644
--- a/deps/v8/test/cctest/test-simulator-arm.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/v8.h"
+#include "test/cctest/assembler-helper-arm.h"
#include "test/cctest/cctest.h"
#include "src/arm/simulator-arm.h"
@@ -37,18 +38,29 @@
namespace v8 {
namespace internal {
+// These tests rely on the behaviour specific to the simulator so we cannot
+// expect the same results on real hardware. The reason for this is that our
+// simulation of synchronisation primitives is more conservative than the
+// reality.
+// For example:
+// ldrex r1, [r2] ; Load acquire at address r2; r2 is now marked as exclusive.
+// ldr r0, [r4] ; This is a normal load, and at a different address.
+// ; However, any memory accesses can potentially clear the
+// ; exclusivity (See ARM DDI 0406C.c A3.4.5). This is unlikely
+// ; on real hardware but to be conservative, the simulator
+// ; always does it.
+// strex r3, r1, [r2] ; As a result, this will always fail in the simulator
+// ; but will likely succeed on hardware.
#if defined(USE_SIMULATOR)
#ifndef V8_TARGET_LITTLE_ENDIAN
#error Expected ARM to be little-endian
#endif
-// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
-
#define __ assm.
+namespace {
+
struct MemoryAccess {
enum class Kind {
None,
@@ -85,9 +97,9 @@ struct TestData {
int dummy;
};
-static void AssembleMemoryAccess(Assembler* assembler, MemoryAccess access,
- Register dest_reg, Register value_reg,
- Register addr_reg) {
+void AssembleMemoryAccess(Assembler* assembler, MemoryAccess access,
+ Register dest_reg, Register value_reg,
+ Register addr_reg) {
Assembler& assm = *assembler;
__ add(addr_reg, r0, Operand(access.offset));
@@ -167,38 +179,31 @@ static void AssembleMemoryAccess(Assembler* assembler, MemoryAccess access,
}
}
-static void AssembleLoadExcl(Assembler* assembler, MemoryAccess access,
- Register value_reg, Register addr_reg) {
+void AssembleLoadExcl(Assembler* assembler, MemoryAccess access,
+ Register value_reg, Register addr_reg) {
DCHECK(access.kind == MemoryAccess::Kind::LoadExcl);
AssembleMemoryAccess(assembler, access, no_reg, value_reg, addr_reg);
}
-static void AssembleStoreExcl(Assembler* assembler, MemoryAccess access,
- Register dest_reg, Register value_reg,
- Register addr_reg) {
+void AssembleStoreExcl(Assembler* assembler, MemoryAccess access,
+ Register dest_reg, Register value_reg,
+ Register addr_reg) {
DCHECK(access.kind == MemoryAccess::Kind::StoreExcl);
AssembleMemoryAccess(assembler, access, dest_reg, value_reg, addr_reg);
}
-static void TestInvalidateExclusiveAccess(
- TestData initial_data, MemoryAccess access1, MemoryAccess access2,
- MemoryAccess access3, int expected_res, TestData expected_data) {
+void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
+ MemoryAccess access2, MemoryAccess access3,
+ int expected_res, TestData expected_data) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
-
- AssembleLoadExcl(&assm, access1, r1, r1);
- AssembleMemoryAccess(&assm, access2, r3, r2, r1);
- AssembleStoreExcl(&assm, access3, r0, r3, r1);
-
- __ mov(pc, Operand(lr));
+ F_piiii f = FUNCTION_CAST<F_piiii>(AssembleCode([&](Assembler& assm) {
+ AssembleLoadExcl(&assm, access1, r1, r1);
+ AssembleMemoryAccess(&assm, access2, r3, r2, r1);
+ AssembleStoreExcl(&assm, access3, r0, r3, r1);
+ }));
- CodeDesc desc;
- assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
TestData t = initial_data;
int res =
@@ -219,6 +224,8 @@ static void TestInvalidateExclusiveAccess(
}
}
+} // namespace
+
TEST(simulator_invalidate_exclusive_access) {
using Kind = MemoryAccess::Kind;
using Size = MemoryAccess::Size;
@@ -255,28 +262,26 @@ TEST(simulator_invalidate_exclusive_access) {
0, TestData(7));
}
-static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
- MemoryAccess access) {
- HandleScope scope(isolate);
- Assembler assm(isolate, NULL, 0);
- AssembleMemoryAccess(&assm, access, r0, r2, r1);
- __ bx(lr);
+namespace {
- CodeDesc desc;
- assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
+ MemoryAccess access) {
+ HandleScope scope(isolate);
+ F_piiii f = FUNCTION_CAST<F_piiii>(AssembleCode([&](Assembler& assm) {
+ AssembleMemoryAccess(&assm, access, r0, r2, r1);
+ }));
return reinterpret_cast<int>(
CALL_GENERATED_CODE(isolate, f, test_data, 0, 0, 0, 0));
}
+} // namespace
+
class MemoryAccessThread : public v8::base::Thread {
public:
MemoryAccessThread()
: Thread(Options("MemoryAccessThread")),
- test_data_(NULL),
+ test_data_(nullptr),
is_finished_(false),
has_request_(false),
did_request_(false),
@@ -389,7 +394,7 @@ TEST(simulator_invalidate_exclusive_access_threaded) {
#undef __
-#endif // USE_SIMULATOR
+#endif // defined(USE_SIMULATOR)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-simulator-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index 9ba216a643..a4edee69fa 100644
--- a/deps/v8/test/cctest/test-simulator-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -36,6 +36,19 @@
namespace v8 {
namespace internal {
+// These tests rely on the behaviour specific to the simulator so we cannot
+// expect the same results on real hardware. The reason for this is that our
+// simulation of synchronisation primitives is more conservative than the
+// reality.
+// For example:
+// ldxr x1, [x2] ; Load acquire at address x2; x2 is now marked as exclusive.
+// ldr x0, [x4] ; This is a normal load, and at a different address.
+// ; However, any memory accesses can potentially clear the
+// ; exclusivity (See ARM DDI 0487B.a B2.9.5). This is unlikely
+// ; on real hardware but to be conservative, the simulator
+// ; always does it.
+// stxr w3, x1, [x2] ; As a result, this will always fail in the simulator but
+// ; will likely succeed on hardware.
#if defined(USE_SIMULATOR)
#ifndef V8_TARGET_LITTLE_ENDIAN
@@ -80,9 +93,11 @@ struct TestData {
int dummy;
};
-static void AssembleMemoryAccess(MacroAssembler* assembler, MemoryAccess access,
- Register dest_reg, Register value_reg,
- Register addr_reg) {
+namespace {
+
+void AssembleMemoryAccess(MacroAssembler* assembler, MemoryAccess access,
+ Register dest_reg, Register value_reg,
+ Register addr_reg) {
MacroAssembler& masm = *assembler;
__ Add(addr_reg, x0, Operand(access.offset));
@@ -162,25 +177,26 @@ static void AssembleMemoryAccess(MacroAssembler* assembler, MemoryAccess access,
}
}
-static void AssembleLoadExcl(MacroAssembler* assembler, MemoryAccess access,
- Register value_reg, Register addr_reg) {
+void AssembleLoadExcl(MacroAssembler* assembler, MemoryAccess access,
+ Register value_reg, Register addr_reg) {
DCHECK(access.kind == MemoryAccess::Kind::LoadExcl);
AssembleMemoryAccess(assembler, access, no_reg, value_reg, addr_reg);
}
-static void AssembleStoreExcl(MacroAssembler* assembler, MemoryAccess access,
- Register dest_reg, Register value_reg,
- Register addr_reg) {
+void AssembleStoreExcl(MacroAssembler* assembler, MemoryAccess access,
+ Register dest_reg, Register value_reg,
+ Register addr_reg) {
DCHECK(access.kind == MemoryAccess::Kind::StoreExcl);
AssembleMemoryAccess(assembler, access, dest_reg, value_reg, addr_reg);
}
-static void TestInvalidateExclusiveAccess(
- TestData initial_data, MemoryAccess access1, MemoryAccess access2,
- MemoryAccess access3, int expected_res, TestData expected_data) {
+void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
+ MemoryAccess access2, MemoryAccess access3,
+ int expected_res, TestData expected_data) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler masm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
AssembleLoadExcl(&masm, access1, w1, x1);
AssembleMemoryAccess(&masm, access2, w3, w2, x1);
@@ -191,6 +207,7 @@ static void TestInvalidateExclusiveAccess(
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+
TestData t = initial_data;
Simulator::CallArgument args[] = {
Simulator::CallArgument(reinterpret_cast<uintptr_t>(&t)),
@@ -214,6 +231,8 @@ static void TestInvalidateExclusiveAccess(
}
}
+} // namespace
+
TEST(simulator_invalidate_exclusive_access) {
using Kind = MemoryAccess::Kind;
using Size = MemoryAccess::Size;
@@ -250,10 +269,13 @@ TEST(simulator_invalidate_exclusive_access) {
0, TestData(7));
}
-static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
- MemoryAccess access) {
+namespace {
+
+int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
+ MemoryAccess access) {
HandleScope scope(isolate);
- MacroAssembler masm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
AssembleMemoryAccess(&masm, access, w0, w2, x1);
__ br(lr);
@@ -268,11 +290,13 @@ static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
return Simulator::current(isolate)->wreg(0);
}
+} // namespace
+
class MemoryAccessThread : public v8::base::Thread {
public:
MemoryAccessThread()
: Thread(Options("MemoryAccessThread")),
- test_data_(NULL),
+ test_data_(nullptr),
is_finished_(false),
has_request_(false),
did_request_(false),
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 0c6c0b6a0a..af8e41d2cf 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -33,9 +33,7 @@
#include "src/base/platform/platform.h"
-
-v8::base::Semaphore* semaphore = NULL;
-
+v8::base::Semaphore* semaphore = nullptr;
void Signal(const v8::FunctionCallbackInfo<v8::Value>& args) {
semaphore->Signal();
@@ -129,7 +127,7 @@ TEST(TerminateOnlyV8ThreadFromThreadItself) {
v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), TerminateCurrentThread, DoLoop);
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -152,7 +150,7 @@ TEST(TerminateOnlyV8ThreadFromThreadItselfNoLoop) {
v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
CcTest::isolate(), TerminateCurrentThread, DoLoopNoCall);
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -194,7 +192,7 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -204,7 +202,7 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
CHECK(result.IsEmpty());
thread.Join();
delete semaphore;
- semaphore = NULL;
+ semaphore = nullptr;
}
// Test that execution can be terminated from within JSON.stringify.
@@ -217,7 +215,7 @@ TEST(TerminateJsonStringify) {
v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!CcTest::isolate()->IsExecutionTerminating());
v8::MaybeLocal<v8::Value> result =
@@ -230,7 +228,7 @@ TEST(TerminateJsonStringify) {
CHECK(result.IsEmpty());
thread.Join();
delete semaphore;
- semaphore = NULL;
+ semaphore = nullptr;
}
int call_count = 0;
@@ -289,7 +287,7 @@ TEST(TerminateLoadICException) {
global->Set(v8_str("loop"),
v8::FunctionTemplate::New(isolate, LoopGetProperty));
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!isolate->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -336,7 +334,7 @@ TEST(TerminateAndReenterFromThreadItself) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, ReenterAfterTermination);
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
// Create script strings upfront as it won't work when terminating.
@@ -394,7 +392,7 @@ TEST(TerminateCancelTerminateFromThreadItself) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Check that execution completed with correct return value.
@@ -436,7 +434,7 @@ TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
isolate->EnqueueMicrotask(
v8::Function::New(isolate->GetCurrentContext(), MicrotaskLoopForever)
@@ -453,7 +451,7 @@ TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
thread.Join();
delete semaphore;
- semaphore = NULL;
+ semaphore = nullptr;
}
@@ -471,7 +469,7 @@ TEST(PostponeTerminateException) {
v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), TerminateCurrentThread, DoLoop);
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
v8::TryCatch try_catch(isolate);
@@ -483,7 +481,7 @@ TEST(PostponeTerminateException) {
i::StackGuard::TERMINATE_EXECUTION);
// API interrupts should still be triggered.
- CcTest::isolate()->RequestInterrupt(&CounterCallback, NULL);
+ CcTest::isolate()->RequestInterrupt(&CounterCallback, nullptr);
CHECK_EQ(0, callback_counter);
CompileRun(terminate_and_loop);
CHECK(!try_catch.HasTerminated());
@@ -494,7 +492,7 @@ TEST(PostponeTerminateException) {
i::StackGuard::API_INTERRUPT);
// None of the two interrupts should trigger.
- CcTest::isolate()->RequestInterrupt(&CounterCallback, NULL);
+ CcTest::isolate()->RequestInterrupt(&CounterCallback, nullptr);
CompileRun(terminate_and_loop);
CHECK(!try_catch.HasTerminated());
CHECK_EQ(1, callback_counter);
@@ -551,7 +549,7 @@ TEST(TerminationInInnerTryCall) {
v8_str("inner_try_call_terminate"),
v8::FunctionTemplate::New(isolate, InnerTryCallTerminate));
v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global_template);
+ v8::Context::New(CcTest::isolate(), nullptr, global_template);
v8::Context::Scope context_scope(context);
{
v8::TryCatch try_catch(isolate);
@@ -571,7 +569,7 @@ TEST(TerminateAndTryCall) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!isolate->IsExecutionTerminating());
v8::TryCatch try_catch(isolate);
@@ -612,7 +610,7 @@ TEST(TerminateConsole) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!isolate->IsExecutionTerminating());
v8::TryCatch try_catch(isolate);
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 5b4630cf75..d5c94eff0d 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -50,7 +50,7 @@ class ThreadIdValidationThread : public v8::base::Thread {
}
CHECK(thread_id.IsValid());
(*refs_)[thread_no_] = thread_id;
- if (thread_to_start_ != NULL) {
+ if (thread_to_start_ != nullptr) {
thread_to_start_->Start();
}
semaphore_->Signal();
@@ -71,7 +71,7 @@ TEST(ThreadIdValidation) {
threads.reserve(kNThreads);
refs.reserve(kNThreads);
v8::base::Semaphore semaphore(0);
- ThreadIdValidationThread* prev = NULL;
+ ThreadIdValidationThread* prev = nullptr;
for (int i = kNThreads - 1; i >= 0; i--) {
ThreadIdValidationThread* newThread =
new ThreadIdValidationThread(prev, &refs, i, &semaphore);
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 471619062a..9705afb4ef 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -22,16 +22,14 @@ struct MockTraceObject {
uint64_t bind_id;
int num_args;
unsigned int flags;
- int64_t timestamp;
MockTraceObject(char phase, std::string name, uint64_t id, uint64_t bind_id,
- int num_args, int flags, int64_t timestamp)
+ int num_args, int flags)
: phase(phase),
name(name),
id(id),
bind_id(bind_id),
num_args(num_args),
- flags(flags),
- timestamp(timestamp) {}
+ flags(flags) {}
};
typedef std::vector<MockTraceObject*> MockTraceObjectList;
@@ -53,20 +51,8 @@ class MockTracingController : public v8::TracingController {
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) override {
- return AddTraceEventWithTimestamp(
- phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags, 0);
- }
-
- uint64_t AddTraceEventWithTimestamp(
- char phase, const uint8_t* category_enabled_flag, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags, int64_t timestamp) override {
- MockTraceObject* to = new MockTraceObject(
- phase, std::string(name), id, bind_id, num_args, flags, timestamp);
+ MockTraceObject* to = new MockTraceObject(phase, std::string(name), id,
+ bind_id, num_args, flags);
trace_object_list_.push_back(to);
return 0;
}
@@ -253,24 +239,3 @@ TEST(TestEventInContext) {
CHECK_EQ("Isolate", GET_TRACE_OBJECT(2)->name);
CHECK_EQ(isolate_id, GET_TRACE_OBJECT(2)->id);
}
-
-TEST(TestEventWithTimestamp) {
- MockTracingPlatform platform;
-
- TRACE_EVENT_INSTANT_WITH_TIMESTAMP0("v8-cat", "0arg",
- TRACE_EVENT_SCOPE_GLOBAL, 1729);
- TRACE_EVENT_INSTANT_WITH_TIMESTAMP1("v8-cat", "1arg",
- TRACE_EVENT_SCOPE_GLOBAL, 4104, "val", 1);
- TRACE_EVENT_MARK_WITH_TIMESTAMP2("v8-cat", "mark", 13832, "a", 1, "b", 2);
-
- CHECK_EQ(3, GET_TRACE_OBJECTS_LIST->size());
-
- CHECK_EQ(1729, GET_TRACE_OBJECT(0)->timestamp);
- CHECK_EQ(0, GET_TRACE_OBJECT(0)->num_args);
-
- CHECK_EQ(4104, GET_TRACE_OBJECT(1)->timestamp);
- CHECK_EQ(1, GET_TRACE_OBJECT(1)->num_args);
-
- CHECK_EQ(13832, GET_TRACE_OBJECT(2)->timestamp);
- CHECK_EQ(2, GET_TRACE_OBJECT(2)->num_args);
-}
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 782d4fd045..fcd09eaee5 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -100,9 +100,8 @@ struct Tests {
void IsSomeType() {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* t = *it;
- CHECK(1 ==
- this->IsBitset(t) + t->IsHeapConstant() + t->IsRange() +
- t->IsOtherNumberConstant() + this->IsUnion(t));
+ CHECK_EQ(1, this->IsBitset(t) + t->IsHeapConstant() + t->IsRange() +
+ t->IsOtherNumberConstant() + this->IsUnion(t));
}
}
@@ -414,7 +413,7 @@ struct Tests {
// T->Is(Range(T->Min(), T->Max())).
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* type = *it;
- CHECK(!type->Is(T.Integer) || !type->IsInhabited() ||
+ CHECK(!type->Is(T.Integer) || type->IsNone() ||
type->Is(T.Range(type->Min(), type->Max())));
}
}
@@ -543,7 +542,7 @@ struct Tests {
(type1->IsRange() && type2->IsRange()) ||
(type1->IsOtherNumberConstant() &&
type2->IsOtherNumberConstant()) ||
- !type1->IsInhabited());
+ type1->IsNone());
}
}
}
@@ -667,7 +666,7 @@ struct Tests {
// T->Maybe(Any) iff T inhabited
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* type = *it;
- CHECK(type->Maybe(T.Any) == type->IsInhabited());
+ CHECK(type->Maybe(T.Any) == !type->IsNone());
}
// T->Maybe(None) never
@@ -679,7 +678,7 @@ struct Tests {
// Reflexivity upto Inhabitation: T->Maybe(T) iff T inhabited
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* type = *it;
- CHECK(type->Maybe(type) == type->IsInhabited());
+ CHECK(type->Maybe(type) == !type->IsNone());
}
// Symmetry: T1->Maybe(T2) iff T2->Maybe(T1)
@@ -696,8 +695,7 @@ struct Tests {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
Type* type1 = *it1;
Type* type2 = *it2;
- CHECK(!type1->Maybe(type2) ||
- (type1->IsInhabited() && type2->IsInhabited()));
+ CHECK(!type1->Maybe(type2) || (!type1->IsNone() && !type2->IsNone()));
}
}
@@ -707,7 +705,7 @@ struct Tests {
Type* type1 = *it1;
Type* type2 = *it2;
Type* intersect12 = T.Intersect(type1, type2);
- CHECK(!type1->Maybe(type2) || intersect12->IsInhabited());
+ CHECK(!type1->Maybe(type2) || !intersect12->IsNone());
}
}
@@ -716,8 +714,7 @@ struct Tests {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
Type* type1 = *it1;
Type* type2 = *it2;
- CHECK(!(type1->Is(type2) && type1->IsInhabited()) ||
- type1->Maybe(type2));
+ CHECK(!(type1->Is(type2) && !type1->IsNone()) || type1->Maybe(type2));
}
}
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 42b3e355ab..5f1584bc06 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -118,7 +118,7 @@ static Handle<DescriptorArray> CreateDescriptorArray(Isolate* isolate,
Descriptor d;
if (kind == PROP_ACCESSOR_INFO) {
Handle<AccessorInfo> info =
- Accessors::MakeAccessor(isolate, name, nullptr, nullptr, NONE);
+ Accessors::MakeAccessor(isolate, name, nullptr, nullptr);
d = Descriptor::AccessorConstant(name, info, NONE);
} else {
@@ -227,7 +227,7 @@ TEST(LayoutDescriptorBasicSlow) {
CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
CHECK(layout_descriptor->IsSlowLayout());
CHECK(!layout_descriptor->IsFastPointerLayout());
- CHECK(layout_descriptor->capacity() > kSmiValueSize);
+ CHECK_GT(layout_descriptor->capacity(), kSmiValueSize);
CHECK(!layout_descriptor->IsTagged(0));
CHECK(!layout_descriptor->IsTagged(kPropsCount - 1));
@@ -307,7 +307,7 @@ static void TestLayoutDescriptorQueries(int layout_descriptor_length,
int sequence_length;
CHECK_EQ(tagged,
layout_desc->IsTagged(i, max_sequence_length, &sequence_length));
- CHECK(sequence_length > 0);
+ CHECK_GT(sequence_length, 0);
CHECK_EQ(expected_sequence_length, sequence_length);
}
@@ -328,7 +328,7 @@ static void TestLayoutDescriptorQueriesFast(int max_sequence_length) {
for (int i = 0; i < kNumberOfBits; i++) {
CHECK_EQ(true,
layout_desc->IsTagged(i, max_sequence_length, &sequence_length));
- CHECK(sequence_length > 0);
+ CHECK_GT(sequence_length, 0);
CHECK_EQ(max_sequence_length, sequence_length);
}
}
@@ -437,7 +437,7 @@ static void TestLayoutDescriptorQueriesSlow(int max_sequence_length) {
bit_flip_positions[i] = cur;
cur = (cur + 1) * 2;
}
- CHECK(cur < 10000);
+ CHECK_LT(cur, 10000);
bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
max_sequence_length);
@@ -450,7 +450,7 @@ static void TestLayoutDescriptorQueriesSlow(int max_sequence_length) {
bit_flip_positions[i] = cur;
cur = (cur + 1) * 2;
}
- CHECK(cur < 10000);
+ CHECK_LT(cur, 10000);
bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
max_sequence_length);
@@ -642,7 +642,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
Descriptor d;
if (kind == PROP_ACCESSOR_INFO) {
Handle<AccessorInfo> info =
- Accessors::MakeAccessor(isolate, name, nullptr, nullptr, NONE);
+ Accessors::MakeAccessor(isolate, name, nullptr, nullptr);
d = Descriptor::AccessorConstant(name, info, NONE);
} else {
@@ -1235,8 +1235,8 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
CHECK_EQ(expected_tagged, helper.IsTagged(index.offset()));
CHECK_EQ(expected_tagged, helper.IsTagged(index.offset(), instance_size,
&end_of_region_offset));
- CHECK(end_of_region_offset > 0);
- CHECK(end_of_region_offset % kPointerSize == 0);
+ CHECK_GT(end_of_region_offset, 0);
+ CHECK_EQ(end_of_region_offset % kPointerSize, 0);
CHECK(end_of_region_offset <= instance_size);
for (int offset = index.offset(); offset < end_of_region_offset;
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index c300361a51..a4512ac21d 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -10,70 +10,13 @@ namespace v8 {
namespace internal {
namespace test_usecounters {
-int* global_use_counts = NULL;
+int* global_use_counts = nullptr;
void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
++global_use_counts[feature];
}
-TEST(DefineGetterSetterThrowUseCount) {
- i::FLAG_harmony_strict_legacy_accessor_builtins = false;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- LocalContext env;
- int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
- global_use_counts = use_counts;
- CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
-
- // __defineGetter__ and __defineSetter__ do not increment
- // kDefineGetterOrSetterWouldThrow on success
- CompileRun(
- "var a = {};"
- "Object.defineProperty(a, 'b', { value: 0, configurable: true });"
- "a.__defineGetter__('b', ()=>{});");
- CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
- CompileRun(
- "var a = {};"
- "Object.defineProperty(a, 'b', { value: 0, configurable: true });"
- "a.__defineSetter__('b', ()=>{});");
- CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
-
- // __defineGetter__ and __defineSetter__ do not increment
- // kDefineGetterOrSetterWouldThrow on other errors
- v8::Local<v8::Value> resultProxyThrow = CompileRun(
- "var exception;"
- "try {"
- "var a = new Proxy({}, { defineProperty: ()=>{throw new Error;} });"
- "a.__defineGetter__('b', ()=>{});"
- "} catch (e) { exception = e; }"
- "exception");
- CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
- CHECK(resultProxyThrow->IsObject());
- resultProxyThrow = CompileRun(
- "var exception;"
- "try {"
- "var a = new Proxy({}, { defineProperty: ()=>{throw new Error;} });"
- "a.__defineSetter__('b', ()=>{});"
- "} catch (e) { exception = e; }"
- "exception");
- CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
- CHECK(resultProxyThrow->IsObject());
-
- // __defineGetter__ and __defineSetter__ increment
- // kDefineGetterOrSetterWouldThrow when they would throw per spec (B.2.2.2)
- CompileRun(
- "var a = {};"
- "Object.defineProperty(a, 'b', { value: 0, configurable: false });"
- "a.__defineGetter__('b', ()=>{});");
- CHECK_EQ(1, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
- CompileRun(
- "var a = {};"
- "Object.defineProperty(a, 'b', { value: 0, configurable: false });"
- "a.__defineSetter__('b', ()=>{});");
- CHECK_EQ(2, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
-}
-
TEST(AssigmentExpressionLHSIsCall) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index 5abe0e46d3..b65b4a765a 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -193,8 +193,8 @@ static char FlagV(uint32_t flags) {
bool EqualNzcv(uint32_t expected, uint32_t result) {
- CHECK((expected & ~NZCVFlag) == 0);
- CHECK((result & ~NZCVFlag) == 0);
+ CHECK_EQ(expected & ~NZCVFlag, 0);
+ CHECK_EQ(result & ~NZCVFlag, 0);
if (result != expected) {
printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
@@ -451,3 +451,5 @@ void RegisterDump::Dump(MacroAssembler* masm) {
} // namespace internal
} // namespace v8
+
+#undef __
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index f629a17646..a709240662 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -112,7 +112,7 @@ class RegisterDump {
// Flags accessors.
inline uint32_t flags_nzcv() const {
CHECK(IsComplete());
- CHECK((dump_.flags_ & ~Flags_mask) == 0);
+ CHECK_EQ(dump_.flags_ & ~Flags_mask, 0);
return dump_.flags_ & Flags_mask;
}
@@ -129,7 +129,7 @@ class RegisterDump {
// ::Dump method, or a failure in the simulator.
bool RegAliasesMatch(unsigned code) const {
CHECK(IsComplete());
- CHECK(code < kNumberOfRegisters);
+ CHECK_LT(code, kNumberOfRegisters);
return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
}
@@ -142,7 +142,7 @@ class RegisterDump {
// As RegAliasesMatch, but for floating-point registers.
bool FPRegAliasesMatch(unsigned code) const {
CHECK(IsComplete());
- CHECK(code < kNumberOfVRegisters);
+ CHECK_LT(code, kNumberOfVRegisters);
return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
}
@@ -225,7 +225,7 @@ std::array<RegType, Size> CreateRegisterArray() {
// (such as the push and pop tests), but where certain registers must be
// avoided as they are used for other purposes.
//
-// Any of w, x, or r can be NULL if they are not required.
+// Any of w, x, or r can be nullptr if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 60f6498022..8db1855cf5 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -44,7 +44,10 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
static Handle<JSWeakMap> AllocateJSWeakMap(Isolate* isolate) {
- Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
+ Handle<Map> map =
+ isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+ Handle<JSObject> weakmap_obj = isolate->factory()->NewJSObjectFromMap(map);
+ Handle<JSWeakMap> weakmap(JSWeakMap::cast(*weakmap_obj));
// Do not leak handles for the hash table, it would make entries strong.
{
HandleScope scope(isolate);
@@ -168,8 +171,8 @@ TEST(Regress2060a) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
- Handle<JSFunction> function = factory->NewFunction(
- factory->function_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->function_string());
Handle<JSObject> key = factory->NewJSObject(function);
Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
@@ -209,8 +212,8 @@ TEST(Regress2060b) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
- Handle<JSFunction> function = factory->NewFunction(
- factory->function_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->function_string());
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 900515aadd..a2d4ff4bed 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -167,8 +167,8 @@ TEST(WeakSet_Regress2060a) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
- Handle<JSFunction> function = factory->NewFunction(
- factory->function_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->function_string());
Handle<JSObject> key = factory->NewJSObject(function);
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
@@ -208,8 +208,8 @@ TEST(WeakSet_Regress2060b) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
- Handle<JSFunction> function = factory->NewFunction(
- factory->function_string());
+ Handle<JSFunction> function =
+ factory->NewFunctionForTest(factory->function_string());
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index 36db837c6c..d9c3c23609 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -35,6 +35,7 @@ from testrunner.objects import testcase
class CcTestSuite(testsuite.TestSuite):
+ SHELL = 'cctest'
def __init__(self, name, root):
super(CcTestSuite, self).__init__(name, root)
@@ -44,13 +45,13 @@ class CcTestSuite(testsuite.TestSuite):
build_dir = "out"
def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+ shell = os.path.abspath(os.path.join(context.shell_dir, self.SHELL))
if utils.IsWindows():
shell += ".exe"
- output = commands.Execute(context.command_prefix +
- [shell, "--list"] +
- context.extra_flags)
+ cmd = context.command_prefix + [shell, "--list"] + context.extra_flags
+ output = commands.Execute(cmd)
if output.exit_code != 0:
+ print ' '.join(cmd)
print output.stdout
print output.stderr
return []
@@ -61,12 +62,11 @@ class CcTestSuite(testsuite.TestSuite):
tests.sort(key=lambda t: t.path)
return tests
- def GetFlagsForTestCase(self, testcase, context):
- testname = testcase.path.split(os.path.sep)[-1]
- return (testcase.flags + [testcase.path] + context.mode_flags)
+ def GetShellForTestCase(self, testcase):
+ return self.SHELL
- def shell(self):
- return "cctest"
+ def GetParametersForTestCase(self, testcase, context):
+ return [testcase.path], testcase.flags + context.mode_flags, {}
def GetSuite(name, root):
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 4224e51fde..6907b8381e 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -30,7 +30,7 @@ class CWasmEntryArgTester {
public:
CWasmEntryArgTester(std::initializer_list<uint8_t> wasm_function_bytes,
std::function<ReturnType(Args...)> expected_fn)
- : runner_(kExecuteCompiled),
+ : runner_(kExecuteTurbofan),
isolate_(runner_.main_isolate()),
expected_fn_(expected_fn),
sig_(runner_.template CreateSig<ReturnType, Args...>()) {
@@ -62,7 +62,12 @@ class CWasmEntryArgTester {
Handle<Object> buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate_);
CHECK(!buffer_obj->IsHeapObject());
- Handle<Object> call_args[]{wasm_code_, buffer_obj};
+ Handle<Object> call_args[]{
+ (FLAG_wasm_jit_to_native
+ ? Handle<Object>::cast(isolate_->factory()->NewForeign(
+ wasm_code_.GetWasmCode()->instructions().start(), TENURED))
+ : Handle<Object>::cast(wasm_code_.GetCode())),
+ buffer_obj};
static_assert(
arraysize(call_args) == compiler::CWasmEntryParameters::kNumParameters,
"adapt this test");
@@ -88,7 +93,7 @@ class CWasmEntryArgTester {
std::function<ReturnType(Args...)> expected_fn_;
FunctionSig* sig_;
Handle<JSFunction> c_wasm_entry_fn_;
- Handle<Code> wasm_code_;
+ WasmCodeWrapper wasm_code_;
};
} // namespace
@@ -100,8 +105,7 @@ TEST(TestCWasmEntryArgPassing_int32) {
WASM_I32_ADD(WASM_I32_MUL(WASM_I32V_1(2), WASM_GET_LOCAL(0)), WASM_ONE)},
[](int32_t a) { return 2 * a + 1; });
- std::vector<int32_t> test_values = compiler::ValueHelper::int32_vector();
- for (int32_t v : test_values) tester.CheckCall(v);
+ FOR_INT32_INPUTS(v) { tester.CheckCall(*v); }
}
// Pass int64_t, return double.
@@ -111,10 +115,7 @@ TEST(TestCWasmEntryArgPassing_double_int64) {
WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0))},
[](int64_t a) { return static_cast<double>(a); });
- std::vector<int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
- for (int64_t v : test_values_i64) {
- tester.CheckCall(v);
- }
+ FOR_INT64_INPUTS(v) { tester.CheckCall(*v); }
}
// Pass double, return int64_t.
@@ -124,9 +125,7 @@ TEST(TestCWasmEntryArgPassing_int64_double) {
WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0))},
[](double d) { return static_cast<int64_t>(d); });
- for (int64_t i : compiler::ValueHelper::int64_vector()) {
- tester.CheckCall(i);
- }
+ FOR_INT64_INPUTS(i) { tester.CheckCall(*i); }
}
// Pass float, return double.
@@ -138,8 +137,7 @@ TEST(TestCWasmEntryArgPassing_float_double) {
WASM_F64(1))},
[](float f) { return 2. * static_cast<double>(f) + 1.; });
- std::vector<float> test_values = compiler::ValueHelper::float32_vector();
- for (float f : test_values) tester.CheckCall(f);
+ FOR_FLOAT32_INPUTS(f) { tester.CheckCall(*f); }
}
// Pass two doubles, return double.
@@ -149,11 +147,8 @@ TEST(TestCWasmEntryArgPassing_double_double) {
WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))},
[](double a, double b) { return a + b; });
- std::vector<double> test_values = compiler::ValueHelper::float64_vector();
- for (double d1 : test_values) {
- for (double d2 : test_values) {
- tester.CheckCall(d1, d2);
- }
+ FOR_FLOAT64_INPUTS(d1) {
+ FOR_FLOAT64_INPUTS(d2) { tester.CheckCall(*d1, *d2); }
}
}
@@ -176,10 +171,11 @@ TEST(TestCWasmEntryArgPassing_AllTypes) {
return 0. + a + b + c + d;
});
- std::vector<int32_t> test_values_i32 = compiler::ValueHelper::int32_vector();
- std::vector<int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
- std::vector<float> test_values_f32 = compiler::ValueHelper::float32_vector();
- std::vector<double> test_values_f64 = compiler::ValueHelper::float64_vector();
+ Vector<const int32_t> test_values_i32 = compiler::ValueHelper::int32_vector();
+ Vector<const int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
+ Vector<const float> test_values_f32 = compiler::ValueHelper::float32_vector();
+ Vector<const double> test_values_f64 =
+ compiler::ValueHelper::float64_vector();
size_t max_len =
std::max(std::max(test_values_i32.size(), test_values_i64.size()),
std::max(test_values_f32.size(), test_values_f64.size()));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index c7bb737299..3fe8b4ae99 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -923,6 +923,25 @@ WASM_EXEC_TEST(CallI64Parameter) {
}
}
+WASM_EXEC_TEST(CallI64Return) {
+ ValueType return_types[3]; // TODO(rossberg): support more in the future
+ for (int i = 0; i < 3; i++) return_types[i] = kWasmI64;
+ return_types[1] = kWasmI32;
+ FunctionSig sig(2, 1, return_types);
+
+ WasmRunner<int64_t> r(execution_mode);
+ // Build the target function.
+ WasmFunctionCompiler& t = r.NewFunction(&sig);
+ BUILD(t, WASM_GET_LOCAL(0), WASM_I32V(7));
+
+ // Build the first calling function.
+ BUILD(r,
+ WASM_CALL_FUNCTION(
+ t.function_index(), WASM_I64V(0xbcd12340000000b)), WASM_DROP);
+
+ CHECK_EQ(0xbcd12340000000b, r.Call());
+}
+
void TestI64Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
int64_t expected, int64_t a, int64_t b) {
{
@@ -1545,12 +1564,12 @@ WASM_EXEC_TEST(UnalignedInt64Store) {
for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
} while (false)
-static void CompileCallIndirectMany(ValueType param) {
+static void CompileCallIndirectMany(WasmExecutionMode mode, ValueType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
- WasmRunner<void> r(kExecuteCompiled);
+ WasmRunner<void> r(mode);
FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
r.builder().AddSignature(sig);
@@ -1570,7 +1589,9 @@ static void CompileCallIndirectMany(ValueType param) {
}
}
-TEST(Compile_Wasm_CallIndirect_Many_i64) { CompileCallIndirectMany(kWasmI64); }
+WASM_EXEC_TEST(Compile_Wasm_CallIndirect_Many_i64) {
+ CompileCallIndirectMany(execution_mode, kWasmI64);
+}
static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
const int kExpected = 6333;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index cb291b8741..b7b200984d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -52,9 +52,10 @@ T CompareExchange(T initial, T a, T b) {
return a;
}
-void RunU32BinOp(WasmOpcode wasm_op, Uint32BinOp expected_op) {
+void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+ Uint32BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t> r(execution_mode);
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
r.builder().SetHasSharedMemory();
@@ -72,16 +73,29 @@ void RunU32BinOp(WasmOpcode wasm_op, Uint32BinOp expected_op) {
}
}
-TEST(I32AtomicAdd) { RunU32BinOp(kExprI32AtomicAdd, Add); }
-TEST(I32AtomicSub) { RunU32BinOp(kExprI32AtomicSub, Sub); }
-TEST(I32AtomicAnd) { RunU32BinOp(kExprI32AtomicAnd, And); }
-TEST(I32AtomicOr) { RunU32BinOp(kExprI32AtomicOr, Or); }
-TEST(I32AtomicXor) { RunU32BinOp(kExprI32AtomicXor, Xor); }
-TEST(I32AtomicExchange) { RunU32BinOp(kExprI32AtomicExchange, Exchange); }
+WASM_EXEC_TEST(I32AtomicAdd) {
+ RunU32BinOp(execution_mode, kExprI32AtomicAdd, Add);
+}
+WASM_EXEC_TEST(I32AtomicSub) {
+ RunU32BinOp(execution_mode, kExprI32AtomicSub, Sub);
+}
+WASM_EXEC_TEST(I32AtomicAnd) {
+ RunU32BinOp(execution_mode, kExprI32AtomicAnd, And);
+}
+WASM_EXEC_TEST(I32AtomicOr) {
+ RunU32BinOp(execution_mode, kExprI32AtomicOr, Or);
+}
+WASM_EXEC_TEST(I32AtomicXor) {
+ RunU32BinOp(execution_mode, kExprI32AtomicXor, Xor);
+}
+WASM_EXEC_TEST(I32AtomicExchange) {
+ RunU32BinOp(execution_mode, kExprI32AtomicExchange, Exchange);
+}
-void RunU16BinOp(WasmOpcode wasm_op, Uint16BinOp expected_op) {
+void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
+ Uint16BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t> r(mode);
r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
@@ -99,16 +113,29 @@ void RunU16BinOp(WasmOpcode wasm_op, Uint16BinOp expected_op) {
}
}
-TEST(I32AtomicAdd16U) { RunU16BinOp(kExprI32AtomicAdd16U, Add); }
-TEST(I32AtomicSub16U) { RunU16BinOp(kExprI32AtomicSub16U, Sub); }
-TEST(I32AtomicAnd16U) { RunU16BinOp(kExprI32AtomicAnd16U, And); }
-TEST(I32AtomicOr16U) { RunU16BinOp(kExprI32AtomicOr16U, Or); }
-TEST(I32AtomicXor16U) { RunU16BinOp(kExprI32AtomicXor16U, Xor); }
-TEST(I32AtomicExchange16U) { RunU16BinOp(kExprI32AtomicExchange16U, Exchange); }
+WASM_EXEC_TEST(I32AtomicAdd16U) {
+ RunU16BinOp(execution_mode, kExprI32AtomicAdd16U, Add);
+}
+WASM_EXEC_TEST(I32AtomicSub16U) {
+ RunU16BinOp(execution_mode, kExprI32AtomicSub16U, Sub);
+}
+WASM_EXEC_TEST(I32AtomicAnd16U) {
+ RunU16BinOp(execution_mode, kExprI32AtomicAnd16U, And);
+}
+WASM_EXEC_TEST(I32AtomicOr16U) {
+ RunU16BinOp(execution_mode, kExprI32AtomicOr16U, Or);
+}
+WASM_EXEC_TEST(I32AtomicXor16U) {
+ RunU16BinOp(execution_mode, kExprI32AtomicXor16U, Xor);
+}
+WASM_EXEC_TEST(I32AtomicExchange16U) {
+ RunU16BinOp(execution_mode, kExprI32AtomicExchange16U, Exchange);
+}
-void RunU8BinOp(WasmOpcode wasm_op, Uint8BinOp expected_op) {
+void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+ Uint8BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
@@ -126,16 +153,28 @@ void RunU8BinOp(WasmOpcode wasm_op, Uint8BinOp expected_op) {
}
}
-TEST(I32AtomicAdd8U) { RunU8BinOp(kExprI32AtomicAdd8U, Add); }
-TEST(I32AtomicSub8U) { RunU8BinOp(kExprI32AtomicSub8U, Sub); }
-TEST(I32AtomicAnd8U) { RunU8BinOp(kExprI32AtomicAnd8U, And); }
-TEST(I32AtomicOr8U) { RunU8BinOp(kExprI32AtomicOr8U, Or); }
-TEST(I32AtomicXor8U) { RunU8BinOp(kExprI32AtomicXor8U, Xor); }
-TEST(I32AtomicExchange8U) { RunU8BinOp(kExprI32AtomicExchange8U, Exchange); }
+WASM_EXEC_TEST(I32AtomicAdd8U) {
+ RunU8BinOp(execution_mode, kExprI32AtomicAdd8U, Add);
+}
+WASM_EXEC_TEST(I32AtomicSub8U) {
+ RunU8BinOp(execution_mode, kExprI32AtomicSub8U, Sub);
+}
+WASM_EXEC_TEST(I32AtomicAnd8U) {
+ RunU8BinOp(execution_mode, kExprI32AtomicAnd8U, And);
+}
+WASM_EXEC_TEST(I32AtomicOr8U) {
+ RunU8BinOp(execution_mode, kExprI32AtomicOr8U, Or);
+}
+WASM_EXEC_TEST(I32AtomicXor8U) {
+ RunU8BinOp(execution_mode, kExprI32AtomicXor8U, Xor);
+}
+WASM_EXEC_TEST(I32AtomicExchange8U) {
+ RunU8BinOp(execution_mode, kExprI32AtomicExchange8U, Exchange);
+}
-TEST(I32AtomicCompareExchange) {
+WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
BUILD(r, WASM_ATOMICS_TERNARY_OP(
@@ -153,9 +192,9 @@ TEST(I32AtomicCompareExchange) {
}
}
-TEST(I32AtomicCompareExchange16U) {
+WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange16U,
@@ -174,9 +213,9 @@ TEST(I32AtomicCompareExchange16U) {
}
}
-TEST(I32AtomicCompareExchange8U) {
+WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
BUILD(r,
@@ -195,9 +234,9 @@ TEST(I32AtomicCompareExchange8U) {
}
}
-TEST(I32AtomicLoad) {
+WASM_COMPILED_EXEC_TEST(I32AtomicLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
@@ -210,9 +249,9 @@ TEST(I32AtomicLoad) {
}
}
-TEST(I32AtomicLoad16U) {
+WASM_COMPILED_EXEC_TEST(I32AtomicLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
@@ -225,9 +264,9 @@ TEST(I32AtomicLoad16U) {
}
}
-TEST(I32AtomicLoad8U) {
+WASM_COMPILED_EXEC_TEST(I32AtomicLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
@@ -240,9 +279,9 @@ TEST(I32AtomicLoad8U) {
}
}
-TEST(I32AtomicStoreLoad) {
+WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
@@ -259,9 +298,9 @@ TEST(I32AtomicStoreLoad) {
}
}
-TEST(I32AtomicStoreLoad16U) {
+WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
@@ -279,9 +318,9 @@ TEST(I32AtomicStoreLoad16U) {
}
}
-TEST(I32AtomicStoreLoad8U) {
+WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 10ba64c993..f67ce2d121 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -22,7 +22,7 @@ namespace wasm {
namespace test_run_wasm_interpreter {
TEST(Run_WasmInt8Const_i) {
- WasmRunner<int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t> r(kExecuteInterpreter);
const byte kExpectedValue = 109;
// return(kExpectedValue)
BUILD(r, WASM_I32V_2(kExpectedValue));
@@ -30,14 +30,14 @@ TEST(Run_WasmInt8Const_i) {
}
TEST(Run_WasmIfElse) {
- WasmRunner<int32_t, int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, int32_t> r(kExecuteInterpreter);
BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9), WASM_I32V_1(10)));
CHECK_EQ(10, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
TEST(Run_WasmIfReturn) {
- WasmRunner<int32_t, int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, int32_t> r(kExecuteInterpreter);
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_RETURN1(WASM_I32V_2(77))),
WASM_I32V_2(65));
CHECK_EQ(65, r.Call(0));
@@ -53,7 +53,7 @@ TEST(Run_WasmNopsN) {
code[nops] = kExprI32Const;
code[nops + 1] = expected;
- WasmRunner<int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t> r(kExecuteInterpreter);
r.Build(code, code + nops + 2);
CHECK_EQ(expected, r.Call());
}
@@ -76,7 +76,7 @@ TEST(Run_WasmConstsN) {
}
}
- WasmRunner<int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t> r(kExecuteInterpreter);
r.Build(code, code + (count * 3));
CHECK_EQ(expected, r.Call());
}
@@ -95,7 +95,7 @@ TEST(Run_WasmBlocksN) {
code[2 + nops + 1] = expected;
code[2 + nops + 2] = kExprEnd;
- WasmRunner<int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t> r(kExecuteInterpreter);
r.Build(code, code + nops + kExtra);
CHECK_EQ(expected, r.Call());
}
@@ -120,7 +120,7 @@ TEST(Run_WasmBlockBreakN) {
code[2 + index + 2] = kExprBr;
code[2 + index + 3] = 0;
- WasmRunner<int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t> r(kExecuteInterpreter);
r.Build(code, code + kMaxNops + kExtra);
CHECK_EQ(expected, r.Call());
}
@@ -128,7 +128,7 @@ TEST(Run_WasmBlockBreakN) {
}
TEST(Run_Wasm_nested_ifs_i) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteInterpreter);
BUILD(
r,
@@ -178,7 +178,7 @@ TEST(Breakpoint_I32Add) {
Find(code, sizeof(code), kNumBreakpoints, kExprGetLocal, kExprGetLocal,
kExprI32Add);
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreter);
r.Build(code, code + arraysize(code));
@@ -217,7 +217,7 @@ TEST(Step_I32Mul) {
static const int kTraceLength = 4;
byte code[] = {WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreter);
r.Build(code, code + arraysize(code));
@@ -255,7 +255,7 @@ TEST(Breakpoint_I32And_disable) {
std::unique_ptr<int[]> offsets =
Find(code, sizeof(code), kNumBreakpoints, kExprI32And);
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreter);
r.Build(code, code + arraysize(code));
@@ -293,14 +293,14 @@ TEST(Breakpoint_I32And_disable) {
TEST(GrowMemory) {
{
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
r.builder().AddMemory(WasmModule::kPageSize);
r.builder().SetMaxMemPages(10);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(1, r.Call(1));
}
{
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
r.builder().AddMemory(WasmModule::kPageSize);
r.builder().SetMaxMemPages(10);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
@@ -311,7 +311,7 @@ TEST(GrowMemory) {
TEST(GrowMemoryPreservesData) {
int32_t index = 16;
int32_t value = 2335;
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
r.builder().AddMemory(WasmModule::kPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value)),
@@ -322,7 +322,7 @@ TEST(GrowMemoryPreservesData) {
TEST(GrowMemoryInvalidSize) {
// Grow memory by an invalid amount without initial memory.
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
r.builder().AddMemory(WasmModule::kPageSize);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(-1, r.Call(1048575));
@@ -330,40 +330,40 @@ TEST(GrowMemoryInvalidSize) {
TEST(TestPossibleNondeterminism) {
{
- WasmRunner<int32_t, float> r(kExecuteInterpreted);
+ WasmRunner<int32_t, float> r(kExecuteInterpreter);
BUILD(r, WASM_I32_REINTERPRET_F32(WASM_GET_LOCAL(0)));
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
- CHECK(r.possible_nondeterminism());
+ CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<int64_t, double> r(kExecuteInterpreted);
+ WasmRunner<int64_t, double> r(kExecuteInterpreter);
BUILD(r, WASM_I64_REINTERPRET_F64(WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
- CHECK(r.possible_nondeterminism());
+ CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<float, float> r(kExecuteInterpreted);
+ WasmRunner<float, float> r(kExecuteInterpreter);
BUILD(r, WASM_F32_COPYSIGN(WASM_F32(42.0f), WASM_GET_LOCAL(0)));
r.Call(16.0f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
- CHECK(r.possible_nondeterminism());
+ CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<double, double> r(kExecuteInterpreted);
+ WasmRunner<double, double> r(kExecuteInterpreter);
BUILD(r, WASM_F64_COPYSIGN(WASM_F64(42.0), WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
- CHECK(r.possible_nondeterminism());
+ CHECK(!r.possible_nondeterminism());
}
{
int32_t index = 16;
- WasmRunner<int32_t, float> r(kExecuteInterpreted);
+ WasmRunner<int32_t, float> r(kExecuteInterpreter);
r.builder().AddMemory(WasmModule::kPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Float32(), WASM_I32V(index),
WASM_GET_LOCAL(0)),
@@ -371,11 +371,11 @@ TEST(TestPossibleNondeterminism) {
r.Call(1345.3456f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
- CHECK(r.possible_nondeterminism());
+ CHECK(!r.possible_nondeterminism());
}
{
int32_t index = 16;
- WasmRunner<int32_t, double> r(kExecuteInterpreted);
+ WasmRunner<int32_t, double> r(kExecuteInterpreter);
r.builder().AddMemory(WasmModule::kPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Float64(), WASM_I32V(index),
WASM_GET_LOCAL(0)),
@@ -383,12 +383,60 @@ TEST(TestPossibleNondeterminism) {
r.Call(1345.3456);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
+ CHECK(!r.possible_nondeterminism());
+ }
+ {
+ WasmRunner<float, float> r(kExecuteInterpreter);
+ BUILD(r, WASM_F32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ r.Call(1048575.5f);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<float>::quiet_NaN());
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ WasmRunner<double, double> r(kExecuteInterpreter);
+ BUILD(r, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ r.Call(16.0);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<double>::quiet_NaN());
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ WasmRunner<int32_t, float> r(kExecuteInterpreter);
+ BUILD(r, WASM_F32_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ r.Call(16.0);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<float>::quiet_NaN());
+ CHECK(!r.possible_nondeterminism());
+ }
+ {
+ WasmRunner<int32_t, double> r(kExecuteInterpreter);
+ BUILD(r, WASM_F64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ r.Call(16.0);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<double>::quiet_NaN());
+ CHECK(!r.possible_nondeterminism());
+ }
+ {
+ WasmRunner<float, float> r(kExecuteInterpreter);
+ BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ r.Call(1048575.5f);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<float>::quiet_NaN());
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ WasmRunner<double, double> r(kExecuteInterpreter);
+ BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ r.Call(16.0);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<double>::quiet_NaN());
CHECK(r.possible_nondeterminism());
}
}
TEST(WasmInterpreterActivations) {
- WasmRunner<void> r(kExecuteInterpreted);
+ WasmRunner<void> r(kExecuteInterpreter);
Isolate* isolate = r.main_isolate();
BUILD(r, WASM_NOP);
@@ -418,7 +466,7 @@ TEST(WasmInterpreterActivations) {
}
TEST(InterpreterLoadWithoutMemory) {
- WasmRunner<int32_t, int32_t> r(kExecuteInterpreted);
+ WasmRunner<int32_t, int32_t> r(kExecuteInterpreter);
r.builder().AddMemory(0);
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
CHECK_TRAP32(r.Call(0));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 72ed1f03f5..b4e0298a72 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -256,7 +256,7 @@ class WasmSerializationTest {
uint32_t* slot = reinterpret_cast<uint32_t*>(
const_cast<uint8_t*>(serialized_bytes_.first) +
SerializedCodeData::kPayloadLengthOffset);
- *slot = 0xfefefefeu;
+ *slot = FLAG_wasm_jit_to_native ? 0u : 0xfefefefeu;
}
v8::MaybeLocal<v8::WasmCompiledModule> Deserialize() {
@@ -281,7 +281,7 @@ class WasmSerializationTest {
wire_bytes().first, wire_bytes().second),
0);
}
- Handle<JSObject> instance =
+ Handle<WasmInstanceObject> instance =
SyncInstantiate(current_isolate(), &thrower, module_object,
Handle<JSReceiver>::null(),
MaybeHandle<JSArrayBuffer>())
@@ -321,10 +321,6 @@ class WasmSerializationTest {
WasmSerializationTest::BuildWireBytes(zone(), &buffer);
Isolate* serialization_isolate = CcTest::InitIsolateOnce();
- // Isolates do not have serialization enabled by default. We must enable it
- // here or else the assembler will not mark external references so that the
- // serializer can handle them correctly.
- serialization_isolate->set_serializer_enabled_for_test(true);
ErrorThrower thrower(serialization_isolate, "");
uint8_t* bytes = nullptr;
size_t bytes_size = 0;
@@ -693,7 +689,8 @@ TEST(TestInterruptLoop) {
{}, {})
.ToHandleChecked();
- Handle<JSArrayBuffer> memory(instance->memory_buffer(), isolate);
+ Handle<JSArrayBuffer> memory(instance->memory_object()->array_buffer(),
+ isolate);
int32_t* memory_array = reinterpret_cast<int32_t*>(memory->backing_store());
InterruptThread thread(isolate, memory_array);
@@ -930,18 +927,18 @@ TEST(InitDataAtTheUpperLimit) {
ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
const byte data[] = {
- WASM_MODULE_HEADER, // --
- kMemorySectionCode, // --
- U32V_1(4), // section size
- ENTRY_COUNT(1), // --
- kResizableMaximumFlag, // --
- 1, // initial size
- 2, // maximum size
- kDataSectionCode, // --
- U32V_1(9), // section size
- ENTRY_COUNT(1), // --
- 0, // linear memory index
- WASM_I32V_3(0xffff), // destination offset
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kHasMaximumFlag, // --
+ 1, // initial size
+ 2, // maximum size
+ kDataSectionCode, // --
+ U32V_1(9), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_3(0xffff), // destination offset
kExprEnd,
U32V_1(1), // source size
'c' // data bytes
@@ -967,18 +964,18 @@ TEST(EmptyMemoryNonEmptyDataSegment) {
ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
const byte data[] = {
- WASM_MODULE_HEADER, // --
- kMemorySectionCode, // --
- U32V_1(4), // section size
- ENTRY_COUNT(1), // --
- kResizableMaximumFlag, // --
- 0, // initial size
- 0, // maximum size
- kDataSectionCode, // --
- U32V_1(7), // section size
- ENTRY_COUNT(1), // --
- 0, // linear memory index
- WASM_I32V_1(8), // destination offset
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kHasMaximumFlag, // --
+ 0, // initial size
+ 0, // maximum size
+ kDataSectionCode, // --
+ U32V_1(7), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_1(8), // destination offset
kExprEnd,
U32V_1(1), // source size
'c' // data bytes
@@ -1002,18 +999,18 @@ TEST(EmptyMemoryEmptyDataSegment) {
ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
const byte data[] = {
- WASM_MODULE_HEADER, // --
- kMemorySectionCode, // --
- U32V_1(4), // section size
- ENTRY_COUNT(1), // --
- kResizableMaximumFlag, // --
- 0, // initial size
- 0, // maximum size
- kDataSectionCode, // --
- U32V_1(6), // section size
- ENTRY_COUNT(1), // --
- 0, // linear memory index
- WASM_I32V_1(0), // destination offset
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kHasMaximumFlag, // --
+ 0, // initial size
+ 0, // maximum size
+ kDataSectionCode, // --
+ U32V_1(6), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_1(0), // destination offset
kExprEnd,
U32V_1(0), // source size
};
@@ -1040,7 +1037,7 @@ TEST(MemoryWithOOBEmptyDataSegment) {
kMemorySectionCode, // --
U32V_1(4), // section size
ENTRY_COUNT(1), // --
- kResizableMaximumFlag, // --
+ kHasMaximumFlag, // --
1, // initial size
1, // maximum size
kDataSectionCode, // --
@@ -1087,7 +1084,8 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
ModuleWireBytes(buffer.begin(), buffer.end()),
{}, {})
.ToHandleChecked();
- Handle<JSArrayBuffer> memory(instance->memory_buffer(), isolate);
+ Handle<JSArrayBuffer> memory(instance->memory_object()->array_buffer(),
+ isolate);
Handle<WasmMemoryObject> mem_obj(instance->memory_object(), isolate);
void* const old_allocation_base = memory->allocation_base();
size_t const old_allocation_length = memory->allocation_length();
@@ -1106,7 +1104,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
wasm::DetachMemoryBuffer(isolate, memory, free_memory);
CHECK_EQ(16, result);
memory = handle(mem_obj->array_buffer());
- instance->set_memory_buffer(*memory);
+ instance->memory_object()->set_array_buffer(*memory);
// Externalize should make no difference without the JS API as in this case
// the buffer is not detached.
v8::Utils::ToLocal(memory)->Externalize();
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
index 030b7a4288..482ab9e905 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -17,54 +17,51 @@ namespace internal {
namespace wasm {
namespace test_run_wasm_relocation {
-#define FOREACH_TYPE(TEST_BODY) \
- TEST_BODY(int32_t, WASM_I32_ADD) \
- TEST_BODY(int64_t, WASM_I64_ADD) \
- TEST_BODY(float, WASM_F32_ADD) \
- TEST_BODY(double, WASM_F64_ADD)
+WASM_COMPILED_EXEC_TEST(RunPatchWasmContext) {
+ WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ Isolate* isolate = CcTest::i_isolate();
-#define LOAD_SET_GLOBAL_TEST_BODY(C_TYPE, ADD) \
- WASM_EXEC_TEST(WasmRelocateGlobal_##C_TYPE) { \
- WasmRunner<C_TYPE, C_TYPE> r(execution_mode); \
- Isolate* isolate = CcTest::i_isolate(); \
- \
- r.builder().AddGlobal<C_TYPE>(); \
- r.builder().AddGlobal<C_TYPE>(); \
- \
- /* global = global + p0 */ \
- BUILD(r, WASM_SET_GLOBAL(1, ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))), \
- WASM_GET_GLOBAL(0)); \
- CHECK_EQ(1, r.builder().CodeTableLength()); \
- \
- int filter = 1 << RelocInfo::WASM_GLOBAL_REFERENCE; \
- \
- Handle<Code> code = r.builder().GetFunctionCode(0); \
- \
- Address old_start = r.builder().globals_start(); \
- Address new_start = old_start + 1; \
- \
- Address old_addresses[4]; \
- uint32_t address_index = 0U; \
- for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
- old_addresses[address_index] = it.rinfo()->wasm_global_reference(); \
- it.rinfo()->update_wasm_global_reference(isolate, old_start, new_start); \
- ++address_index; \
- } \
- CHECK_LE(address_index, 4U); \
- \
- address_index = 0U; \
- for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
- CHECK_EQ(old_addresses[address_index] + 1, \
- it.rinfo()->wasm_global_reference()); \
- ++address_index; \
- } \
- CHECK_LE(address_index, 4U); \
- }
+ r.builder().AddGlobal<uint32_t>();
+ r.builder().AddGlobal<uint32_t>();
+
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_GET_GLOBAL(0));
+ CHECK_EQ(1, r.builder().CodeTableLength());
+
+ // Run with the old global data.
+ CHECK_EQ(113, r.Call(113));
+
+ WasmContext* old_wasm_context =
+ r.builder().instance_object()->wasm_context()->get();
+ Address old_wasm_context_address =
+ reinterpret_cast<Address>(old_wasm_context);
-FOREACH_TYPE(LOAD_SET_GLOBAL_TEST_BODY)
+ uint32_t new_global_data[3] = {0, 0, 0};
+ WasmContext new_wasm_context;
+ new_wasm_context.globals_start = reinterpret_cast<byte*>(new_global_data);
+
+ {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+
+ // Patch in a new WasmContext that points to the new global data.
+ int filter = 1 << RelocInfo::WASM_CONTEXT_REFERENCE;
+ bool patched = false;
+ Handle<Code> code = r.GetWrapperCode();
+ for (RelocIterator it(*code, filter); !it.done(); it.next()) {
+ CHECK_EQ(old_wasm_context_address, it.rinfo()->wasm_context_reference());
+ it.rinfo()->set_wasm_context_reference(
+ isolate, reinterpret_cast<Address>(&new_wasm_context));
+ patched = true;
+ }
+ CHECK(patched);
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
-#undef FOREACH_TYPE
-#undef LOAD_SET_GLOBAL_TEST_BODY
+ // Run with the new global data.
+ CHECK_EQ(115, r.Call(115));
+ CHECK_EQ(115, new_global_data[0]);
+}
} // namespace test_run_wasm_relocation
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 5c5d74e747..93895d7f3c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -35,7 +35,7 @@ typedef int8_t (*Int8ShiftOp)(int8_t, int);
void RunWasm_##name##_Impl(WasmExecutionMode execution_mode); \
TEST(RunWasm_##name##_compiled) { \
EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kExecuteCompiled); \
+ RunWasm_##name##_Impl(kExecuteTurbofan); \
} \
TEST(RunWasm_##name##_simd_lowered) { \
EXPERIMENTAL_FLAG_SCOPE(simd); \
@@ -47,7 +47,7 @@ typedef int8_t (*Int8ShiftOp)(int8_t, int);
void RunWasm_##name##_Impl(WasmExecutionMode execution_mode); \
TEST(RunWasm_##name##_compiled) { \
EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kExecuteCompiled); \
+ RunWasm_##name##_Impl(kExecuteTurbofan); \
} \
void RunWasm_##name##_Impl(WasmExecutionMode execution_mode)
@@ -1113,8 +1113,6 @@ WASM_SIMD_COMPILED_TEST(I16x8ConvertI8x16) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
void RunI16x8UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
Int16UnOp expected_op) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
@@ -1131,8 +1129,6 @@ void RunI16x8UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
WASM_SIMD_TEST(I16x8Neg) {
RunI16x8UnOpTest(execution_mode, kExprI16x8Neg, Negate);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
@@ -1166,8 +1162,6 @@ WASM_SIMD_COMPILED_TEST(I16x8ConvertI32x4) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void RunI16x8BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
Int16BinOp expected_op) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
@@ -1334,8 +1328,6 @@ void RunI8x16UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
WASM_SIMD_TEST(I8x16Neg) {
RunI8x16UnOpTest(execution_mode, kExprI8x16Neg, Negate);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
@@ -1369,8 +1361,6 @@ WASM_SIMD_COMPILED_TEST(I8x16ConvertI16x8) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void RunI8x16BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
Int8BinOp expected_op) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
@@ -1490,8 +1480,6 @@ WASM_SIMD_TEST(I8x16LtU) {
WASM_SIMD_TEST(I8x16LeU) {
RunI8x16CompareOpTest(execution_mode, kExprI8x16LeU, UnsignedLessEqual);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
@@ -2187,22 +2175,27 @@ const T& GetScalar(T* v, int lane) {
WASM_SIMD_TEST(SimdI32x4GetGlobal) {
WasmRunner<int32_t, int32_t> r(execution_mode);
+ // Pad the globals with a few unused slots to get a non-zero offset.
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
SetVectorByLanes(global, {{0, 1, 2, 3}});
r.AllocateLocal(kWasmI32);
BUILD(
r, WASM_SET_LOCAL(1, WASM_I32V(1)),
WASM_IF(WASM_I32_NE(WASM_I32V(0),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(0))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(4))),
WASM_SET_LOCAL(1, WASM_I32V(0))),
WASM_IF(WASM_I32_NE(WASM_I32V(1),
- WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(0))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(4))),
WASM_SET_LOCAL(1, WASM_I32V(0))),
WASM_IF(WASM_I32_NE(WASM_I32V(2),
- WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(0))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(4))),
WASM_SET_LOCAL(1, WASM_I32V(0))),
WASM_IF(WASM_I32_NE(WASM_I32V(3),
- WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(0))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(4))),
WASM_SET_LOCAL(1, WASM_I32V(0))),
WASM_GET_LOCAL(1));
CHECK_EQ(1, r.Call(0));
@@ -2210,13 +2203,18 @@ WASM_SIMD_TEST(SimdI32x4GetGlobal) {
WASM_SIMD_TEST(SimdI32x4SetGlobal) {
WasmRunner<int32_t, int32_t> r(execution_mode);
+ // Pad the globals with a few unused slots to get a non-zero offset.
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
+ r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
- BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
- WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
+ BUILD(r, WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
+ WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(4),
WASM_I32V(34))),
- WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(0),
+ WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(4),
WASM_I32V(45))),
- WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(0),
+ WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(4),
WASM_I32V(56))),
WASM_I32V(1));
CHECK_EQ(1, r.Call(0));
@@ -2277,14 +2275,15 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_mode);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(4);
-
- BUILD(r, WASM_SIMD_STORE_MEM(WASM_ZERO, WASM_SIMD_LOAD_MEM(WASM_ZERO)),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
+ // Load memory, store it, then reload it and extract the first lane. Use a
+ // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
+ BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(4), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(4))));
FOR_INT32_INPUTS(i) {
int32_t expected = *i;
- r.builder().WriteMemory(&memory[0], expected);
+ r.builder().WriteMemory(&memory[1], expected);
CHECK_EQ(expected, r.Call());
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 3b27c78f60..f928904e9c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -96,7 +96,10 @@ WASM_EXEC_TEST(Int32Add_P_fallthru) {
static void RunInt32AddTest(WasmExecutionMode execution_mode, const byte* code,
size_t size) {
+ TestSignatures sigs;
WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ r.builder().AddSignature(sigs.ii_v());
+ r.builder().AddSignature(sigs.iii_v());
r.Build(code, code + size);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -117,7 +120,7 @@ WASM_EXEC_TEST(Int32Add_P2) {
WASM_EXEC_TEST(Int32Add_block1) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
- WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_BLOCK_X(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
kExprI32Add};
RunInt32AddTest(execution_mode, code, sizeof(code));
}
@@ -125,8 +128,7 @@ WASM_EXEC_TEST(Int32Add_block1) {
WASM_EXEC_TEST(Int32Add_block2) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
- WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- kExprBr, DEPTH_0),
+ WASM_BLOCK_X(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), kExprBr, DEPTH_0),
kExprI32Add};
RunInt32AddTest(execution_mode, code, sizeof(code));
}
@@ -134,9 +136,9 @@ WASM_EXEC_TEST(Int32Add_block2) {
WASM_EXEC_TEST(Int32Add_multi_if) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
- WASM_IF_ELSE_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ WASM_IF_ELSE_X(0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
kExprI32Add};
RunInt32AddTest(execution_mode, code, sizeof(code));
}
@@ -156,54 +158,68 @@ WASM_EXEC_TEST(Float64Add) {
CHECK_EQ(57, r.Call());
}
-void TestInt32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
- int32_t expected, int32_t a, int32_t b) {
- {
- WasmRunner<int32_t> r(execution_mode);
- // K op K
- BUILD(r, WASM_BINOP(opcode, WASM_I32V(a), WASM_I32V(b)));
- CHECK_EQ(expected, r.Call());
+// clang-format messes up the FOR_INT32_INPUTS macros.
+// clang-format off
+template<typename ctype>
+static void TestInt32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ ctype(*expected)(ctype, ctype)) {
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ WasmRunner<ctype> r(execution_mode);
+ // Apply {opcode} on two constants.
+ BUILD(r, WASM_BINOP(opcode, WASM_I32V(*i), WASM_I32V(*j)));
+ CHECK_EQ(expected(*i, *j), r.Call());
+ }
}
{
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
- // a op b
+ WasmRunner<ctype, ctype, ctype> r(execution_mode);
+ // Apply {opcode} on two parameters.
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(expected, r.Call(a, b));
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ CHECK_EQ(expected(*i, *j), r.Call(*i, *j));
+ }
+ }
}
}
-
-WASM_EXEC_TEST(Int32Binops) {
- TestInt32Binop(execution_mode, kExprI32Add, 88888888, 33333333, 55555555);
- TestInt32Binop(execution_mode, kExprI32Sub, -1111111, 7777777, 8888888);
- TestInt32Binop(execution_mode, kExprI32Mul, 65130756, 88734, 734);
- TestInt32Binop(execution_mode, kExprI32DivS, -66, -4777344, 72384);
- TestInt32Binop(execution_mode, kExprI32DivU, 805306368, 0xF0000000, 5);
- TestInt32Binop(execution_mode, kExprI32RemS, -3, -3003, 1000);
- TestInt32Binop(execution_mode, kExprI32RemU, 4, 4004, 1000);
- TestInt32Binop(execution_mode, kExprI32And, 0xEE, 0xFFEE, 0xFF0000FF);
- TestInt32Binop(execution_mode, kExprI32Ior, 0xF0FF00FF, 0xF0F000EE,
- 0x000F0011);
- TestInt32Binop(execution_mode, kExprI32Xor, 0xABCDEF01, 0xABCDEFFF, 0xFE);
- TestInt32Binop(execution_mode, kExprI32Shl, 0xA0000000, 0xA, 28);
- TestInt32Binop(execution_mode, kExprI32ShrU, 0x07000010, 0x70000100, 4);
- TestInt32Binop(execution_mode, kExprI32ShrS, 0xFF000000, 0x80000000, 7);
- TestInt32Binop(execution_mode, kExprI32Ror, 0x01000000, 0x80000000, 7);
- TestInt32Binop(execution_mode, kExprI32Ror, 0x01000000, 0x80000000, 39);
- TestInt32Binop(execution_mode, kExprI32Rol, 0x00000040, 0x80000000, 7);
- TestInt32Binop(execution_mode, kExprI32Rol, 0x00000040, 0x80000000, 39);
- TestInt32Binop(execution_mode, kExprI32Eq, 1, -99, -99);
- TestInt32Binop(execution_mode, kExprI32Ne, 0, -97, -97);
-
- TestInt32Binop(execution_mode, kExprI32LtS, 1, -4, 4);
- TestInt32Binop(execution_mode, kExprI32LeS, 0, -2, -3);
- TestInt32Binop(execution_mode, kExprI32LtU, 1, 0, -6);
- TestInt32Binop(execution_mode, kExprI32LeU, 1, 98978, 0xF0000000);
-
- TestInt32Binop(execution_mode, kExprI32GtS, 1, 4, -4);
- TestInt32Binop(execution_mode, kExprI32GeS, 0, -3, -2);
- TestInt32Binop(execution_mode, kExprI32GtU, 1, -6, 0);
- TestInt32Binop(execution_mode, kExprI32GeU, 1, 0xF0000000, 98978);
-}
+// clang-format on
+
+#define WASM_I32_BINOP_TEST(expr, ctype, expected) \
+ WASM_EXEC_TEST(I32Binop_##expr) { \
+ TestInt32Binop<ctype>(execution_mode, kExprI32##expr, \
+ [](ctype a, ctype b) -> ctype { return expected; }); \
+ }
+
+WASM_I32_BINOP_TEST(Add, int32_t, a + b)
+WASM_I32_BINOP_TEST(Sub, int32_t, a - b)
+WASM_I32_BINOP_TEST(Mul, int32_t, a* b)
+WASM_I32_BINOP_TEST(DivS, int32_t,
+ (a == kMinInt && b == -1) || b == 0
+ ? static_cast<int32_t>(0xdeadbeef)
+ : a / b)
+WASM_I32_BINOP_TEST(DivU, uint32_t, b == 0 ? 0xdeadbeef : a / b)
+WASM_I32_BINOP_TEST(RemS, int32_t, b == 0 ? 0xdeadbeef : b == -1 ? 0 : a % b)
+WASM_I32_BINOP_TEST(RemU, uint32_t, b == 0 ? 0xdeadbeef : a % b)
+WASM_I32_BINOP_TEST(And, int32_t, a& b)
+WASM_I32_BINOP_TEST(Ior, int32_t, a | b)
+WASM_I32_BINOP_TEST(Xor, int32_t, a ^ b)
+WASM_I32_BINOP_TEST(Shl, int32_t, a << (b & 0x1f))
+WASM_I32_BINOP_TEST(ShrU, uint32_t, a >> (b & 0x1f))
+WASM_I32_BINOP_TEST(ShrS, int32_t, a >> (b & 0x1f))
+WASM_I32_BINOP_TEST(Ror, uint32_t, (a >> (b & 0x1f)) | (a << (32 - (b & 0x1f))))
+WASM_I32_BINOP_TEST(Rol, uint32_t, (a << (b & 0x1f)) | (a >> (32 - (b & 0x1f))))
+WASM_I32_BINOP_TEST(Eq, int32_t, a == b)
+WASM_I32_BINOP_TEST(Ne, int32_t, a != b)
+WASM_I32_BINOP_TEST(LtS, int32_t, a < b)
+WASM_I32_BINOP_TEST(LeS, int32_t, a <= b)
+WASM_I32_BINOP_TEST(LtU, uint32_t, a < b)
+WASM_I32_BINOP_TEST(LeU, uint32_t, a <= b)
+WASM_I32_BINOP_TEST(GtS, int32_t, a > b)
+WASM_I32_BINOP_TEST(GeS, int32_t, a >= b)
+WASM_I32_BINOP_TEST(GtU, uint32_t, a > b)
+WASM_I32_BINOP_TEST(GeU, uint32_t, a >= b)
+
+#undef WASM_I32_BINOP_TEST
void TestInt32Unop(WasmExecutionMode execution_mode, WasmOpcode opcode,
int32_t expected, int32_t a) {
@@ -309,41 +325,6 @@ WASM_EXEC_TEST(I32Eqz) {
TestInt32Unop(execution_mode, kExprI32Eqz, 1, 0);
}
-WASM_EXEC_TEST(I32Shl) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
- BUILD(r, WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i) << (*j & 0x1f);
- CHECK_EQ(expected, r.Call(*i, *j));
- }
- }
-}
-
-WASM_EXEC_TEST(I32Shr) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
- BUILD(r, WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i) >> (*j & 0x1f);
- CHECK_EQ(expected, r.Call(*i, *j));
- }
- }
-}
-
-WASM_EXEC_TEST(I32Sar) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
- BUILD(r, WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t expected = (*i) >> (*j & 0x1f);
- CHECK_EQ(expected, r.Call(*i, *j));
- }
- }
-}
WASM_EXEC_TEST(Int32DivS_trap) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
@@ -1865,30 +1846,30 @@ WASM_EXEC_TEST(Unreachable0b) {
CHECK_EQ(7, r.Call(1));
}
-TEST(Build_Wasm_Unreachable1) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable1) {
+ WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_UNREACHABLE);
}
-TEST(Build_Wasm_Unreachable2) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable2) {
+ WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE);
}
-TEST(Build_Wasm_Unreachable3) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable3) {
+ WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE, WASM_UNREACHABLE);
}
-TEST(Build_Wasm_UnreachableIf1) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(Build_Wasm_UnreachableIf1) {
+ WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_UNREACHABLE,
WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_GET_LOCAL(0), WASM_DROP)),
WASM_ZERO);
}
-TEST(Build_Wasm_UnreachableIf2) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(Build_Wasm_UnreachableIf2) {
+ WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_UNREACHABLE,
WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
}
@@ -2929,12 +2910,12 @@ WASM_EXEC_TEST(F32CopySign) {
}
}
-static void CompileCallIndirectMany(ValueType param) {
+static void CompileCallIndirectMany(WasmExecutionMode mode, ValueType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; ++num_params) {
- WasmRunner<void> r(kExecuteCompiled);
+ WasmRunner<void> r(mode);
FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
r.builder().AddSignature(sig);
@@ -2954,11 +2935,17 @@ static void CompileCallIndirectMany(ValueType param) {
}
}
-TEST(Compile_Wasm_CallIndirect_Many_i32) { CompileCallIndirectMany(kWasmI32); }
+WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_i32) {
+ CompileCallIndirectMany(execution_mode, kWasmI32);
+}
-TEST(Compile_Wasm_CallIndirect_Many_f32) { CompileCallIndirectMany(kWasmF32); }
+WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_f32) {
+ CompileCallIndirectMany(execution_mode, kWasmF32);
+}
-TEST(Compile_Wasm_CallIndirect_Many_f64) { CompileCallIndirectMany(kWasmF64); }
+WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_f64) {
+ CompileCallIndirectMany(execution_mode, kWasmF64);
+}
WASM_EXEC_TEST(Int32RemS_dead) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 0e541efbbd..a2c352bb4d 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -24,44 +24,66 @@ namespace wasm {
class MockPlatform final : public TestPlatform {
public:
- MockPlatform() : old_platform_(i::V8::GetCurrentPlatform()) {
+ MockPlatform() : task_runner_(std::make_shared<MockTaskRunner>()) {
// Now that it's completely constructed, make this the current platform.
i::V8::SetPlatformForTesting(this);
}
- virtual ~MockPlatform() {
- // Delete all remaining tasks in the queue.
- while (!tasks_.empty()) {
- Task* task = tasks_.back();
- tasks_.pop_back();
- delete task;
- }
- i::V8::SetPlatformForTesting(old_platform_);
+
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return task_runner_;
+ }
+
+ std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return task_runner_;
}
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- tasks_.push_back(task);
+ task_runner_->PostTask(std::unique_ptr<Task>(task));
}
void CallOnBackgroundThread(v8::Task* task,
ExpectedRuntime expected_runtime) override {
- tasks_.push_back(task);
+ task_runner_->PostTask(std::unique_ptr<Task>(task));
}
bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
- void ExecuteTasks() {
- while (!tasks_.empty()) {
- Task* task = tasks_.back();
- tasks_.pop_back();
- task->Run();
- delete task;
- }
- }
+ void ExecuteTasks() { task_runner_->ExecuteTasks(); }
private:
- // We do not execute tasks concurrently, so we only need one list of tasks.
- std::vector<Task*> tasks_;
- v8::Platform* old_platform_;
+ class MockTaskRunner final : public TaskRunner {
+ public:
+ void PostTask(std::unique_ptr<v8::Task> task) override {
+ tasks_.push_back(std::move(task));
+ }
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ UNREACHABLE();
+ };
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled() override { return false; };
+
+ void ExecuteTasks() {
+ while (!tasks_.empty()) {
+ std::unique_ptr<Task> task = std::move(tasks_.back());
+ tasks_.pop_back();
+ task->Run();
+ }
+ }
+
+ private:
+ // We do not execute tasks concurrently, so we only need one list of tasks.
+ std::vector<std::unique_ptr<v8::Task>> tasks_;
+ };
+
+ std::shared_ptr<MockTaskRunner> task_runner_;
};
namespace {
@@ -813,6 +835,64 @@ STREAM_TEST(TestAbortAfterCompilationError2) {
tester.RunCompilerTasks();
}
+STREAM_TEST(TestOnlyModuleHeader) {
+ StreamTester tester;
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+STREAM_TEST(TestModuleWithZeroFunctions) {
+ StreamTester tester;
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1), // section size
+ U32V_1(0), // type count
+ kFunctionSectionCode, // section code
+ U32V_1(1), // section size
+ U32V_1(0), // functions count
+ kCodeSectionCode, // section code
+ U32V_1(1), // section size
+ U32V_1(0), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+// Test that all bytes arrive before doing any compilation. FinishStream is
+// called immediately.
+STREAM_TEST(TestModuleWithImportedFunction) {
+ StreamTester tester;
+ ZoneBuffer buffer(tester.zone());
+ TestSignatures sigs;
+ WasmModuleBuilder builder(tester.zone());
+ builder.AddImport(ArrayVector("Test"), sigs.i_iii());
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ builder.WriteTo(buffer);
+
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseFulfilled());
+}
#undef STREAM_TEST
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 492ec4670e..22b51bfae5 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -132,9 +132,8 @@ Handle<JSObject> MakeFakeBreakpoint(Isolate* isolate, int position) {
desc.set_value(triggered_fun);
Handle<String> name =
isolate->factory()->InternalizeUtf8String(CStrVector("isTriggered"));
- CHECK(
- JSObject::DefineOwnProperty(isolate, obj, name, &desc, Object::DONT_THROW)
- .FromMaybe(false));
+ CHECK(JSObject::DefineOwnProperty(isolate, obj, name, &desc, kDontThrow)
+ .FromMaybe(false));
return obj;
}
@@ -265,8 +264,8 @@ std::vector<WasmValue> wasmVec(Args... args) {
} // namespace
-TEST(WasmCollectPossibleBreakpoints) {
- WasmRunner<int> runner(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
+ WasmRunner<int> runner(execution_mode);
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
@@ -290,8 +289,8 @@ TEST(WasmCollectPossibleBreakpoints) {
CheckLocationsFail(instance->compiled_module(), {0, 9}, {1, 0});
}
-TEST(WasmSimpleBreak) {
- WasmRunner<int> runner(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
+ WasmRunner<int> runner(execution_mode);
Isolate* isolate = runner.main_isolate();
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3)));
@@ -311,8 +310,8 @@ TEST(WasmSimpleBreak) {
CHECK_EQ(14, result);
}
-TEST(WasmSimpleStepping) {
- WasmRunner<int> runner(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
+ WasmRunner<int> runner(execution_mode);
BUILD(runner, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3)));
Isolate* isolate = runner.main_isolate();
@@ -338,8 +337,8 @@ TEST(WasmSimpleStepping) {
CHECK_EQ(14, result);
}
-TEST(WasmStepInAndOut) {
- WasmRunner<int, int> runner(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
+ WasmRunner<int, int> runner(execution_mode);
WasmFunctionCompiler& f2 = runner.NewFunction<void>();
f2.AllocateLocal(ValueType::kWord32);
@@ -378,8 +377,8 @@ TEST(WasmStepInAndOut) {
.is_null());
}
-TEST(WasmGetLocalsAndStack) {
- WasmRunner<void, int> runner(kExecuteCompiled);
+WASM_COMPILED_EXEC_TEST(WasmGetLocalsAndStack) {
+ WasmRunner<void, int> runner(execution_mode);
runner.AllocateLocal(ValueType::kWord64);
runner.AllocateLocal(ValueType::kFloat32);
runner.AllocateLocal(ValueType::kFloat64);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-codegen.cc b/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
new file mode 100644
index 0000000000..189ef46878
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests effects of (CSP) "unsafe-eval" and "wasm-eval" callback functions.
+//
+// Note: These tests are in a separate test file because the tests dynamically
+// change the isolate in terms of callbacks allow_code_gen_callback and
+// allow_wasm_code_gen_callback.
+
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-objects.h"
+
+#include "test/cctest/cctest.h"
+#include "test/common/wasm/wasm-module-runner.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+// Possible values for callback pointers.
+enum TestValue {
+ kTestUsingNull, // no callback.
+ kTestUsingFalse, // callback returning false.
+  kTestUsingTrue,   // callback returning true.
+};
+
+constexpr int kNumTestValues = 3;
+
+const char* TestValueName[kNumTestValues] = {"null", "false", "true"};
+
+// Defined to simplify iterating over TestValues.
+const TestValue AllTestValues[kNumTestValues] = {
+ kTestUsingNull, kTestUsingFalse, kTestUsingTrue};
+
+// This matrix holds the results of setting allow_code_gen_callback
+// (first index) and allow_wasm_code_gen_callback (second index) using
+// TestValues. The value in the matrix is true if compilation is
+// allowed, and false otherwise.
+const bool ExpectedResults[kNumTestValues][kNumTestValues] = {
+ {true, false, true}, {false, false, true}, {true, false, true}};
+
+bool TrueCallback(Local<v8::Context>, Local<v8::String>) { return true; }
+
+bool FalseCallback(Local<v8::Context>, Local<v8::String>) { return false; }
+
+typedef bool (*CallbackFn)(Local<v8::Context>, Local<v8::String>);
+
+// Defines the Callback to use for the corresponding TestValue.
+CallbackFn Callback[kNumTestValues] = {nullptr, FalseCallback, TrueCallback};
+
+void BuildTrivialModule(Zone* zone, ZoneBuffer* buffer) {
+ WasmModuleBuilder* builder = new (zone) WasmModuleBuilder(zone);
+ builder->WriteTo(*buffer);
+}
+
+bool TestModule(Isolate* isolate,
+ v8::WasmCompiledModule::CallerOwnedBuffer wire_bytes) {
+ HandleScope scope(isolate);
+
+ v8::WasmCompiledModule::CallerOwnedBuffer serialized_module(nullptr, 0);
+ MaybeLocal<v8::WasmCompiledModule> module =
+ v8::WasmCompiledModule::DeserializeOrCompile(
+ reinterpret_cast<v8::Isolate*>(isolate), serialized_module,
+ wire_bytes);
+ return !module.IsEmpty();
+}
+
+} // namespace
+
+TEST(PropertiesOfCodegenCallbacks) {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ ZoneBuffer buffer(&zone);
+ BuildTrivialModule(&zone, &buffer);
+ v8::WasmCompiledModule::CallerOwnedBuffer wire_bytes = {buffer.begin(),
+ buffer.size()};
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ for (TestValue codegen : AllTestValues) {
+ for (TestValue wasm_codegen : AllTestValues) {
+ fprintf(stderr, "Test codegen = %s, wasm_codegen = %s\n",
+ TestValueName[codegen], TestValueName[wasm_codegen]);
+ isolate->set_allow_code_gen_callback(Callback[codegen]);
+ isolate->set_allow_wasm_code_gen_callback(Callback[wasm_codegen]);
+ bool found = TestModule(isolate, wire_bytes);
+ bool expected = ExpectedResults[codegen][wasm_codegen];
+ CHECK_EQ(expected, found);
+ CcTest::CollectAllAvailableGarbage();
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index 4f7b66ead4..818433bd57 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -47,6 +47,7 @@ class ArgPassingHelper {
runner.Build(outer_code.data(), outer_code.data() + outer_code.size());
int funcs_to_redict[] = {static_cast<int>(inner_compiler.function_index())};
+ runner.builder().Link();
WasmDebugInfo::RedirectToInterpreter(debug_info_,
ArrayVector(funcs_to_redict));
main_fun_wrapper_ = runner.builder().WrapCode(runner.function_index());
@@ -90,7 +91,7 @@ static ArgPassingHelper<T> GetHelper(
// Pass int32_t, return int32_t.
TEST(TestArgumentPassing_int32) {
- WasmRunner<int32_t, int32_t> runner(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> runner(kExecuteTurbofan);
WasmFunctionCompiler& f2 = runner.NewFunction<int32_t, int32_t>();
auto helper = GetHelper(
@@ -101,13 +102,12 @@ TEST(TestArgumentPassing_int32) {
WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())},
[](int32_t a) { return 2 * a + 1; });
- std::vector<int32_t> test_values = compiler::ValueHelper::int32_vector();
- for (int32_t v : test_values) helper.CheckCall(v);
+ FOR_INT32_INPUTS(v) { helper.CheckCall(*v); }
}
// Pass int64_t, return double.
TEST(TestArgumentPassing_double_int64) {
- WasmRunner<double, int32_t, int32_t> runner(kExecuteCompiled);
+ WasmRunner<double, int32_t, int32_t> runner(kExecuteTurbofan);
WasmFunctionCompiler& f2 = runner.NewFunction<double, int64_t>();
auto helper = GetHelper(
@@ -125,17 +125,13 @@ TEST(TestArgumentPassing_double_int64) {
return static_cast<double>(a64 | b64);
});
- std::vector<int32_t> test_values_i32 = compiler::ValueHelper::int32_vector();
- for (int32_t v1 : test_values_i32) {
- for (int32_t v2 : test_values_i32) {
- helper.CheckCall(v1, v2);
- }
+ FOR_INT32_INPUTS(v1) {
+ FOR_INT32_INPUTS(v2) { helper.CheckCall(*v1, *v2); }
}
- std::vector<int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
- for (int64_t v : test_values_i64) {
- int32_t v1 = static_cast<int32_t>(v);
- int32_t v2 = static_cast<int32_t>(v >> 32);
+ FOR_INT64_INPUTS(v) {
+ int32_t v1 = static_cast<int32_t>(*v);
+ int32_t v2 = static_cast<int32_t>(*v >> 32);
helper.CheckCall(v1, v2);
helper.CheckCall(v2, v1);
}
@@ -144,7 +140,7 @@ TEST(TestArgumentPassing_double_int64) {
// Pass double, return int64_t.
TEST(TestArgumentPassing_int64_double) {
// Outer function still returns double.
- WasmRunner<double, double> runner(kExecuteCompiled);
+ WasmRunner<double, double> runner(kExecuteTurbofan);
WasmFunctionCompiler& f2 = runner.NewFunction<int64_t, double>();
auto helper = GetHelper(
@@ -163,7 +159,7 @@ TEST(TestArgumentPassing_int64_double) {
// Pass float, return double.
TEST(TestArgumentPassing_float_double) {
- WasmRunner<double, float> runner(kExecuteCompiled);
+ WasmRunner<double, float> runner(kExecuteTurbofan);
WasmFunctionCompiler& f2 = runner.NewFunction<double, float>();
auto helper = GetHelper(
@@ -176,13 +172,12 @@ TEST(TestArgumentPassing_float_double) {
WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())},
[](float f) { return 2. * static_cast<double>(f) + 1.; });
- std::vector<float> test_values = compiler::ValueHelper::float32_vector();
- for (float f : test_values) helper.CheckCall(f);
+ FOR_FLOAT32_INPUTS(f) { helper.CheckCall(*f); }
}
// Pass two doubles, return double.
TEST(TestArgumentPassing_double_double) {
- WasmRunner<double, double, double> runner(kExecuteCompiled);
+ WasmRunner<double, double, double> runner(kExecuteTurbofan);
WasmFunctionCompiler& f2 = runner.NewFunction<double, double, double>();
auto helper = GetHelper(runner, f2,
@@ -193,11 +188,8 @@ TEST(TestArgumentPassing_double_double) {
WASM_CALL_FUNCTION0(f2.function_index())},
[](double a, double b) { return a + b; });
- std::vector<double> test_values = compiler::ValueHelper::float64_vector();
- for (double d1 : test_values) {
- for (double d2 : test_values) {
- helper.CheckCall(d1, d2);
- }
+ FOR_FLOAT64_INPUTS(d1) {
+ FOR_FLOAT64_INPUTS(d2) { helper.CheckCall(*d1, *d2); }
}
}
@@ -205,7 +197,7 @@ TEST(TestArgumentPassing_double_double) {
TEST(TestArgumentPassing_AllTypes) {
// The second and third argument will be combined to an i64.
WasmRunner<double, int32_t, int32_t, int32_t, float, double> runner(
- kExecuteCompiled);
+ kExecuteTurbofan);
WasmFunctionCompiler& f2 =
runner.NewFunction<double, int32_t, int64_t, float, double>();
@@ -242,10 +234,11 @@ TEST(TestArgumentPassing_AllTypes) {
helper.CheckCall(a, b1, b0, c, d);
};
- std::vector<int32_t> test_values_i32 = compiler::ValueHelper::int32_vector();
- std::vector<int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
- std::vector<float> test_values_f32 = compiler::ValueHelper::float32_vector();
- std::vector<double> test_values_f64 = compiler::ValueHelper::float64_vector();
+ Vector<const int32_t> test_values_i32 = compiler::ValueHelper::int32_vector();
+ Vector<const int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
+ Vector<const float> test_values_f32 = compiler::ValueHelper::float32_vector();
+ Vector<const double> test_values_f64 =
+ compiler::ValueHelper::float64_vector();
size_t max_len =
std::max(std::max(test_values_i32.size(), test_values_i64.size()),
std::max(test_values_f32.size(), test_values_f64.size()));
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 2a489b58b6..b775e948b2 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -101,8 +101,8 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
} // namespace
// Call from JS to wasm to JS and throw an Error from JS.
-TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
- WasmRunner<void> r(kExecuteCompiled);
+WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
+ WasmRunner<void> r(execution_mode);
TestSignatures sigs;
Handle<FixedArray> js_imports_table =
@@ -150,13 +150,13 @@ TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
}
// Trigger a trap in wasm, stack should be JS -> wasm -> wasm.
-TEST(CollectDetailedWasmStack_WasmError) {
+WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
for (int pos_shift = 0; pos_shift < 3; ++pos_shift) {
// Test a position with 1, 2 or 3 bytes needed to represent it.
int unreachable_pos = 1 << (8 * pos_shift);
TestSignatures sigs;
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<int> r(kExecuteCompiled, "main",
+ WasmRunner<int> r(execution_mode, "main",
compiler::kRuntimeExceptionSupport);
std::vector<byte> code(unreachable_pos + 1, kExprNop);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 1b674ab60c..33090cfb2a 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -22,13 +22,14 @@ TestingModuleBuilder::TestingModuleBuilder(
mem_start_(nullptr),
mem_size_(0),
interpreter_(nullptr),
+ execution_mode_(mode),
runtime_exception_support_(exception_support),
lower_simd_(mode == kExecuteSimdLowered) {
WasmJs::Install(isolate_, true);
test_module_.globals_size = kMaxGlobalsSize;
memset(globals_data_, 0, sizeof(globals_data_));
instance_object_ = InitInstanceObject();
- if (mode == kExecuteInterpreted) {
+ if (mode == kExecuteInterpreter) {
interpreter_ = WasmDebugInfo::SetupForTesting(instance_object_);
}
}
@@ -37,7 +38,6 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
CHECK(!test_module_.has_memory);
CHECK_NULL(mem_start_);
CHECK_EQ(0, mem_size_);
- DCHECK(!instance_object_->has_memory_buffer());
DCHECK(!instance_object_->has_memory_object());
test_module_.has_memory = true;
const bool enable_guard_regions =
@@ -47,15 +47,11 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
Handle<JSArrayBuffer> new_buffer =
wasm::NewArrayBuffer(isolate_, alloc_size, enable_guard_regions);
CHECK(!new_buffer.is_null());
- instance_object_->set_memory_buffer(*new_buffer);
mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
mem_size_ = size;
CHECK(size == 0 || mem_start_);
memset(mem_start_, 0, size);
- if (interpreter_) {
- interpreter_->UpdateMemory(mem_start_, mem_size_);
- }
// Create the WasmMemoryObject.
Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
isolate_, new_buffer,
@@ -65,19 +61,20 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
// TODO(wasm): Delete the following two lines when test-run-wasm will use a
// multiple of kPageSize as memory size. At the moment, the effect of these
// two lines is used to shrink the memory for testing purposes.
- instance_object_->wasm_context()->mem_start = mem_start_;
- instance_object_->wasm_context()->mem_size = mem_size_;
+ instance_object_->wasm_context()->get()->SetRawMemory(mem_start_, mem_size_);
return mem_start_;
}
-uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, Handle<Code> code,
- const char* name) {
+uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name) {
if (test_module_.functions.size() == 0) {
// TODO(titzer): Reserving space here to avoid the underlying WasmFunction
// structs from moving.
test_module_.functions.reserve(kMaxFunctions);
}
uint32_t index = static_cast<uint32_t>(test_module_.functions.size());
+ if (FLAG_wasm_jit_to_native) {
+ native_module_->ResizeCodeTableForTest(index);
+ }
test_module_.functions.push_back(
{sig, index, 0, {0, 0}, {0, 0}, false, false});
if (name) {
@@ -85,7 +82,7 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, Handle<Code> code,
test_module_.functions.back().name = {
AddBytes(name_vec), static_cast<uint32_t>(name_vec.length())};
}
- function_code_.push_back(code);
+ function_code_.push_back(Handle<Code>::null());
if (interpreter_) {
interpreter_->AddFunctionForTesting(&test_module_.functions.back());
}
@@ -97,17 +94,30 @@ uint32_t TestingModuleBuilder::AddJsFunction(
FunctionSig* sig, const char* source, Handle<FixedArray> js_imports_table) {
Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
- uint32_t index = AddFunction(sig, Handle<Code>::null(), nullptr);
+ uint32_t index = AddFunction(sig, nullptr);
js_imports_table->set(0, *isolate_->native_context());
- Handle<Code> code = compiler::CompileWasmToJSWrapper(
- isolate_, jsfunc, sig, index, test_module_.origin(), js_imports_table);
- function_code_[index] = code;
+ if (FLAG_wasm_jit_to_native) {
+ native_module_->ResizeCodeTableForTest(index);
+ Handle<Code> wrapper = compiler::CompileWasmToJSWrapper(
+ isolate_, jsfunc, sig, index, test_module_.origin(), js_imports_table);
+ native_module_->AddCodeCopy(wrapper, wasm::WasmCode::WasmToJsWrapper,
+ index);
+ } else {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
+ Handle<Code> code = compiler::CompileWasmToJSWrapper(
+ isolate_, jsfunc, sig, index, test_module_.origin(), js_imports_table);
+ function_code_[index] = code;
+ }
return index;
}
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
// Wrap the code so it can be called as a JS function.
- Handle<Code> code = function_code_[index];
+ Link();
+ WasmCodeWrapper code = FLAG_wasm_jit_to_native
+ ? WasmCodeWrapper(native_module_->GetCode(index))
+ : WasmCodeWrapper(function_code_[index]);
byte* context_address =
test_module_.has_memory
? reinterpret_cast<byte*>(instance_object_->wasm_context())
@@ -143,7 +153,6 @@ void TestingModuleBuilder::AddIndirectFunctionTable(uint16_t* function_indexes,
table.has_maximum_size = true;
for (uint32_t i = 0; i < table_size; ++i) {
table.values.push_back(function_indexes[i]);
- table.map.FindOrInsert(test_module_.functions[function_indexes[i]].sig);
}
function_tables_.push_back(
@@ -168,8 +177,18 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
int table_size = static_cast<int>(table.values.size());
for (int j = 0; j < table_size; j++) {
WasmFunction& function = test_module_.functions[table.values[j]];
- signature_table->set(j, Smi::FromInt(table.map.Find(function.sig)));
- function_table->set(j, *function_code_[function.func_index]);
+ signature_table->set(
+ j, Smi::FromInt(test_module_.signature_map.Find(function.sig)));
+ if (FLAG_wasm_jit_to_native) {
+ Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
+ native_module_->GetCode(function.func_index)
+ ->instructions()
+ .start(),
+ TENURED);
+ function_table->set(j, *foreign_holder);
+ } else {
+ function_table->set(j, *function_code_[function.func_index]);
+ }
}
}
}
@@ -192,20 +211,8 @@ uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
}
compiler::ModuleEnv TestingModuleBuilder::CreateModuleEnv() {
- std::vector<SignatureMap*> signature_maps;
- for (size_t i = 0; i < test_module_.function_tables.size(); i++) {
- auto& function_table = test_module_.function_tables[i];
- signature_maps.push_back(&function_table.map);
- }
- return {
- &test_module_,
- function_tables_,
- signature_tables_,
- signature_maps,
- function_code_,
- Handle<Code>::null(),
- reinterpret_cast<uintptr_t>(globals_data_),
- };
+ return {&test_module_, function_tables_, signature_tables_, function_code_,
+ Handle<Code>::null()};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
@@ -235,19 +242,23 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
Handle<FixedArray> code_table = isolate_->factory()->NewFixedArray(0);
Handle<FixedArray> export_wrappers = isolate_->factory()->NewFixedArray(0);
Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate_, shared_module_data, code_table, export_wrappers,
- function_tables_, signature_tables_);
+ isolate_, test_module_ptr_, code_table, export_wrappers, function_tables_,
+ signature_tables_);
+ compiled_module->OnWasmModuleDecodingComplete(shared_module_data);
// This method is called when we initialize TestEnvironment. We don't
// have a memory yet, so we won't create it here. We'll update the
// interpreter when we get a memory. We do have globals, though.
- WasmCompiledModule::recreate_globals_start(
- compiled_module, isolate_->factory(),
- reinterpret_cast<size_t>(globals_data_));
+ native_module_ = compiled_module->GetNativeModule();
+
Handle<FixedArray> weak_exported = isolate_->factory()->NewFixedArray(0);
compiled_module->set_weak_exported_functions(weak_exported);
DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
script->set_wasm_compiled_module(*compiled_module);
- return WasmInstanceObject::New(isolate_, compiled_module);
+ auto instance = WasmInstanceObject::New(isolate_, compiled_module);
+ instance->wasm_context()->get()->globals_start = globals_data_;
+ Handle<WeakCell> weak_instance = isolate()->factory()->NewWeakCell(instance);
+ compiled_module->set_weak_owning_instance(weak_instance);
+ return instance;
}
void TestBuildingGraph(
@@ -371,10 +382,10 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
r.LowerGraph();
}
- CompilationInfo info(ArrayVector("testing"), isolate, graph()->zone(),
- Code::STUB);
- code_ = compiler::Pipeline::GenerateCodeForTesting(&info, descriptor,
- graph(), nullptr);
+ CompilationInfo info(ArrayVector("testing"), graph()->zone(),
+ Code::C_WASM_ENTRY);
+ code_ = compiler::Pipeline::GenerateCodeForTesting(
+ &info, isolate, descriptor, graph(), nullptr);
CHECK(!code_.is_null());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
@@ -413,6 +424,10 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
Handle<WasmCompiledModule> compiled_module(
builder_->instance_object()->compiled_module(), isolate());
+ NativeModule* native_module = compiled_module->GetNativeModule();
+ if (FLAG_wasm_jit_to_native) {
+ native_module->ResizeCodeTableForTest(function_->func_index);
+ }
Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(),
isolate());
@@ -428,43 +443,57 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
FunctionBody func_body{function_->sig, function_->code.offset(),
func_wire_bytes.start(), func_wire_bytes.end()};
+ compiler::WasmCompilationUnit::CompilationMode comp_mode =
+ builder_->execution_mode() == WasmExecutionMode::kExecuteLiftoff
+ ? compiler::WasmCompilationUnit::CompilationMode::kLiftoff
+ : compiler::WasmCompilationUnit::CompilationMode::kTurbofan;
compiler::WasmCompilationUnit unit(
- isolate(), &module_env, func_body, func_name, function_->func_index,
- CEntryStub(isolate(), 1).GetCode(), isolate()->counters(),
- builder_->runtime_exception_support(), builder_->lower_simd());
+ isolate(), &module_env, native_module, func_body, func_name,
+ function_->func_index, CEntryStub(isolate(), 1).GetCode(), comp_mode,
+ isolate()->counters(), builder_->runtime_exception_support(),
+ builder_->lower_simd());
unit.ExecuteCompilation();
- Handle<Code> code = unit.FinishCompilation(&thrower).ToHandleChecked();
+ WasmCodeWrapper code_wrapper = unit.FinishCompilation(&thrower);
CHECK(!thrower.error());
-
- // Manually add the deoptimization info that would otherwise be added
- // during instantiation. Deopt data holds <WeakCell<wasm_instance>,
- // func_index>.
- DCHECK_EQ(0, code->deoptimization_data()->length());
- Handle<FixedArray> deopt_data =
- isolate()->factory()->NewFixedArray(2, TENURED);
- Handle<Object> weak_instance =
- isolate()->factory()->NewWeakCell(builder_->instance_object());
- deopt_data->set(0, *weak_instance);
- deopt_data->set(1, Smi::FromInt(static_cast<int>(function_index())));
- code->set_deoptimization_data(*deopt_data);
-
- // Build the TurboFan graph.
- builder_->SetFunctionCode(function_index(), code);
-
- // Add to code table.
- Handle<FixedArray> code_table = compiled_module->code_table();
- if (static_cast<int>(function_index()) >= code_table->length()) {
- Handle<FixedArray> new_arr = isolate()->factory()->NewFixedArray(
- static_cast<int>(function_index()) + 1);
- code_table->CopyTo(0, *new_arr, 0, code_table->length());
- code_table = new_arr;
- compiled_module->ReplaceCodeTableForTesting(code_table);
- }
- DCHECK(code_table->get(static_cast<int>(function_index()))
- ->IsUndefined(isolate()));
- code_table->set(static_cast<int>(function_index()), *code);
- if (trap_handler::UseTrapHandler()) {
- UnpackAndRegisterProtectedInstructions(isolate(), code_table);
+ if (!FLAG_wasm_jit_to_native) {
+ Handle<Code> code = code_wrapper.GetCode();
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate()->heap());
+
+ // Manually add the deoptimization info that would otherwise be added
+ // during instantiation. Deopt data holds <WeakCell<wasm_instance>,
+ // func_index>.
+ DCHECK_EQ(0, code->deoptimization_data()->length());
+ Handle<FixedArray> deopt_data =
+ isolate()->factory()->NewFixedArray(2, TENURED);
+ Handle<Object> weak_instance =
+ isolate()->factory()->NewWeakCell(builder_->instance_object());
+ deopt_data->set(0, *weak_instance);
+ deopt_data->set(1, Smi::FromInt(static_cast<int>(function_index())));
+ code->set_deoptimization_data(*deopt_data);
+
+ // Build the TurboFan graph.
+ builder_->SetFunctionCode(function_index(), code);
+
+ // Add to code table.
+ Handle<FixedArray> code_table = compiled_module->code_table();
+ if (static_cast<int>(function_index()) >= code_table->length()) {
+ Handle<FixedArray> new_arr = isolate()->factory()->NewFixedArray(
+ static_cast<int>(function_index()) + 1);
+ code_table->CopyTo(0, *new_arr, 0, code_table->length());
+ code_table = new_arr;
+ compiled_module->ReplaceCodeTableForTesting(code_table);
+ }
+ DCHECK(code_table->get(static_cast<int>(function_index()))
+ ->IsUndefined(isolate()));
+ code_table->set(static_cast<int>(function_index()), *code);
+ if (trap_handler::UseTrapHandler()) {
+ UnpackAndRegisterProtectedInstructionsGC(isolate(), code_table);
+ }
+ } else {
+ if (trap_handler::UseTrapHandler()) {
+ UnpackAndRegisterProtectedInstructions(isolate(), native_module);
+ }
}
}
@@ -481,17 +510,20 @@ WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
source_position_table_(this->graph()),
interpreter_(builder->interpreter()) {
// Get a new function from the testing module.
- int index = builder->AddFunction(sig, Handle<Code>::null(), name);
+ int index = builder->AddFunction(sig, name);
function_ = builder_->GetFunctionAt(index);
}
WasmFunctionCompiler::~WasmFunctionCompiler() {
- if (trap_handler::UseTrapHandler() &&
- !builder_->GetFunctionCode(function_index()).is_null()) {
- const int handler_index = builder_->GetFunctionCode(function_index())
- ->trap_handler_index()
- ->value();
- trap_handler::ReleaseHandlerData(handler_index);
+ if (!FLAG_wasm_jit_to_native) {
+ if (trap_handler::UseTrapHandler() &&
+ !builder_->GetFunctionCode(function_index()).is_null()) {
+ const int handler_index = builder_->GetFunctionCode(function_index())
+ .GetCode()
+ ->trap_handler_index()
+ ->value();
+ trap_handler::ReleaseHandlerData(handler_index);
+ }
}
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 562e3b12ce..f46d1e3d61 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -48,8 +48,10 @@ constexpr uint32_t kMaxFunctions = 10;
constexpr uint32_t kMaxGlobalsSize = 128;
enum WasmExecutionMode {
- kExecuteInterpreted,
- kExecuteCompiled,
+ kExecuteInterpreter,
+ kExecuteTurbofan,
+ kExecuteLiftoff,
+ // TODO(bug:7028): Introduce another enum for simd lowering.
kExecuteSimdLowered
};
@@ -75,7 +77,7 @@ using compiler::Node;
r.Build(code, code + arraysize(code)); \
} while (false)
-// A buildable ModuleEnv. Globals are pre-set, however, memory and code may be
+// A Wasm module builder. Globals are pre-set, however, memory and code may be
// progressively added by a test. In turn, we piecemeal update the runtime
// objects, i.e. {WasmInstanceObject}, {WasmCompiledModule} and, if necessary,
// the interpreter.
@@ -88,7 +90,13 @@ class TestingModuleBuilder {
byte* AddMemory(uint32_t size);
- size_t CodeTableLength() const { return function_code_.size(); }
+ size_t CodeTableLength() const {
+ if (FLAG_wasm_jit_to_native) {
+ return native_module_->FunctionCount();
+ } else {
+ return function_code_.size();
+ }
+ }
template <typename T>
T* AddMemoryElems(uint32_t count) {
@@ -104,7 +112,11 @@ class TestingModuleBuilder {
}
byte AddSignature(FunctionSig* sig) {
+ DCHECK_EQ(test_module_.signatures.size(),
+ test_module_.signature_ids.size());
test_module_.signatures.push_back(sig);
+ auto canonical_sig_num = test_module_.signature_map.FindOrInsert(sig);
+ test_module_.signature_ids.push_back(canonical_sig_num);
size_t size = test_module_.signatures.size();
CHECK_GT(127, size);
return static_cast<byte>(size - 1);
@@ -167,7 +179,7 @@ class TestingModuleBuilder {
void SetHasSharedMemory() { test_module_.has_shared_memory = true; }
- uint32_t AddFunction(FunctionSig* sig, Handle<Code> code, const char* name);
+ uint32_t AddFunction(FunctionSig* sig, const char* name);
uint32_t AddJsFunction(FunctionSig* sig, const char* source,
Handle<FixedArray> js_imports_table);
@@ -194,14 +206,29 @@ class TestingModuleBuilder {
bool lower_simd() { return lower_simd_; }
Isolate* isolate() { return isolate_; }
Handle<WasmInstanceObject> instance_object() { return instance_object_; }
- Handle<Code> GetFunctionCode(int index) { return function_code_[index]; }
+ WasmCodeWrapper GetFunctionCode(uint32_t index) {
+ if (FLAG_wasm_jit_to_native) {
+ return WasmCodeWrapper(native_module_->GetCode(index));
+ } else {
+ return WasmCodeWrapper(function_code_[index]);
+ }
+ }
void SetFunctionCode(int index, Handle<Code> code) {
function_code_[index] = code;
}
Address globals_start() { return reinterpret_cast<Address>(globals_data_); }
+ void Link() {
+ if (!FLAG_wasm_jit_to_native) return;
+ if (!linked_) {
+ native_module_->LinkAll();
+ linked_ = true;
+ }
+ }
compiler::ModuleEnv CreateModuleEnv();
+ WasmExecutionMode execution_mode() const { return execution_mode_; }
+
compiler::RuntimeExceptionSupport runtime_exception_support() const {
return runtime_exception_support_;
}
@@ -216,9 +243,12 @@ class TestingModuleBuilder {
std::vector<Handle<Code>> function_code_;
std::vector<GlobalHandleAddress> function_tables_;
std::vector<GlobalHandleAddress> signature_tables_;
- V8_ALIGNED(8) byte globals_data_[kMaxGlobalsSize];
+ V8_ALIGNED(16) byte globals_data_[kMaxGlobalsSize];
WasmInterpreter* interpreter_;
+ WasmExecutionMode execution_mode_;
Handle<WasmInstanceObject> instance_object_;
+ NativeModule* native_module_;
+ bool linked_ = false;
compiler::RuntimeExceptionSupport runtime_exception_support_;
bool lower_simd_;
@@ -249,9 +279,21 @@ class WasmFunctionWrapper : private compiler::GraphAndBuilders {
Init(descriptor, MachineTypeForC<ReturnType>(), param_vec);
}
- void SetInnerCode(Handle<Code> code_handle) {
- compiler::NodeProperties::ChangeOp(inner_code_node_,
- common()->HeapConstant(code_handle));
+ void SetInnerCode(WasmCodeWrapper code) {
+ if (FLAG_wasm_jit_to_native) {
+ intptr_t address = reinterpret_cast<intptr_t>(
+ code.GetWasmCode()->instructions().start());
+ compiler::NodeProperties::ChangeOp(
+ inner_code_node_,
+ kPointerSize == 8
+ ? common()->RelocatableInt64Constant(address,
+ RelocInfo::WASM_CALL)
+ : common()->RelocatableInt32Constant(static_cast<int>(address),
+ RelocInfo::WASM_CALL));
+ } else {
+ compiler::NodeProperties::ChangeOp(
+ inner_code_node_, common()->HeapConstant(code.GetCode()));
+ }
}
const compiler::Operator* IntPtrConstant(intptr_t value) {
@@ -260,9 +302,13 @@ class WasmFunctionWrapper : private compiler::GraphAndBuilders {
: common()->Int64Constant(static_cast<int64_t>(value));
}
- void SetContextAddress(Address value) {
- compiler::NodeProperties::ChangeOp(
- context_address_, IntPtrConstant(reinterpret_cast<uintptr_t>(value)));
+ void SetContextAddress(uintptr_t value) {
+ auto rmode = RelocInfo::WASM_CONTEXT_REFERENCE;
+ auto op = kPointerSize == 8 ? common()->RelocatableInt64Constant(
+ static_cast<int64_t>(value), rmode)
+ : common()->RelocatableInt32Constant(
+ static_cast<int32_t>(value), rmode);
+ compiler::NodeProperties::ChangeOp(context_address_, op);
}
Handle<Code> GetWrapperCode();
@@ -428,13 +474,13 @@ class WasmRunner : public WasmRunnerBase {
set_trap_callback_for_testing(trap_callback);
wrapper_.SetInnerCode(builder_.GetFunctionCode(0));
- if (builder().instance_object()->has_memory_object()) {
- wrapper_.SetContextAddress(reinterpret_cast<Address>(
- builder().instance_object()->wasm_context()));
- }
+ WasmContext* wasm_context =
+ builder().instance_object()->wasm_context()->get();
+ wrapper_.SetContextAddress(reinterpret_cast<uintptr_t>(wasm_context));
+ builder().Link();
+ Handle<Code> wrapper_code = wrapper_.GetWrapperCode();
compiler::CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
- wrapper_.GetWrapperCode(),
- wrapper_.signature());
+ wrapper_code, wrapper_.signature());
int32_t result = runner.Call(static_cast<void*>(&p)...,
static_cast<void*>(&return_value));
CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
@@ -463,28 +509,37 @@ class WasmRunner : public WasmRunnerBase {
return ReturnType{0};
}
}
+
+ Handle<Code> GetWrapperCode() { return wrapper_.GetWrapperCode(); }
};
// A macro to define tests that run in different engine configurations.
#define WASM_EXEC_TEST(name) \
void RunWasm_##name(WasmExecutionMode execution_mode); \
- TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); } \
- TEST(RunWasmInterpreted_##name) { RunWasm_##name(kExecuteInterpreted); } \
+ TEST(RunWasmTurbofan_##name) { RunWasm_##name(kExecuteTurbofan); } \
+ TEST(RunWasmLiftoff_##name) { RunWasm_##name(kExecuteLiftoff); } \
+ TEST(RunWasmInterpreter_##name) { RunWasm_##name(kExecuteInterpreter); } \
+ void RunWasm_##name(WasmExecutionMode execution_mode)
+
+#define WASM_COMPILED_EXEC_TEST(name) \
+ void RunWasm_##name(WasmExecutionMode execution_mode); \
+ TEST(RunWasmTurbofan_##name) { RunWasm_##name(kExecuteTurbofan); } \
+ TEST(RunWasmLiftoff_##name) { RunWasm_##name(kExecuteLiftoff); } \
void RunWasm_##name(WasmExecutionMode execution_mode)
#define WASM_EXEC_TEST_WITH_TRAP(name) \
void RunWasm_##name(WasmExecutionMode execution_mode); \
- TEST(RunWasmCompiled_##name) { \
- if (trap_handler::UseTrapHandler()) { \
- return; \
- } \
- RunWasm_##name(kExecuteCompiled); \
+ TEST(RunWasmTurbofan_##name) { \
+ if (trap_handler::UseTrapHandler()) return; \
+ RunWasm_##name(kExecuteTurbofan); \
+ } \
+ TEST(RunWasmLiftoff_##name) { \
+ if (trap_handler::UseTrapHandler()) return; \
+ RunWasm_##name(kExecuteLiftoff); \
} \
- TEST(RunWasmInterpreted_##name) { \
- if (trap_handler::UseTrapHandler()) { \
- return; \
- } \
- RunWasm_##name(kExecuteInterpreted); \
+ TEST(RunWasmInterpreter_##name) { \
+ if (trap_handler::UseTrapHandler()) return; \
+ RunWasm_##name(kExecuteInterpreter); \
} \
void RunWasm_##name(WasmExecutionMode execution_mode)
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index c0bc7a933f..9a0a3eac49 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -38,7 +38,9 @@ class TestSignatures {
sig_v_i(0, 1, kIntTypes4),
sig_v_ii(0, 2, kIntTypes4),
sig_v_iii(0, 3, kIntTypes4),
- sig_s_i(1, 1, kSimd128IntTypes4) {
+ sig_s_i(1, 1, kSimd128IntTypes4),
+ sig_ii_v(2, 0, kIntTypes4),
+ sig_iii_v(3, 0, kIntTypes4) {
// I used C++ and you won't believe what happened next....
for (int i = 0; i < 4; i++) kIntTypes4[i] = kWasmI32;
for (int i = 0; i < 4; i++) kLongTypes4[i] = kWasmI64;
@@ -80,6 +82,9 @@ class TestSignatures {
FunctionSig* v_iii() { return &sig_v_iii; }
FunctionSig* s_i() { return &sig_s_i; }
+ FunctionSig* ii_v() { return &sig_ii_v; }
+ FunctionSig* iii_v() { return &sig_iii_v; }
+
FunctionSig* many(Zone* zone, ValueType ret, ValueType param, int count) {
FunctionSig::Builder builder(zone, ret == kWasmStmt ? 0 : 1, count);
if (ret != kWasmStmt) builder.AddReturn(ret);
@@ -124,6 +129,9 @@ class TestSignatures {
FunctionSig sig_v_ii;
FunctionSig sig_v_iii;
FunctionSig sig_s_i;
+
+ FunctionSig sig_ii_v;
+ FunctionSig sig_iii_v;
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 2d15bbc815..40718e79aa 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -70,21 +70,17 @@
#define ARITY_2 2
#define WASM_BLOCK(...) kExprBlock, kLocalVoid, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_I(...) kExprBlock, kLocalI32, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_L(...) kExprBlock, kLocalI64, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_F(...) kExprBlock, kLocalF32, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_D(...) kExprBlock, kLocalF64, __VA_ARGS__, kExprEnd
#define WASM_BLOCK_T(t, ...) \
kExprBlock, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), \
__VA_ARGS__, kExprEnd
-#define WASM_BLOCK_TT(t1, t2, ...) \
- kExprBlock, kMultivalBlock, 0, \
- static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t1)), \
- static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t2)), __VA_ARGS__, \
- kExprEnd
-
-#define WASM_BLOCK_I(...) kExprBlock, kLocalI32, __VA_ARGS__, kExprEnd
-#define WASM_BLOCK_L(...) kExprBlock, kLocalI64, __VA_ARGS__, kExprEnd
-#define WASM_BLOCK_F(...) kExprBlock, kLocalF32, __VA_ARGS__, kExprEnd
-#define WASM_BLOCK_D(...) kExprBlock, kLocalF64, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_X(index, ...) \
+ kExprBlock, static_cast<byte>(index), __VA_ARGS__, kExprEnd
#define WASM_INFINITE_LOOP kExprLoop, kLocalVoid, kExprBr, DEPTH_0, kExprEnd
@@ -94,20 +90,24 @@
#define WASM_LOOP_F(...) kExprLoop, kLocalF32, __VA_ARGS__, kExprEnd
#define WASM_LOOP_D(...) kExprLoop, kLocalF64, __VA_ARGS__, kExprEnd
-#define WASM_IF(cond, tstmt) cond, kExprIf, kLocalVoid, tstmt, kExprEnd
+#define WASM_LOOP_T(t, ...) \
+ kExprLoop, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), \
+ __VA_ARGS__, kExprEnd
-#define WASM_IF_ELSE(cond, tstmt, fstmt) \
- cond, kExprIf, kLocalVoid, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_LOOP_X(index, ...) \
+ kExprLoop, static_cast<byte>(index), __VA_ARGS__, kExprEnd
-#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt) \
- cond, kExprIf, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), tstmt, \
- kExprElse, fstmt, kExprEnd
+#define WASM_IF(cond, ...) cond, kExprIf, kLocalVoid, __VA_ARGS__, kExprEnd
+
+#define WASM_IF_T(t, cond, ...) \
+ cond, kExprIf, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), \
+ __VA_ARGS__, kExprEnd
+
+#define WASM_IF_X(index, cond, ...) \
+ cond, kExprIf, static_cast<byte>(index), __VA_ARGS__, kExprEnd
-#define WASM_IF_ELSE_TT(t1, t2, cond, tstmt, fstmt) \
- cond, kExprIf, kMultivalBlock, 0, \
- static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t1)), \
- static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t2)), tstmt, kExprElse, \
- fstmt, kExprEnd
+#define WASM_IF_ELSE(cond, tstmt, fstmt) \
+ cond, kExprIf, kLocalVoid, tstmt, kExprElse, fstmt, kExprEnd
#define WASM_IF_ELSE_I(cond, tstmt, fstmt) \
cond, kExprIf, kLocalI32, tstmt, kExprElse, fstmt, kExprEnd
@@ -118,6 +118,13 @@
#define WASM_IF_ELSE_D(cond, tstmt, fstmt) \
cond, kExprIf, kLocalF64, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt) \
+ cond, kExprIf, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), tstmt, \
+ kExprElse, fstmt, kExprEnd
+
+#define WASM_IF_ELSE_X(index, cond, tstmt, fstmt) \
+ cond, kExprIf, static_cast<byte>(index), tstmt, kExprElse, fstmt, kExprEnd
+
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
#define WASM_RETURN0 kExprReturn
@@ -574,7 +581,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define SIZEOF_SIG_ENTRY_x_xx 6
#define SIZEOF_SIG_ENTRY_x_xxx 7
-#define WASM_BRV(depth, val) val, kExprBr, static_cast<byte>(depth)
+#define WASM_BRV(depth, ...) __VA_ARGS__, kExprBr, static_cast<byte>(depth)
#define WASM_BRV_IF(depth, val, cond) \
val, cond, kExprBrIf, static_cast<byte>(depth)
#define WASM_BRV_IFD(depth, val, cond) \
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index d2f5e68fef..8150fc08a8 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -89,15 +89,25 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
WasmInterpreter::HeapObjectsScope heap_objects_scope(interpreter, instance);
WasmInterpreter::Thread* thread = interpreter->GetThread(0);
thread->Reset();
+
+ // Start an activation so that we can deal with stack overflows. We do not
+ // finish the activation. An activation is just part of the state of the
+ // interpreter, and we do not reuse the interpreter anyways. In addition,
+ // finishing the activation is not correct in all cases, e.g. when the
+ // execution of the interpreter did not finish after kMaxNumSteps.
+ thread->StartActivation();
thread->InitFrame(&instance->module()->functions[function_index],
arguments.get());
WasmInterpreter::State interpreter_result = thread->Run(kMaxNumSteps);
+ isolate->clear_pending_exception();
+
return interpreter_result != WasmInterpreter::PAUSED;
}
-int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
- int argc, Handle<Object> argv[]) {
+int32_t RunWasmModuleForTesting(Isolate* isolate,
+ Handle<WasmInstanceObject> instance, int argc,
+ Handle<Object> argv[]) {
ErrorThrower thrower(isolate, "RunWasmModule");
return CallWasmFunctionForTesting(isolate, instance, &thrower, "main", argc,
argv);
@@ -149,25 +159,34 @@ int32_t InterpretWasmModule(Isolate* isolate,
WasmInterpreter::HeapObjectsScope heap_objects_scope(interpreter, instance);
WasmInterpreter::Thread* thread = interpreter->GetThread(0);
thread->Reset();
+
+ // Start an activation so that we can deal with stack overflows. We do not
+ // finish the activation. An activation is just part of the state of the
+ // interpreter, and we do not reuse the interpreter anyways. In addition,
+ // finishing the activation is not correct in all cases, e.g. when the
+ // execution of the interpreter did not finish after kMaxNumSteps.
+ thread->StartActivation();
thread->InitFrame(&(instance->module()->functions[function_index]), args);
WasmInterpreter::State interpreter_result = thread->Run(kMaxNumSteps);
+ bool stack_overflow = isolate->has_pending_exception();
+ isolate->clear_pending_exception();
+
*possible_nondeterminism = thread->PossibleNondeterminism();
- if (interpreter_result == WasmInterpreter::FINISHED) {
- WasmValue val = thread->GetReturnValue();
- return val.to<int32_t>();
- } else if (thread->state() == WasmInterpreter::TRAPPED) {
- return 0xdeadbeef;
- } else {
- thrower->RangeError(
- "Interpreter did not finish execution within its step bound");
- return -1;
- }
+ if (stack_overflow) return 0xdeadbeef;
+
+ if (thread->state() == WasmInterpreter::TRAPPED) return 0xdeadbeef;
+
+ if (interpreter_result == WasmInterpreter::FINISHED)
+ return thread->GetReturnValue().to<int32_t>();
+
+ thrower->RangeError(
+ "Interpreter did not finish execution within its step bound");
+ return -1;
}
-MaybeHandle<WasmExportedFunction> GetExportedFunction(Isolate* isolate,
- Handle<JSObject> instance,
- const char* name) {
+MaybeHandle<WasmExportedFunction> GetExportedFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, const char* name) {
Handle<JSObject> exports_object;
Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
exports_object = Handle<JSObject>::cast(
@@ -183,7 +202,8 @@ MaybeHandle<WasmExportedFunction> GetExportedFunction(Isolate* isolate,
return Handle<WasmExportedFunction>::cast(desc.value());
}
-int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
+int32_t CallWasmFunctionForTesting(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
ErrorThrower* thrower, const char* name,
int argc, Handle<Object> argv[]) {
MaybeHandle<WasmExportedFunction> maybe_export =
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index 4fa2ca67c1..cf85822175 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -29,14 +29,14 @@ std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
// Returns a MaybeHandle to the JsToWasm wrapper of the wasm function exported
// with the given name by the provided instance.
-MaybeHandle<WasmExportedFunction> GetExportedFunction(Isolate* isolate,
- Handle<JSObject> instance,
- const char* name);
+MaybeHandle<WasmExportedFunction> GetExportedFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, const char* name);
// Call an exported wasm function by name. Returns -1 if the export does not
// exist or throws an error. Errors are cleared from the isolate before
// returning.
-int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
+int32_t CallWasmFunctionForTesting(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
ErrorThrower* thrower, const char* name,
int argc, Handle<Object> argv[]);
@@ -63,8 +63,9 @@ int32_t InterpretWasmModule(Isolate* isolate,
WasmValue* args, bool* possible_nondeterminism);
// Runs the module instance with arguments.
-int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
- int argc, Handle<Object> argv[]);
+int32_t RunWasmModuleForTesting(Isolate* isolate,
+ Handle<WasmInstanceObject> instance, int argc,
+ Handle<Object> argv[]);
// Install function map, module symbol for testing
void SetupIsolateForWasmModule(Isolate* isolate);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-arguments.js b/deps/v8/test/debugger/debug/debug-evaluate-arguments.js
new file mode 100644
index 0000000000..8cf18d7dc8
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-evaluate-arguments.js
@@ -0,0 +1,60 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Debug = debug.Debug;
+var listened = false;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var foo_arguments = exec_state.frame(1).evaluate("arguments").value();
+ var bar_arguments = exec_state.frame(0).evaluate("arguments").value();
+ assertArrayEquals(foo_expected, foo_arguments);
+ assertArrayEquals(bar_expected, bar_arguments);
+ listened = true;
+ } catch (e) {
+ print(e);
+ print(e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+function foo(a) {
+ function bar(a,b,c) {
+ debugger;
+ return a + b + c;
+ }
+ return bar(1,2,a);
+}
+
+listened = false;
+foo_expected = [3];
+bar_expected = [1,2,3];
+assertEquals(6, foo(3));
+assertTrue(listened);
+
+listened = false;
+foo_expected = [3];
+bar_expected = [1,2,3];
+assertEquals(6, foo(3));
+assertTrue(listened);
+
+listened = false;
+foo_expected = [3];
+bar_expected = [1,2,3];
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(6, foo(3));
+assertTrue(listened);
+
+listened = false;
+foo_expected = [3,4,5];
+bar_expected = [1,2,3];
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(6, foo(3,4,5));
+assertTrue(listened);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/debugger/debug/debug-script.js b/deps/v8/test/debugger/debug/debug-script.js
index 98aa5553d7..342f61928f 100644
--- a/deps/v8/test/debugger/debug/debug-script.js
+++ b/deps/v8/test/debugger/debug/debug-script.js
@@ -28,10 +28,11 @@
// Flags: --expose-gc --send-idle-notification
// Flags: --expose-natives-as natives
// Flags: --noharmony-shipping
-// Flags: --nostress-opt
+// Flags: --nostress-opt --nostress-background-compile
-// --nostress-opt is specified because in stress mode the compilation cache
-// may hold on to old copies of scripts (see bug 1641).
+// --nostress-opt and --nostress-background-compile are specified because in
+// stress mode the compilation cache may hold on to old copies of scripts (see
+// bug 1641).
// Note: this test checks that that the number of scripts reported as native
// by Debug.scripts() is the same as a number of core native scripts.
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js b/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js
index e4a905d7c5..57e96a2b46 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js
@@ -4,7 +4,7 @@
var source =
"var foo = function foo() {\n" +
- " return 1;\n" +
+ " var a = 1;\n" +
"}\n" +
"//@ sourceURL=test";
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index a07acbce5b..71b19d2159 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -33,9 +33,13 @@ class DebuggerTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
- def GetFlagsForTestCase(self, testcase, context):
+ def GetParametersForTestCase(self, testcase, context):
+ flags = (
+ testcase.flags +
+ ["--enable-inspector", "--allow-natives-syntax"] +
+ context.mode_flags
+ )
source = self.GetSourceForTest(testcase)
- flags = ["--enable-inspector", "--allow-natives-syntax"] + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += match.strip().split()
@@ -59,12 +63,11 @@ class DebuggerTestSuite(testsuite.TestSuite):
files.append("--module")
files.append(os.path.join(self.root, testcase.path + self.suffix()))
- flags += files
+ all_files = list(files)
if context.isolates:
- flags.append("--isolate")
- flags += files
+ all_files += ["--isolate"] + files
- return testcase.flags + flags
+ return all_files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index f800f49b8f..beda4899c1 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -14,26 +14,13 @@
namespace v8_fuzzer {
-namespace {
-
-FuzzerSupport* g_fuzzer_support = nullptr;
-
-void DeleteFuzzerSupport() {
- if (g_fuzzer_support) {
- delete g_fuzzer_support;
- g_fuzzer_support = nullptr;
- }
-}
-
-} // namespace
-
FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
v8::internal::FLAG_expose_gc = true;
v8::V8::SetFlagsFromCommandLine(argc, *argv, true);
v8::V8::InitializeICUDefaultLocation((*argv)[0]);
v8::V8::InitializeExternalStartupData((*argv)[0]);
- platform_ = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform_);
+ platform_ = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform_.get());
v8::V8::Initialize();
allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
@@ -47,7 +34,7 @@ FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
context_.Reset(isolate_, v8::Context::New(isolate_));
}
- v8::platform::EnsureEventLoopInitialized(platform_, isolate_);
+ v8::platform::EnsureEventLoopInitialized(platform_.get(), isolate_);
}
FuzzerSupport::~FuzzerSupport() {
@@ -70,15 +57,22 @@ FuzzerSupport::~FuzzerSupport() {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
-
- delete platform_;
- platform_ = nullptr;
}
+std::unique_ptr<FuzzerSupport> FuzzerSupport::fuzzer_support_;
+
// static
-FuzzerSupport* FuzzerSupport::Get() { return g_fuzzer_support; }
+void FuzzerSupport::InitializeFuzzerSupport(int* argc, char*** argv) {
+ DCHECK_NULL(FuzzerSupport::fuzzer_support_);
+ FuzzerSupport::fuzzer_support_ =
+ v8::base::make_unique<v8_fuzzer::FuzzerSupport>(argc, argv);
+}
-v8::Isolate* FuzzerSupport::GetIsolate() const { return isolate_; }
+// static
+FuzzerSupport* FuzzerSupport::Get() {
+ DCHECK_NOT_NULL(FuzzerSupport::fuzzer_support_);
+ return FuzzerSupport::fuzzer_support_.get();
+}
v8::Local<v8::Context> FuzzerSupport::GetContext() {
v8::Isolate::Scope isolate_scope(isolate_);
@@ -90,13 +84,12 @@ v8::Local<v8::Context> FuzzerSupport::GetContext() {
bool FuzzerSupport::PumpMessageLoop(
v8::platform::MessageLoopBehavior behavior) {
- return v8::platform::PumpMessageLoop(platform_, isolate_, behavior);
+ return v8::platform::PumpMessageLoop(platform_.get(), isolate_, behavior);
}
} // namespace v8_fuzzer
extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) {
- v8_fuzzer::g_fuzzer_support = new v8_fuzzer::FuzzerSupport(argc, argv);
- atexit(&v8_fuzzer::DeleteFuzzerSupport);
+ v8_fuzzer::FuzzerSupport::InitializeFuzzerSupport(argc, argv);
return 0;
}
diff --git a/deps/v8/test/fuzzer/fuzzer-support.h b/deps/v8/test/fuzzer/fuzzer-support.h
index e72dcc3613..229c8c6b49 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.h
+++ b/deps/v8/test/fuzzer/fuzzer-support.h
@@ -13,12 +13,17 @@ namespace v8_fuzzer {
class FuzzerSupport {
public:
FuzzerSupport(int* argc, char*** argv);
+
~FuzzerSupport();
+ static void InitializeFuzzerSupport(int* argc, char*** argv);
+
static FuzzerSupport* Get();
- v8::Isolate* GetIsolate() const;
+ v8::Isolate* GetIsolate() const { return isolate_; }
+
v8::Local<v8::Context> GetContext();
+
bool PumpMessageLoop(v8::platform::MessageLoopBehavior =
v8::platform::MessageLoopBehavior::kDoNotWait);
@@ -27,8 +32,8 @@ class FuzzerSupport {
FuzzerSupport(const FuzzerSupport&);
FuzzerSupport& operator=(const FuzzerSupport&);
-
- v8::Platform* platform_;
+ static std::unique_ptr<FuzzerSupport> fuzzer_support_;
+ std::unique_ptr<v8::Platform> platform_;
v8::ArrayBuffer::Allocator* allocator_;
v8::Isolate* isolate_;
v8::Global<v8::Context> context_;
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index 2c96000afb..c73901b0e0 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -34,8 +34,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::Context::Scope context_scope(support->GetContext());
v8::TryCatch try_catch(isolate);
- i::FLAG_harmony_regexp_lookbehind = true;
-
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Factory* factory = i_isolate->factory();
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 905d8b05b9..17cb0ef588 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -30,19 +30,21 @@ class FuzzerTestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for subtest in FuzzerTestSuite.SUB_TESTS:
- shell = 'v8_simple_%s_fuzzer' % subtest
for fname in os.listdir(os.path.join(self.root, subtest)):
if not os.path.isfile(os.path.join(self.root, subtest, fname)):
continue
- test = testcase.TestCase(self, '%s/%s' % (subtest, fname),
- override_shell=shell)
+ test = testcase.TestCase(self, '%s/%s' % (subtest, fname))
tests.append(test)
tests.sort()
return tests
- def GetFlagsForTestCase(self, testcase, context):
+ def GetShellForTestCase(self, testcase):
+ group, _ = testcase.path.split('/', 1)
+ return 'v8_simple_%s_fuzzer' % group
+
+ def GetParametersForTestCase(self, testcase, context):
suite, name = testcase.path.split('/')
- return [os.path.join(self.root, suite, name)]
+ return [os.path.join(self.root, suite, name)], [], {}
def _VariantGeneratorFactory(self):
return FuzzerVariantGenerator
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 5b59a63e97..ded3a101f2 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -76,44 +76,143 @@ class DataRange {
class WasmGenerator {
template <WasmOpcode Op, ValueType... Args>
- std::function<void(DataRange)> op() {
- return [this](DataRange data) {
- Generate<Args...>(data);
- builder_->Emit(Op);
- };
+ void op(DataRange data) {
+ Generate<Args...>(data);
+ builder_->Emit(Op);
}
+ class BlockScope {
+ public:
+ BlockScope(WasmGenerator* gen, WasmOpcode block_type, ValueType result_type,
+ ValueType br_type)
+ : gen_(gen) {
+ gen->blocks_.push_back(br_type);
+ gen->builder_->EmitWithU8(block_type,
+ WasmOpcodes::ValueTypeCodeFor(result_type));
+ }
+
+ ~BlockScope() {
+ gen_->builder_->Emit(kExprEnd);
+ gen_->blocks_.pop_back();
+ }
+
+ private:
+ WasmGenerator* const gen_;
+ };
+
template <ValueType T>
- std::function<void(DataRange)> block() {
- return [this](DataRange data) {
- blocks_.push_back(T);
- builder_->EmitWithU8(
- kExprBlock, static_cast<uint8_t>(WasmOpcodes::ValueTypeCodeFor(T)));
- Generate<T>(data);
- builder_->Emit(kExprEnd);
- blocks_.pop_back();
- };
+ void block(DataRange data) {
+ BlockScope block_scope(this, kExprBlock, T, T);
+ Generate<T>(data);
}
template <ValueType T>
- std::function<void(DataRange)> block_br() {
- return [this](DataRange data) {
- blocks_.push_back(T);
- builder_->EmitWithU8(
- kExprBlock, static_cast<uint8_t>(WasmOpcodes::ValueTypeCodeFor(T)));
-
- const uint32_t target_block = data.get<uint32_t>() % blocks_.size();
- const ValueType break_type = blocks_[target_block];
-
- Generate(break_type, data);
- builder_->EmitWithI32V(kExprBr, target_block);
- builder_->Emit(kExprEnd);
- blocks_.pop_back();
- };
+ void loop(DataRange data) {
+ // When breaking to a loop header, don't provide any input value (hence
+ // kWasmStmt).
+ BlockScope block_scope(this, kExprLoop, T, kWasmStmt);
+ Generate<T>(data);
+ }
+
+ void br(DataRange data) {
+ // There is always at least the block representing the function body.
+ DCHECK(!blocks_.empty());
+ const uint32_t target_block = data.get<uint32_t>() % blocks_.size();
+ const ValueType break_type = blocks_[target_block];
+
+ Generate(break_type, data);
+ builder_->EmitWithI32V(
+ kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
+ }
+
+ // TODO(eholk): make this function constexpr once gcc supports it
+ static uint8_t max_alignment(WasmOpcode memop) {
+ switch (memop) {
+ case kExprI64LoadMem:
+ case kExprF64LoadMem:
+ case kExprI64StoreMem:
+ case kExprF64StoreMem:
+ return 3;
+ case kExprI32LoadMem:
+ case kExprI64LoadMem32S:
+ case kExprI64LoadMem32U:
+ case kExprF32LoadMem:
+ case kExprI32StoreMem:
+ case kExprI64StoreMem32:
+ case kExprF32StoreMem:
+ return 2;
+ case kExprI32LoadMem16S:
+ case kExprI32LoadMem16U:
+ case kExprI64LoadMem16S:
+ case kExprI64LoadMem16U:
+ case kExprI32StoreMem16:
+ case kExprI64StoreMem16:
+ return 1;
+ case kExprI32LoadMem8S:
+ case kExprI32LoadMem8U:
+ case kExprI64LoadMem8S:
+ case kExprI64LoadMem8U:
+ case kExprI32StoreMem8:
+ case kExprI64StoreMem8:
+ return 0;
+ default:
+ return 0;
+ }
+ }
+
+ template <WasmOpcode memory_op, ValueType... arg_types>
+ void memop(DataRange data) {
+ const uint8_t align = data.get<uint8_t>() % (max_alignment(memory_op) + 1);
+ const uint32_t offset = data.get<uint32_t>();
+
+ // Generate the index and the arguments, if any.
+ Generate<kWasmI32, arg_types...>(data);
+
+ builder_->Emit(memory_op);
+ builder_->EmitU32V(align);
+ builder_->EmitU32V(offset);
}
+ template <ValueType T1, ValueType T2>
+ void sequence(DataRange data) {
+ Generate<T1, T2>(data);
+ }
+
+ void current_memory(DataRange data) {
+ builder_->EmitWithU8(kExprMemorySize, 0);
+ }
+
+ void grow_memory(DataRange data);
+
+ using generate_fn = void (WasmGenerator::*const)(DataRange);
+
+ template <size_t N>
+ void GenerateOneOf(generate_fn (&alternates)[N], DataRange data) {
+ static_assert(N < std::numeric_limits<uint8_t>::max(),
+ "Too many alternates. Replace with a bigger type if needed.");
+ const auto which = data.get<uint8_t>();
+
+ generate_fn alternate = alternates[which % N];
+ (this->*alternate)(data);
+ }
+
+ struct GeneratorRecursionScope {
+ explicit GeneratorRecursionScope(WasmGenerator* gen) : gen(gen) {
+ ++gen->recursion_depth;
+ DCHECK_LE(gen->recursion_depth, kMaxRecursionDepth);
+ }
+ ~GeneratorRecursionScope() {
+ DCHECK_GT(gen->recursion_depth, 0);
+ --gen->recursion_depth;
+ }
+ WasmGenerator* gen;
+ };
+
public:
- explicit WasmGenerator(WasmFunctionBuilder* fn) : builder_(fn) {}
+ explicit WasmGenerator(WasmFunctionBuilder* fn) : builder_(fn) {
+ DCHECK_EQ(1, fn->signature()->return_count());
+ blocks_.push_back(fn->signature()->GetReturn(0));
+ }
void Generate(ValueType type, DataRange data);
@@ -130,161 +229,222 @@ class WasmGenerator {
private:
WasmFunctionBuilder* builder_;
std::vector<ValueType> blocks_;
+ uint32_t recursion_depth = 0;
+
+ static constexpr uint32_t kMaxRecursionDepth = 64;
+
+ bool recursion_limit_reached() {
+ return recursion_depth >= kMaxRecursionDepth;
+ }
};
template <>
+void WasmGenerator::Generate<kWasmStmt>(DataRange data) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data.size() == 0) return;
+
+ constexpr generate_fn alternates[] = {
+ &WasmGenerator::block<kWasmStmt>,
+ &WasmGenerator::loop<kWasmStmt>,
+ &WasmGenerator::br,
+
+ &WasmGenerator::memop<kExprI32StoreMem, kWasmI32>,
+ &WasmGenerator::memop<kExprI32StoreMem8, kWasmI32>,
+ &WasmGenerator::memop<kExprI32StoreMem16, kWasmI32>,
+ &WasmGenerator::memop<kExprI64StoreMem, kWasmI64>,
+ &WasmGenerator::memop<kExprI64StoreMem8, kWasmI64>,
+ &WasmGenerator::memop<kExprI64StoreMem16, kWasmI64>,
+ &WasmGenerator::memop<kExprI64StoreMem32, kWasmI64>,
+ &WasmGenerator::memop<kExprF32StoreMem, kWasmF32>,
+ &WasmGenerator::memop<kExprF64StoreMem, kWasmF64>,
+ };
+
+ GenerateOneOf(alternates, data);
+}
+
+template <>
void WasmGenerator::Generate<kWasmI32>(DataRange data) {
- if (data.size() <= sizeof(uint32_t)) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data.size() <= sizeof(uint32_t)) {
builder_->EmitI32Const(data.get<uint32_t>());
- } else {
- const std::function<void(DataRange)> alternates[] = {
- op<kExprI32Eqz, kWasmI32>(), //
- op<kExprI32Eq, kWasmI32, kWasmI32>(),
- op<kExprI32Ne, kWasmI32, kWasmI32>(),
- op<kExprI32LtS, kWasmI32, kWasmI32>(),
- op<kExprI32LtU, kWasmI32, kWasmI32>(),
- op<kExprI32GeS, kWasmI32, kWasmI32>(),
- op<kExprI32GeU, kWasmI32, kWasmI32>(),
-
- op<kExprI64Eqz, kWasmI64>(), //
- op<kExprI64Eq, kWasmI64, kWasmI64>(),
- op<kExprI64Ne, kWasmI64, kWasmI64>(),
- op<kExprI64LtS, kWasmI64, kWasmI64>(),
- op<kExprI64LtU, kWasmI64, kWasmI64>(),
- op<kExprI64GeS, kWasmI64, kWasmI64>(),
- op<kExprI64GeU, kWasmI64, kWasmI64>(),
-
- op<kExprF32Eq, kWasmF32, kWasmF32>(),
- op<kExprF32Ne, kWasmF32, kWasmF32>(),
- op<kExprF32Lt, kWasmF32, kWasmF32>(),
- op<kExprF32Ge, kWasmF32, kWasmF32>(),
-
- op<kExprF64Eq, kWasmF64, kWasmF64>(),
- op<kExprF64Ne, kWasmF64, kWasmF64>(),
- op<kExprF64Lt, kWasmF64, kWasmF64>(),
- op<kExprF64Ge, kWasmF64, kWasmF64>(),
-
- op<kExprI32Add, kWasmI32, kWasmI32>(),
- op<kExprI32Sub, kWasmI32, kWasmI32>(),
- op<kExprI32Mul, kWasmI32, kWasmI32>(),
-
- op<kExprI32DivS, kWasmI32, kWasmI32>(),
- op<kExprI32DivU, kWasmI32, kWasmI32>(),
- op<kExprI32RemS, kWasmI32, kWasmI32>(),
- op<kExprI32RemU, kWasmI32, kWasmI32>(),
-
- op<kExprI32And, kWasmI32, kWasmI32>(),
- op<kExprI32Ior, kWasmI32, kWasmI32>(),
- op<kExprI32Xor, kWasmI32, kWasmI32>(),
- op<kExprI32Shl, kWasmI32, kWasmI32>(),
- op<kExprI32ShrU, kWasmI32, kWasmI32>(),
- op<kExprI32ShrS, kWasmI32, kWasmI32>(),
- op<kExprI32Ror, kWasmI32, kWasmI32>(),
- op<kExprI32Rol, kWasmI32, kWasmI32>(),
-
- op<kExprI32Clz, kWasmI32>(), //
- op<kExprI32Ctz, kWasmI32>(), //
- op<kExprI32Popcnt, kWasmI32>(),
-
- op<kExprI32ConvertI64, kWasmI64>(), //
- op<kExprI32SConvertF32, kWasmF32>(),
- op<kExprI32UConvertF32, kWasmF32>(),
- op<kExprI32SConvertF64, kWasmF64>(),
- op<kExprI32UConvertF64, kWasmF64>(),
- op<kExprI32ReinterpretF32, kWasmF32>(),
-
- block<kWasmI32>(),
- block_br<kWasmI32>()};
-
- static_assert(arraysize(alternates) < std::numeric_limits<uint8_t>::max(),
- "Too many alternates. Replace with a bigger type if needed.");
- const auto which = data.get<uint8_t>();
-
- alternates[which % arraysize(alternates)](data);
+ return;
}
+
+ constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmStmt, kWasmI32>,
+
+ &WasmGenerator::op<kExprI32Eqz, kWasmI32>,
+ &WasmGenerator::op<kExprI32Eq, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Ne, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32LtS, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32LtU, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32GeS, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32GeU, kWasmI32, kWasmI32>,
+
+ &WasmGenerator::op<kExprI64Eqz, kWasmI64>,
+ &WasmGenerator::op<kExprI64Eq, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Ne, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64LtS, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64LtU, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64GeS, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64GeU, kWasmI64, kWasmI64>,
+
+ &WasmGenerator::op<kExprF32Eq, kWasmF32, kWasmF32>,
+ &WasmGenerator::op<kExprF32Ne, kWasmF32, kWasmF32>,
+ &WasmGenerator::op<kExprF32Lt, kWasmF32, kWasmF32>,
+ &WasmGenerator::op<kExprF32Ge, kWasmF32, kWasmF32>,
+
+ &WasmGenerator::op<kExprF64Eq, kWasmF64, kWasmF64>,
+ &WasmGenerator::op<kExprF64Ne, kWasmF64, kWasmF64>,
+ &WasmGenerator::op<kExprF64Lt, kWasmF64, kWasmF64>,
+ &WasmGenerator::op<kExprF64Ge, kWasmF64, kWasmF64>,
+
+ &WasmGenerator::op<kExprI32Add, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Sub, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Mul, kWasmI32, kWasmI32>,
+
+ &WasmGenerator::op<kExprI32DivS, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32DivU, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32RemS, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32RemU, kWasmI32, kWasmI32>,
+
+ &WasmGenerator::op<kExprI32And, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Ior, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Xor, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Shl, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32ShrU, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32ShrS, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Ror, kWasmI32, kWasmI32>,
+ &WasmGenerator::op<kExprI32Rol, kWasmI32, kWasmI32>,
+
+ &WasmGenerator::op<kExprI32Clz, kWasmI32>,
+ &WasmGenerator::op<kExprI32Ctz, kWasmI32>,
+ &WasmGenerator::op<kExprI32Popcnt, kWasmI32>,
+
+ &WasmGenerator::op<kExprI32ConvertI64, kWasmI64>,
+ &WasmGenerator::op<kExprI32SConvertF32, kWasmF32>,
+ &WasmGenerator::op<kExprI32UConvertF32, kWasmF32>,
+ &WasmGenerator::op<kExprI32SConvertF64, kWasmF64>,
+ &WasmGenerator::op<kExprI32UConvertF64, kWasmF64>,
+ &WasmGenerator::op<kExprI32ReinterpretF32, kWasmF32>,
+
+ &WasmGenerator::block<kWasmI32>,
+ &WasmGenerator::loop<kWasmI32>,
+
+ &WasmGenerator::memop<kExprI32LoadMem>,
+ &WasmGenerator::memop<kExprI32LoadMem8S>,
+ &WasmGenerator::memop<kExprI32LoadMem8U>,
+ &WasmGenerator::memop<kExprI32LoadMem16S>,
+ &WasmGenerator::memop<kExprI32LoadMem16U>,
+
+ &WasmGenerator::current_memory,
+ &WasmGenerator::grow_memory};
+
+ GenerateOneOf(alternates, data);
}
template <>
void WasmGenerator::Generate<kWasmI64>(DataRange data) {
- if (data.size() <= sizeof(uint64_t)) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data.size() <= sizeof(uint64_t)) {
builder_->EmitI64Const(data.get<int64_t>());
- } else {
- const std::function<void(DataRange)> alternates[] = {
- op<kExprI64Add, kWasmI64, kWasmI64>(),
- op<kExprI64Sub, kWasmI64, kWasmI64>(),
- op<kExprI64Mul, kWasmI64, kWasmI64>(),
-
- op<kExprI64DivS, kWasmI64, kWasmI64>(),
- op<kExprI64DivU, kWasmI64, kWasmI64>(),
- op<kExprI64RemS, kWasmI64, kWasmI64>(),
- op<kExprI64RemU, kWasmI64, kWasmI64>(),
-
- op<kExprI64And, kWasmI64, kWasmI64>(),
- op<kExprI64Ior, kWasmI64, kWasmI64>(),
- op<kExprI64Xor, kWasmI64, kWasmI64>(),
- op<kExprI64Shl, kWasmI64, kWasmI64>(),
- op<kExprI64ShrU, kWasmI64, kWasmI64>(),
- op<kExprI64ShrS, kWasmI64, kWasmI64>(),
- op<kExprI64Ror, kWasmI64, kWasmI64>(),
- op<kExprI64Rol, kWasmI64, kWasmI64>(),
-
- op<kExprI64Clz, kWasmI64>(),
- op<kExprI64Ctz, kWasmI64>(),
- op<kExprI64Popcnt, kWasmI64>(),
-
- block<kWasmI64>(),
- block_br<kWasmI64>()};
-
- static_assert(arraysize(alternates) < std::numeric_limits<uint8_t>::max(),
- "Too many alternates. Replace with a bigger type if needed.");
- const auto which = data.get<uint8_t>();
-
- alternates[which % arraysize(alternates)](data);
+ return;
}
+
+ constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmStmt, kWasmI64>,
+
+ &WasmGenerator::op<kExprI64Add, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Sub, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Mul, kWasmI64, kWasmI64>,
+
+ &WasmGenerator::op<kExprI64DivS, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64DivU, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64RemS, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64RemU, kWasmI64, kWasmI64>,
+
+ &WasmGenerator::op<kExprI64And, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Ior, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Xor, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Shl, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64ShrU, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64ShrS, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Ror, kWasmI64, kWasmI64>,
+ &WasmGenerator::op<kExprI64Rol, kWasmI64, kWasmI64>,
+
+ &WasmGenerator::op<kExprI64Clz, kWasmI64>,
+ &WasmGenerator::op<kExprI64Ctz, kWasmI64>,
+ &WasmGenerator::op<kExprI64Popcnt, kWasmI64>,
+
+ &WasmGenerator::block<kWasmI64>,
+ &WasmGenerator::loop<kWasmI64>,
+
+ &WasmGenerator::memop<kExprI64LoadMem>,
+ &WasmGenerator::memop<kExprI64LoadMem8S>,
+ &WasmGenerator::memop<kExprI64LoadMem8U>,
+ &WasmGenerator::memop<kExprI64LoadMem16S>,
+ &WasmGenerator::memop<kExprI64LoadMem16U>,
+ &WasmGenerator::memop<kExprI64LoadMem32S>,
+ &WasmGenerator::memop<kExprI64LoadMem32U>};
+
+ GenerateOneOf(alternates, data);
}
template <>
void WasmGenerator::Generate<kWasmF32>(DataRange data) {
- if (data.size() <= sizeof(float)) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data.size() <= sizeof(float)) {
builder_->EmitF32Const(data.get<float>());
- } else {
- const std::function<void(DataRange)> alternates[] = {
- op<kExprF32Add, kWasmF32, kWasmF32>(),
- op<kExprF32Sub, kWasmF32, kWasmF32>(),
- op<kExprF32Mul, kWasmF32, kWasmF32>(),
+ return;
+ }
- block<kWasmF32>(), block_br<kWasmF32>()};
+ constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmStmt, kWasmF32>,
- static_assert(arraysize(alternates) < std::numeric_limits<uint8_t>::max(),
- "Too many alternates. Replace with a bigger type if needed.");
- const auto which = data.get<uint8_t>();
+ &WasmGenerator::op<kExprF32Add, kWasmF32, kWasmF32>,
+ &WasmGenerator::op<kExprF32Sub, kWasmF32, kWasmF32>,
+ &WasmGenerator::op<kExprF32Mul, kWasmF32, kWasmF32>,
- alternates[which % arraysize(alternates)](data);
- }
+ &WasmGenerator::block<kWasmF32>,
+ &WasmGenerator::loop<kWasmF32>,
+
+ &WasmGenerator::memop<kExprF32LoadMem>};
+
+ GenerateOneOf(alternates, data);
}
template <>
void WasmGenerator::Generate<kWasmF64>(DataRange data) {
- if (data.size() <= sizeof(double)) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data.size() <= sizeof(double)) {
builder_->EmitF64Const(data.get<double>());
- } else {
- const std::function<void(DataRange)> alternates[] = {
- op<kExprF64Add, kWasmF64, kWasmF64>(),
- op<kExprF64Sub, kWasmF64, kWasmF64>(),
- op<kExprF64Mul, kWasmF64, kWasmF64>(),
+ return;
+ }
- block<kWasmF64>(), block_br<kWasmF64>()};
+ constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmStmt, kWasmF64>,
- static_assert(arraysize(alternates) < std::numeric_limits<uint8_t>::max(),
- "Too many alternates. Replace with a bigger type if needed.");
- const auto which = data.get<uint8_t>();
+ &WasmGenerator::op<kExprF64Add, kWasmF64, kWasmF64>,
+ &WasmGenerator::op<kExprF64Sub, kWasmF64, kWasmF64>,
+ &WasmGenerator::op<kExprF64Mul, kWasmF64, kWasmF64>,
- alternates[which % arraysize(alternates)](data);
- }
+ &WasmGenerator::block<kWasmF64>,
+ &WasmGenerator::loop<kWasmF64>,
+
+ &WasmGenerator::memop<kExprF64LoadMem>};
+
+ GenerateOneOf(alternates, data);
+}
+
+void WasmGenerator::grow_memory(DataRange data) {
+ Generate<kWasmI32>(data);
+ builder_->EmitWithU8(kExprGrowMemory, 0);
}
void WasmGenerator::Generate(ValueType type, DataRange data) {
switch (type) {
+ case kWasmStmt:
+ return Generate<kWasmStmt>(data);
case kWasmI32:
return Generate<kWasmI32>(data);
case kWasmI64:
@@ -314,8 +474,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmGenerator gen(f);
gen.Generate<kWasmI32>(DataRange(data, static_cast<uint32_t>(size)));
- uint8_t end_opcode = kExprEnd;
- f->EmitCode(&end_opcode, 1);
+ f->Emit(kExprEnd);
builder.AddExport(CStrVector("main"), f);
builder.SetMaxMemorySize(32);
@@ -333,7 +492,8 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return WasmCompileFuzzer().FuzzWasmModule(data, size);
+ constexpr bool require_valid = true;
+ return WasmCompileFuzzer().FuzzWasmModule(data, size, require_valid);
}
} // namespace fuzzer
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index f02d2b957e..4e6aed1a25 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -13,6 +13,7 @@
#include "src/wasm/wasm-module.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
+#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
@@ -21,8 +22,6 @@ namespace internal {
namespace wasm {
namespace fuzzer {
-static constexpr uint32_t kWasmCodeFuzzerHashSeed = 83;
-
static constexpr const char* kNameString = "name";
static constexpr size_t kNameStringLength = 4;
@@ -32,9 +31,7 @@ int FuzzWasmSection(SectionCode section, const uint8_t* data, size_t size) {
i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
- if (i_isolate->has_pending_exception()) {
- i_isolate->clear_pending_exception();
- }
+ i_isolate->clear_pending_exception();
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -92,8 +89,8 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
}
-int WasmExecutionFuzzer::FuzzWasmModule(
- const uint8_t* data, size_t size) {
+int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
+ bool require_valid) {
// Save the flag so that we can change it and restore it later.
bool generate_test = FLAG_wasm_code_fuzzer_gen_test;
if (generate_test) {
@@ -120,9 +117,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(
i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
- if (i_isolate->has_pending_exception()) {
- i_isolate->clear_pending_exception();
- }
+ i_isolate->clear_pending_exception();
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -147,8 +142,12 @@ int WasmExecutionFuzzer::FuzzWasmModule(
ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
- MaybeHandle<WasmModuleObject> compiled_module =
- SyncCompile(i_isolate, &interpreter_thrower, wire_bytes);
+ // Compile with Turbofan here. Liftoff will be tested later.
+ MaybeHandle<WasmModuleObject> compiled_module;
+ {
+ FlagScope<bool> no_liftoff(&FLAG_liftoff, false);
+ compiled_module = SyncCompile(i_isolate, &interpreter_thrower, wire_bytes);
+ }
// Clear the flag so that the WebAssembly code is not printed twice.
FLAG_wasm_code_fuzzer_gen_test = false;
bool compiles = !compiled_module.is_null();
@@ -170,27 +169,24 @@ int WasmExecutionFuzzer::FuzzWasmModule(
bool validates = SyncValidate(i_isolate, wire_bytes);
- if (compiles != validates) {
- uint32_t hash = StringHasher::HashSequentialString(
- data, static_cast<int>(size), kWasmCodeFuzzerHashSeed);
- V8_Fatal(__FILE__, __LINE__,
- "compiles != validates (%d vs %d); WasmCodeFuzzerHash=%x",
- compiles, validates, hash);
- }
+ CHECK_EQ(compiles, validates);
+ CHECK_IMPLIES(require_valid, validates);
if (!compiles) return 0;
- int32_t result_interpreted;
+ int32_t result_interpreter;
bool possible_nondeterminism = false;
{
MaybeHandle<WasmInstanceObject> interpreter_instance = SyncInstantiate(
i_isolate, &interpreter_thrower, compiled_module.ToHandleChecked(),
MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
+ // Ignore instantiation failure.
if (interpreter_thrower.error()) {
return 0;
}
- result_interpreted = testing::InterpretWasmModule(
+
+ result_interpreter = testing::InterpretWasmModule(
i_isolate, interpreter_instance.ToHandleChecked(), &interpreter_thrower,
0, interpreter_args.get(), &possible_nondeterminism);
}
@@ -201,36 +197,57 @@ int WasmExecutionFuzzer::FuzzWasmModule(
return 0;
}
- int32_t result_compiled;
+ bool expect_exception =
+ result_interpreter == static_cast<int32_t>(0xdeadbeef);
+
+ int32_t result_turbofan;
{
- ErrorThrower compiler_thrower(i_isolate, "Compiler");
+ ErrorThrower compiler_thrower(i_isolate, "Turbofan");
MaybeHandle<WasmInstanceObject> compiled_instance = SyncInstantiate(
i_isolate, &compiler_thrower, compiled_module.ToHandleChecked(),
MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
DCHECK(!compiler_thrower.error());
- result_compiled = testing::CallWasmFunctionForTesting(
+ result_turbofan = testing::CallWasmFunctionForTesting(
i_isolate, compiled_instance.ToHandleChecked(), &compiler_thrower,
"main", num_args, compiler_args.get());
}
// The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit may cause result_interpreted to be different than
- // result_compiled. Therefore we do not check the equality of the results
+ // This sign bit may cause result_interpreter to be different than
+ // result_turbofan. Therefore we do not check the equality of the results
// if the execution may have produced a NaN at some point.
- if (possible_nondeterminism) return 0;
+ if (!possible_nondeterminism) {
+ CHECK_EQ(expect_exception, i_isolate->has_pending_exception());
- if (result_interpreted == bit_cast<int32_t>(0xdeadbeef)) {
- CHECK(i_isolate->has_pending_exception());
- i_isolate->clear_pending_exception();
- } else {
- CHECK(!i_isolate->has_pending_exception());
- if (result_interpreted != result_compiled) {
- V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
- StringHasher::HashSequentialString(data, static_cast<int>(size),
- kWasmCodeFuzzerHashSeed));
- }
+ if (!expect_exception) CHECK_EQ(result_interpreter, result_turbofan);
}
+
+ // Clear any pending exceptions for the next run.
+ i_isolate->clear_pending_exception();
+
+ int32_t result_liftoff;
+ {
+ FlagScope<bool> liftoff(&FLAG_liftoff, true);
+ ErrorThrower compiler_thrower(i_isolate, "Liftoff");
+ // Re-compile with Liftoff.
+ MaybeHandle<WasmInstanceObject> compiled_instance =
+ SyncCompileAndInstantiate(i_isolate, &compiler_thrower, wire_bytes,
+ MaybeHandle<JSReceiver>(),
+ MaybeHandle<JSArrayBuffer>());
+ DCHECK(!compiler_thrower.error());
+ result_liftoff = testing::CallWasmFunctionForTesting(
+ i_isolate, compiled_instance.ToHandleChecked(), &compiler_thrower,
+ "main", num_args, compiler_args.get());
+ }
+ if (!possible_nondeterminism) {
+ CHECK_EQ(expect_exception, i_isolate->has_pending_exception());
+
+ if (!expect_exception) CHECK_EQ(result_interpreter, result_liftoff);
+ }
+
+ // Cleanup any pending exception.
+ i_isolate->clear_pending_exception();
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 8830d716b8..ac40a5eed5 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef WASM_SECTION_FUZZERS_H_
-#define WASM_SECTION_FUZZERS_H_
+#ifndef WASM_FUZZER_COMMON_H_
+#define WASM_FUZZER_COMMON_H_
#include <stddef.h>
#include <stdint.h>
@@ -29,7 +29,8 @@ void InterpretAndExecuteModule(Isolate* isolate,
class WasmExecutionFuzzer {
public:
virtual ~WasmExecutionFuzzer() {}
- int FuzzWasmModule(const uint8_t* data, size_t size);
+ int FuzzWasmModule(const uint8_t* data, size_t size,
+ bool require_valid = false);
protected:
virtual bool GenerateModule(
@@ -43,4 +44,4 @@ class WasmExecutionFuzzer {
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // WASM_SECTION_FUZZERS_H_
+#endif // WASM_FUZZER_COMMON_H_
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt b/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt
index 32281e51a4..a631e72c69 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt
@@ -289,7 +289,7 @@ Running test: testPreciseCountCoverageIncremental
[1] : {
count : 0
endOffset : 156
- startOffset : 145
+ startOffset : 143
}
[2] : {
count : 0
diff --git a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
index 3b9f58aefc..001f393148 100644
--- a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
@@ -8,12 +8,12 @@ test (test.js:21:2)
(anonymous) (expr1.js:0:0)
foo (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
test (test.js:19:14)
(anonymous) (expr1.js:0:0)
foo (test.js:12:2)
--- Promise.resolve --
+-- Promise.then --
test (test.js:19:14)
(anonymous) (expr1.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
index dfdf81fe8c..ab08d3d69b 100644
--- a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
@@ -1,16 +1,16 @@
Checks async stack for late .then handlers with gc
foo1 (test.js:11:2)
--- Promise.resolve --
+-- Promise.then --
test (test.js:18:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
--- Promise.resolve --
+-- Promise.then --
test (test.js:22:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
--- Promise.resolve --
+-- Promise.then --
test (test.js:24:14)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-await-expected.txt b/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
index 4ebcefadad..64d5051298 100644
--- a/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
@@ -19,7 +19,7 @@ test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
foo2 (test.js:19:43)
-- async function --
foo2 (test.js:13:19)
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
index f5197a1669..d555fb84de 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
@@ -2,78 +2,78 @@ Checks created frame for async call chain
Running test: testPromise
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promise (test.js:20:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThen
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promiseThen (test.js:28:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
--- Promise.resolve --
+-- Promise.then --
promiseThen (test.js:29:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThenThen
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promiseThenThen (test.js:37:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promiseThenThen (test.js:38:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
--- Promise.resolve --
+-- Promise.then --
promiseThenThen (test.js:37:25)
(anonymous) (expr.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promiseResolve (test.js:44:27)
(anonymous) (expr.js:0:0)
Running test: testPromiseReject
foo1 (test.js:10:2)
--- Promise.reject --
+-- Promise.catch --
promiseReject (test.js:48:31)
(anonymous) (expr.js:0:0)
Running test: testPromiseAll
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promiseAll (test.js:52:44)
(anonymous) (expr.js:0:0)
Running test: testPromiseRace
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
promiseRace (test.js:56:45)
(anonymous) (expr.js:0:0)
Running test: testThenableJob1
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
thenableJob1 (test.js:60:72)
(anonymous) (expr.js:0:0)
Running test: testThenableJob2
foo1 (test.js:10:2)
--- Promise.resolve --
+-- Promise.then --
thenableJob2 (test.js:64:57)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
index 81640849ec..cec05f239b 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
@@ -2,94 +2,94 @@ Checks that async chains for promises are correct.
Running test: testPromise
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
promise (test.js:19:14)
(anonymous) (testPromise.js:0:0)
Running test: testPromiseResolvedBySetTimeout
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
promiseResolvedBySetTimeout (test.js:27:14)
(anonymous) (testPromiseResolvedBySetTimeout.js:0:0)
Running test: testPromiseAll
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
promiseAll (test.js:37:35)
(anonymous) (testPromiseAll.js:0:0)
Running test: testPromiseAllReverseOrder
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
promiseAllReverseOrder (test.js:48:35)
(anonymous) (testPromiseAllReverseOrder.js:0:0)
Running test: testPromiseRace
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
promiseRace (test.js:59:36)
(anonymous) (testPromiseRace.js:0:0)
Running test: testTwoChainedCallbacks
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
twoChainedCallbacks (test.js:68:14)
(anonymous) (testTwoChainedCallbacks.js:0:0)
foo2 (test.js:13:2)
--- Promise.resolve --
+-- Promise.then --
twoChainedCallbacks (test.js:68:25)
(anonymous) (testTwoChainedCallbacks.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
promiseResolve (test.js:74:27)
(anonymous) (testPromiseResolve.js:0:0)
foo2 (test.js:13:2)
--- Promise.resolve --
+-- Promise.then --
promiseResolve (test.js:74:38)
(anonymous) (testPromiseResolve.js:0:0)
Running test: testThenableJobResolvedInSetTimeout
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
thenableJobResolvedInSetTimeout (test.js:86:40)
(anonymous) (testThenableJobResolvedInSetTimeout.js:0:0)
Running test: testThenableJobResolvedInSetTimeoutWithStack
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
thenableJobResolvedInSetTimeoutWithStack (test.js:104:40)
(anonymous) (testThenableJobResolvedInSetTimeoutWithStack.js:0:0)
Running test: testThenableJobResolvedByPromise
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
thenableJobResolvedByPromise (test.js:118:40)
(anonymous) (testThenableJobResolvedByPromise.js:0:0)
Running test: testThenableJobResolvedByPromiseWithStack
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
thenableJobResolvedByPromiseWithStack (test.js:136:40)
(anonymous) (testThenableJobResolvedByPromiseWithStack.js:0:0)
Running test: testLateThenCallback
foo1 (test.js:9:2)
--- Promise.resolve --
+-- Promise.then --
lateThenCallback (test.js:145:12)
(anonymous) (testLateThenCallback.js:0:0)
@@ -97,14 +97,14 @@ lateThenCallback (test.js:145:12)
Running test: testComplex
inner1 (test.js:154:6)
foo1 (test.js:156:4)
--- Promise.resolve --
+-- Promise.then --
complex (test.js:202:5)
(anonymous) (testComplex.js:0:0)
p.then (test.js:207:8)
--- Promise.resolve --
+-- Promise.then --
p.then (test.js:206:8)
--- Promise.resolve --
+-- Promise.then --
setTimeout (test.js:205:6)
-- setTimeout --
complex (test.js:204:2)
@@ -113,7 +113,20 @@ complex (test.js:204:2)
Running test: testReject
foo1 (test.js:9:2)
--- Promise.reject --
+-- Promise.catch --
reject (test.js:217:31)
(anonymous) (testReject.js:0:0)
+
+Running test: testFinally1
+foo1 (test.js:9:2)
+-- Promise.finally --
+finally1 (test.js:221:33)
+(anonymous) (testFinally1.js:0:0)
+
+
+Running test: testFinally2
+foo1 (test.js:9:2)
+-- Promise.finally --
+finally2 (test.js:225:34)
+(anonymous) (testFinally2.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise.js b/deps/v8/test/inspector/debugger/async-stack-for-promise.js
index 198c971e12..79c3261263 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise.js
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise.js
@@ -217,6 +217,13 @@ function reject() {
return Promise.reject().catch(foo1);
}
+function finally1() {
+ return Promise.reject().finally(foo1);
+}
+
+function finally2() {
+ return Promise.resolve().finally(foo1);
+}
//# sourceURL=test.js`, 7, 26);
session.setupScriptMap();
@@ -230,20 +237,12 @@ Protocol.Debugger.onPaused(message => {
Protocol.Debugger.enable();
Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
var testList = [
- 'promise',
- 'promiseResolvedBySetTimeout',
- 'promiseAll',
- 'promiseAllReverseOrder',
- 'promiseRace',
- 'twoChainedCallbacks',
- 'promiseResolve',
- 'thenableJobResolvedInSetTimeout',
- 'thenableJobResolvedInSetTimeoutWithStack',
- 'thenableJobResolvedByPromise',
- 'thenableJobResolvedByPromiseWithStack',
- 'lateThenCallback',
- 'complex',
- 'reject',
+ 'promise', 'promiseResolvedBySetTimeout', 'promiseAll',
+ 'promiseAllReverseOrder', 'promiseRace', 'twoChainedCallbacks',
+ 'promiseResolve', 'thenableJobResolvedInSetTimeout',
+ 'thenableJobResolvedInSetTimeoutWithStack', 'thenableJobResolvedByPromise',
+ 'thenableJobResolvedByPromiseWithStack', 'lateThenCallback', 'complex',
+ 'reject', 'finally1', 'finally2'
]
InspectorTest.runTestSuite(testList.map(name => {
return eval(`
diff --git a/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt b/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
new file mode 100644
index 0000000000..3213c6973c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
@@ -0,0 +1,42 @@
+Tests super long async stacks.
+callWithAsyncStack (expr.js:0:26)
+callWithAsyncStack (utils.js:3:4)
+call1 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call2 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call3 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call4 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call5 (wrapper.js:0:20)
+(fetch parent..)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call6 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call7 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call8 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call9 (wrapper.js:0:20)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call10 (wrapper.js:0:21)
+(fetch parent..)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call11 (wrapper.js:0:21)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+call12 (wrapper.js:0:21)
+--Promise.then--
+callWithAsyncStack (utils.js:7:20)
+(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-load-more.js b/deps/v8/test/inspector/debugger/async-stack-load-more.js
new file mode 100644
index 0000000000..3aaaa13076
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/async-stack-load-more.js
@@ -0,0 +1,44 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests super long async stacks.');
+
+contextGroup.addScript(`
+function callWithAsyncStack(f, depth) {
+ if (depth === 0) {
+ f();
+ return;
+ }
+ wrapper = eval('(function call' + depth + '() { callWithAsyncStack(f, depth - 1) }) //# sourceURL=wrapper.js');
+ Promise.resolve().then(wrapper);
+}
+//# sourceURL=utils.js`);
+
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 4});
+ Protocol.Runtime.evaluate({
+ expression: 'callWithAsyncStack(() => {debugger}, 12)//# sourceURL=expr.js'
+ });
+ let {params} = await Protocol.Debugger.oncePaused();
+ let {callFrames, asyncStackTrace, externalAsyncStackTrace} = params;
+ while (true) {
+ session.logCallFrames(callFrames);
+ if (externalAsyncStackTrace) {
+ InspectorTest.log('(fetch parent..)');
+ asyncStackTrace = (await Protocol.Debugger.getStackTrace({
+ stackTraceId: externalAsyncStackTrace
+ })).result.stackTrace;
+ }
+ if (asyncStackTrace) {
+ InspectorTest.log('--' + asyncStackTrace.description + '--');
+ callFrames = asyncStackTrace.callFrames;
+ externalAsyncStackTrace = asyncStackTrace.parentId;
+ asyncStackTrace = asyncStackTrace.parent;
+ } else {
+ break;
+ }
+ }
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/debugger/change-return-value-expected.txt b/deps/v8/test/inspector/debugger/change-return-value-expected.txt
new file mode 100644
index 0000000000..6e91e6ee62
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/change-return-value-expected.txt
@@ -0,0 +1,35 @@
+Checks that we can update return value on pause
+
+Running test: testError
+Set return value not at return position
+{
+ error : {
+ code : -32000
+ message : Could not update return value at non-return position
+ }
+ id : <messageId>
+}
+
+Running test: testUndefined
+Break at return position..
+Update return value to 42..
+Dump actual return value
+{
+ result : {
+ description : 42
+ type : number
+ value : 42
+ }
+}
+
+Running test: testArrow
+Break at return position..
+Update return value to 239..
+Dump actual return value
+{
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/change-return-value.js b/deps/v8/test/inspector/debugger/change-return-value.js
new file mode 100644
index 0000000000..6f5f43fdf2
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/change-return-value.js
@@ -0,0 +1,68 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks that we can update return value on pause');
+
+InspectorTest.runAsyncTestSuite([
+ async function testError() {
+ Protocol.Debugger.enable();
+ let evaluation = Protocol.Runtime.evaluate({
+ expression: 'function foo() { debugger; } foo()',
+ returnByValue: true
+ });
+ let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Set return value not at return position');
+ let result = await Protocol.Debugger.setReturnValue({
+ newValue: { value: 42 },
+ });
+ InspectorTest.logMessage(result);
+ await Protocol.Debugger.disable();
+ },
+
+ async function testUndefined() {
+ Protocol.Debugger.enable();
+ let evaluation = Protocol.Runtime.evaluate({
+ expression: 'function foo() { debugger; } foo()',
+ returnByValue: true
+ });
+ InspectorTest.log('Break at return position..');
+ await Protocol.Debugger.oncePaused();
+ Protocol.Debugger.stepInto();
+ let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Update return value to 42..');
+ Protocol.Debugger.setReturnValue({
+ newValue: { value: 42 },
+ });
+ Protocol.Debugger.resume();
+ let {result} = await evaluation;
+ InspectorTest.log('Dump actual return value');
+ InspectorTest.logMessage(result);
+ await Protocol.Debugger.disable();
+ },
+
+ async function testArrow() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.pause();
+ let evaluation = Protocol.Runtime.evaluate({
+ expression: '(() => 42)()',
+ returnByValue: true
+ });
+ InspectorTest.log('Break at return position..');
+ await Protocol.Debugger.oncePaused();
+ Protocol.Debugger.stepInto();
+ await Protocol.Debugger.oncePaused();
+ Protocol.Debugger.stepInto();
+ let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Update return value to 239..');
+ Protocol.Debugger.setReturnValue({
+ newValue: { value: 239 },
+ });
+ Protocol.Debugger.resume();
+ let {result} = await evaluation;
+ InspectorTest.log('Dump actual return value');
+ InspectorTest.logMessage(result);
+ await Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt
index c114e34012..82599eb7e1 100644
--- a/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt
+++ b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt
@@ -1,37 +1,25 @@
Checks that we collect obsolete async tasks with async stacks.
-Async stacks count: 2
+Async stacks count: 1
Scheduled async tasks: 1
-Created async tasks: 1
-Async tasks with parent: 0
-Recurring async tasks: 1
+Recurring async tasks: 0
Async stacks count: 0
Scheduled async tasks: 0
-Created async tasks: 0
-Async tasks with parent: 0
Recurring async tasks: 0
Async stacks count: 2
-Scheduled async tasks: 0
-Created async tasks: 2
-Async tasks with parent: 2
+Scheduled async tasks: 2
Recurring async tasks: 0
Async stacks count: 0
Scheduled async tasks: 0
-Created async tasks: 0
-Async tasks with parent: 0
Recurring async tasks: 0
Async stacks count: 1
Scheduled async tasks: 1
-Created async tasks: 0
-Async tasks with parent: 0
Recurring async tasks: 0
Async stacks count: 0
Scheduled async tasks: 0
-Created async tasks: 0
-Async tasks with parent: 0
Recurring async tasks: 0
diff --git a/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js
index cfef345d19..0d13c2787c 100644
--- a/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js
+++ b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js
@@ -7,7 +7,7 @@ let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we coll
contextGroup.addScript(`
function test() {
inspector.setMaxAsyncTaskStacks(128);
- var p = Promise.resolve();
+ var p = Promise.resolve().then(() => 42);
inspector.dumpAsyncTaskStacksStateForTest();
inspector.setMaxAsyncTaskStacks(128);
diff --git a/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt b/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt
index 64fef4af77..1f66c2f092 100644
--- a/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt
+++ b/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt
@@ -7,11 +7,11 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(1024)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 1
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(1024)
Run expression 'console.trace(42)' with async chain len: 5
-actual async chain len: 1
+actual async chain len: 5
inspector.setMaxAsyncTaskStacks(1024)
Run expression 'console.trace(42)' with async chain len: 1
@@ -69,7 +69,7 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(2)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 0
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(2)
Run expression 'console.trace(42)' with async chain len: 3
@@ -95,11 +95,11 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(3)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 1
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(3)
Run expression 'console.trace(42)' with async chain len: 3
-actual async chain len: 1
+actual async chain len: 3
inspector.setMaxAsyncTaskStacks(3)
Run expression 'console.trace(42)' with async chain len: 1
@@ -119,11 +119,11 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(4)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 1
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(4)
Run expression 'console.trace(42)' with async chain len: 3
-actual async chain len: 1
+actual async chain len: 3
inspector.setMaxAsyncTaskStacks(4)
Run expression 'console.trace(42)' with async chain len: 1
@@ -143,11 +143,11 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(5)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 1
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(5)
Run expression 'console.trace(42)' with async chain len: 3
-actual async chain len: 1
+actual async chain len: 3
inspector.setMaxAsyncTaskStacks(5)
Run expression 'console.trace(42)' with async chain len: 1
@@ -167,11 +167,11 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(6)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 1
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(6)
Run expression 'console.trace(42)' with async chain len: 3
-actual async chain len: 1
+actual async chain len: 3
inspector.setMaxAsyncTaskStacks(6)
Run expression 'console.trace(42)' with async chain len: 1
@@ -191,11 +191,11 @@ actual async chain len: 1
inspector.setMaxAsyncTaskStacks(7)
Run expression 'console.trace(42)' with async chain len: 2
-actual async chain len: 1
+actual async chain len: 2
inspector.setMaxAsyncTaskStacks(7)
Run expression 'console.trace(42)' with async chain len: 3
-actual async chain len: 1
+actual async chain len: 3
inspector.setMaxAsyncTaskStacks(7)
Run expression 'console.trace(42)' with async chain len: 1
diff --git a/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js b/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js
index 7ac822534b..072cfd8269 100644
--- a/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js
+++ b/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js
@@ -138,11 +138,14 @@ InspectorTest.runAsyncTestSuite([
function runWithAsyncChainPromise(len, source) {
InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
- let then = '.then(() => 1)';
- let pause = `.then(() => { ${source} })`;
- Protocol.Runtime.evaluate({
- expression: `Promise.resolve()${then.repeat(len - 1)}${pause}`
- });
+ let asyncCall = `(function asyncCall(num) {
+ if (num === 0) {
+ ${source};
+ return;
+ }
+ Promise.resolve().then(() => asyncCall(num - 1));
+ })(${len})`;
+ Protocol.Runtime.evaluate({expression: asyncCall});
}
function runWithAsyncChainSetTimeout(len, source) {
diff --git a/deps/v8/test/inspector/debugger/external-stack-trace-expected.txt b/deps/v8/test/inspector/debugger/external-stack-trace-expected.txt
new file mode 100644
index 0000000000..3d5ecca112
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/external-stack-trace-expected.txt
@@ -0,0 +1,42 @@
+Tests external stack traces
+
+Running test: testDebuggerId
+Enabling debugger first time..
+Enabling debugger again..
+> second Debugger.enable returns the same debugger id
+Enabling debugger in another context group..
+> Debugger.enable in another context group returns own debugger id
+
+Running test: testInstrumentation
+{
+ id : <messageId>
+ result : {
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 15
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : stack
+ }
+ }
+}
+
+Running test: testDisableStacksAfterStored
+> external async stack trace is empty
+
+Running test: testDisableStacksAfterStarted
+> external async stack trace is empty
+
+Running test: testExternalStacks
+(anonymous) (expr1-2.js:1:6)
+-- stack2 --
+store (utils.js:2:25)
+(anonymous) (expr2.js:1:11)
+-- stack --
+store (utils.js:2:25)
+(anonymous) (expr1-1.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/external-stack-trace.js b/deps/v8/test/inspector/debugger/external-stack-trace.js
new file mode 100644
index 0000000000..c8392e28c7
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/external-stack-trace.js
@@ -0,0 +1,170 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.log('Tests external stack traces');
+
+let contextGroup1 = new InspectorTest.ContextGroup();
+let session1 = contextGroup1.connect();
+let Protocol1 = session1.Protocol;
+let contextGroup2 = new InspectorTest.ContextGroup();
+let session2 = contextGroup2.connect();
+let Protocol2 = session2.Protocol;
+
+let utilsScript = `
+function store(description) {
+ let buffer = inspector.storeCurrentStackTrace(description);
+ return '[' + new Int32Array(buffer).join(',') + ']';
+}
+
+function started(id) {
+ inspector.externalAsyncTaskStarted(Int32Array.from(JSON.parse(id)).buffer);
+}
+
+function finished(id) {
+ inspector.externalAsyncTaskFinished(Int32Array.from(JSON.parse(id)).buffer);
+}
+//# sourceURL=utils.js`;
+
+contextGroup1.addScript(utilsScript);
+contextGroup2.addScript(utilsScript);
+
+InspectorTest.runAsyncTestSuite([
+ async function testDebuggerId() {
+ InspectorTest.log('Enabling debugger first time..');
+ let {result: {debuggerId}} = await Protocol1.Debugger.enable();
+ let firstDebuggerId = debuggerId;
+ InspectorTest.log('Enabling debugger again..');
+ ({result: {debuggerId}} = await Protocol1.Debugger.enable());
+ if (firstDebuggerId !== debuggerId) {
+ InspectorTest.log(
+ 'FAIL: second Debugger.enable returns different debugger id');
+ } else {
+ InspectorTest.log(
+ '> second Debugger.enable returns the same debugger id');
+ }
+ InspectorTest.log('Enabling debugger in another context group..');
+ ({result: {debuggerId}} = await Protocol2.Debugger.enable());
+ if (firstDebuggerId === debuggerId) {
+ InspectorTest.log(
+ 'FAIL: Debugger.enable in another context group returns the same debugger id');
+ } else {
+ InspectorTest.log(
+ '> Debugger.enable in another context group returns own debugger id');
+ }
+ },
+
+ async function testInstrumentation() {
+ Protocol1.Debugger.enable();
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 32});
+ let result = await Protocol1.Runtime.evaluate(
+ {expression: 'id = inspector.storeCurrentStackTrace(\'stack\')'});
+ let stackTraceId = result.result.result.objectId;
+ Protocol1.Runtime.evaluate({
+ expression: `inspector.externalAsyncTaskStarted(id);
+ debugger;
+ inspector.externalAsyncTaskFinished(id);`
+ });
+ let {params: {callFrames, asyncStackTraceId}} =
+ await Protocol1.Debugger.oncePaused();
+ result = await Protocol1.Debugger.getStackTrace(
+ {stackTraceId: asyncStackTraceId});
+ InspectorTest.logMessage(result);
+ await Protocol1.Debugger.disable();
+ },
+
+ async function testDisableStacksAfterStored() {
+ Protocol1.Debugger.enable();
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 32});
+ let result = await Protocol1.Runtime.evaluate(
+ {expression: 'id = inspector.storeCurrentStackTrace(\'stack\')'});
+ let stackTraceId = result.result.result.objectId;
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 0});
+ Protocol1.Runtime.evaluate({
+ expression: `inspector.externalAsyncTaskStarted(id);
+ debugger;
+ inspector.externalAsyncTaskFinished(id);`
+ });
+ let {params: {callFrames, asyncStackTraceId}} =
+ await Protocol1.Debugger.oncePaused();
+ if (!asyncStackTraceId) {
+ InspectorTest.log('> external async stack trace is empty');
+ } else {
+ InspectorTest.log('FAIL: external async stack trace is reported');
+ }
+ await Protocol1.Debugger.disable();
+ },
+
+ async function testDisableStacksAfterStarted() {
+ Protocol1.Debugger.enable();
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 32});
+ let result = await Protocol1.Runtime.evaluate(
+ {expression: 'id = inspector.storeCurrentStackTrace(\'stack\')'});
+ let stackTraceId = result.result.result.objectId;
+ Protocol1.Runtime.evaluate(
+ {expression: 'inspector.externalAsyncTaskStarted(id);'});
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 0});
+ Protocol1.Runtime.evaluate({
+ expression: `debugger;
+ inspector.externalAsyncTaskFinished(id);`
+ });
+ let {params: {callFrames, asyncStackTraceId}} =
+ await Protocol1.Debugger.oncePaused();
+ if (!asyncStackTraceId) {
+ InspectorTest.log('> external async stack trace is empty');
+ } else {
+ InspectorTest.log('FAIL: external async stack trace is reported');
+ }
+ await Protocol1.Debugger.disable();
+ },
+
+ async function testExternalStacks() {
+
+ let debuggerId1 = (await Protocol1.Debugger.enable()).result.debuggerId;
+ let debuggerId2 = (await Protocol2.Debugger.enable()).result.debuggerId;
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 32});
+ Protocol2.Debugger.setAsyncCallStackDepth({maxDepth: 32});
+ let stackTraceId1 = (await Protocol1.Runtime.evaluate({
+ expression: 'store(\'stack\')//# sourceURL=expr1-1.js'
+ })).result.result.value;
+ let stackTraceId2 = (await Protocol2.Runtime.evaluate({
+ expression: `started('${stackTraceId1}');
+ id = store('stack2');
+ finished('${stackTraceId1}');
+ id
+ //# sourceURL=expr2.js`
+ })).result.result.value;
+ Protocol1.Runtime.evaluate({
+ expression: `started('${stackTraceId2}');
+ debugger;
+ finished('${stackTraceId2}');
+ id
+ //# sourceURL=expr1-2.js`
+ });
+
+ let {params: {callFrames, asyncStackTraceId}} =
+ await Protocol1.Debugger.oncePaused();
+ let debuggers = new Map(
+ [[debuggerId1, Protocol1.Debugger], [debuggerId2, Protocol2.Debugger]]);
+ let sessions = new Map([[debuggerId1, session1], [debuggerId2, session2]]);
+ let currentDebuggerId = debuggerId1;
+ while (true) {
+ sessions.get(currentDebuggerId).logCallFrames(callFrames);
+ if (asyncStackTraceId) {
+ currentDebuggerId = asyncStackTraceId.debuggerId;
+ let {result: {stackTrace}} =
+ await debuggers.get(currentDebuggerId).getStackTrace({
+ stackTraceId: asyncStackTraceId
+ });
+ InspectorTest.log(`-- ${stackTrace.description} --`);
+ callFrames = stackTrace.callFrames;
+ asyncStackTraceId = stackTrace.parentId;
+ } else {
+ break;
+ }
+ }
+
+ Protocol1.Debugger.disable();
+ await Protocol2.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt b/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
index 0c421da607..0d60dbb6a1 100644
--- a/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
+++ b/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
@@ -62,7 +62,7 @@ Running test: testConsoleTraceWithEmptySync
url :
}
]
- description : Promise.resolve
+ description : Promise.then
}
}
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt b/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
new file mode 100644
index 0000000000..53fde7bcd2
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
@@ -0,0 +1,207 @@
+Checks Debugger.scheduleStepIntoAsync.
+
+Running test: testScheduleErrors
+paused at:
+function testNoScheduledTask() {
+ #debugger;
+ return 42;
+
+paused at:
+ debugger;
+ #return 42;
+}
+
+paused at:
+ debugger;
+ return 42;#
+}
+
+
+Running test: testSimple
+paused at:
+function testSimple() {
+ #debugger;
+ Promise.resolve().then(v => v * 2);
+
+paused at:
+ debugger;
+ #Promise.resolve().then(v => v * 2);
+}
+
+paused at:
+ debugger;
+ Promise.resolve().#then(v => v * 2);
+}
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ Promise.resolve().then(#v => v * 2);
+}
+
+
+Running test: testNotResolvedPromise
+paused at:
+ var p = new Promise(resolve => resolveCallback = resolve);
+ #debugger;
+ p.then(v => v * 2);
+
+paused at:
+ debugger;
+ p.#then(v => v * 2);
+ resolveCallback();
+
+paused at:
+ debugger;
+ p.#then(v => v * 2);
+ resolveCallback();
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ p.then(#v => v * 2);
+ resolveCallback();
+
+
+Running test: testTwoAsyncTasks
+paused at:
+function testTwoAsyncTasks() {
+ #debugger;
+ Promise.resolve().then(v => v * 2);
+
+paused at:
+ debugger;
+ #Promise.resolve().then(v => v * 2);
+ Promise.resolve().then(v => v * 4);
+
+paused at:
+ debugger;
+ Promise.resolve().#then(v => v * 2);
+ Promise.resolve().then(v => v * 4);
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ Promise.resolve().then(#v => v * 2);
+ Promise.resolve().then(v => v * 4);
+
+
+Running test: testTwoTasksAndGoToSecond
+paused at:
+function testTwoAsyncTasks() {
+ #debugger;
+ Promise.resolve().then(v => v * 2);
+
+paused at:
+ debugger;
+ #Promise.resolve().then(v => v * 2);
+ Promise.resolve().then(v => v * 4);
+
+paused at:
+ Promise.resolve().then(v => v * 2);
+ #Promise.resolve().then(v => v * 4);
+}
+
+paused at:
+ Promise.resolve().then(v => v * 2);
+ Promise.resolve().#then(v => v * 4);
+}
+
+asyncCallStackTraceId is set
+
+paused at:
+ Promise.resolve().then(v => v * 2);
+ Promise.resolve().then(#v => v * 4);
+}
+
+
+Running test: testTwoAsyncTasksWithBreak
+paused at:
+function testTwoAsyncTasksWithBreak() {
+ #debugger;
+ Promise.resolve().then(v => v * 2);
+
+paused at:
+ debugger;
+ #Promise.resolve().then(v => v * 2);
+ debugger;
+
+paused at:
+ debugger;
+ Promise.resolve().#then(v => v * 2);
+ debugger;
+
+asyncCallStackTraceId is set
+
+paused at:
+ Promise.resolve().then(v => v * 2);
+ #debugger;
+ Promise.resolve().then(v => v * 4);
+
+paused at:
+ debugger;
+ #Promise.resolve().then(v => v * 4);
+}
+
+paused at:
+ debugger;
+ Promise.resolve().#then(v => v * 4);
+}
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ Promise.resolve().then(#v => v * 4);
+}
+
+
+Running test: testPromiseAll
+paused at:
+function testPromiseAll() {
+ #debugger;
+ Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v * 2);
+
+paused at:
+ debugger;
+ #Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v * 2);
+}
+
+paused at:
+ debugger;
+ Promise.all([ Promise.resolve(), Promise.resolve() ]).#then(v => v * 2);
+}
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ Promise.all([ Promise.resolve(), Promise.resolve() ]).then(#v => v * 2);
+}
+
+
+Running test: testWithBlackboxedCode
+paused at:
+function testBlackboxedCreatePromise() {
+ #debugger;
+ createPromise().then(v => v * 2);
+
+paused at:
+ debugger;
+ #createPromise().then(v => v * 2);
+}
+
+paused at:
+ debugger;
+ createPromise().#then(v => v * 2);
+}
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ createPromise().then(#v => v * 2);
+}
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call.js b/deps/v8/test/inspector/debugger/pause-on-async-call.js
new file mode 100644
index 0000000000..ef29905849
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call.js
@@ -0,0 +1,183 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.scheduleStepIntoAsync.');
+
+contextGroup.addScript(`
+function testNoScheduledTask() {
+ debugger;
+ return 42;
+}
+
+function testSimple() {
+ debugger;
+ Promise.resolve().then(v => v * 2);
+}
+
+function testNotResolvedPromise() {
+ var resolveCallback;
+ var p = new Promise(resolve => resolveCallback = resolve);
+ debugger;
+ p.then(v => v * 2);
+ resolveCallback();
+}
+
+function testTwoAsyncTasks() {
+ debugger;
+ Promise.resolve().then(v => v * 2);
+ Promise.resolve().then(v => v * 4);
+}
+
+function testTwoAsyncTasksWithBreak() {
+ debugger;
+ Promise.resolve().then(v => v * 2);
+ debugger;
+ Promise.resolve().then(v => v * 4);
+}
+
+function testPromiseAll() {
+ debugger;
+ Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v * 2);
+}
+
+function testBlackboxedCreatePromise() {
+ debugger;
+ createPromise().then(v => v * 2);
+}
+//# sourceURL=test.js`);
+
+contextGroup.addScript(`
+
+function createPromise() {
+ return Promise.resolve().then(v => v * 3).then(v => v * 4);
+}
+
+//# sourceURL=framework.js`)
+
+session.setupScriptMap();
+
+Protocol.Debugger.enable();
+InspectorTest.runAsyncTestSuite([
+ async function testScheduleErrors() {
+ Protocol.Runtime.evaluate({ expression: 'testNoScheduledTask()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testSimple() {
+ Protocol.Runtime.evaluate({ expression: 'testSimple()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testNotResolvedPromise() {
+ Protocol.Runtime.evaluate({ expression: 'testNotResolvedPromise()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testTwoAsyncTasks() {
+ Protocol.Runtime.evaluate({ expression: 'testTwoAsyncTasks()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testTwoTasksAndGoToSecond() {
+ Protocol.Runtime.evaluate({ expression: 'testTwoAsyncTasks()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testTwoAsyncTasksWithBreak() {
+ Protocol.Runtime.evaluate({ expression: 'testTwoAsyncTasksWithBreak()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testPromiseAll() {
+ Protocol.Runtime.evaluate({ expression: 'testPromiseAll()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testWithBlackboxedCode() {
+ Protocol.Runtime.evaluate({ expression: 'testBlackboxedCreatePromise()' });
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js'] });
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ }
+]);
+
+async function waitPauseAndDumpLocation() {
+ var {params: {callFrames, asyncCallStackTraceId}} =
+ await Protocol.Debugger.oncePaused();
+ InspectorTest.log('paused at:');
+ await session.logSourceLocation(callFrames[0].location);
+ if (asyncCallStackTraceId) {
+ InspectorTest.log('asyncCallStackTraceId is set\n');
+ }
+ return asyncCallStackTraceId;
+}
diff --git a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
index 57357ab15a..936ed3f86e 100644
--- a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
+++ b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
@@ -15,9 +15,16 @@ Run expression 'console.trace()' with async chain len: 3
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 67
- functionName : Promise.resolve.then.then.then
- lineNumber : 0
+ columnNumber : 14
+ functionName : asyncCall
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
@@ -25,13 +32,59 @@ Run expression 'console.trace()' with async chain len: 3
parent : {
callFrames : [
[0] : {
- columnNumber : 46
- functionName :
- lineNumber : 0
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 4
+ functionName :
+ lineNumber : 6
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ }
+ }
}
}
timestamp : <timestamp>
@@ -53,9 +106,16 @@ Run expression 'console.trace()' with async chain len: 3
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 67
- functionName : Promise.resolve.then.then.then
- lineNumber : 0
+ columnNumber : 14
+ functionName : asyncCall
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
@@ -63,14 +123,59 @@ Run expression 'console.trace()' with async chain len: 3
parent : {
callFrames : [
[0] : {
- columnNumber : 46
- functionName :
- lineNumber : 0
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
]
- description : Promise.resolve
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 4
+ functionName :
+ lineNumber : 6
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ }
+ }
}
}
timestamp : <timestamp>
@@ -92,9 +197,16 @@ Run expression 'console.trace()' with async chain len: 3
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 67
- functionName : Promise.resolve.then.then.then
- lineNumber : 0
+ columnNumber : 14
+ functionName : asyncCall
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
@@ -102,14 +214,59 @@ Run expression 'console.trace()' with async chain len: 3
parent : {
callFrames : [
[0] : {
- columnNumber : 46
- functionName :
- lineNumber : 0
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
]
- description : Promise.resolve
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 4
+ functionName :
+ lineNumber : 6
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ }
+ }
}
}
timestamp : <timestamp>
@@ -131,9 +288,16 @@ Run expression 'console.trace()' with async chain len: 3
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 67
- functionName : Promise.resolve.then.then.then
- lineNumber : 0
+ columnNumber : 14
+ functionName : asyncCall
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
@@ -141,14 +305,59 @@ Run expression 'console.trace()' with async chain len: 3
parent : {
callFrames : [
[0] : {
- columnNumber : 46
- functionName :
- lineNumber : 0
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
]
- description : Promise.resolve
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 4
+ functionName :
+ lineNumber : 6
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ }
+ }
}
}
timestamp : <timestamp>
@@ -170,9 +379,16 @@ Run expression 'console.trace()' with async chain len: 3
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 67
- functionName : Promise.resolve.then.then.then
- lineNumber : 0
+ columnNumber : 14
+ functionName : asyncCall
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
@@ -180,14 +396,59 @@ Run expression 'console.trace()' with async chain len: 3
parent : {
callFrames : [
[0] : {
- columnNumber : 46
- functionName :
- lineNumber : 0
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
]
- description : Promise.resolve
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 4
+ functionName :
+ lineNumber : 6
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ }
+ }
}
}
timestamp : <timestamp>
@@ -209,9 +470,16 @@ Run expression 'console.trace()' with async chain len: 3
stackTrace : {
callFrames : [
[0] : {
- columnNumber : 67
- functionName : Promise.resolve.then.then.then
- lineNumber : 0
+ columnNumber : 14
+ functionName : asyncCall
+ lineNumber : 2
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
@@ -219,14 +487,59 @@ Run expression 'console.trace()' with async chain len: 3
parent : {
callFrames : [
[0] : {
- columnNumber : 46
- functionName :
- lineNumber : 0
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
scriptId : <scriptId>
url :
}
]
- description : Promise.resolve
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 33
+ functionName : Promise.resolve.then
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 22
+ functionName : asyncCall
+ lineNumber : 5
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 4
+ functionName :
+ lineNumber : 6
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.then
+ }
+ }
}
}
timestamp : <timestamp>
diff --git a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js
index 072af732c4..3718312c45 100644
--- a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js
+++ b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js
@@ -40,11 +40,14 @@ let {session, contextGroup, Protocol} = InspectorTest.start('Tests how async pro
function runWithAsyncChainPromise(len, source) {
InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
- let then = '.then(() => 1)';
- let pause = `.then(() => { ${source} })`;
- Protocol.Runtime.evaluate({
- expression: `Promise.resolve()${then.repeat(len - 1)}${pause}`
- });
+ let asyncCall = `(function asyncCall(num) {
+ if (num === 0) {
+ ${source};
+ return;
+ }
+ Promise.resolve().then(() => asyncCall(num - 1));
+ })(${len})`;
+ Protocol.Runtime.evaluate({expression: asyncCall});
}
async function setMaxAsyncTaskStacks(max) {
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt b/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt
index afcc5e3071..28f3a19613 100644
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt
+++ b/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt
@@ -21,7 +21,8 @@ paused at:
Running test: testDebuggerStmtBeforeCallback2
paused at:
-debugger; setTimeout(() => 1#, 0);debugger;
+setTimeout('debugger//should-break-here', 0);
+#setTimeout(() => 1, 0);
paused at:
#debugger//should-break-here
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-at-last-line-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-at-last-line-expected.txt
new file mode 100644
index 0000000000..d8b7d70ab6
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-at-last-line-expected.txt
@@ -0,0 +1,16 @@
+Tests breakpoint at last line.
+{
+ breakpointId : <breakpointId>
+ locations : [
+ [0] : {
+ columnNumber : 12
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ ]
+}
+{
+ breakpointId : <breakpointId>
+ locations : [
+ ]
+}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-at-last-line.js b/deps/v8/test/inspector/debugger/set-breakpoint-at-last-line.js
new file mode 100644
index 0000000000..0be590e5a9
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-at-last-line.js
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests breakpoint at last line.');
+
+let source = `
+ let a = 1;
+ //# sourceURL=foo.js
+ let b = 2;
+`;
+
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: source});
+ let {result} = await Protocol.Debugger.setBreakpointByUrl({
+ url: 'foo.js',
+ lineNumber: 3,
+ columnNumber: 12
+ });
+ InspectorTest.logMessage(result);
+ ({result} = await Protocol.Debugger.setBreakpointByUrl({
+ url: 'foo.js',
+ lineNumber: 4
+ }));
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt b/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
new file mode 100644
index 0000000000..26975c1c4c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
@@ -0,0 +1,13 @@
+Test for Debugger.stepInto with breakOnAsyncCall.
+
+Running test: testSetTimeout
+(anonymous) (test.js:0:0)
+asyncCallStackTraceId is set
+setTimeout (test.js:0:11)
+asyncCallStackTraceId is empty
+
+Running test: testPromiseThen
+(anonymous) (test.js:0:2)
+asyncCallStackTraceId is set
+p.then (test.js:0:7)
+asyncCallStackTraceId is empty
diff --git a/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js b/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
new file mode 100644
index 0000000000..6a185c045f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-break-on-async-call.js
@@ -0,0 +1,61 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test for Debugger.stepInto with breakOnAsyncCall.');
+
+InspectorTest.runAsyncTestSuite([
+ async function testSetTimeout() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.pause();
+ let pausedPromise = Protocol.Debugger.oncePaused();
+ Protocol.Runtime.evaluate({
+ expression: 'setTimeout(() => 42, 0)//# sourceURL=test.js'
+ });
+ await pausedPromise;
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let {params: {callFrames, asyncCallStackTraceId}} =
+ await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ if (asyncCallStackTraceId) {
+ InspectorTest.log('asyncCallStackTraceId is set');
+ }
+ Protocol.Debugger.pauseOnAsyncCall(
+ {parentStackTraceId: asyncCallStackTraceId});
+ pausedPromise = Protocol.Debugger.oncePaused();
+ Protocol.Debugger.resume();
+ ({params: {callFrames, asyncCallStackTraceId}} = await pausedPromise);
+ session.logCallFrames(callFrames);
+ if (!asyncCallStackTraceId) {
+ InspectorTest.log('asyncCallStackTraceId is empty');
+ }
+ await Protocol.Debugger.disable();
+ },
+
+ async function testPromiseThen() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: 'var p = Promise.resolve()'});
+ Protocol.Debugger.pause();
+ let pausedPromise = Protocol.Debugger.oncePaused();
+ Protocol.Runtime.evaluate({expression: 'p.then(() => 42)//# sourceURL=test.js'});
+ await pausedPromise;
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let {params: {callFrames, asyncCallStackTraceId}} =
+ await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ if (asyncCallStackTraceId) {
+ InspectorTest.log('asyncCallStackTraceId is set');
+ }
+ Protocol.Debugger.pauseOnAsyncCall(
+ {parentStackTraceId: asyncCallStackTraceId});
+ pausedPromise = Protocol.Debugger.oncePaused();
+ Protocol.Debugger.resume();
+ ({params: {callFrames, asyncCallStackTraceId}} = await pausedPromise);
+ session.logCallFrames(callFrames);
+ if (!asyncCallStackTraceId) {
+ InspectorTest.log('asyncCallStackTraceId is empty');
+ }
+ await Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt b/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
new file mode 100644
index 0000000000..fcc5f9a625
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-expected.txt
@@ -0,0 +1,14 @@
+Test for step-into remote async task
+Setup debugger agents..
+Pause before stack trace is captured..
+Run stepInto with breakOnAsyncCall flag
+Call pauseOnAsyncCall
+Trigger external async task on another context group
+Dump stack trace
+boo (target.js:1:18)
+call (framework.js:3:2)
+(anonymous) (target.js:0:0)
+-- remote-task --
+store (utils.js:2:25)
+foo (source.js:1:13)
+(anonymous) (source.js:2:6)
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task.js b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
new file mode 100644
index 0000000000..7c7c23816d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
@@ -0,0 +1,107 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.log('Test for step-into remote async task');
+
+let contextGroup1 = new InspectorTest.ContextGroup();
+let session1 = contextGroup1.connect();
+let Protocol1 = session1.Protocol;
+let contextGroup2 = new InspectorTest.ContextGroup();
+let session2 = contextGroup2.connect();
+let Protocol2 = session2.Protocol;
+
+let utilsScript = `
+function store(description) {
+ let buffer = inspector.storeCurrentStackTrace(description);
+ return '[' + new Int32Array(buffer).join(',') + ']';
+}
+//# sourceURL=utils.js`;
+
+contextGroup1.addScript(utilsScript);
+contextGroup2.addScript(utilsScript);
+
+let frameworkScript = `
+function call(id, f) {
+ inspector.externalAsyncTaskStarted(Int32Array.from(JSON.parse(id)).buffer);
+ f();
+ inspector.externalAsyncTaskFinished(Int32Array.from(JSON.parse(id)).buffer);
+}
+//# sourceURL=framework.js`;
+
+contextGroup1.addScript(frameworkScript);
+contextGroup2.addScript(frameworkScript);
+
+session1.setupScriptMap();
+session2.setupScriptMap();
+
+(async function test() {
+ InspectorTest.log('Setup debugger agents..');
+ let debuggerId1 = (await Protocol1.Debugger.enable()).result.debuggerId;
+ let debuggerId2 = (await Protocol2.Debugger.enable()).result.debuggerId;
+
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+ Protocol2.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ Protocol1.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
+ Protocol2.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
+
+ InspectorTest.log('Pause before stack trace is captured..');
+ Protocol1.Debugger.setBreakpointByUrl(
+ {lineNumber: 2, columnNumber: 25, url: 'utils.js'});
+ let evaluatePromise = Protocol1.Runtime.evaluate({
+ expression: `(function foo() {
+ return store('remote-task');
+ })()
+ //# sourceURL=source.js`
+ });
+ await Protocol1.Debugger.oncePaused();
+
+ InspectorTest.log('Run stepInto with breakOnAsyncCall flag');
+ Protocol1.Debugger.stepInto({breakOnAsyncCall: true});
+ let {params: {asyncCallStackTraceId}} = await Protocol1.Debugger.oncePaused();
+
+ InspectorTest.log('Call pauseOnAsyncCall');
+ Protocol2.Debugger.pauseOnAsyncCall({
+ parentStackTraceId: asyncCallStackTraceId,
+ });
+ Protocol1.Debugger.resume();
+
+ InspectorTest.log('Trigger external async task on another context group');
+ let stackTraceId = (await evaluatePromise).result.result.value;
+ Protocol2.Runtime.evaluate({
+ expression: `call('${stackTraceId}',
+ function boo() {})
+ //# sourceURL=target.js`
+ });
+
+ InspectorTest.log('Dump stack trace');
+ let {params: {callFrames, asyncStackTraceId}} =
+ await Protocol2.Debugger.oncePaused();
+ let debuggers = new Map(
+ [[debuggerId1, Protocol1.Debugger], [debuggerId2, Protocol2.Debugger]]);
+ let sessions = new Map([[debuggerId1, session1], [debuggerId2, session2]]);
+ let currentDebuggerId = debuggerId1;
+ while (true) {
+ sessions.get(currentDebuggerId).logCallFrames(callFrames);
+ if (asyncStackTraceId) {
+ currentDebuggerId = asyncStackTraceId.debuggerId;
+ let {result: {stackTrace}} =
+ await debuggers.get(currentDebuggerId).getStackTrace({
+ stackTraceId: asyncStackTraceId
+ });
+ InspectorTest.log(`-- ${stackTrace.description} --`);
+ callFrames = stackTrace.callFrames;
+ asyncStackTraceId = stackTrace.parentId;
+ } else {
+ break;
+ }
+ }
+
+ Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 0});
+ Protocol2.Debugger.setAsyncCallStackDepth({maxDepth: 0});
+ await Protocol1.Debugger.disable();
+ await Protocol2.Debugger.disable();
+
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt b/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt
new file mode 100644
index 0000000000..58f1e926d2
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt
@@ -0,0 +1,106 @@
+Checks this in arrow function scope
+ (function() {
+ let f = () => { #debugger; };
+ f();
+
+This on callFrame:
+{
+ type : undefined
+}
+This in evaluateOnCallFrame:
+{
+ type : undefined
+}
+Values equal: true
+
+ let f = () => { debugger; };
+ #f();
+ }).call('a');
+
+This on callFrame:
+{
+ className : String
+ description : String
+ objectId : <objectId>
+ type : object
+}
+This in evaluateOnCallFrame:
+{
+ className : String
+ description : String
+ objectId : <objectId>
+ type : object
+}
+Values equal: true
+
+ f();
+ }).#call('a');
+ return a;
+
+This on callFrame:
+{
+ className : Number
+ description : Number
+ objectId : <objectId>
+ type : object
+}
+This in evaluateOnCallFrame:
+{
+ className : Number
+ description : Number
+ objectId : <objectId>
+ type : object
+}
+Values equal: true
+
+function boo() {
+ foo.call(1)#();
+}
+
+This on callFrame:
+{
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+}
+This in evaluateOnCallFrame:
+{
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+}
+Values equal: true
+
+}
+(() => boo.#call({}))();
+
+This on callFrame:
+{
+ type : undefined
+}
+This in evaluateOnCallFrame:
+{
+ type : undefined
+}
+Values equal: true
+
+}
+(() => boo.call({}))#();
+
+This on callFrame:
+{
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+}
+This in evaluateOnCallFrame:
+{
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+}
+Values equal: false
diff --git a/deps/v8/test/inspector/debugger/this-in-arrow-function.js b/deps/v8/test/inspector/debugger/this-in-arrow-function.js
new file mode 100644
index 0000000000..37c2962e0e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/this-in-arrow-function.js
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks this in arrow function scope');
+
+(async function test() {
+ session.setupScriptMap();
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: `
+function foo() {
+ return () => {
+ let a = this;
+ (function() {
+ let f = () => { debugger; };
+ f();
+ }).call('a');
+ return a;
+ };
+}
+function boo() {
+ foo.call(1)();
+}
+(() => boo.call({}))();`
+ });
+ let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ for (let callFrame of callFrames) {
+ await session.logSourceLocation(callFrame.location);
+
+ InspectorTest.log('This on callFrame:');
+ InspectorTest.logMessage(callFrame.this);
+ let {result:{result}} = await Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: callFrame.callFrameId,
+ expression: 'this'
+ });
+ InspectorTest.log('This in evaluateOnCallFrame:');
+ InspectorTest.logMessage(result);
+
+ if (callFrame.this.type === 'undefined' || result.type === 'undefined') {
+ InspectorTest.log('Values equal: ' + (callFrame.this.type === result.type) + '\n');
+ continue;
+ }
+
+ let {result:{result:{value}}} = await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function equal(a) { return this === a; }',
+ objectId: callFrame.this.objectId,
+ arguments: [ result.value ? {value: result.value} : {objectId: result.objectId}],
+ returnByValue: true
+ });
+ InspectorTest.log('Values equal: ' + value + '\n');
+ }
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt
new file mode 100644
index 0000000000..2b14f901b6
--- /dev/null
+++ b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt
@@ -0,0 +1,7 @@
+Checks sampling heap profiler methods.
+Expected error: V8 sampling heap profiler was not started.
+Allocated size is zero in the beginning: true
+Allocated size is more than 100KB after a chunk is allocated: true
+Allocated size increased after one more chunk is allocated: true
+Allocated size did not change after stopping: true
+Successfully finished
diff --git a/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js
new file mode 100644
index 0000000000..1b82a46fa2
--- /dev/null
+++ b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js
@@ -0,0 +1,48 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --sampling-heap-profiler-suppress-randomness
+
+(async function() {
+ let {contextGroup, Protocol} = InspectorTest.start('Checks sampling heap profiler methods.');
+
+ contextGroup.addScript(`
+ var holder = [];
+ function allocateChunk() {
+ holder.push(new Array(100000).fill(42));
+ }
+ //# sourceURL=test.js`);
+
+ Protocol.HeapProfiler.enable();
+
+ const profile0 = await Protocol.HeapProfiler.getSamplingProfile();
+ InspectorTest.log('Expected error: ' + profile0.error.message);
+
+ await Protocol.HeapProfiler.startSampling();
+ const profile1 = await Protocol.HeapProfiler.getSamplingProfile();
+ const size1 = nodeSize(profile1.result.profile.head);
+ InspectorTest.log('Allocated size is zero in the beginning:', size1 === 0);
+
+ await Protocol.Runtime.evaluate({ expression: 'allocateChunk()' });
+ const profile2 = await Protocol.HeapProfiler.getSamplingProfile();
+ const size2 = nodeSize(profile2.result.profile.head);
+ InspectorTest.log('Allocated size is more than 100KB after a chunk is allocated:', size2 > 100000);
+
+ await Protocol.Runtime.evaluate({ expression: 'allocateChunk()' });
+ const profile3 = await Protocol.HeapProfiler.getSamplingProfile();
+ const size3 = nodeSize(profile3.result.profile.head);
+ InspectorTest.log('Allocated size increased after one more chunk is allocated:', size3 > size2);
+
+ const profile4 = await Protocol.HeapProfiler.stopSampling();
+ const size4 = nodeSize(profile4.result.profile.head);
+ InspectorTest.log('Allocated size did not change after stopping:', size4 === size3);
+
+ InspectorTest.log('Successfully finished');
+ InspectorTest.completeTest();
+
+ function nodeSize(node) {
+ return node.children.reduce((res, child) => res + nodeSize(child),
+ node.callFrame.functionName === 'allocateChunk' ? node.selfSize : 0);
+ }
+})();
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index de89271fbf..56c7431af6 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -322,6 +322,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
backend_runner_ = runner;
}
+ static void ClearAllSessions() { channels_.clear(); }
+
private:
static TaskRunner* backend_runner_;
@@ -688,6 +690,19 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
ToV8String(isolate, "markObjectAsNotInspectable"),
v8::FunctionTemplate::New(
isolate, &InspectorExtension::MarkObjectAsNotInspectable));
+ inspector->Set(ToV8String(isolate, "createObjectWithAccessor"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::CreateObjectWithAccessor));
+ inspector->Set(ToV8String(isolate, "storeCurrentStackTrace"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::StoreCurrentStackTrace));
+ inspector->Set(ToV8String(isolate, "externalAsyncTaskStarted"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::ExternalAsyncTaskStarted));
+ inspector->Set(
+ ToV8String(isolate, "externalAsyncTaskFinished"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::ExternalAsyncTaskFinished));
global->Set(ToV8String(isolate, "inspector"), inspector);
}
@@ -827,14 +842,98 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
v8::True(isolate))
.ToChecked();
}
+
+ static void CreateObjectWithAccessor(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsBoolean()) {
+ fprintf(stderr,
+ "Internal error: createObjectWithAccessor('accessor name', "
+ "hasSetter)\n");
+ Exit();
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ if (args[1].As<v8::Boolean>()->Value()) {
+ templ->SetAccessor(v8::Local<v8::String>::Cast(args[0]), AccessorGetter,
+ AccessorSetter);
+ } else {
+ templ->SetAccessor(v8::Local<v8::String>::Cast(args[0]), AccessorGetter);
+ }
+ args.GetReturnValue().Set(
+ templ->NewInstance(isolate->GetCurrentContext()).ToLocalChecked());
+ }
+
+ static void AccessorGetter(v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ isolate->ThrowException(ToV8String(isolate, "Getter is called"));
+ }
+
+ static void AccessorSetter(v8::Local<v8::String> property,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ isolate->ThrowException(ToV8String(isolate, "Setter is called"));
+ }
+
+ static void StoreCurrentStackTrace(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ fprintf(stderr,
+ "Internal error: storeCurrentStackTrace('description')\n");
+ Exit();
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ v8::internal::Vector<uint16_t> description =
+ ToVector(args[0].As<v8::String>());
+ v8_inspector::StringView description_view(description.start(),
+ description.length());
+ v8_inspector::V8StackTraceId id =
+ data->StoreCurrentStackTrace(description_view);
+ v8::Local<v8::ArrayBuffer> buffer =
+ v8::ArrayBuffer::New(isolate, sizeof(id));
+ *static_cast<v8_inspector::V8StackTraceId*>(buffer->GetContents().Data()) =
+ id;
+ args.GetReturnValue().Set(buffer);
+ }
+
+ static void ExternalAsyncTaskStarted(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsArrayBuffer()) {
+ fprintf(stderr, "Internal error: externalAsyncTaskStarted(id)\n");
+ Exit();
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ v8_inspector::V8StackTraceId* id =
+ static_cast<v8_inspector::V8StackTraceId*>(
+ args[0].As<v8::ArrayBuffer>()->GetContents().Data());
+ data->ExternalAsyncTaskStarted(*id);
+ }
+
+ static void ExternalAsyncTaskFinished(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsArrayBuffer()) {
+ fprintf(stderr, "Internal error: externalAsyncTaskFinished(id)\n");
+ Exit();
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ v8_inspector::V8StackTraceId* id =
+ static_cast<v8_inspector::V8StackTraceId*>(
+ args[0].As<v8::ArrayBuffer>()->GetContents().Data());
+ data->ExternalAsyncTaskFinished(*id);
+ }
};
} // namespace
int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
+ v8::V8::InitializePlatform(platform.get());
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::V8::Initialize();
@@ -895,5 +994,6 @@ int main(int argc, char* argv[]) {
backend_runner.Join();
delete startup_data.data;
+ UtilsExtension::ClearAllSessions();
return 0;
}
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 8362dd4488..a18ef90ca0 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -63,19 +63,20 @@ IsolateData::IsolateData(TaskRunner* task_runner,
params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
params.snapshot_blob = startup_data;
- isolate_ = v8::Isolate::New(params);
+ isolate_.reset(v8::Isolate::New(params));
isolate_->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
if (with_inspector) {
isolate_->AddMessageListener(&IsolateData::MessageHandler);
isolate_->SetPromiseRejectCallback(&IsolateData::PromiseRejectHandler);
- inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
+ inspector_ = v8_inspector::V8Inspector::create(isolate_.get(), this);
}
- v8::HandleScope handle_scope(isolate_);
+ v8::HandleScope handle_scope(isolate_.get());
not_inspectable_private_.Reset(
- isolate_, v8::Private::ForApi(isolate_, v8::String::NewFromUtf8(
- isolate_, "notInspectable",
- v8::NewStringType::kNormal)
- .ToLocalChecked()));
+ isolate_.get(),
+ v8::Private::ForApi(isolate_.get(), v8::String::NewFromUtf8(
+ isolate_.get(), "notInspectable",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked()));
}
IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
@@ -84,27 +85,27 @@ IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
}
int IsolateData::CreateContextGroup() {
- v8::HandleScope handle_scope(isolate_);
+ v8::HandleScope handle_scope(isolate_.get());
v8::Local<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate_);
+ v8::ObjectTemplate::New(isolate_.get());
for (auto it = setup_global_tasks_.begin(); it != setup_global_tasks_.end();
++it) {
- (*it)->Run(isolate_, global_template);
+ (*it)->Run(isolate_.get(), global_template);
}
v8::Local<v8::Context> context =
- v8::Context::New(isolate_, nullptr, global_template);
+ v8::Context::New(isolate_.get(), nullptr, global_template);
context->SetAlignedPointerInEmbedderData(kIsolateDataIndex, this);
int context_group_id = ++last_context_group_id_;
// Should be 2-byte aligned.
context->SetAlignedPointerInEmbedderData(
kContextGroupIdIndex, reinterpret_cast<void*>(context_group_id * 2));
- contexts_[context_group_id].Reset(isolate_, context);
+ contexts_[context_group_id].Reset(isolate_.get(), context);
if (inspector_) FireContextCreated(context, context_group_id);
return context_group_id;
}
v8::Local<v8::Context> IsolateData::GetContext(int context_group_id) {
- return contexts_[context_group_id].Get(isolate_);
+ return contexts_[context_group_id].Get(isolate_.get());
}
int IsolateData::GetContextGroupId(v8::Local<v8::Context> context) {
@@ -126,7 +127,7 @@ void IsolateData::RegisterModule(v8::Local<v8::Context> context,
}
v8::Local<v8::Value> result;
if (!module->Evaluate(context).ToLocal(&result)) return;
- modules_[name] = v8::Global<v8::Module>(isolate_, module);
+ modules_[name] = v8::Global<v8::Module>(isolate_.get(), module);
}
// static
@@ -134,8 +135,8 @@ v8::MaybeLocal<v8::Module> IsolateData::ModuleResolveCallback(
v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
v8::Local<v8::Module> referrer) {
IsolateData* data = IsolateData::FromContext(context);
- std::string str = *v8::String::Utf8Value(data->isolate_, specifier);
- return data->modules_[ToVector(specifier)].Get(data->isolate_);
+ std::string str = *v8::String::Utf8Value(data->isolate(), specifier);
+ return data->modules_[ToVector(specifier)].Get(data->isolate());
}
int IsolateData::ConnectSession(int context_group_id,
@@ -202,11 +203,27 @@ void IsolateData::AsyncTaskFinished(void* task) {
inspector_->asyncTaskFinished(task);
}
+v8_inspector::V8StackTraceId IsolateData::StoreCurrentStackTrace(
+ const v8_inspector::StringView& description) {
+ return inspector_->storeCurrentStackTrace(description);
+}
+
+void IsolateData::ExternalAsyncTaskStarted(
+ const v8_inspector::V8StackTraceId& parent) {
+ inspector_->externalAsyncTaskStarted(parent);
+}
+
+void IsolateData::ExternalAsyncTaskFinished(
+ const v8_inspector::V8StackTraceId& parent) {
+ inspector_->externalAsyncTaskFinished(parent);
+}
+
void IsolateData::AddInspectedObject(int session_id,
v8::Local<v8::Value> object) {
auto it = sessions_.find(session_id);
if (it == sessions_.end()) return;
- std::unique_ptr<Inspectable> inspectable(new Inspectable(isolate_, object));
+ std::unique_ptr<Inspectable> inspectable(
+ new Inspectable(isolate_.get(), object));
it->second->addInspectedObject(std::move(inspectable));
}
@@ -363,7 +380,7 @@ double IsolateData::currentTimeMS() {
}
void IsolateData::SetMemoryInfo(v8::Local<v8::Value> memory_info) {
- memory_info_.Reset(isolate_, memory_info);
+ memory_info_.Reset(isolate_.get(), memory_info);
}
void IsolateData::SetLogConsoleApiMessageCalls(bool log) {
@@ -393,11 +410,11 @@ void IsolateData::consoleAPIMessage(int contextGroupId,
unsigned lineNumber, unsigned columnNumber,
v8_inspector::V8StackTrace* stack) {
if (!log_console_api_message_calls_) return;
- Print(isolate_, message);
+ Print(isolate_.get(), message);
fprintf(stdout, " (");
- Print(isolate_, url);
+ Print(isolate_.get(), url);
fprintf(stdout, ":%d:%d)", lineNumber, columnNumber);
- Print(isolate_, stack->toString()->string());
+ Print(isolate_.get(), stack->toString()->string());
fprintf(stdout, "\n");
}
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index 455b44b49b..5eb9803a74 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -30,7 +30,7 @@ class IsolateData : public v8_inspector::V8InspectorClient {
v8::StartupData* startup_data, bool with_inspector);
static IsolateData* FromContext(v8::Local<v8::Context> context);
- v8::Isolate* isolate() const { return isolate_; }
+ v8::Isolate* isolate() const { return isolate_.get(); }
TaskRunner* task_runner() const { return task_runner_; }
// Setting things up.
@@ -58,6 +58,12 @@ class IsolateData : public v8_inspector::V8InspectorClient {
bool recurring);
void AsyncTaskStarted(void* task);
void AsyncTaskFinished(void* task);
+
+ v8_inspector::V8StackTraceId StoreCurrentStackTrace(
+ const v8_inspector::StringView& description);
+ void ExternalAsyncTaskStarted(const v8_inspector::V8StackTraceId& parent);
+ void ExternalAsyncTaskFinished(const v8_inspector::V8StackTraceId& parent);
+
void AddInspectedObject(int session_id, v8::Local<v8::Value> object);
// Test utilities.
@@ -109,9 +115,17 @@ class IsolateData : public v8_inspector::V8InspectorClient {
bool isInspectableHeapObject(v8::Local<v8::Object>) override;
void maxAsyncCallStackDepthChanged(int depth) override;
+ // The isolate gets deleted by its {Dispose} method, not by the default
+ // deleter. Therefore we have to define a custom deleter for the unique_ptr to
+ call {Dispose}. We have to use the unique_ptr so that the isolate gets
+ // disposed in the right order, relative to other member variables.
+ struct IsolateDeleter {
+ void operator()(v8::Isolate* isolate) const { isolate->Dispose(); }
+ };
+
TaskRunner* task_runner_;
SetupGlobalTasks setup_global_tasks_;
- v8::Isolate* isolate_;
+ std::unique_ptr<v8::Isolate, IsolateDeleter> isolate_;
std::unique_ptr<v8_inspector::V8Inspector> inspector_;
int last_context_group_id_ = 0;
std::map<int, v8::Global<v8::Context>> contexts_;
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 4ae96614dc..749aa3fecc 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -37,8 +37,11 @@ InspectorTest.logMessage = function(originalMessage) {
if (message.id)
message.id = "<messageId>";
- const nonStableFields = new Set(["objectId", "scriptId", "exceptionId", "timestamp",
- "executionContextId", "callFrameId", "breakpointId", "bindRemoteObjectFunctionId", "formatterObjectId" ]);
+ const nonStableFields = new Set([
+ 'objectId', 'scriptId', 'exceptionId', 'timestamp', 'executionContextId',
+ 'callFrameId', 'breakpointId', 'bindRemoteObjectFunctionId',
+ 'formatterObjectId', 'debuggerId'
+ ]);
var objects = [ message ];
while (objects.length) {
var object = objects.shift();
diff --git a/deps/v8/test/inspector/runtime/await-promise-expected.txt b/deps/v8/test/inspector/runtime/await-promise-expected.txt
index 3e23a71a38..2b906dd49b 100644
--- a/deps/v8/test/inspector/runtime/await-promise-expected.txt
+++ b/deps/v8/test/inspector/runtime/await-promise-expected.txt
@@ -26,22 +26,6 @@ Running test: testRejectedPromise
}
exceptionId : <exceptionId>
lineNumber : 0
- stackTrace : {
- callFrames : [
- ]
- parent : {
- callFrames : [
- [0] : {
- columnNumber : 8
- functionName :
- lineNumber : 0
- scriptId : <scriptId>
- url :
- }
- ]
- description : Promise.reject
- }
- }
text : Uncaught (in promise)
}
result : {
@@ -66,29 +50,6 @@ Running test: testRejectedPromiseWithStack
}
exceptionId : <exceptionId>
lineNumber : 0
- stackTrace : {
- callFrames : [
- ]
- parent : {
- callFrames : [
- [0] : {
- columnNumber : 4
- functionName : rejectPromise
- lineNumber : 17
- scriptId : <scriptId>
- url : test.js
- }
- [1] : {
- columnNumber : 0
- functionName :
- lineNumber : 0
- scriptId : <scriptId>
- url :
- }
- ]
- description : Promise.reject
- }
- }
text : Uncaught (in promise)
}
result : {
@@ -114,29 +75,6 @@ Running test: testRejectedPromiseWithError
}
exceptionId : <exceptionId>
lineNumber : 0
- stackTrace : {
- callFrames : [
- ]
- parent : {
- callFrames : [
- [0] : {
- columnNumber : 4
- functionName : rejectPromiseWithAnError
- lineNumber : 24
- scriptId : <scriptId>
- url : test.js
- }
- [1] : {
- columnNumber : 0
- functionName :
- lineNumber : 0
- scriptId : <scriptId>
- url :
- }
- ]
- description : Promise.reject
- }
- }
text : Uncaught (in promise) Error: MyError
}
result : {
diff --git a/deps/v8/test/inspector/runtime/console-time-end-format-expected.txt b/deps/v8/test/inspector/runtime/console-time-end-format-expected.txt
index 92fa158d2b..d09dc41c79 100644
--- a/deps/v8/test/inspector/runtime/console-time-end-format-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-time-end-format-expected.txt
@@ -27,3 +27,11 @@ timeEnd: 1000000000000000.2ms
Running test: huge
js: 1e+42ms
timeEnd: 1e+42ms
+
+Running test: undefinedAsLabel
+js: 1ms
+default: 1ms
+
+Running test: emptyAsLabel
+js: 1ms
+default: 1ms
diff --git a/deps/v8/test/inspector/runtime/console-time-end-format.js b/deps/v8/test/inspector/runtime/console-time-end-format.js
index 7e4ff788b4..4b28f67e8e 100644
--- a/deps/v8/test/inspector/runtime/console-time-end-format.js
+++ b/deps/v8/test/inspector/runtime/console-time-end-format.js
@@ -9,37 +9,43 @@ Protocol.Runtime.onConsoleAPICalled(message => {
InspectorTest.log(message.params.args[0].value);
});
-InspectorTest.runTestSuite([
- function zero(next) {
- checkInterval(0.0).then(next);
+InspectorTest.runAsyncTestSuite([
+ function zero() {
+ return checkInterval(0.0);
},
- function verySmall(next) {
- checkInterval(1e-15).then(next);
+ function verySmall() {
+ return checkInterval(1e-15);
},
- function small(next) {
- checkInterval(0.001).then(next);
+ function small() {
+ return checkInterval(0.001);
},
- function regular(next) {
- checkInterval(1.2345).then(next);
+ function regular() {
+ return checkInterval(1.2345);
},
- function big(next) {
- checkInterval(10000.2345).then(next);
+ function big() {
+ return checkInterval(10000.2345);
},
- function veryBig(next) {
- checkInterval(1e+15 + 0.2345).then(next);
+ function veryBig() {
+ return checkInterval(1e+15 + 0.2345);
},
- function huge(next) {
- checkInterval(1e+42).then(next);
+ function huge() {
+ return checkInterval(1e+42);
+ },
+ function undefinedAsLabel() {
+ return checkInterval(1.0, 'undefined');
+ },
+ function emptyAsLabel() {
+ return checkInterval(1.0, '');
}
]);
-function checkInterval(time) {
+async function checkInterval(time, label) {
+ label = label === undefined ? '\'timeEnd\'' : label;
utils.setCurrentTimeMSForTest(0.0);
- return Protocol.Runtime.evaluate({
- expression: `console.log('js: ' + ${time} + 'ms')`})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'console.time(\'timeEnd\')'}))
- .then(() => utils.setCurrentTimeMSForTest(time))
- .then(() => Protocol.Runtime.evaluate({
- expression: 'console.timeEnd(\'timeEnd\')'}));
+ Protocol.Runtime.evaluate({
+ expression: `console.log('js: ' + ${time} + 'ms')`
+ });
+ await Protocol.Runtime.evaluate({expression: `console.time(${label})`});
+ utils.setCurrentTimeMSForTest(time);
+ await Protocol.Runtime.evaluate({expression: `console.timeEnd(${label})`});
}
diff --git a/deps/v8/test/inspector/runtime/create-context-expected.txt b/deps/v8/test/inspector/runtime/create-context-expected.txt
index e64f75bc57..770d2e32d2 100644
--- a/deps/v8/test/inspector/runtime/create-context-expected.txt
+++ b/deps/v8/test/inspector/runtime/create-context-expected.txt
@@ -22,6 +22,7 @@ Checks createContext().
{
id : <messageId>
result : {
+ debuggerId : <debuggerId>
}
}
#debugger;
diff --git a/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt b/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt
new file mode 100644
index 0000000000..648187c2b4
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt
@@ -0,0 +1,33 @@
+Runtime.getProperties for objects with accessor
+title property with getter and setter:
+{
+ configurable : false
+ enumerable : false
+ get : {
+ className : Function
+ description : function nativeGetter() { [native code] }
+ objectId : <objectId>
+ type : function
+ }
+ isOwn : true
+ name : title
+ set : {
+ className : Function
+ description : function nativeSetter() { [native code] }
+ objectId : <objectId>
+ type : function
+ }
+}
+title property with getter only:
+{
+ configurable : false
+ enumerable : false
+ get : {
+ className : Function
+ description : function nativeGetter() { [native code] }
+ objectId : <objectId>
+ type : function
+ }
+ isOwn : true
+ name : title
+}
diff --git a/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor.js b/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor.js
new file mode 100644
index 0000000000..e621008f9f
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor.js
@@ -0,0 +1,29 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Runtime.getProperties for objects with accessor');
+
+(async function test() {
+ let {result:{result:{objectId}}} = await Protocol.Runtime.evaluate({
+ expression: 'inspector.createObjectWithAccessor(\'title\', true)'
+ });
+ let {result:{result}} = await Protocol.Runtime.getProperties({
+ objectId,
+ ownProperties: true
+ });
+ InspectorTest.log('title property with getter and setter:');
+ InspectorTest.logMessage(result.find(property => property.name === 'title'));
+
+ ({result:{result:{objectId}}} = await Protocol.Runtime.evaluate({
+ expression: 'inspector.createObjectWithAccessor(\'title\', false)'
+ }));
+ ({result:{result}} = await Protocol.Runtime.getProperties({
+ objectId,
+ ownProperties: true
+ }));
+ InspectorTest.log('title property with getter only:');
+ InspectorTest.logMessage(result.find(property => property.name === 'title'));
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index 9c943d9848..f33384e1cf 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -17,7 +17,6 @@ EXPECTED_SUFFIX = "-expected.txt"
RESOURCES_FOLDER = "resources"
class InspectorProtocolTestSuite(testsuite.TestSuite):
-
def __init__(self, name, root):
super(InspectorProtocolTestSuite, self).__init__(name, root)
@@ -39,25 +38,26 @@ class InspectorProtocolTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
- def GetFlagsForTestCase(self, testcase, context):
+ def GetShellForTestCase(self, testcase):
+ return 'inspector-test'
+
+ def GetParametersForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
- flags = [] + context.mode_flags
+ flags = testcase.flags + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += shlex.split(match.strip())
- testname = testcase.path.split(os.path.sep)[-1]
- testfilename = os.path.join(self.root, testcase.path + self.suffix())
- protocoltestfilename = os.path.join(self.root, PROTOCOL_TEST_JS)
- return testcase.flags + flags + [ protocoltestfilename, testfilename ]
+ files = [
+ os.path.join(self.root, PROTOCOL_TEST_JS),
+ os.path.join(self.root, testcase.path + self.suffix()),
+ ]
+ return files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
with open(filename) as f:
return f.read()
- def shell(self):
- return "inspector-test"
-
def _IgnoreLine(self, string):
"""Ignore empty lines, valgrind output and Android output."""
if not string: return True
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt b/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt
index 0db7882562..e04a4ec3df 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt
+++ b/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt
@@ -47,3 +47,5 @@ function f(/*null*/a) {
return 'second';
/*string*/};
f(null);
+
+Running test: testStopTwice
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js b/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
index dad9874ff7..be4e0bdfd9 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
+++ b/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
@@ -166,5 +166,13 @@ InspectorTest.runAsyncTestSuite([
Protocol.Profiler.stopTypeProfile();
Protocol.Profiler.disable();
await Protocol.Runtime.disable();
- }
+ },
+ async function testStopTwice() {
+ Protocol.Runtime.enable();
+ await Protocol.Profiler.enable();
+ await Protocol.Profiler.stopTypeProfile();
+ await Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
]);
diff --git a/deps/v8/test/intl/date-format/invalid-time.js b/deps/v8/test/intl/date-format/invalid-time.js
new file mode 100644
index 0000000000..ef625453a0
--- /dev/null
+++ b/deps/v8/test/intl/date-format/invalid-time.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var df = new Intl.DateTimeFormat();
+
+assertThrows("df.format(Infinity)", RangeError);
+assertThrows("df.formatToParts(Infinity)", RangeError);
+assertThrows("df.format(-Infinity)", RangeError);
+assertThrows("df.formatToParts(-Infinity)", RangeError);
+assertThrows("df.format(NaN)", RangeError);
+assertThrows("df.formatToParts(NaN)", RangeError);
+
+// https://crbug.com/774833
+var df2 = new Intl.DateTimeFormat('en', {'hour': 'numeric'});
+Date.prototype.valueOf = "ponies";
+assertEquals(df.format(Date.now()), df.format());
+assertEquals(df2.format(Date.now()), df2.format());
+assertEquals(df.formatToParts(Date.now()), df.formatToParts());
+assertEquals(df2.formatToParts(Date.now()), df2.formatToParts());
diff --git a/deps/v8/test/intl/number-format/format-currency.js b/deps/v8/test/intl/number-format/format-currency.js
index 004c566ce4..97e49f9aa6 100755
--- a/deps/v8/test/intl/number-format/format-currency.js
+++ b/deps/v8/test/intl/number-format/format-currency.js
@@ -16,4 +16,4 @@ assertEquals("JPY54,306", nf_JPY.format(parseFloat(54306.4047970)));
var nf_EUR = new Intl.NumberFormat(['pt'], {style: 'currency', currency: 'EUR'});
-assertEquals("€1.000,00", nf_EUR.format(1000.00));
+assertEquals("€\u00a01.000,00", nf_EUR.format(1000.00));
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index c7f17bbb57..977dc11e2e 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -56,9 +56,9 @@ class IntlTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
- def GetFlagsForTestCase(self, testcase, context):
+ def GetParametersForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
- flags = ["--allow-natives-syntax"] + context.mode_flags
+ flags = testcase.flags + ["--allow-natives-syntax"] + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += match.strip().split()
@@ -70,12 +70,11 @@ class IntlTestSuite(testsuite.TestSuite):
files.append(os.path.join(self.root, testcase.path + self.suffix()))
files.append(os.path.join(self.root, "regexp-assert.js"))
- flags += files
+ all_files = list(files)
if context.isolates:
- flags.append("--isolate")
- flags += files
+ all_files += ["--isolate"] + files
- return testcase.flags + flags
+ return all_files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
diff --git a/deps/v8/test/js-perf-test/Collections/common.js b/deps/v8/test/js-perf-test/Collections/common.js
index 3ea3933374..7ae835f2ec 100644
--- a/deps/v8/test/js-perf-test/Collections/common.js
+++ b/deps/v8/test/js-perf-test/Collections/common.js
@@ -2,30 +2,47 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
var N = 10;
+var LargeN = 1e4;
var keys;
+var keyValuePairs;
+
+function SetupKeyValuePairsFromKeys() {
+ keyValuePairs = keys.map((v) => [v, v]);
+}
+
+function SetupSmiKeys(count = 2 * N) {
+ keys = Array.from({ length : count }, (v, i) => i);
+}
+function SetupSmiKeyValuePairs(count = 2 * N) {
+ SetupSmiKeys(count);
+ SetupKeyValuePairsFromKeys();
+}
-function SetupSmiKeys() {
- keys = new Array(N * 2);
- for (var i = 0; i < N * 2; i++) {
- keys[i] = i;
- }
+function SetupStringKeys(count = 2 * N) {
+ keys = Array.from({ length : count }, (v, i) => 's' + i);
}
+function SetupStringKeyValuePairs(count = 2 * N) {
+ SetupStringKeys(count);
+ SetupKeyValuePairsFromKeys();
+}
-function SetupStringKeys() {
- keys = new Array(N * 2);
- for (var i = 0; i < N * 2; i++) {
- keys[i] = 's' + i;
- }
+function SetupObjectKeys(count = 2 * N) {
+ keys = Array.from({ length : count }, (v, i) => ({}));
}
+function SetupObjectKeyValuePairs(count = 2 * N) {
+ SetupObjectKeys(count);
+ SetupKeyValuePairsFromKeys();
+}
+
+function SetupDoubleKeys(count = 2 * N) {
+ keys = Array.from({ length : count }, (v, i) => i + 0.234);
+}
-function SetupObjectKeys() {
- keys = new Array(N * 2);
- for (var i = 0; i < N * 2; i++) {
- keys[i] = {};
- }
+function SetupDoubleKeyValuePairs(count = 2 * N) {
+ SetupDoubleKeys(count);
+ SetupKeyValuePairsFromKeys();
}
diff --git a/deps/v8/test/js-perf-test/Collections/map.js b/deps/v8/test/js-perf-test/Collections/map.js
index ee0899189d..39e97552b9 100644
--- a/deps/v8/test/js-perf-test/Collections/map.js
+++ b/deps/v8/test/js-perf-test/Collections/map.js
@@ -10,7 +10,6 @@ var MapSmiBenchmark = new BenchmarkSuite('Map-Smi', [1000], [
new Benchmark('Delete', false, false, 0, MapDeleteSmi, MapSetupSmi, MapTearDown),
]);
-
var MapStringBenchmark = new BenchmarkSuite('Map-String', [1000], [
new Benchmark('Set', false, false, 0, MapSetString, MapSetupStringBase, MapTearDown),
new Benchmark('Has', false, false, 0, MapHasString, MapSetupString, MapTearDown),
@@ -18,7 +17,6 @@ var MapStringBenchmark = new BenchmarkSuite('Map-String', [1000], [
new Benchmark('Delete', false, false, 0, MapDeleteString, MapSetupString, MapTearDown),
]);
-
var MapObjectBenchmark = new BenchmarkSuite('Map-Object', [1000], [
new Benchmark('Set', false, false, 0, MapSetObject, MapSetupObjectBase, MapTearDown),
new Benchmark('Has', false, false, 0, MapHasObject, MapSetupObject, MapTearDown),
@@ -26,60 +24,87 @@ var MapObjectBenchmark = new BenchmarkSuite('Map-Object', [1000], [
new Benchmark('Delete', false, false, 0, MapDeleteObject, MapSetupObject, MapTearDown),
]);
+var MapDoubleBenchmark = new BenchmarkSuite('Map-Double', [1000], [
+ new Benchmark('Set', false, false, 0, MapSetDouble, MapSetupDoubleBase, MapTearDown),
+ new Benchmark('Has', false, false, 0, MapHasDouble, MapSetupDouble, MapTearDown),
+ new Benchmark('Get', false, false, 0, MapGetDouble, MapSetupDouble, MapTearDown),
+ new Benchmark('Delete', false, false, 0, MapDeleteDouble, MapSetupDouble, MapTearDown),
+]);
+
+var MapObjectLargeBenchmark = new BenchmarkSuite('Map-Object-Set-Get-Large', [1e7], [
+ new Benchmark('Set-Get', false, false, 0, MapSetGetObjectLarge,
+ MapSetupObjectBaseLarge, MapTearDown),
+]);
var MapIterationBenchmark = new BenchmarkSuite('Map-Iteration', [1000], [
new Benchmark('ForEach', false, false, 0, MapForEach, MapSetupSmi, MapTearDown),
]);
-
var MapIterationBenchmark = new BenchmarkSuite('Map-Iterator', [1000], [
new Benchmark('Iterator', false, false, 0, MapIterator, MapSetupSmi, MapTearDown),
]);
+var MapConstructorBenchmark = new BenchmarkSuite('Map-Constructor', [1000], [
+ new Benchmark('Smi', false, false, 0, MapConstructorSmi, SetupSmiKeyValuePairs, MapTearDown),
+ new Benchmark('String', false, false, 0, MapConstructorString, SetupStringKeyValuePairs, MapTearDown),
+ new Benchmark('Object', false, false, 0, MapConstructorObject, SetupObjectKeyValuePairs, MapTearDown),
+ new Benchmark('Double', false, false, 0, MapConstructorDouble, SetupDoubleKeyValuePairs, MapTearDown),
+]);
var map;
-
function MapSetupSmiBase() {
SetupSmiKeys();
map = new Map;
}
-
function MapSetupSmi() {
MapSetupSmiBase();
MapSetSmi();
}
-
function MapSetupStringBase() {
SetupStringKeys();
map = new Map;
}
-
function MapSetupString() {
MapSetupStringBase();
MapSetString();
}
-
function MapSetupObjectBase() {
SetupObjectKeys();
map = new Map;
}
+function MapSetupObjectBaseLarge() {
+ SetupObjectKeys(2 * LargeN);
+ map = new Map;
+}
function MapSetupObject() {
MapSetupObjectBase();
MapSetObject();
}
+function MapSetupDoubleBase() {
+ SetupDoubleKeys();
+ map = new Map;
+}
+
+function MapSetupDouble() {
+ MapSetupDoubleBase();
+ MapSetDouble();
+}
function MapTearDown() {
map = null;
}
+function MapConstructorSmi() {
+ map = new Map(keyValuePairs);
+}
function MapSetSmi() {
for (var i = 0; i < N; i++) {
@@ -87,7 +112,6 @@ function MapSetSmi() {
}
}
-
function MapHasSmi() {
for (var i = 0; i < N; i++) {
if (!map.has(keys[i])) {
@@ -101,7 +125,6 @@ function MapHasSmi() {
}
}
-
function MapGetSmi() {
for (var i = 0; i < N; i++) {
if (map.get(keys[i]) !== i) {
@@ -125,13 +148,16 @@ function MapDeleteSmi() {
}
+function MapConstructorString() {
+ map = new Map(keyValuePairs);
+}
+
function MapSetString() {
for (var i = 0; i < N; i++) {
map.set(keys[i], i);
}
}
-
function MapHasString() {
for (var i = 0; i < N; i++) {
if (!map.has(keys[i])) {
@@ -159,7 +185,6 @@ function MapGetString() {
}
}
-
function MapDeleteString() {
// This is run more than once per setup so we will end up deleting items
// more than once. Therefore, we do not the return value of delete.
@@ -169,13 +194,16 @@ function MapDeleteString() {
}
+function MapConstructorObject() {
+ map = new Map(keyValuePairs);
+}
+
function MapSetObject() {
for (var i = 0; i < N; i++) {
map.set(keys[i], i);
}
}
-
function MapHasObject() {
for (var i = 0; i < N; i++) {
if (!map.has(keys[i])) {
@@ -189,7 +217,6 @@ function MapHasObject() {
}
}
-
function MapGetObject() {
for (var i = 0; i < N; i++) {
if (map.get(keys[i]) !== i) {
@@ -203,6 +230,21 @@ function MapGetObject() {
}
}
+function MapSetGetObjectLarge() {
+ for (var i = 0; i < LargeN; i++) {
+ map.set(keys[i * 2], i);
+ }
+ for (var i = 0; i < LargeN; i++) {
+ if (map.get(keys[i * 2]) !== i) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * LargeN; i++) {
+ if (map.get(keys[i * 2 + 1]) !== undefined) {
+ throw new Error();
+ }
+ }
+}
function MapDeleteObject() {
// This is run more than once per setup so we will end up deleting items
@@ -213,6 +255,51 @@ function MapDeleteObject() {
}
+function MapConstructorDouble() {
+ map = new Map(keyValuePairs);
+}
+
+function MapSetDouble() {
+ for (var i = 0; i < N; i++) {
+ map.set(keys[i], i);
+ }
+}
+
+function MapHasDouble() {
+ for (var i = 0; i < N; i++) {
+ if (!map.has(keys[i])) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (map.has(keys[i])) {
+ throw new Error();
+ }
+ }
+}
+
+function MapGetDouble() {
+ for (var i = 0; i < N; i++) {
+ if (map.get(keys[i]) !== i) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (map.get(keys[i]) !== undefined) {
+ throw new Error();
+ }
+ }
+}
+
+function MapDeleteDouble() {
+ // This is run more than once per setup so we will end up deleting items
+ // more than once. Therefore, we do not check the return value of delete.
+ for (var i = 0; i < N; i++) {
+ map.delete(keys[i]);
+ }
+}
+
+
function MapForEach() {
map.forEach(function(v, k) {
if (v !== k) {
@@ -221,7 +308,6 @@ function MapForEach() {
});
}
-
function MapIterator() {
var result = 0;
for (const v of map.values()) result += v;
diff --git a/deps/v8/test/js-perf-test/Collections/run.js b/deps/v8/test/js-perf-test/Collections/run.js
index 50f1ee1051..6fcdd83bc7 100644
--- a/deps/v8/test/js-perf-test/Collections/run.js
+++ b/deps/v8/test/js-perf-test/Collections/run.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+"use strict";
load('../base.js');
load('common.js');
diff --git a/deps/v8/test/js-perf-test/Collections/set.js b/deps/v8/test/js-perf-test/Collections/set.js
index dd4092530f..bcf98405ea 100644
--- a/deps/v8/test/js-perf-test/Collections/set.js
+++ b/deps/v8/test/js-perf-test/Collections/set.js
@@ -24,6 +24,13 @@ var SetObjectBenchmark = new BenchmarkSuite('Set-Object', [1000], [
]);
+var SetDoubleBenchmark = new BenchmarkSuite('Set-Double', [1000], [
+ new Benchmark('Set', false, false, 0, SetAddDouble, SetSetupDoubleBase, SetTearDown),
+ new Benchmark('Has', false, false, 0, SetHasDouble, SetSetupDouble, SetTearDown),
+ new Benchmark('Delete', false, false, 0, SetDeleteDouble, SetSetupDouble, SetTearDown),
+]);
+
+
var SetIterationBenchmark = new BenchmarkSuite('Set-Iteration', [1000], [
new Benchmark('ForEach', false, false, 0, SetForEach, SetSetupSmi, SetTearDown),
]);
@@ -34,6 +41,13 @@ var SetIterationBenchmark = new BenchmarkSuite('Set-Iterator', [1000], [
]);
+var SetConstructorBenchmark = new BenchmarkSuite('Set-Constructor', [1000], [
+ new Benchmark('Smi', false, false, 0, SetConstructorSmi, SetupSmiKeys, SetTearDown),
+ new Benchmark('String', false, false, 0, SetConstructorString, SetupStringKeys, SetTearDown),
+ new Benchmark('Object', false, false, 0, SetConstructorObject, SetupObjectKeys, SetTearDown),
+ new Benchmark('Double', false, false, 0, SetConstructorDouble, SetupDoubleKeys, SetTearDown),
+]);
+
var set;
@@ -73,11 +87,28 @@ function SetSetupObject() {
}
+function SetSetupDoubleBase() {
+ SetupDoubleKeys();
+ set = new Set;
+}
+
+
+function SetSetupDouble() {
+ SetSetupDoubleBase();
+ SetAddDouble();
+}
+
+
function SetTearDown() {
set = null;
}
+function SetConstructorSmi() {
+ set = new Set(keys);
+}
+
+
function SetAddSmi() {
for (var i = 0; i < N; i++) {
set.add(keys[i], i);
@@ -108,6 +139,11 @@ function SetDeleteSmi() {
}
+function SetConstructorString() {
+ set = new Set(keys);
+}
+
+
function SetAddString() {
for (var i = 0; i < N; i++) {
set.add(keys[i], i);
@@ -138,6 +174,11 @@ function SetDeleteString() {
}
+function SetConstructorObject() {
+ set = new Set(keys);
+}
+
+
function SetAddObject() {
for (var i = 0; i < N; i++) {
set.add(keys[i], i);
@@ -168,6 +209,41 @@ function SetDeleteObject() {
}
+function SetConstructorDouble() {
+ set = new Set(keys);
+}
+
+
+function SetAddDouble() {
+ for (var i = 0; i < N; i++) {
+ set.add(keys[i], i);
+ }
+}
+
+
+function SetHasDouble() {
+ for (var i = 0; i < N; i++) {
+ if (!set.has(keys[i])) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (set.has(keys[i])) {
+ throw new Error();
+ }
+ }
+}
+
+
+function SetDeleteDouble() {
+ // This is run more than once per setup so we will end up deleting items
+ // more than once. Therefore, we do not check the return value of delete.
+ for (var i = 0; i < N; i++) {
+ set.delete(keys[i]);
+ }
+}
+
+
function SetForEach() {
set.forEach(function(v, k) {
if (v !== k) {
diff --git a/deps/v8/test/js-perf-test/Collections/weakmap.js b/deps/v8/test/js-perf-test/Collections/weakmap.js
index 9aa265f25a..e13d1b36d7 100644
--- a/deps/v8/test/js-perf-test/Collections/weakmap.js
+++ b/deps/v8/test/js-perf-test/Collections/weakmap.js
@@ -14,6 +14,16 @@ var MapBenchmark = new BenchmarkSuite('WeakMap', [1000], [
WeakMapTearDown),
]);
+var MapBenchmark = new BenchmarkSuite('WeakMapSetGet-Large', [1e7], [
+ new Benchmark('Set-Get', false, false, 0, WeakMapSetGetLarge,
+ WeakMapSetupBaseLarge, WeakMapTearDown),
+]);
+
+var MapBenchmark = new BenchmarkSuite('WeakMap-Constructor', [1000], [
+ new Benchmark('Constructor', false, false, 0, WeakMapConstructor, SetupObjectKeyValuePairs,
+ WeakMapTearDown),
+]);
+
var wm;
@@ -24,6 +34,12 @@ function WeakMapSetupBase() {
}
+function WeakMapSetupBaseLarge() {
+ SetupObjectKeys(2 * LargeN);
+ wm = new WeakMap;
+}
+
+
function WeakMapSetup() {
WeakMapSetupBase();
WeakMapSet();
@@ -35,6 +51,10 @@ function WeakMapTearDown() {
}
+function WeakMapConstructor() {
+ wm = new WeakMap(keyValuePairs);
+}
+
function WeakMapSet() {
for (var i = 0; i < N; i++) {
wm.set(keys[i], i);
@@ -77,3 +97,19 @@ function WeakMapDelete() {
wm.delete(keys[i]);
}
}
+
+function WeakMapSetGetLarge() {
+ for (var i = 0; i < LargeN; i++) {
+ wm.set(keys[i * 2], i);
+ }
+ for (var i = 0; i < LargeN; i++) {
+ if (wm.get(keys[i * 2]) !== i) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * LargeN; i++) {
+ if (wm.get(keys[i * 2 + 1]) !== undefined) {
+ throw new Error();
+ }
+ }
+}
diff --git a/deps/v8/test/js-perf-test/Collections/weakset.js b/deps/v8/test/js-perf-test/Collections/weakset.js
index 2936477b35..1dca043c34 100644
--- a/deps/v8/test/js-perf-test/Collections/weakset.js
+++ b/deps/v8/test/js-perf-test/Collections/weakset.js
@@ -12,6 +12,10 @@ var SetBenchmark = new BenchmarkSuite('WeakSet', [1000], [
WeakSetTearDown),
]);
+var SetBenchmark = new BenchmarkSuite('WeakSet-Constructor', [1000], [
+ new Benchmark('Constructor', false, false, 0, WeakSetConstructor, SetupObjectKeys,
+ WeakSetTearDown),
+]);
var ws;
@@ -33,6 +37,11 @@ function WeakSetTearDown() {
}
+function WeakSetConstructor() {
+ ws = new WeakSet(keys);
+}
+
+
function WeakSetAdd() {
for (var i = 0; i < N; i++) {
ws.add(keys[i]);
diff --git a/deps/v8/test/js-perf-test/ExpressionDepth/run.js b/deps/v8/test/js-perf-test/ExpressionDepth/run.js
index 9e3075ace7..1b3bd83998 100644
--- a/deps/v8/test/js-perf-test/ExpressionDepth/run.js
+++ b/deps/v8/test/js-perf-test/ExpressionDepth/run.js
@@ -24,6 +24,7 @@ AddTest('Add', '+');
AddTest('Sub', '-');
AddTest('BitwiseOr', '|');
AddTestCustomPrologue('StringConcat', '+', '"string" +');
+AddTestCustomPrologue('TemplateString', '} ${', '`${', '}`');
function TestExpressionDepth(depth, expression, prologue, epilogue) {
var func = '(function f(a) {\n' + prologue;
@@ -35,18 +36,43 @@ function TestExpressionDepth(depth, expression, prologue, epilogue) {
}
function RunTest(name, expression, prologue, epilogue) {
- var depth;
+ var low_depth = 0;
+ var high_depth = 1;
+
+ // Find the upper limit where depth breaks down.
try {
- for (depth = 0; depth < 20000; depth += 100) {
- TestExpressionDepth(depth, expression, prologue, epilogue);
+ while (high_depth <= 65536) {
+ TestExpressionDepth(high_depth, expression, prologue, epilogue);
+ low_depth = high_depth;
+ high_depth *= 4;
}
+ // Looks like we can't get the depth to break down, just report
+ // the maximum depth tested.
+ print(name + '-ExpressionDepth(Score): ' + low_depth);
+ return;
} catch (e) {
if (!e instanceof RangeError) {
print(name + '-ExpressionDepth(Score): ERROR');
return;
}
}
- print(name + '-ExpressionDepth(Score): ' + depth);
+
+ // Binary search the actual limit.
+ while (low_depth + 1 < high_depth) {
+ var mid_depth = Math.round((low_depth + high_depth) / 2);
+ try {
+ TestExpressionDepth(mid_depth, expression, prologue, epilogue);
+ low_depth = mid_depth;
+ } catch (e) {
+ if (!e instanceof RangeError) {
+ print(name + '-ExpressionDepth(Score): ERROR');
+ return;
+ }
+ high_depth = mid_depth;
+ }
+ }
+
+ print(name + '-ExpressionDepth(Score): ' + low_depth);
}
function AddTest(name, expression, in_test) {
@@ -59,6 +85,6 @@ function AddTest(name, expression, in_test) {
RunTest(name, expression, prologue, epilogue);
}
-function AddTestCustomPrologue(name, expression, prologue) {
- RunTest(name, expression, prologue, '');
+function AddTestCustomPrologue(name, expression, prologue, epilogue='') {
+ RunTest(name, expression, prologue, epilogue);
}
diff --git a/deps/v8/test/js-perf-test/Inspector/debugger.js b/deps/v8/test/js-perf-test/Inspector/debugger.js
index 506d2a3550..c2ec6d993d 100644
--- a/deps/v8/test/js-perf-test/Inspector/debugger.js
+++ b/deps/v8/test/js-perf-test/Inspector/debugger.js
@@ -13,6 +13,9 @@
benchy('Debugger.getPossibleBreakpoints',
DebuggerGetPossibleBreakpoints,
SetupGetPossibleBreakpoints);
+ benchy('AsyncStacksInstrumentation',
+ AsyncStacksInstrumentation,
+ SetupAsyncStacksInstrumentation);
function Setup() {
SendMessage('Debugger.enable');
@@ -57,4 +60,20 @@
start: {lineNumber: 0, columnNumber: 0, scriptId: scriptId}
});
}
+
+ function SetupAsyncStacksInstrumentation() {
+ Setup();
+ SendMessage('Debugger.setAsyncCallStackDepth', {maxDepth: 1024});
+ }
+
+ function AsyncStacksInstrumentation() {
+ var p = Promise.resolve();
+ var nopCallback = () => undefined;
+ var done = false;
+ for (let i = 0; i < 1000; ++i) {
+ p = p.then(nopCallback);
+ }
+ p = p.then(() => done = true);
+ while (!done) %RunMicrotasks();
+ }
})();
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 420421099a..6b0076f6fb 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -150,15 +150,20 @@
{"name": "Map-Smi"},
{"name": "Map-String"},
{"name": "Map-Object"},
+ {"name": "Map-Object-Set-Get-Large"},
+ {"name": "Map-Double"},
{"name": "Map-Iteration"},
{"name": "Map-Iterator"},
{"name": "Set-Smi"},
{"name": "Set-String"},
{"name": "Set-Object"},
+ {"name": "Set-Double"},
{"name": "Set-Iteration"},
{"name": "Set-Iterator"},
{"name": "WeakMap"},
- {"name": "WeakSet"}
+ {"name": "WeakMap-Constructor"},
+ {"name": "WeakSet"},
+ {"name": "WeakSet-Constructor"}
]
},
{
@@ -602,12 +607,13 @@
"path": ["Inspector"],
"main": "run.js",
"resources": [ "debugger.js", "runtime.js" ],
- "flags": ["--enable-inspector"],
+ "flags": ["--enable-inspector","--allow-natives-syntax"],
"results_regexp": "^%s\\-Inspector\\(Score\\): (.+)$",
"tests": [
{"name": "Debugger.paused"},
{"name": "Runtime.evaluate(String16Cstor)"},
- {"name": "Debugger.getPossibleBreakpoints"}
+ {"name": "Debugger.getPossibleBreakpoints"},
+ {"name": "AsyncStacksInstrumentation"}
]
},
{
diff --git a/deps/v8/test/js-perf-test/SixSpeed.json b/deps/v8/test/js-perf-test/SixSpeed.json
index 15a2792c6a..a2fb560886 100644
--- a/deps/v8/test/js-perf-test/SixSpeed.json
+++ b/deps/v8/test/js-perf-test/SixSpeed.json
@@ -258,14 +258,14 @@
{
"name": "ES5",
"main": "run.js",
- "resources": ["map_set_add/es5.js"],
- "test_flags": ["map_set_add/es5"]
+ "resources": ["map_string/es5.js"],
+ "test_flags": ["map_string/es5"]
},
{
"name": "ES6",
"main": "run.js",
- "resources": ["map_set_add/es6.js"],
- "test_flags": ["map_set_add/es6"]
+ "resources": ["map_string/es6.js"],
+ "test_flags": ["map_string/es6"]
}
]
}
diff --git a/deps/v8/test/message/console.out b/deps/v8/test/message/console.out
deleted file mode 100644
index 7813ccd025..0000000000
--- a/deps/v8/test/message/console.out
+++ /dev/null
@@ -1,15 +0,0 @@
-default: {NUMBER}
-abcd: {NUMBER}
-b: 0.000000
-a: {NUMBER}
-log more
-warn 2
-debug
-info
-*%(basename)s:25: Error: exception
-console.info({ toString: () => {throw new Error("exception");} })
- ^
-Error: exception
- at Object.toString (*%(basename)s:25:39)
- at console.info (<anonymous>)
- at *%(basename)s:25:9
diff --git a/deps/v8/test/message/arrow-bare-rest-param.js b/deps/v8/test/message/fail/arrow-bare-rest-param.js
index b826ec20b7..b826ec20b7 100644
--- a/deps/v8/test/message/arrow-bare-rest-param.js
+++ b/deps/v8/test/message/fail/arrow-bare-rest-param.js
diff --git a/deps/v8/test/message/arrow-bare-rest-param.out b/deps/v8/test/message/fail/arrow-bare-rest-param.out
index 76a25a455d..76a25a455d 100644
--- a/deps/v8/test/message/arrow-bare-rest-param.out
+++ b/deps/v8/test/message/fail/arrow-bare-rest-param.out
diff --git a/deps/v8/test/message/arrow-formal-parameters.js b/deps/v8/test/message/fail/arrow-formal-parameters.js
index 30f9c86048..30f9c86048 100644
--- a/deps/v8/test/message/arrow-formal-parameters.js
+++ b/deps/v8/test/message/fail/arrow-formal-parameters.js
diff --git a/deps/v8/test/message/arrow-formal-parameters.out b/deps/v8/test/message/fail/arrow-formal-parameters.out
index bafcf452e3..bafcf452e3 100644
--- a/deps/v8/test/message/arrow-formal-parameters.out
+++ b/deps/v8/test/message/fail/arrow-formal-parameters.out
diff --git a/deps/v8/test/message/arrow-invalid-rest-2.js b/deps/v8/test/message/fail/arrow-invalid-rest-2.js
index 3517803d30..3517803d30 100644
--- a/deps/v8/test/message/arrow-invalid-rest-2.js
+++ b/deps/v8/test/message/fail/arrow-invalid-rest-2.js
diff --git a/deps/v8/test/message/arrow-invalid-rest-2.out b/deps/v8/test/message/fail/arrow-invalid-rest-2.out
index aef0fb0041..aef0fb0041 100644
--- a/deps/v8/test/message/arrow-invalid-rest-2.out
+++ b/deps/v8/test/message/fail/arrow-invalid-rest-2.out
diff --git a/deps/v8/test/message/arrow-invalid-rest.js b/deps/v8/test/message/fail/arrow-invalid-rest.js
index 870dbe9f54..870dbe9f54 100644
--- a/deps/v8/test/message/arrow-invalid-rest.js
+++ b/deps/v8/test/message/fail/arrow-invalid-rest.js
diff --git a/deps/v8/test/message/arrow-invalid-rest.out b/deps/v8/test/message/fail/arrow-invalid-rest.out
index 520c67393f..520c67393f 100644
--- a/deps/v8/test/message/arrow-invalid-rest.out
+++ b/deps/v8/test/message/fail/arrow-invalid-rest.out
diff --git a/deps/v8/test/message/arrow-missing.js b/deps/v8/test/message/fail/arrow-missing.js
index b9f9acd05c..b9f9acd05c 100644
--- a/deps/v8/test/message/arrow-missing.js
+++ b/deps/v8/test/message/fail/arrow-missing.js
diff --git a/deps/v8/test/message/arrow-missing.out b/deps/v8/test/message/fail/arrow-missing.out
index bad6157a0a..bad6157a0a 100644
--- a/deps/v8/test/message/arrow-missing.out
+++ b/deps/v8/test/message/fail/arrow-missing.out
diff --git a/deps/v8/test/message/arrow-param-after-rest-2.js b/deps/v8/test/message/fail/arrow-param-after-rest-2.js
index 617c8726d7..617c8726d7 100644
--- a/deps/v8/test/message/arrow-param-after-rest-2.js
+++ b/deps/v8/test/message/fail/arrow-param-after-rest-2.js
diff --git a/deps/v8/test/message/arrow-param-after-rest-2.out b/deps/v8/test/message/fail/arrow-param-after-rest-2.out
index 27785cfb02..27785cfb02 100644
--- a/deps/v8/test/message/arrow-param-after-rest-2.out
+++ b/deps/v8/test/message/fail/arrow-param-after-rest-2.out
diff --git a/deps/v8/test/message/arrow-param-after-rest.js b/deps/v8/test/message/fail/arrow-param-after-rest.js
index 9192bc6c0c..9192bc6c0c 100644
--- a/deps/v8/test/message/arrow-param-after-rest.js
+++ b/deps/v8/test/message/fail/arrow-param-after-rest.js
diff --git a/deps/v8/test/message/arrow-param-after-rest.out b/deps/v8/test/message/fail/arrow-param-after-rest.out
index 5b36e43584..5b36e43584 100644
--- a/deps/v8/test/message/arrow-param-after-rest.out
+++ b/deps/v8/test/message/fail/arrow-param-after-rest.out
diff --git a/deps/v8/test/message/arrow-strict-eval-bare-parameter.js b/deps/v8/test/message/fail/arrow-strict-eval-bare-parameter.js
index 3cc8254f05..3cc8254f05 100644
--- a/deps/v8/test/message/arrow-strict-eval-bare-parameter.js
+++ b/deps/v8/test/message/fail/arrow-strict-eval-bare-parameter.js
diff --git a/deps/v8/test/message/arrow-strict-eval-bare-parameter.out b/deps/v8/test/message/fail/arrow-strict-eval-bare-parameter.out
index e2230a1bb2..e2230a1bb2 100644
--- a/deps/v8/test/message/arrow-strict-eval-bare-parameter.out
+++ b/deps/v8/test/message/fail/arrow-strict-eval-bare-parameter.out
diff --git a/deps/v8/test/message/arrow-two-rest-params.js b/deps/v8/test/message/fail/arrow-two-rest-params.js
index 222f10ab4f..222f10ab4f 100644
--- a/deps/v8/test/message/arrow-two-rest-params.js
+++ b/deps/v8/test/message/fail/arrow-two-rest-params.js
diff --git a/deps/v8/test/message/arrow-two-rest-params.out b/deps/v8/test/message/fail/arrow-two-rest-params.out
index 7147ebcf11..7147ebcf11 100644
--- a/deps/v8/test/message/arrow-two-rest-params.out
+++ b/deps/v8/test/message/fail/arrow-two-rest-params.out
diff --git a/deps/v8/test/message/async-arrow-invalid-rest-2.js b/deps/v8/test/message/fail/async-arrow-invalid-rest-2.js
index ff5245f457..ff5245f457 100644
--- a/deps/v8/test/message/async-arrow-invalid-rest-2.js
+++ b/deps/v8/test/message/fail/async-arrow-invalid-rest-2.js
diff --git a/deps/v8/test/message/async-arrow-invalid-rest-2.out b/deps/v8/test/message/fail/async-arrow-invalid-rest-2.out
index ff90771fbb..ff90771fbb 100644
--- a/deps/v8/test/message/async-arrow-invalid-rest-2.out
+++ b/deps/v8/test/message/fail/async-arrow-invalid-rest-2.out
diff --git a/deps/v8/test/message/async-arrow-invalid-rest.js b/deps/v8/test/message/fail/async-arrow-invalid-rest.js
index c77a7eb4b4..c77a7eb4b4 100644
--- a/deps/v8/test/message/async-arrow-invalid-rest.js
+++ b/deps/v8/test/message/fail/async-arrow-invalid-rest.js
diff --git a/deps/v8/test/message/async-arrow-invalid-rest.out b/deps/v8/test/message/fail/async-arrow-invalid-rest.out
index 31fd1ab0e1..31fd1ab0e1 100644
--- a/deps/v8/test/message/async-arrow-invalid-rest.out
+++ b/deps/v8/test/message/fail/async-arrow-invalid-rest.out
diff --git a/deps/v8/test/message/async-arrow-param-after-rest.js b/deps/v8/test/message/fail/async-arrow-param-after-rest.js
index a050749a13..a050749a13 100644
--- a/deps/v8/test/message/async-arrow-param-after-rest.js
+++ b/deps/v8/test/message/fail/async-arrow-param-after-rest.js
diff --git a/deps/v8/test/message/async-arrow-param-after-rest.out b/deps/v8/test/message/fail/async-arrow-param-after-rest.out
index 51d8c879b2..51d8c879b2 100644
--- a/deps/v8/test/message/async-arrow-param-after-rest.out
+++ b/deps/v8/test/message/fail/async-arrow-param-after-rest.out
diff --git a/deps/v8/test/message/await-non-async.js b/deps/v8/test/message/fail/await-non-async.js
index 9b93bfc5ef..9b93bfc5ef 100644
--- a/deps/v8/test/message/await-non-async.js
+++ b/deps/v8/test/message/fail/await-non-async.js
diff --git a/deps/v8/test/message/await-non-async.out b/deps/v8/test/message/fail/await-non-async.out
index 3198e8d7b1..3198e8d7b1 100644
--- a/deps/v8/test/message/await-non-async.out
+++ b/deps/v8/test/message/fail/await-non-async.out
diff --git a/deps/v8/test/message/call-non-constructable.js b/deps/v8/test/message/fail/call-non-constructable.js
index 261acbdad0..261acbdad0 100644
--- a/deps/v8/test/message/call-non-constructable.js
+++ b/deps/v8/test/message/fail/call-non-constructable.js
diff --git a/deps/v8/test/message/call-non-constructable.out b/deps/v8/test/message/fail/call-non-constructable.out
index 89f689790a..89f689790a 100644
--- a/deps/v8/test/message/call-non-constructable.out
+++ b/deps/v8/test/message/fail/call-non-constructable.out
diff --git a/deps/v8/test/message/call-primitive-constructor.js b/deps/v8/test/message/fail/call-primitive-constructor.js
index a5c9266682..a5c9266682 100644
--- a/deps/v8/test/message/call-primitive-constructor.js
+++ b/deps/v8/test/message/fail/call-primitive-constructor.js
diff --git a/deps/v8/test/message/call-primitive-constructor.out b/deps/v8/test/message/fail/call-primitive-constructor.out
index ad5172acce..ad5172acce 100644
--- a/deps/v8/test/message/call-primitive-constructor.out
+++ b/deps/v8/test/message/fail/call-primitive-constructor.out
diff --git a/deps/v8/test/message/call-primitive-function.js b/deps/v8/test/message/fail/call-primitive-function.js
index b5872513dc..b5872513dc 100644
--- a/deps/v8/test/message/call-primitive-function.js
+++ b/deps/v8/test/message/fail/call-primitive-function.js
diff --git a/deps/v8/test/message/call-primitive-function.out b/deps/v8/test/message/fail/call-primitive-function.out
index be707ed15c..be707ed15c 100644
--- a/deps/v8/test/message/call-primitive-function.out
+++ b/deps/v8/test/message/fail/call-primitive-function.out
diff --git a/deps/v8/test/message/call-undeclared-constructor.js b/deps/v8/test/message/fail/call-undeclared-constructor.js
index e98fc1a24e..e98fc1a24e 100644
--- a/deps/v8/test/message/call-undeclared-constructor.js
+++ b/deps/v8/test/message/fail/call-undeclared-constructor.js
diff --git a/deps/v8/test/message/call-undeclared-constructor.out b/deps/v8/test/message/fail/call-undeclared-constructor.out
index bbfa37c21b..bbfa37c21b 100644
--- a/deps/v8/test/message/call-undeclared-constructor.out
+++ b/deps/v8/test/message/fail/call-undeclared-constructor.out
diff --git a/deps/v8/test/message/call-undeclared-function.js b/deps/v8/test/message/fail/call-undeclared-function.js
index 2c6f9aaec1..2c6f9aaec1 100644
--- a/deps/v8/test/message/call-undeclared-function.js
+++ b/deps/v8/test/message/fail/call-undeclared-function.js
diff --git a/deps/v8/test/message/call-undeclared-function.out b/deps/v8/test/message/fail/call-undeclared-function.out
index c853122b37..c853122b37 100644
--- a/deps/v8/test/message/call-undeclared-function.out
+++ b/deps/v8/test/message/fail/call-undeclared-function.out
diff --git a/deps/v8/test/message/class-constructor-accessor.js b/deps/v8/test/message/fail/class-constructor-accessor.js
index fcc9868b14..fcc9868b14 100644
--- a/deps/v8/test/message/class-constructor-accessor.js
+++ b/deps/v8/test/message/fail/class-constructor-accessor.js
diff --git a/deps/v8/test/message/class-constructor-accessor.out b/deps/v8/test/message/fail/class-constructor-accessor.out
index 8776f54db1..8776f54db1 100644
--- a/deps/v8/test/message/class-constructor-accessor.out
+++ b/deps/v8/test/message/fail/class-constructor-accessor.out
diff --git a/deps/v8/test/message/class-constructor-generator.js b/deps/v8/test/message/fail/class-constructor-generator.js
index a9a0ef862d..a9a0ef862d 100644
--- a/deps/v8/test/message/class-constructor-generator.js
+++ b/deps/v8/test/message/fail/class-constructor-generator.js
diff --git a/deps/v8/test/message/class-constructor-generator.out b/deps/v8/test/message/fail/class-constructor-generator.out
index 5075e511cc..5075e511cc 100644
--- a/deps/v8/test/message/class-constructor-generator.out
+++ b/deps/v8/test/message/fail/class-constructor-generator.out
diff --git a/deps/v8/test/message/fail/class-field-constructor.js b/deps/v8/test/message/fail/class-field-constructor.js
new file mode 100644
index 0000000000..baeb04e94d
--- /dev/null
+++ b/deps/v8/test/message/fail/class-field-constructor.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields
+
+class X {
+ constructor = function() {};
+}
diff --git a/deps/v8/test/message/fail/class-field-constructor.out b/deps/v8/test/message/fail/class-field-constructor.out
new file mode 100644
index 0000000000..51f26957ad
--- /dev/null
+++ b/deps/v8/test/message/fail/class-field-constructor.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Classes may not have a field named 'constructor'
+ constructor = function() {};
+ ^^^^^^^^^^^
+SyntaxError: Classes may not have a field named 'constructor' \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-field-static-constructor.js b/deps/v8/test/message/fail/class-field-static-constructor.js
new file mode 100644
index 0000000000..b64cf6254c
--- /dev/null
+++ b/deps/v8/test/message/fail/class-field-static-constructor.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields
+
+class X {
+ static constructor = function() {};
+}
diff --git a/deps/v8/test/message/fail/class-field-static-constructor.out b/deps/v8/test/message/fail/class-field-static-constructor.out
new file mode 100644
index 0000000000..6831d83552
--- /dev/null
+++ b/deps/v8/test/message/fail/class-field-static-constructor.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Classes may not have a field named 'constructor'
+ static constructor = function() {};
+ ^^^^^^^^^^^
+SyntaxError: Classes may not have a field named 'constructor' \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-field-static-prototype.js b/deps/v8/test/message/fail/class-field-static-prototype.js
new file mode 100644
index 0000000000..da8120481a
--- /dev/null
+++ b/deps/v8/test/message/fail/class-field-static-prototype.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields
+
+class X {
+ static prototype = function() {};
+}
diff --git a/deps/v8/test/message/fail/class-field-static-prototype.out b/deps/v8/test/message/fail/class-field-static-prototype.out
new file mode 100644
index 0000000000..06d6a75842
--- /dev/null
+++ b/deps/v8/test/message/fail/class-field-static-prototype.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Classes may not have a static property named 'prototype'
+ static prototype = function() {};
+ ^^^^^^^^^
+SyntaxError: Classes may not have a static property named 'prototype' \ No newline at end of file
diff --git a/deps/v8/test/message/class-spread-property.js b/deps/v8/test/message/fail/class-spread-property.js
index 5887df538b..5887df538b 100644
--- a/deps/v8/test/message/class-spread-property.js
+++ b/deps/v8/test/message/fail/class-spread-property.js
diff --git a/deps/v8/test/message/class-spread-property.out b/deps/v8/test/message/fail/class-spread-property.out
index df15e50262..df15e50262 100644
--- a/deps/v8/test/message/class-spread-property.out
+++ b/deps/v8/test/message/fail/class-spread-property.out
diff --git a/deps/v8/test/message/console.js b/deps/v8/test/message/fail/console.js
index f49ce4c608..d8cbfa28c9 100644
--- a/deps/v8/test/message/console.js
+++ b/deps/v8/test/message/fail/console.js
@@ -18,7 +18,6 @@ console.timeEnd("a", "b");
console.log("log", "more");
console.warn("warn", { toString: () => 2 });
-console.error("error");
console.debug("debug");
console.info("info");
diff --git a/deps/v8/test/message/fail/console.out b/deps/v8/test/message/fail/console.out
new file mode 100644
index 0000000000..abfc4ad718
--- /dev/null
+++ b/deps/v8/test/message/fail/console.out
@@ -0,0 +1,15 @@
+console.timeEnd: default, {NUMBER}
+console.timeEnd: abcd, {NUMBER}
+console.timeEnd: b, {NUMBER}
+console.timeEnd: a, {NUMBER}
+log more
+console.warn: warn 2
+console.debug: debug
+console.info: info
+console.info: *%(basename)s:24: Error: exception
+console.info({ toString: () => {throw new Error("exception");} })
+ ^
+Error: exception
+ at Object.toString (*%(basename)s:24:39)
+ at console.info (<anonymous>)
+ at *%(basename)s:24:9
diff --git a/deps/v8/test/message/const-decl-no-init-sloppy.out b/deps/v8/test/message/fail/const-decl-no-init-sloppy.out
index 302497771a..302497771a 100644
--- a/deps/v8/test/message/const-decl-no-init-sloppy.out
+++ b/deps/v8/test/message/fail/const-decl-no-init-sloppy.out
diff --git a/deps/v8/test/message/const-decl-no-init.js b/deps/v8/test/message/fail/const-decl-no-init.js
index 7a47a1319a..7a47a1319a 100644
--- a/deps/v8/test/message/const-decl-no-init.js
+++ b/deps/v8/test/message/fail/const-decl-no-init.js
diff --git a/deps/v8/test/message/const-decl-no-init.out b/deps/v8/test/message/fail/const-decl-no-init.out
index 42f1219b3c..42f1219b3c 100644
--- a/deps/v8/test/message/const-decl-no-init.out
+++ b/deps/v8/test/message/fail/const-decl-no-init.out
diff --git a/deps/v8/test/message/const-decl-no-init2.js b/deps/v8/test/message/fail/const-decl-no-init2.js
index 586b783d64..586b783d64 100644
--- a/deps/v8/test/message/const-decl-no-init2.js
+++ b/deps/v8/test/message/fail/const-decl-no-init2.js
diff --git a/deps/v8/test/message/const-decl-no-init2.out b/deps/v8/test/message/fail/const-decl-no-init2.out
index 2f61cedb16..2f61cedb16 100644
--- a/deps/v8/test/message/const-decl-no-init2.out
+++ b/deps/v8/test/message/fail/const-decl-no-init2.out
diff --git a/deps/v8/test/message/default-parameter-tdz-arrow.js b/deps/v8/test/message/fail/default-parameter-tdz-arrow.js
index d68ceb2908..d68ceb2908 100644
--- a/deps/v8/test/message/default-parameter-tdz-arrow.js
+++ b/deps/v8/test/message/fail/default-parameter-tdz-arrow.js
diff --git a/deps/v8/test/message/default-parameter-tdz-arrow.out b/deps/v8/test/message/fail/default-parameter-tdz-arrow.out
index 7d5f894ef5..7d5f894ef5 100644
--- a/deps/v8/test/message/default-parameter-tdz-arrow.out
+++ b/deps/v8/test/message/fail/default-parameter-tdz-arrow.out
diff --git a/deps/v8/test/message/default-parameter-tdz.js b/deps/v8/test/message/fail/default-parameter-tdz.js
index a109196218..a109196218 100644
--- a/deps/v8/test/message/default-parameter-tdz.js
+++ b/deps/v8/test/message/fail/default-parameter-tdz.js
diff --git a/deps/v8/test/message/default-parameter-tdz.out b/deps/v8/test/message/fail/default-parameter-tdz.out
index 8a6d56abae..8a6d56abae 100644
--- a/deps/v8/test/message/default-parameter-tdz.out
+++ b/deps/v8/test/message/fail/default-parameter-tdz.out
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-number.js b/deps/v8/test/message/fail/destructuring-array-non-iterable-number.js
index d07cb7a037..d07cb7a037 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-number.js
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-number.js
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-number.out b/deps/v8/test/message/fail/destructuring-array-non-iterable-number.out
index 35a328d7f3..35a328d7f3 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-number.out
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-number.out
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-object-literal-complex.js b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal-complex.js
index 404f4e2889..404f4e2889 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-object-literal-complex.js
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal-complex.js
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-object-literal-complex.out b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal-complex.out
index c4752ff4f9..c4752ff4f9 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-object-literal-complex.out
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal-complex.out
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-object-literal.js b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal.js
index a261a3994e..a261a3994e 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-object-literal.js
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal.js
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-object-literal.out b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal.out
index 4180bddb33..4180bddb33 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-object-literal.out
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-literal.out
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-object.js b/deps/v8/test/message/fail/destructuring-array-non-iterable-object.js
index 03286e318c..03286e318c 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-object.js
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object.js
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-object.out b/deps/v8/test/message/fail/destructuring-array-non-iterable-object.out
index bcf70a8ec0..bcf70a8ec0 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-object.out
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object.out
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-undefined.js b/deps/v8/test/message/fail/destructuring-array-non-iterable-undefined.js
index f0eaa30562..f0eaa30562 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-undefined.js
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-undefined.js
diff --git a/deps/v8/test/message/destructuring-array-non-iterable-undefined.out b/deps/v8/test/message/fail/destructuring-array-non-iterable-undefined.out
index 564d02c91b..564d02c91b 100644
--- a/deps/v8/test/message/destructuring-array-non-iterable-undefined.out
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-undefined.out
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array.js b/deps/v8/test/message/fail/destructuring-decl-no-init-array.js
index ab976b197e..ab976b197e 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array.js
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-array.js
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array.out b/deps/v8/test/message/fail/destructuring-decl-no-init-array.out
index 888656b490..888656b490 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array.out
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-array.out
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array2.js b/deps/v8/test/message/fail/destructuring-decl-no-init-array2.js
index 9ffa58b1df..9ffa58b1df 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array2.js
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-array2.js
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array2.out b/deps/v8/test/message/fail/destructuring-decl-no-init-array2.out
index d3b32b5d3a..d3b32b5d3a 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array2.out
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-array2.out
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj.js b/deps/v8/test/message/fail/destructuring-decl-no-init-obj.js
index 398b4fc5e7..398b4fc5e7 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj.js
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-obj.js
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj.out b/deps/v8/test/message/fail/destructuring-decl-no-init-obj.out
index cb94b58a32..cb94b58a32 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj.out
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-obj.out
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj2.js b/deps/v8/test/message/fail/destructuring-decl-no-init-obj2.js
index 652409bfab..652409bfab 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj2.js
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-obj2.js
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj2.out b/deps/v8/test/message/fail/destructuring-decl-no-init-obj2.out
index 2b4054ac37..2b4054ac37 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj2.out
+++ b/deps/v8/test/message/fail/destructuring-decl-no-init-obj2.out
diff --git a/deps/v8/test/message/destructuring-function-non-iterable.js b/deps/v8/test/message/fail/destructuring-function-non-iterable.js
index fcae586f1c..fcae586f1c 100644
--- a/deps/v8/test/message/destructuring-function-non-iterable.js
+++ b/deps/v8/test/message/fail/destructuring-function-non-iterable.js
diff --git a/deps/v8/test/message/destructuring-function-non-iterable.out b/deps/v8/test/message/fail/destructuring-function-non-iterable.out
index 575fc5bd1b..575fc5bd1b 100644
--- a/deps/v8/test/message/destructuring-function-non-iterable.out
+++ b/deps/v8/test/message/fail/destructuring-function-non-iterable.out
diff --git a/deps/v8/test/message/destructuring-modify-const.js b/deps/v8/test/message/fail/destructuring-modify-const.js
index 5575ae9267..5575ae9267 100644
--- a/deps/v8/test/message/destructuring-modify-const.js
+++ b/deps/v8/test/message/fail/destructuring-modify-const.js
diff --git a/deps/v8/test/message/destructuring-modify-const.out b/deps/v8/test/message/fail/destructuring-modify-const.out
index 19bffa6d3d..19bffa6d3d 100644
--- a/deps/v8/test/message/destructuring-modify-const.out
+++ b/deps/v8/test/message/fail/destructuring-modify-const.out
diff --git a/deps/v8/test/message/destructuring-new-callable-non-iterable.js b/deps/v8/test/message/fail/destructuring-new-callable-non-iterable.js
index e854d6e3af..e854d6e3af 100644
--- a/deps/v8/test/message/destructuring-new-callable-non-iterable.js
+++ b/deps/v8/test/message/fail/destructuring-new-callable-non-iterable.js
diff --git a/deps/v8/test/message/destructuring-new-callable-non-iterable.out b/deps/v8/test/message/fail/destructuring-new-callable-non-iterable.out
index 53f8dec32c..53f8dec32c 100644
--- a/deps/v8/test/message/destructuring-new-callable-non-iterable.out
+++ b/deps/v8/test/message/fail/destructuring-new-callable-non-iterable.out
diff --git a/deps/v8/test/message/destructuring-non-function-non-iterable.js b/deps/v8/test/message/fail/destructuring-non-function-non-iterable.js
index 6de9c90869..6de9c90869 100644
--- a/deps/v8/test/message/destructuring-non-function-non-iterable.js
+++ b/deps/v8/test/message/fail/destructuring-non-function-non-iterable.js
diff --git a/deps/v8/test/message/destructuring-non-function-non-iterable.out b/deps/v8/test/message/fail/destructuring-non-function-non-iterable.out
index 575fc5bd1b..575fc5bd1b 100644
--- a/deps/v8/test/message/destructuring-non-function-non-iterable.out
+++ b/deps/v8/test/message/fail/destructuring-non-function-non-iterable.out
diff --git a/deps/v8/test/message/destructuring-undefined-computed-property.js b/deps/v8/test/message/fail/destructuring-undefined-computed-property.js
index 7f48062eda..7f48062eda 100644
--- a/deps/v8/test/message/destructuring-undefined-computed-property.js
+++ b/deps/v8/test/message/fail/destructuring-undefined-computed-property.js
diff --git a/deps/v8/test/message/destructuring-undefined-computed-property.out b/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
index 27baf9a2d6..27baf9a2d6 100644
--- a/deps/v8/test/message/destructuring-undefined-computed-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
diff --git a/deps/v8/test/message/destructuring-undefined-number-property.js b/deps/v8/test/message/fail/destructuring-undefined-number-property.js
index e6b6643d72..e6b6643d72 100644
--- a/deps/v8/test/message/destructuring-undefined-number-property.js
+++ b/deps/v8/test/message/fail/destructuring-undefined-number-property.js
diff --git a/deps/v8/test/message/destructuring-undefined-number-property.out b/deps/v8/test/message/fail/destructuring-undefined-number-property.out
index 4d63351204..4d63351204 100644
--- a/deps/v8/test/message/destructuring-undefined-number-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-number-property.out
diff --git a/deps/v8/test/message/destructuring-undefined-string-property.js b/deps/v8/test/message/fail/destructuring-undefined-string-property.js
index 67f3dab2f8..67f3dab2f8 100644
--- a/deps/v8/test/message/destructuring-undefined-string-property.js
+++ b/deps/v8/test/message/fail/destructuring-undefined-string-property.js
diff --git a/deps/v8/test/message/destructuring-undefined-string-property.out b/deps/v8/test/message/fail/destructuring-undefined-string-property.out
index 1ea1c6b264..1ea1c6b264 100644
--- a/deps/v8/test/message/destructuring-undefined-string-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-string-property.out
diff --git a/deps/v8/test/message/regress/regress-4829-1.js b/deps/v8/test/message/fail/dynamic-import-missing-specifier.js
index 78277df3cc..c2af815f12 100644
--- a/deps/v8/test/message/regress/regress-4829-1.js
+++ b/deps/v8/test/message/fail/dynamic-import-missing-specifier.js
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-dynamic-import
-// Flags: --no-harmony-template-escapes
-
-function tag() {}
-
-tag(tag`\xyy`);
+import();
diff --git a/deps/v8/test/message/fail/dynamic-import-missing-specifier.out b/deps/v8/test/message/fail/dynamic-import-missing-specifier.out
new file mode 100644
index 0000000000..2f8c1cfa0c
--- /dev/null
+++ b/deps/v8/test/message/fail/dynamic-import-missing-specifier.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: import() requires a specifier
+import();
+ ^
+SyntaxError: import() requires a specifier
diff --git a/deps/v8/test/message/export-duplicate-as.js b/deps/v8/test/message/fail/export-duplicate-as.js
index 416180b093..416180b093 100644
--- a/deps/v8/test/message/export-duplicate-as.js
+++ b/deps/v8/test/message/fail/export-duplicate-as.js
diff --git a/deps/v8/test/message/export-duplicate-as.out b/deps/v8/test/message/fail/export-duplicate-as.out
index 729de8a904..729de8a904 100644
--- a/deps/v8/test/message/export-duplicate-as.out
+++ b/deps/v8/test/message/fail/export-duplicate-as.out
diff --git a/deps/v8/test/message/export-duplicate-default.js b/deps/v8/test/message/fail/export-duplicate-default.js
index de1a8807c1..de1a8807c1 100644
--- a/deps/v8/test/message/export-duplicate-default.js
+++ b/deps/v8/test/message/fail/export-duplicate-default.js
diff --git a/deps/v8/test/message/export-duplicate-default.out b/deps/v8/test/message/fail/export-duplicate-default.out
index 685e289891..685e289891 100644
--- a/deps/v8/test/message/export-duplicate-default.out
+++ b/deps/v8/test/message/fail/export-duplicate-default.out
diff --git a/deps/v8/test/message/export-duplicate.js b/deps/v8/test/message/fail/export-duplicate.js
index 93011f0c1c..93011f0c1c 100644
--- a/deps/v8/test/message/export-duplicate.js
+++ b/deps/v8/test/message/fail/export-duplicate.js
diff --git a/deps/v8/test/message/export-duplicate.out b/deps/v8/test/message/fail/export-duplicate.out
index 9811cb122c..9811cb122c 100644
--- a/deps/v8/test/message/export-duplicate.out
+++ b/deps/v8/test/message/fail/export-duplicate.out
diff --git a/deps/v8/test/message/for-in-let-loop-initializers-strict.js b/deps/v8/test/message/fail/for-in-let-loop-initializers-strict.js
index a58f2fd275..a58f2fd275 100644
--- a/deps/v8/test/message/for-in-let-loop-initializers-strict.js
+++ b/deps/v8/test/message/fail/for-in-let-loop-initializers-strict.js
diff --git a/deps/v8/test/message/for-in-let-loop-initializers-strict.out b/deps/v8/test/message/fail/for-in-let-loop-initializers-strict.out
index 6c8ca9dfe9..6c8ca9dfe9 100644
--- a/deps/v8/test/message/for-in-let-loop-initializers-strict.out
+++ b/deps/v8/test/message/fail/for-in-let-loop-initializers-strict.out
diff --git a/deps/v8/test/message/for-in-loop-initializers-destructuring.js b/deps/v8/test/message/fail/for-in-loop-initializers-destructuring.js
index 9bbfd8d71f..9bbfd8d71f 100644
--- a/deps/v8/test/message/for-in-loop-initializers-destructuring.js
+++ b/deps/v8/test/message/fail/for-in-loop-initializers-destructuring.js
diff --git a/deps/v8/test/message/for-in-loop-initializers-destructuring.out b/deps/v8/test/message/fail/for-in-loop-initializers-destructuring.out
index 9dbda2c639..9dbda2c639 100644
--- a/deps/v8/test/message/for-in-loop-initializers-destructuring.out
+++ b/deps/v8/test/message/fail/for-in-loop-initializers-destructuring.out
diff --git a/deps/v8/test/message/for-in-loop-initializers-strict.js b/deps/v8/test/message/fail/for-in-loop-initializers-strict.js
index 6aa0925501..6aa0925501 100644
--- a/deps/v8/test/message/for-in-loop-initializers-strict.js
+++ b/deps/v8/test/message/fail/for-in-loop-initializers-strict.js
diff --git a/deps/v8/test/message/for-in-loop-initializers-strict.out b/deps/v8/test/message/fail/for-in-loop-initializers-strict.out
index 41d7cbd286..41d7cbd286 100644
--- a/deps/v8/test/message/for-in-loop-initializers-strict.out
+++ b/deps/v8/test/message/fail/for-in-loop-initializers-strict.out
diff --git a/deps/v8/test/message/for-loop-invalid-lhs.js b/deps/v8/test/message/fail/for-loop-invalid-lhs.js
index 81a9512a6c..81a9512a6c 100644
--- a/deps/v8/test/message/for-loop-invalid-lhs.js
+++ b/deps/v8/test/message/fail/for-loop-invalid-lhs.js
diff --git a/deps/v8/test/message/for-loop-invalid-lhs.out b/deps/v8/test/message/fail/for-loop-invalid-lhs.out
index b42e8512f6..b42e8512f6 100644
--- a/deps/v8/test/message/for-loop-invalid-lhs.out
+++ b/deps/v8/test/message/fail/for-loop-invalid-lhs.out
diff --git a/deps/v8/test/message/for-of-let-loop-initializers.js b/deps/v8/test/message/fail/for-of-let-loop-initializers.js
index 4ac0d549ce..4ac0d549ce 100644
--- a/deps/v8/test/message/for-of-let-loop-initializers.js
+++ b/deps/v8/test/message/fail/for-of-let-loop-initializers.js
diff --git a/deps/v8/test/message/for-of-let-loop-initializers.out b/deps/v8/test/message/fail/for-of-let-loop-initializers.out
index 3b43e9f644..3b43e9f644 100644
--- a/deps/v8/test/message/for-of-let-loop-initializers.out
+++ b/deps/v8/test/message/fail/for-of-let-loop-initializers.out
diff --git a/deps/v8/test/message/for-of-loop-initializers-sloppy.js b/deps/v8/test/message/fail/for-of-loop-initializers-sloppy.js
index 685e2e6d2e..685e2e6d2e 100644
--- a/deps/v8/test/message/for-of-loop-initializers-sloppy.js
+++ b/deps/v8/test/message/fail/for-of-loop-initializers-sloppy.js
diff --git a/deps/v8/test/message/for-of-loop-initializers-sloppy.out b/deps/v8/test/message/fail/for-of-loop-initializers-sloppy.out
index 2961d0cecd..2961d0cecd 100644
--- a/deps/v8/test/message/for-of-loop-initializers-sloppy.out
+++ b/deps/v8/test/message/fail/for-of-loop-initializers-sloppy.out
diff --git a/deps/v8/test/message/for-of-loop-initializers-strict.js b/deps/v8/test/message/fail/for-of-loop-initializers-strict.js
index 5b3dddc3c9..5b3dddc3c9 100644
--- a/deps/v8/test/message/for-of-loop-initializers-strict.js
+++ b/deps/v8/test/message/fail/for-of-loop-initializers-strict.js
diff --git a/deps/v8/test/message/for-of-loop-initializers-strict.out b/deps/v8/test/message/fail/for-of-loop-initializers-strict.out
index e29bd84df8..e29bd84df8 100644
--- a/deps/v8/test/message/for-of-loop-initializers-strict.out
+++ b/deps/v8/test/message/fail/for-of-loop-initializers-strict.out
diff --git a/deps/v8/test/message/for-of-non-iterable.js b/deps/v8/test/message/fail/for-of-non-iterable.js
index a94e65a15f..a94e65a15f 100644
--- a/deps/v8/test/message/for-of-non-iterable.js
+++ b/deps/v8/test/message/fail/for-of-non-iterable.js
diff --git a/deps/v8/test/message/for-of-non-iterable.out b/deps/v8/test/message/fail/for-of-non-iterable.out
index ed05b7e6cc..ed05b7e6cc 100644
--- a/deps/v8/test/message/for-of-non-iterable.out
+++ b/deps/v8/test/message/fail/for-of-non-iterable.out
diff --git a/deps/v8/test/message/for-of-throw-in-body.js b/deps/v8/test/message/fail/for-of-throw-in-body.js
index 38b27f3863..38b27f3863 100644
--- a/deps/v8/test/message/for-of-throw-in-body.js
+++ b/deps/v8/test/message/fail/for-of-throw-in-body.js
diff --git a/deps/v8/test/message/for-of-throw-in-body.out b/deps/v8/test/message/fail/for-of-throw-in-body.out
index 4bc48ebdad..4bc48ebdad 100644
--- a/deps/v8/test/message/for-of-throw-in-body.out
+++ b/deps/v8/test/message/fail/for-of-throw-in-body.out
diff --git a/deps/v8/test/message/formal-parameters-bad-rest.js b/deps/v8/test/message/fail/formal-parameters-bad-rest.js
index 3e5860ec07..3e5860ec07 100644
--- a/deps/v8/test/message/formal-parameters-bad-rest.js
+++ b/deps/v8/test/message/fail/formal-parameters-bad-rest.js
diff --git a/deps/v8/test/message/formal-parameters-bad-rest.out b/deps/v8/test/message/fail/formal-parameters-bad-rest.out
index 562b6ad49d..562b6ad49d 100644
--- a/deps/v8/test/message/formal-parameters-bad-rest.out
+++ b/deps/v8/test/message/fail/formal-parameters-bad-rest.out
diff --git a/deps/v8/test/message/formal-parameters-strict-body.js b/deps/v8/test/message/fail/formal-parameters-strict-body.js
index c5af7405f7..c5af7405f7 100644
--- a/deps/v8/test/message/formal-parameters-strict-body.js
+++ b/deps/v8/test/message/fail/formal-parameters-strict-body.js
diff --git a/deps/v8/test/message/formal-parameters-strict-body.out b/deps/v8/test/message/fail/formal-parameters-strict-body.out
index bb0d7e03b2..bb0d7e03b2 100644
--- a/deps/v8/test/message/formal-parameters-strict-body.out
+++ b/deps/v8/test/message/fail/formal-parameters-strict-body.out
diff --git a/deps/v8/test/message/formal-parameters-trailing-comma.js b/deps/v8/test/message/fail/formal-parameters-trailing-comma.js
index 9a248bee68..9a248bee68 100644
--- a/deps/v8/test/message/formal-parameters-trailing-comma.js
+++ b/deps/v8/test/message/fail/formal-parameters-trailing-comma.js
diff --git a/deps/v8/test/message/formal-parameters-trailing-comma.out b/deps/v8/test/message/fail/formal-parameters-trailing-comma.out
index 5c46552628..5c46552628 100644
--- a/deps/v8/test/message/formal-parameters-trailing-comma.out
+++ b/deps/v8/test/message/fail/formal-parameters-trailing-comma.out
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-694433.js b/deps/v8/test/message/fail/func-name-inferrer-arg-1.js
index a260e20790..6c28367d92 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-694433.js
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg-1.js
@@ -2,5 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var size = 0x40000000;
-assertThrows(() => WebAssembly.validate(new Uint16Array(size)), RangeError);
+(function (param = function() { throw new Error('boom') }) {
+ (() => {
+ param();
+ })();
+
+})();
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg-1.out b/deps/v8/test/message/fail/func-name-inferrer-arg-1.out
new file mode 100644
index 0000000000..3c19121a0a
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg-1.out
@@ -0,0 +1,8 @@
+*%(basename)s:5: Error: boom
+(function (param = function() { throw new Error('boom') }) {
+ ^
+Error: boom
+ at param (*%(basename)s:5:39)
+ at *%(basename)s:7:5
+ at *%(basename)s:8:5
+ at *%(basename)s:10:3 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg.js b/deps/v8/test/message/fail/func-name-inferrer-arg.js
new file mode 100644
index 0000000000..3fcd044b9b
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function (param) {
+ (() => {
+ throw new Error('boom');
+ })();
+
+})();
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg.out b/deps/v8/test/message/fail/func-name-inferrer-arg.out
new file mode 100644
index 0000000000..06e001d1d5
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg.out
@@ -0,0 +1,7 @@
+*%(basename)s:7: Error: boom
+ throw new Error('boom');
+ ^
+Error: boom
+ at *%(basename)s:7:11
+ at *%(basename)s:8:5
+ at *%(basename)s:10:3 \ No newline at end of file
diff --git a/deps/v8/test/message/function-param-after-rest.js b/deps/v8/test/message/fail/function-param-after-rest.js
index 3fe79a8f27..3fe79a8f27 100644
--- a/deps/v8/test/message/function-param-after-rest.js
+++ b/deps/v8/test/message/fail/function-param-after-rest.js
diff --git a/deps/v8/test/message/function-param-after-rest.out b/deps/v8/test/message/fail/function-param-after-rest.out
index 58633fddca..58633fddca 100644
--- a/deps/v8/test/message/function-param-after-rest.out
+++ b/deps/v8/test/message/fail/function-param-after-rest.out
diff --git a/deps/v8/test/message/function-sent-escaped.js b/deps/v8/test/message/fail/function-sent-escaped.js
index aa17258f85..aa17258f85 100644
--- a/deps/v8/test/message/function-sent-escaped.js
+++ b/deps/v8/test/message/fail/function-sent-escaped.js
diff --git a/deps/v8/test/message/function-sent-escaped.out b/deps/v8/test/message/fail/function-sent-escaped.out
index d9613d8ef4..d9613d8ef4 100644
--- a/deps/v8/test/message/function-sent-escaped.out
+++ b/deps/v8/test/message/fail/function-sent-escaped.out
diff --git a/deps/v8/test/message/generators-throw1.js b/deps/v8/test/message/fail/generators-throw1.js
index b4d404a1bf..b4d404a1bf 100644
--- a/deps/v8/test/message/generators-throw1.js
+++ b/deps/v8/test/message/fail/generators-throw1.js
diff --git a/deps/v8/test/message/generators-throw1.out b/deps/v8/test/message/fail/generators-throw1.out
index 1e78e8834f..1e78e8834f 100644
--- a/deps/v8/test/message/generators-throw1.out
+++ b/deps/v8/test/message/fail/generators-throw1.out
diff --git a/deps/v8/test/message/generators-throw2.js b/deps/v8/test/message/fail/generators-throw2.js
index 7207755427..7207755427 100644
--- a/deps/v8/test/message/generators-throw2.js
+++ b/deps/v8/test/message/fail/generators-throw2.js
diff --git a/deps/v8/test/message/generators-throw2.out b/deps/v8/test/message/fail/generators-throw2.out
index 3f23814f8b..3f23814f8b 100644
--- a/deps/v8/test/message/generators-throw2.out
+++ b/deps/v8/test/message/fail/generators-throw2.out
diff --git a/deps/v8/test/message/get-iterator-return-non-receiver.js b/deps/v8/test/message/fail/get-iterator-return-non-receiver.js
index c24a9e2fdd..c24a9e2fdd 100644
--- a/deps/v8/test/message/get-iterator-return-non-receiver.js
+++ b/deps/v8/test/message/fail/get-iterator-return-non-receiver.js
diff --git a/deps/v8/test/message/get-iterator-return-non-receiver.out b/deps/v8/test/message/fail/get-iterator-return-non-receiver.out
index 7a125ef787..7a125ef787 100644
--- a/deps/v8/test/message/get-iterator-return-non-receiver.out
+++ b/deps/v8/test/message/fail/get-iterator-return-non-receiver.out
diff --git a/deps/v8/test/message/get-iterator1.js b/deps/v8/test/message/fail/get-iterator1.js
index efbdf8fdbf..efbdf8fdbf 100644
--- a/deps/v8/test/message/get-iterator1.js
+++ b/deps/v8/test/message/fail/get-iterator1.js
diff --git a/deps/v8/test/message/get-iterator1.out b/deps/v8/test/message/fail/get-iterator1.out
index 5d01f1cc88..5d01f1cc88 100644
--- a/deps/v8/test/message/get-iterator1.out
+++ b/deps/v8/test/message/fail/get-iterator1.out
diff --git a/deps/v8/test/message/import-as-eval.js b/deps/v8/test/message/fail/import-as-eval.js
index 66adc32cbe..66adc32cbe 100644
--- a/deps/v8/test/message/import-as-eval.js
+++ b/deps/v8/test/message/fail/import-as-eval.js
diff --git a/deps/v8/test/message/import-as-eval.out b/deps/v8/test/message/fail/import-as-eval.out
index 622f7fe9e1..622f7fe9e1 100644
--- a/deps/v8/test/message/import-as-eval.out
+++ b/deps/v8/test/message/fail/import-as-eval.out
diff --git a/deps/v8/test/message/import-as-redeclaration.js b/deps/v8/test/message/fail/import-as-redeclaration.js
index 43bf278d1b..43bf278d1b 100644
--- a/deps/v8/test/message/import-as-redeclaration.js
+++ b/deps/v8/test/message/fail/import-as-redeclaration.js
diff --git a/deps/v8/test/message/import-as-redeclaration.out b/deps/v8/test/message/fail/import-as-redeclaration.out
index 51c4c032dc..51c4c032dc 100644
--- a/deps/v8/test/message/import-as-redeclaration.out
+++ b/deps/v8/test/message/fail/import-as-redeclaration.out
diff --git a/deps/v8/test/message/import-as-reserved-word.js b/deps/v8/test/message/fail/import-as-reserved-word.js
index 562699d45f..562699d45f 100644
--- a/deps/v8/test/message/import-as-reserved-word.js
+++ b/deps/v8/test/message/fail/import-as-reserved-word.js
diff --git a/deps/v8/test/message/import-as-reserved-word.out b/deps/v8/test/message/fail/import-as-reserved-word.out
index 1ee8d41c1a..1ee8d41c1a 100644
--- a/deps/v8/test/message/import-as-reserved-word.out
+++ b/deps/v8/test/message/fail/import-as-reserved-word.out
diff --git a/deps/v8/test/message/import-eval.js b/deps/v8/test/message/fail/import-eval.js
index 8ab35baef6..8ab35baef6 100644
--- a/deps/v8/test/message/import-eval.js
+++ b/deps/v8/test/message/fail/import-eval.js
diff --git a/deps/v8/test/message/import-eval.out b/deps/v8/test/message/fail/import-eval.out
index 148662a28c..148662a28c 100644
--- a/deps/v8/test/message/import-eval.out
+++ b/deps/v8/test/message/fail/import-eval.out
diff --git a/deps/v8/test/message/import-redeclaration.js b/deps/v8/test/message/fail/import-redeclaration.js
index 27b0cdccef..27b0cdccef 100644
--- a/deps/v8/test/message/import-redeclaration.js
+++ b/deps/v8/test/message/fail/import-redeclaration.js
diff --git a/deps/v8/test/message/import-redeclaration.out b/deps/v8/test/message/fail/import-redeclaration.out
index 641948810f..641948810f 100644
--- a/deps/v8/test/message/import-redeclaration.out
+++ b/deps/v8/test/message/fail/import-redeclaration.out
diff --git a/deps/v8/test/message/import-reserved-word.js b/deps/v8/test/message/fail/import-reserved-word.js
index 1fd7ba291e..1fd7ba291e 100644
--- a/deps/v8/test/message/import-reserved-word.js
+++ b/deps/v8/test/message/fail/import-reserved-word.js
diff --git a/deps/v8/test/message/import-reserved-word.out b/deps/v8/test/message/fail/import-reserved-word.out
index 5b990e9e59..5b990e9e59 100644
--- a/deps/v8/test/message/import-reserved-word.out
+++ b/deps/v8/test/message/fail/import-reserved-word.out
diff --git a/deps/v8/test/message/instanceof-noncallable.js b/deps/v8/test/message/fail/instanceof-noncallable.js
index d82b416e68..d82b416e68 100644
--- a/deps/v8/test/message/instanceof-noncallable.js
+++ b/deps/v8/test/message/fail/instanceof-noncallable.js
diff --git a/deps/v8/test/message/instanceof-noncallable.out b/deps/v8/test/message/fail/instanceof-noncallable.out
index 73e2ae61b5..73e2ae61b5 100644
--- a/deps/v8/test/message/instanceof-noncallable.out
+++ b/deps/v8/test/message/fail/instanceof-noncallable.out
diff --git a/deps/v8/test/message/instanceof-nonobject.js b/deps/v8/test/message/fail/instanceof-nonobject.js
index ef8e0ae2e4..ef8e0ae2e4 100644
--- a/deps/v8/test/message/instanceof-nonobject.js
+++ b/deps/v8/test/message/fail/instanceof-nonobject.js
diff --git a/deps/v8/test/message/instanceof-nonobject.out b/deps/v8/test/message/fail/instanceof-nonobject.out
index ecf47af9d1..ecf47af9d1 100644
--- a/deps/v8/test/message/instanceof-nonobject.out
+++ b/deps/v8/test/message/fail/instanceof-nonobject.out
diff --git a/deps/v8/test/message/invalid-spread-2.js b/deps/v8/test/message/fail/invalid-spread-2.js
index 14dfd728a0..14dfd728a0 100644
--- a/deps/v8/test/message/invalid-spread-2.js
+++ b/deps/v8/test/message/fail/invalid-spread-2.js
diff --git a/deps/v8/test/message/invalid-spread-2.out b/deps/v8/test/message/fail/invalid-spread-2.out
index 287390a74a..287390a74a 100644
--- a/deps/v8/test/message/invalid-spread-2.out
+++ b/deps/v8/test/message/fail/invalid-spread-2.out
diff --git a/deps/v8/test/message/invalid-spread.js b/deps/v8/test/message/fail/invalid-spread.js
index cc42874431..cc42874431 100644
--- a/deps/v8/test/message/invalid-spread.js
+++ b/deps/v8/test/message/fail/invalid-spread.js
diff --git a/deps/v8/test/message/invalid-spread.out b/deps/v8/test/message/fail/invalid-spread.out
index 5694ad6e88..5694ad6e88 100644
--- a/deps/v8/test/message/invalid-spread.out
+++ b/deps/v8/test/message/fail/invalid-spread.out
diff --git a/deps/v8/test/message/isvar.js b/deps/v8/test/message/fail/isvar.js
index fedf9d5f68..fedf9d5f68 100644
--- a/deps/v8/test/message/isvar.js
+++ b/deps/v8/test/message/fail/isvar.js
diff --git a/deps/v8/test/message/isvar.out b/deps/v8/test/message/fail/isvar.out
index 6d5cca3345..6d5cca3345 100644
--- a/deps/v8/test/message/isvar.out
+++ b/deps/v8/test/message/fail/isvar.out
diff --git a/deps/v8/test/message/let-lexical-declaration-destructuring-brace-in-single-statement.js b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-brace-in-single-statement.js
index 1afcc8ccc8..1afcc8ccc8 100644
--- a/deps/v8/test/message/let-lexical-declaration-destructuring-brace-in-single-statement.js
+++ b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-brace-in-single-statement.js
diff --git a/deps/v8/test/message/let-lexical-declaration-destructuring-brace-in-single-statement.out b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-brace-in-single-statement.out
index 352adae7cb..352adae7cb 100644
--- a/deps/v8/test/message/let-lexical-declaration-destructuring-brace-in-single-statement.out
+++ b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-brace-in-single-statement.out
diff --git a/deps/v8/test/message/let-lexical-declaration-destructuring-in-single-statement.js b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-in-single-statement.js
index 5e439fff9f..5e439fff9f 100644
--- a/deps/v8/test/message/let-lexical-declaration-destructuring-in-single-statement.js
+++ b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-in-single-statement.js
diff --git a/deps/v8/test/message/let-lexical-declaration-destructuring-in-single-statement.out b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-in-single-statement.out
index 16c487f3d6..16c487f3d6 100644
--- a/deps/v8/test/message/let-lexical-declaration-destructuring-in-single-statement.out
+++ b/deps/v8/test/message/fail/let-lexical-declaration-destructuring-in-single-statement.out
diff --git a/deps/v8/test/message/let-lexical-declaration-in-single-statement.js b/deps/v8/test/message/fail/let-lexical-declaration-in-single-statement.js
index 037937d594..037937d594 100644
--- a/deps/v8/test/message/let-lexical-declaration-in-single-statement.js
+++ b/deps/v8/test/message/fail/let-lexical-declaration-in-single-statement.js
diff --git a/deps/v8/test/message/let-lexical-declaration-in-single-statement.out b/deps/v8/test/message/fail/let-lexical-declaration-in-single-statement.out
index 29680d8fd8..29680d8fd8 100644
--- a/deps/v8/test/message/let-lexical-declaration-in-single-statement.out
+++ b/deps/v8/test/message/fail/let-lexical-declaration-in-single-statement.out
diff --git a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js b/deps/v8/test/message/fail/let-lexical-name-in-array-prohibited.js
index e5e37e11aa..e5e37e11aa 100644
--- a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
+++ b/deps/v8/test/message/fail/let-lexical-name-in-array-prohibited.js
diff --git a/deps/v8/test/message/let-lexical-name-in-array-prohibited.out b/deps/v8/test/message/fail/let-lexical-name-in-array-prohibited.out
index fc8181a498..fc8181a498 100644
--- a/deps/v8/test/message/let-lexical-name-in-array-prohibited.out
+++ b/deps/v8/test/message/fail/let-lexical-name-in-array-prohibited.out
diff --git a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js b/deps/v8/test/message/fail/let-lexical-name-in-object-prohibited.js
index 4e26c62ad6..4e26c62ad6 100644
--- a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
+++ b/deps/v8/test/message/fail/let-lexical-name-in-object-prohibited.js
diff --git a/deps/v8/test/message/let-lexical-name-in-object-prohibited.out b/deps/v8/test/message/fail/let-lexical-name-in-object-prohibited.out
index c04f6bedd2..c04f6bedd2 100644
--- a/deps/v8/test/message/let-lexical-name-in-object-prohibited.out
+++ b/deps/v8/test/message/fail/let-lexical-name-in-object-prohibited.out
diff --git a/deps/v8/test/message/let-lexical-name-prohibited.js b/deps/v8/test/message/fail/let-lexical-name-prohibited.js
index b001be877c..b001be877c 100644
--- a/deps/v8/test/message/let-lexical-name-prohibited.js
+++ b/deps/v8/test/message/fail/let-lexical-name-prohibited.js
diff --git a/deps/v8/test/message/let-lexical-name-prohibited.out b/deps/v8/test/message/fail/let-lexical-name-prohibited.out
index 4b2011b297..4b2011b297 100644
--- a/deps/v8/test/message/let-lexical-name-prohibited.out
+++ b/deps/v8/test/message/fail/let-lexical-name-prohibited.out
diff --git a/deps/v8/test/message/modules-cycle1.js b/deps/v8/test/message/fail/modules-cycle1.js
index e3497cace8..e3497cace8 100644
--- a/deps/v8/test/message/modules-cycle1.js
+++ b/deps/v8/test/message/fail/modules-cycle1.js
diff --git a/deps/v8/test/message/modules-cycle1.out b/deps/v8/test/message/fail/modules-cycle1.out
index 3e6f31b1d4..3e6f31b1d4 100644
--- a/deps/v8/test/message/modules-cycle1.out
+++ b/deps/v8/test/message/fail/modules-cycle1.out
diff --git a/deps/v8/test/message/modules-cycle2.js b/deps/v8/test/message/fail/modules-cycle2.js
index 1121c3098f..1121c3098f 100644
--- a/deps/v8/test/message/modules-cycle2.js
+++ b/deps/v8/test/message/fail/modules-cycle2.js
diff --git a/deps/v8/test/message/modules-cycle2.out b/deps/v8/test/message/fail/modules-cycle2.out
index f3c19d20ed..f3c19d20ed 100644
--- a/deps/v8/test/message/modules-cycle2.out
+++ b/deps/v8/test/message/fail/modules-cycle2.out
diff --git a/deps/v8/test/message/modules-cycle3.js b/deps/v8/test/message/fail/modules-cycle3.js
index 133d203886..133d203886 100644
--- a/deps/v8/test/message/modules-cycle3.js
+++ b/deps/v8/test/message/fail/modules-cycle3.js
diff --git a/deps/v8/test/message/modules-cycle3.out b/deps/v8/test/message/fail/modules-cycle3.out
index a5b10149f9..a5b10149f9 100644
--- a/deps/v8/test/message/modules-cycle3.out
+++ b/deps/v8/test/message/fail/modules-cycle3.out
diff --git a/deps/v8/test/message/modules-cycle4.js b/deps/v8/test/message/fail/modules-cycle4.js
index 2e22757e54..2e22757e54 100644
--- a/deps/v8/test/message/modules-cycle4.js
+++ b/deps/v8/test/message/fail/modules-cycle4.js
diff --git a/deps/v8/test/message/modules-cycle4.out b/deps/v8/test/message/fail/modules-cycle4.out
index 74789e0ec9..74789e0ec9 100644
--- a/deps/v8/test/message/modules-cycle4.out
+++ b/deps/v8/test/message/fail/modules-cycle4.out
diff --git a/deps/v8/test/message/modules-cycle5.js b/deps/v8/test/message/fail/modules-cycle5.js
index 53382daac4..53382daac4 100644
--- a/deps/v8/test/message/modules-cycle5.js
+++ b/deps/v8/test/message/fail/modules-cycle5.js
diff --git a/deps/v8/test/message/modules-cycle5.out b/deps/v8/test/message/fail/modules-cycle5.out
index 8eb3e606af..8eb3e606af 100644
--- a/deps/v8/test/message/modules-cycle5.out
+++ b/deps/v8/test/message/fail/modules-cycle5.out
diff --git a/deps/v8/test/message/modules-cycle6.js b/deps/v8/test/message/fail/modules-cycle6.js
index 3043e1d73e..3043e1d73e 100644
--- a/deps/v8/test/message/modules-cycle6.js
+++ b/deps/v8/test/message/fail/modules-cycle6.js
diff --git a/deps/v8/test/message/modules-cycle6.out b/deps/v8/test/message/fail/modules-cycle6.out
index d91e1abc14..d91e1abc14 100644
--- a/deps/v8/test/message/modules-cycle6.out
+++ b/deps/v8/test/message/fail/modules-cycle6.out
diff --git a/deps/v8/test/message/modules-duplicate-export1.js b/deps/v8/test/message/fail/modules-duplicate-export1.js
index 0ba421a0b4..0ba421a0b4 100644
--- a/deps/v8/test/message/modules-duplicate-export1.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export1.js
diff --git a/deps/v8/test/message/modules-duplicate-export1.out b/deps/v8/test/message/fail/modules-duplicate-export1.out
index 5b2478b3a2..5b2478b3a2 100644
--- a/deps/v8/test/message/modules-duplicate-export1.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export1.out
diff --git a/deps/v8/test/message/modules-duplicate-export2.js b/deps/v8/test/message/fail/modules-duplicate-export2.js
index 3aec862341..3aec862341 100644
--- a/deps/v8/test/message/modules-duplicate-export2.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export2.js
diff --git a/deps/v8/test/message/modules-duplicate-export2.out b/deps/v8/test/message/fail/modules-duplicate-export2.out
index 17e831886c..17e831886c 100644
--- a/deps/v8/test/message/modules-duplicate-export2.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export2.out
diff --git a/deps/v8/test/message/modules-duplicate-export3.js b/deps/v8/test/message/fail/modules-duplicate-export3.js
index 36fc27b6c0..36fc27b6c0 100644
--- a/deps/v8/test/message/modules-duplicate-export3.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export3.js
diff --git a/deps/v8/test/message/modules-duplicate-export3.out b/deps/v8/test/message/fail/modules-duplicate-export3.out
index 3913a75c02..3913a75c02 100644
--- a/deps/v8/test/message/modules-duplicate-export3.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export3.out
diff --git a/deps/v8/test/message/modules-duplicate-export4.js b/deps/v8/test/message/fail/modules-duplicate-export4.js
index 1bc60dad60..1bc60dad60 100644
--- a/deps/v8/test/message/modules-duplicate-export4.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export4.js
diff --git a/deps/v8/test/message/modules-duplicate-export4.out b/deps/v8/test/message/fail/modules-duplicate-export4.out
index 73e0fdcc91..73e0fdcc91 100644
--- a/deps/v8/test/message/modules-duplicate-export4.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export4.out
diff --git a/deps/v8/test/message/modules-import-redeclare1.js b/deps/v8/test/message/fail/modules-import-redeclare1.js
index 22e1ce35a9..22e1ce35a9 100644
--- a/deps/v8/test/message/modules-import-redeclare1.js
+++ b/deps/v8/test/message/fail/modules-import-redeclare1.js
diff --git a/deps/v8/test/message/modules-import-redeclare1.out b/deps/v8/test/message/fail/modules-import-redeclare1.out
index 09b5e8b713..09b5e8b713 100644
--- a/deps/v8/test/message/modules-import-redeclare1.out
+++ b/deps/v8/test/message/fail/modules-import-redeclare1.out
diff --git a/deps/v8/test/message/modules-import-redeclare2.js b/deps/v8/test/message/fail/modules-import-redeclare2.js
index af7ec2b4d2..af7ec2b4d2 100644
--- a/deps/v8/test/message/modules-import-redeclare2.js
+++ b/deps/v8/test/message/fail/modules-import-redeclare2.js
diff --git a/deps/v8/test/message/modules-import-redeclare2.out b/deps/v8/test/message/fail/modules-import-redeclare2.out
index c972a382e5..c972a382e5 100644
--- a/deps/v8/test/message/modules-import-redeclare2.out
+++ b/deps/v8/test/message/fail/modules-import-redeclare2.out
diff --git a/deps/v8/test/message/modules-import-redeclare3.js b/deps/v8/test/message/fail/modules-import-redeclare3.js
index 60ae6f20e5..60ae6f20e5 100644
--- a/deps/v8/test/message/modules-import-redeclare3.js
+++ b/deps/v8/test/message/fail/modules-import-redeclare3.js
diff --git a/deps/v8/test/message/modules-import-redeclare3.out b/deps/v8/test/message/fail/modules-import-redeclare3.out
index 3a1080736d..3a1080736d 100644
--- a/deps/v8/test/message/modules-import-redeclare3.out
+++ b/deps/v8/test/message/fail/modules-import-redeclare3.out
diff --git a/deps/v8/test/message/modules-import1.js b/deps/v8/test/message/fail/modules-import1.js
index fbfe907995..fbfe907995 100644
--- a/deps/v8/test/message/modules-import1.js
+++ b/deps/v8/test/message/fail/modules-import1.js
diff --git a/deps/v8/test/message/modules-import1.out b/deps/v8/test/message/fail/modules-import1.out
index 6facd0fa7c..6facd0fa7c 100644
--- a/deps/v8/test/message/modules-import1.out
+++ b/deps/v8/test/message/fail/modules-import1.out
diff --git a/deps/v8/test/message/modules-import2.js b/deps/v8/test/message/fail/modules-import2.js
index 8a719ace18..8a719ace18 100644
--- a/deps/v8/test/message/modules-import2.js
+++ b/deps/v8/test/message/fail/modules-import2.js
diff --git a/deps/v8/test/message/modules-import2.out b/deps/v8/test/message/fail/modules-import2.out
index 317399a6db..317399a6db 100644
--- a/deps/v8/test/message/modules-import2.out
+++ b/deps/v8/test/message/fail/modules-import2.out
diff --git a/deps/v8/test/message/modules-import3.js b/deps/v8/test/message/fail/modules-import3.js
index 7e93ff3155..7e93ff3155 100644
--- a/deps/v8/test/message/modules-import3.js
+++ b/deps/v8/test/message/fail/modules-import3.js
diff --git a/deps/v8/test/message/modules-import3.out b/deps/v8/test/message/fail/modules-import3.out
index 75abc74b50..75abc74b50 100644
--- a/deps/v8/test/message/modules-import3.out
+++ b/deps/v8/test/message/fail/modules-import3.out
diff --git a/deps/v8/test/message/modules-import4.js b/deps/v8/test/message/fail/modules-import4.js
index 0410e38ec8..0410e38ec8 100644
--- a/deps/v8/test/message/modules-import4.js
+++ b/deps/v8/test/message/fail/modules-import4.js
diff --git a/deps/v8/test/message/modules-import4.out b/deps/v8/test/message/fail/modules-import4.out
index bd406e4021..bd406e4021 100644
--- a/deps/v8/test/message/modules-import4.out
+++ b/deps/v8/test/message/fail/modules-import4.out
diff --git a/deps/v8/test/message/modules-import5.js b/deps/v8/test/message/fail/modules-import5.js
index d4cb6559bd..d4cb6559bd 100644
--- a/deps/v8/test/message/modules-import5.js
+++ b/deps/v8/test/message/fail/modules-import5.js
diff --git a/deps/v8/test/message/modules-import5.out b/deps/v8/test/message/fail/modules-import5.out
index 8828774c92..8828774c92 100644
--- a/deps/v8/test/message/modules-import5.out
+++ b/deps/v8/test/message/fail/modules-import5.out
diff --git a/deps/v8/test/message/modules-import6.js b/deps/v8/test/message/fail/modules-import6.js
index f625a342b1..f625a342b1 100644
--- a/deps/v8/test/message/modules-import6.js
+++ b/deps/v8/test/message/fail/modules-import6.js
diff --git a/deps/v8/test/message/modules-import6.out b/deps/v8/test/message/fail/modules-import6.out
index 9d7eeebe5d..9d7eeebe5d 100644
--- a/deps/v8/test/message/modules-import6.out
+++ b/deps/v8/test/message/fail/modules-import6.out
diff --git a/deps/v8/test/message/modules-skip-cycle2.js b/deps/v8/test/message/fail/modules-skip-cycle2.js
index 8b5ea93468..8b5ea93468 100644
--- a/deps/v8/test/message/modules-skip-cycle2.js
+++ b/deps/v8/test/message/fail/modules-skip-cycle2.js
diff --git a/deps/v8/test/message/modules-skip-cycle3.js b/deps/v8/test/message/fail/modules-skip-cycle3.js
index a63a660c16..a63a660c16 100644
--- a/deps/v8/test/message/modules-skip-cycle3.js
+++ b/deps/v8/test/message/fail/modules-skip-cycle3.js
diff --git a/deps/v8/test/message/modules-skip-cycle5.js b/deps/v8/test/message/fail/modules-skip-cycle5.js
index 6eee47423e..6eee47423e 100644
--- a/deps/v8/test/message/modules-skip-cycle5.js
+++ b/deps/v8/test/message/fail/modules-skip-cycle5.js
diff --git a/deps/v8/test/message/modules-skip-cycle6.js b/deps/v8/test/message/fail/modules-skip-cycle6.js
index c27c302cc8..c27c302cc8 100644
--- a/deps/v8/test/message/modules-skip-cycle6.js
+++ b/deps/v8/test/message/fail/modules-skip-cycle6.js
diff --git a/deps/v8/test/message/modules-star-conflict1.js b/deps/v8/test/message/fail/modules-star-conflict1.js
index 1bf3473f47..4281f7c085 100644
--- a/deps/v8/test/message/modules-star-conflict1.js
+++ b/deps/v8/test/message/fail/modules-star-conflict1.js
@@ -4,4 +4,4 @@
//
// MODULE
-import {a} from "../mjsunit/modules-skip-7.js";
+import {a} from "../../mjsunit/modules-skip-7.js";
diff --git a/deps/v8/test/message/modules-star-conflict1.out b/deps/v8/test/message/fail/modules-star-conflict1.out
index 1a4986ad88..969a566edc 100644
--- a/deps/v8/test/message/modules-star-conflict1.out
+++ b/deps/v8/test/message/fail/modules-star-conflict1.out
@@ -1,5 +1,5 @@
*%(basename)s:7: SyntaxError: The requested module contains conflicting star exports for name 'a'
-import {a} from "../mjsunit/modules-skip-7.js";
+import {a} from "../../mjsunit/modules-skip-7.js";
^
SyntaxError: The requested module contains conflicting star exports for name 'a'
diff --git a/deps/v8/test/message/modules-star-conflict2.js b/deps/v8/test/message/fail/modules-star-conflict2.js
index 84d23d8edc..6235851ee1 100644
--- a/deps/v8/test/message/modules-star-conflict2.js
+++ b/deps/v8/test/message/fail/modules-star-conflict2.js
@@ -4,7 +4,7 @@
//
// MODULE
-export * from "../mjsunit/modules-skip-star-exports-conflict.js";
-export * from "../mjsunit/modules-skip-6.js";
+export * from "../../mjsunit/modules-skip-star-exports-conflict.js";
+export * from "../../mjsunit/modules-skip-6.js";
import {a} from "modules-star-conflict2.js";
diff --git a/deps/v8/test/message/modules-star-conflict2.out b/deps/v8/test/message/fail/modules-star-conflict2.out
index 9cbbfc4247..34827e0374 100644
--- a/deps/v8/test/message/modules-star-conflict2.out
+++ b/deps/v8/test/message/fail/modules-star-conflict2.out
@@ -1,5 +1,5 @@
*%(basename)s:7: SyntaxError: The requested module contains conflicting star exports for name 'a'
-export * from "../mjsunit/modules-skip-star-exports-conflict.js";
+export * from "../../mjsunit/modules-skip-star-exports-conflict.js";
^
SyntaxError: The requested module contains conflicting star exports for name 'a'
diff --git a/deps/v8/test/message/modules-star-default.js b/deps/v8/test/message/fail/modules-star-default.js
index 30bc8f271a..30bc8f271a 100644
--- a/deps/v8/test/message/modules-star-default.js
+++ b/deps/v8/test/message/fail/modules-star-default.js
diff --git a/deps/v8/test/message/modules-star-default.out b/deps/v8/test/message/fail/modules-star-default.out
index a3cd5a6107..a3cd5a6107 100644
--- a/deps/v8/test/message/modules-star-default.out
+++ b/deps/v8/test/message/fail/modules-star-default.out
diff --git a/deps/v8/test/message/modules-undefined-export1.js b/deps/v8/test/message/fail/modules-undefined-export1.js
index ddedbaaded..ddedbaaded 100644
--- a/deps/v8/test/message/modules-undefined-export1.js
+++ b/deps/v8/test/message/fail/modules-undefined-export1.js
diff --git a/deps/v8/test/message/modules-undefined-export1.out b/deps/v8/test/message/fail/modules-undefined-export1.out
index 66b862c167..66b862c167 100644
--- a/deps/v8/test/message/modules-undefined-export1.out
+++ b/deps/v8/test/message/fail/modules-undefined-export1.out
diff --git a/deps/v8/test/message/modules-undefined-export2.js b/deps/v8/test/message/fail/modules-undefined-export2.js
index bf8c2a0c94..bf8c2a0c94 100644
--- a/deps/v8/test/message/modules-undefined-export2.js
+++ b/deps/v8/test/message/fail/modules-undefined-export2.js
diff --git a/deps/v8/test/message/modules-undefined-export2.out b/deps/v8/test/message/fail/modules-undefined-export2.out
index fd036f190a..fd036f190a 100644
--- a/deps/v8/test/message/modules-undefined-export2.out
+++ b/deps/v8/test/message/fail/modules-undefined-export2.out
diff --git a/deps/v8/test/message/new-target-assignment.js b/deps/v8/test/message/fail/new-target-assignment.js
index 72845654d8..72845654d8 100644
--- a/deps/v8/test/message/new-target-assignment.js
+++ b/deps/v8/test/message/fail/new-target-assignment.js
diff --git a/deps/v8/test/message/new-target-assignment.out b/deps/v8/test/message/fail/new-target-assignment.out
index bc1492ca92..bc1492ca92 100644
--- a/deps/v8/test/message/new-target-assignment.out
+++ b/deps/v8/test/message/fail/new-target-assignment.out
diff --git a/deps/v8/test/message/new-target-escaped.js b/deps/v8/test/message/fail/new-target-escaped.js
index f8398bebd4..f8398bebd4 100644
--- a/deps/v8/test/message/new-target-escaped.js
+++ b/deps/v8/test/message/fail/new-target-escaped.js
diff --git a/deps/v8/test/message/new-target-escaped.out b/deps/v8/test/message/fail/new-target-escaped.out
index 21b30e3513..21b30e3513 100644
--- a/deps/v8/test/message/new-target-escaped.out
+++ b/deps/v8/test/message/fail/new-target-escaped.out
diff --git a/deps/v8/test/message/new-target-for-loop.js b/deps/v8/test/message/fail/new-target-for-loop.js
index 40ca6e483a..40ca6e483a 100644
--- a/deps/v8/test/message/new-target-for-loop.js
+++ b/deps/v8/test/message/fail/new-target-for-loop.js
diff --git a/deps/v8/test/message/new-target-for-loop.out b/deps/v8/test/message/fail/new-target-for-loop.out
index 4c1a0c638c..4c1a0c638c 100644
--- a/deps/v8/test/message/new-target-for-loop.out
+++ b/deps/v8/test/message/fail/new-target-for-loop.out
diff --git a/deps/v8/test/message/new-target-postfix-op.js b/deps/v8/test/message/fail/new-target-postfix-op.js
index 4135f15044..4135f15044 100644
--- a/deps/v8/test/message/new-target-postfix-op.js
+++ b/deps/v8/test/message/fail/new-target-postfix-op.js
diff --git a/deps/v8/test/message/new-target-postfix-op.out b/deps/v8/test/message/fail/new-target-postfix-op.out
index 1e8ef6be21..1e8ef6be21 100644
--- a/deps/v8/test/message/new-target-postfix-op.out
+++ b/deps/v8/test/message/fail/new-target-postfix-op.out
diff --git a/deps/v8/test/message/new-target-prefix-op.js b/deps/v8/test/message/fail/new-target-prefix-op.js
index e463ae3cdd..e463ae3cdd 100644
--- a/deps/v8/test/message/new-target-prefix-op.js
+++ b/deps/v8/test/message/fail/new-target-prefix-op.js
diff --git a/deps/v8/test/message/new-target-prefix-op.out b/deps/v8/test/message/fail/new-target-prefix-op.out
index a444087ec9..a444087ec9 100644
--- a/deps/v8/test/message/new-target-prefix-op.out
+++ b/deps/v8/test/message/fail/new-target-prefix-op.out
diff --git a/deps/v8/test/message/nf-yield-in-generator.js b/deps/v8/test/message/fail/nf-yield-in-generator.js
index ecdaf33242..ecdaf33242 100644
--- a/deps/v8/test/message/nf-yield-in-generator.js
+++ b/deps/v8/test/message/fail/nf-yield-in-generator.js
diff --git a/deps/v8/test/message/nf-yield-in-generator.out b/deps/v8/test/message/fail/nf-yield-in-generator.out
index 91986dfffc..91986dfffc 100644
--- a/deps/v8/test/message/nf-yield-in-generator.out
+++ b/deps/v8/test/message/fail/nf-yield-in-generator.out
diff --git a/deps/v8/test/message/nf-yield-strict-in-generator.js b/deps/v8/test/message/fail/nf-yield-strict-in-generator.js
index 7e87881bed..7e87881bed 100644
--- a/deps/v8/test/message/nf-yield-strict-in-generator.js
+++ b/deps/v8/test/message/fail/nf-yield-strict-in-generator.js
diff --git a/deps/v8/test/message/nf-yield-strict-in-generator.out b/deps/v8/test/message/fail/nf-yield-strict-in-generator.out
index 7f2a7596b3..7f2a7596b3 100644
--- a/deps/v8/test/message/nf-yield-strict-in-generator.out
+++ b/deps/v8/test/message/fail/nf-yield-strict-in-generator.out
diff --git a/deps/v8/test/message/nf-yield-strict.js b/deps/v8/test/message/fail/nf-yield-strict.js
index d959a2164e..d959a2164e 100644
--- a/deps/v8/test/message/nf-yield-strict.js
+++ b/deps/v8/test/message/fail/nf-yield-strict.js
diff --git a/deps/v8/test/message/nf-yield-strict.out b/deps/v8/test/message/fail/nf-yield-strict.out
index 38c0bade08..38c0bade08 100644
--- a/deps/v8/test/message/nf-yield-strict.out
+++ b/deps/v8/test/message/fail/nf-yield-strict.out
diff --git a/deps/v8/test/message/nfe-yield-generator.js b/deps/v8/test/message/fail/nfe-yield-generator.js
index 4e193fe881..4e193fe881 100644
--- a/deps/v8/test/message/nfe-yield-generator.js
+++ b/deps/v8/test/message/fail/nfe-yield-generator.js
diff --git a/deps/v8/test/message/nfe-yield-generator.out b/deps/v8/test/message/fail/nfe-yield-generator.out
index 75cc9a559b..75cc9a559b 100644
--- a/deps/v8/test/message/nfe-yield-generator.out
+++ b/deps/v8/test/message/fail/nfe-yield-generator.out
diff --git a/deps/v8/test/message/nfe-yield-strict.js b/deps/v8/test/message/fail/nfe-yield-strict.js
index 11021083e1..11021083e1 100644
--- a/deps/v8/test/message/nfe-yield-strict.js
+++ b/deps/v8/test/message/fail/nfe-yield-strict.js
diff --git a/deps/v8/test/message/nfe-yield-strict.out b/deps/v8/test/message/fail/nfe-yield-strict.out
index 2d6e97417b..2d6e97417b 100644
--- a/deps/v8/test/message/nfe-yield-strict.out
+++ b/deps/v8/test/message/fail/nfe-yield-strict.out
diff --git a/deps/v8/test/message/non-alphanum.js b/deps/v8/test/message/fail/non-alphanum.js
index 357ebfa1e5..357ebfa1e5 100644
--- a/deps/v8/test/message/non-alphanum.js
+++ b/deps/v8/test/message/fail/non-alphanum.js
diff --git a/deps/v8/test/message/non-alphanum.out b/deps/v8/test/message/fail/non-alphanum.out
index dc15a614eb..34464efb74 100644
--- a/deps/v8/test/message/non-alphanum.out
+++ b/deps/v8/test/message/fail/non-alphanum.out
@@ -1,6 +1,6 @@
-*%(basename)s:34: TypeError: Array.prototype.sort called on null or undefined
+*%(basename)s:34: TypeError: Cannot convert undefined or null to object
([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]])([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(![]+[])[+!+[]]]((![]+[])[+!+[]])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!!
[]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+(+[![]]+[])[+[]])[+[]]+(![]+[])[+!+[]]+(+[]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[!+[]+!+[]+!+[]+[+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([]+([]+[])[([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]
+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+!+[]]+(![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[+!+[]]+([][[]]+[])[+[]]+([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]])[+!+[]+[!+[]+!+[]+!+[]+!+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+[+[]])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]]
[([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])+[])[+[]]+(![]+[])[+[]])[+[]])
^
-TypeError: Array.prototype.sort called on null or undefined
+TypeError: Cannot convert undefined or null to object
at sort (native)
at *%(basename)s:34:410
diff --git a/deps/v8/test/message/object-rest-assignment-pattern.js b/deps/v8/test/message/fail/object-rest-assignment-pattern.js
index 27b7e3b1da..27b7e3b1da 100644
--- a/deps/v8/test/message/object-rest-assignment-pattern.js
+++ b/deps/v8/test/message/fail/object-rest-assignment-pattern.js
diff --git a/deps/v8/test/message/object-rest-assignment-pattern.out b/deps/v8/test/message/fail/object-rest-assignment-pattern.out
index 058bbc065c..058bbc065c 100644
--- a/deps/v8/test/message/object-rest-assignment-pattern.out
+++ b/deps/v8/test/message/fail/object-rest-assignment-pattern.out
diff --git a/deps/v8/test/message/object-rest-binding-pattern.js b/deps/v8/test/message/fail/object-rest-binding-pattern.js
index 894005c806..894005c806 100644
--- a/deps/v8/test/message/object-rest-binding-pattern.js
+++ b/deps/v8/test/message/fail/object-rest-binding-pattern.js
diff --git a/deps/v8/test/message/object-rest-binding-pattern.out b/deps/v8/test/message/fail/object-rest-binding-pattern.out
index 43fb7d5fd2..43fb7d5fd2 100644
--- a/deps/v8/test/message/object-rest-binding-pattern.out
+++ b/deps/v8/test/message/fail/object-rest-binding-pattern.out
diff --git a/deps/v8/test/message/overwritten-builtins.js b/deps/v8/test/message/fail/overwritten-builtins.js
index 8a838de1dd..8a838de1dd 100644
--- a/deps/v8/test/message/overwritten-builtins.js
+++ b/deps/v8/test/message/fail/overwritten-builtins.js
diff --git a/deps/v8/test/message/overwritten-builtins.out b/deps/v8/test/message/fail/overwritten-builtins.out
index 1b5c007723..1b5c007723 100644
--- a/deps/v8/test/message/overwritten-builtins.out
+++ b/deps/v8/test/message/fail/overwritten-builtins.out
diff --git a/deps/v8/test/message/paren_in_arg_string.js b/deps/v8/test/message/fail/paren_in_arg_string.js
index 83ba7c0859..83ba7c0859 100644
--- a/deps/v8/test/message/paren_in_arg_string.js
+++ b/deps/v8/test/message/fail/paren_in_arg_string.js
diff --git a/deps/v8/test/message/paren_in_arg_string.out b/deps/v8/test/message/fail/paren_in_arg_string.out
index 9784712ab8..9784712ab8 100644
--- a/deps/v8/test/message/paren_in_arg_string.out
+++ b/deps/v8/test/message/fail/paren_in_arg_string.out
diff --git a/deps/v8/test/message/redeclaration1.js b/deps/v8/test/message/fail/redeclaration1.js
index 56a891a2b0..56a891a2b0 100644
--- a/deps/v8/test/message/redeclaration1.js
+++ b/deps/v8/test/message/fail/redeclaration1.js
diff --git a/deps/v8/test/message/redeclaration1.out b/deps/v8/test/message/fail/redeclaration1.out
index 89e074701a..89e074701a 100644
--- a/deps/v8/test/message/redeclaration1.out
+++ b/deps/v8/test/message/fail/redeclaration1.out
diff --git a/deps/v8/test/message/redeclaration2.js b/deps/v8/test/message/fail/redeclaration2.js
index d8637f1503..d8637f1503 100644
--- a/deps/v8/test/message/redeclaration2.js
+++ b/deps/v8/test/message/fail/redeclaration2.js
diff --git a/deps/v8/test/message/redeclaration2.out b/deps/v8/test/message/fail/redeclaration2.out
index 8689b6928a..8689b6928a 100644
--- a/deps/v8/test/message/redeclaration2.out
+++ b/deps/v8/test/message/fail/redeclaration2.out
diff --git a/deps/v8/test/message/redeclaration3.js b/deps/v8/test/message/fail/redeclaration3.js
index 8916609597..8916609597 100644
--- a/deps/v8/test/message/redeclaration3.js
+++ b/deps/v8/test/message/fail/redeclaration3.js
diff --git a/deps/v8/test/message/redeclaration3.out b/deps/v8/test/message/fail/redeclaration3.out
index 7e691faac4..7e691faac4 100644
--- a/deps/v8/test/message/redeclaration3.out
+++ b/deps/v8/test/message/fail/redeclaration3.out
diff --git a/deps/v8/test/message/replacement-marker-as-argument.js b/deps/v8/test/message/fail/replacement-marker-as-argument.js
index 22a68ecbe5..22a68ecbe5 100644
--- a/deps/v8/test/message/replacement-marker-as-argument.js
+++ b/deps/v8/test/message/fail/replacement-marker-as-argument.js
diff --git a/deps/v8/test/message/replacement-marker-as-argument.out b/deps/v8/test/message/fail/replacement-marker-as-argument.out
index a91fe5b7e2..a91fe5b7e2 100644
--- a/deps/v8/test/message/replacement-marker-as-argument.out
+++ b/deps/v8/test/message/fail/replacement-marker-as-argument.out
diff --git a/deps/v8/test/message/rest-param-class-setter-strict.js b/deps/v8/test/message/fail/rest-param-class-setter-strict.js
index 84e9f8172b..84e9f8172b 100644
--- a/deps/v8/test/message/rest-param-class-setter-strict.js
+++ b/deps/v8/test/message/fail/rest-param-class-setter-strict.js
diff --git a/deps/v8/test/message/rest-param-class-setter-strict.out b/deps/v8/test/message/fail/rest-param-class-setter-strict.out
index 3296a0f2c8..3296a0f2c8 100644
--- a/deps/v8/test/message/rest-param-class-setter-strict.out
+++ b/deps/v8/test/message/fail/rest-param-class-setter-strict.out
diff --git a/deps/v8/test/message/rest-param-object-setter-sloppy.js b/deps/v8/test/message/fail/rest-param-object-setter-sloppy.js
index 00006545f4..00006545f4 100644
--- a/deps/v8/test/message/rest-param-object-setter-sloppy.js
+++ b/deps/v8/test/message/fail/rest-param-object-setter-sloppy.js
diff --git a/deps/v8/test/message/rest-param-object-setter-sloppy.out b/deps/v8/test/message/fail/rest-param-object-setter-sloppy.out
index 9400a2a9ce..9400a2a9ce 100644
--- a/deps/v8/test/message/rest-param-object-setter-sloppy.out
+++ b/deps/v8/test/message/fail/rest-param-object-setter-sloppy.out
diff --git a/deps/v8/test/message/rest-param-object-setter-strict.js b/deps/v8/test/message/fail/rest-param-object-setter-strict.js
index fe46fd688c..fe46fd688c 100644
--- a/deps/v8/test/message/rest-param-object-setter-strict.js
+++ b/deps/v8/test/message/fail/rest-param-object-setter-strict.js
diff --git a/deps/v8/test/message/rest-param-object-setter-strict.out b/deps/v8/test/message/fail/rest-param-object-setter-strict.out
index 3296a0f2c8..3296a0f2c8 100644
--- a/deps/v8/test/message/rest-param-object-setter-strict.out
+++ b/deps/v8/test/message/fail/rest-param-object-setter-strict.out
diff --git a/deps/v8/test/message/settimeout.js b/deps/v8/test/message/fail/settimeout.js
index 59ed1c6517..59ed1c6517 100644
--- a/deps/v8/test/message/settimeout.js
+++ b/deps/v8/test/message/fail/settimeout.js
diff --git a/deps/v8/test/message/settimeout.out b/deps/v8/test/message/fail/settimeout.out
index 7951436fdf..7951436fdf 100644
--- a/deps/v8/test/message/settimeout.out
+++ b/deps/v8/test/message/fail/settimeout.out
diff --git a/deps/v8/test/message/simple-throw.js b/deps/v8/test/message/fail/simple-throw.js
index 7b3b4cae1d..7b3b4cae1d 100644
--- a/deps/v8/test/message/simple-throw.js
+++ b/deps/v8/test/message/fail/simple-throw.js
diff --git a/deps/v8/test/message/simple-throw.out b/deps/v8/test/message/fail/simple-throw.out
index 0f359cfbaa..0f359cfbaa 100644
--- a/deps/v8/test/message/simple-throw.out
+++ b/deps/v8/test/message/fail/simple-throw.out
diff --git a/deps/v8/test/message/strict-formal-parameters.js b/deps/v8/test/message/fail/strict-formal-parameters.js
index a6c75317b3..a6c75317b3 100644
--- a/deps/v8/test/message/strict-formal-parameters.js
+++ b/deps/v8/test/message/fail/strict-formal-parameters.js
diff --git a/deps/v8/test/message/strict-formal-parameters.out b/deps/v8/test/message/fail/strict-formal-parameters.out
index 3ea3f233b7..3ea3f233b7 100644
--- a/deps/v8/test/message/strict-formal-parameters.out
+++ b/deps/v8/test/message/fail/strict-formal-parameters.out
diff --git a/deps/v8/test/message/strict-octal-number.js b/deps/v8/test/message/fail/strict-octal-number.js
index 3e991279fa..3e991279fa 100644
--- a/deps/v8/test/message/strict-octal-number.js
+++ b/deps/v8/test/message/fail/strict-octal-number.js
diff --git a/deps/v8/test/message/strict-octal-number.out b/deps/v8/test/message/fail/strict-octal-number.out
index 687321877a..687321877a 100644
--- a/deps/v8/test/message/strict-octal-number.out
+++ b/deps/v8/test/message/fail/strict-octal-number.out
diff --git a/deps/v8/test/message/strict-octal-string.js b/deps/v8/test/message/fail/strict-octal-string.js
index 87c0e99fb1..87c0e99fb1 100644
--- a/deps/v8/test/message/strict-octal-string.js
+++ b/deps/v8/test/message/fail/strict-octal-string.js
diff --git a/deps/v8/test/message/strict-octal-string.out b/deps/v8/test/message/fail/strict-octal-string.out
index 69f81a48c8..69f81a48c8 100644
--- a/deps/v8/test/message/strict-octal-string.out
+++ b/deps/v8/test/message/fail/strict-octal-string.out
diff --git a/deps/v8/test/message/strict-octal-use-strict-after.js b/deps/v8/test/message/fail/strict-octal-use-strict-after.js
index 57d0f20151..57d0f20151 100644
--- a/deps/v8/test/message/strict-octal-use-strict-after.js
+++ b/deps/v8/test/message/fail/strict-octal-use-strict-after.js
diff --git a/deps/v8/test/message/strict-octal-use-strict-after.out b/deps/v8/test/message/fail/strict-octal-use-strict-after.out
index 1fe03dd734..1fe03dd734 100644
--- a/deps/v8/test/message/strict-octal-use-strict-after.out
+++ b/deps/v8/test/message/fail/strict-octal-use-strict-after.out
diff --git a/deps/v8/test/message/strict-octal-use-strict-before.js b/deps/v8/test/message/fail/strict-octal-use-strict-before.js
index bfc380f950..bfc380f950 100644
--- a/deps/v8/test/message/strict-octal-use-strict-before.js
+++ b/deps/v8/test/message/fail/strict-octal-use-strict-before.js
diff --git a/deps/v8/test/message/strict-octal-use-strict-before.out b/deps/v8/test/message/fail/strict-octal-use-strict-before.out
index e742288431..e742288431 100644
--- a/deps/v8/test/message/strict-octal-use-strict-before.out
+++ b/deps/v8/test/message/fail/strict-octal-use-strict-before.out
diff --git a/deps/v8/test/message/strict-with.js b/deps/v8/test/message/fail/strict-with.js
index 411fc2926c..411fc2926c 100644
--- a/deps/v8/test/message/strict-with.js
+++ b/deps/v8/test/message/fail/strict-with.js
diff --git a/deps/v8/test/message/strict-with.out b/deps/v8/test/message/fail/strict-with.out
index 06e7ed852d..06e7ed852d 100644
--- a/deps/v8/test/message/strict-with.out
+++ b/deps/v8/test/message/fail/strict-with.out
diff --git a/deps/v8/test/message/super-constructor-extra-statement.js b/deps/v8/test/message/fail/super-constructor-extra-statement.js
index 541bddbde1..541bddbde1 100644
--- a/deps/v8/test/message/super-constructor-extra-statement.js
+++ b/deps/v8/test/message/fail/super-constructor-extra-statement.js
diff --git a/deps/v8/test/message/super-constructor-extra-statement.out b/deps/v8/test/message/fail/super-constructor-extra-statement.out
index 0faa3bea0d..0faa3bea0d 100644
--- a/deps/v8/test/message/super-constructor-extra-statement.out
+++ b/deps/v8/test/message/fail/super-constructor-extra-statement.out
diff --git a/deps/v8/test/message/super-constructor.js b/deps/v8/test/message/fail/super-constructor.js
index 93ca61844a..93ca61844a 100644
--- a/deps/v8/test/message/super-constructor.js
+++ b/deps/v8/test/message/fail/super-constructor.js
diff --git a/deps/v8/test/message/super-constructor.out b/deps/v8/test/message/fail/super-constructor.out
index 3fa546bd45..3fa546bd45 100644
--- a/deps/v8/test/message/super-constructor.out
+++ b/deps/v8/test/message/fail/super-constructor.out
diff --git a/deps/v8/test/message/super-in-function.js b/deps/v8/test/message/fail/super-in-function.js
index f2e2342c31..f2e2342c31 100644
--- a/deps/v8/test/message/super-in-function.js
+++ b/deps/v8/test/message/fail/super-in-function.js
diff --git a/deps/v8/test/message/super-in-function.out b/deps/v8/test/message/fail/super-in-function.out
index 19f8bf067c..19f8bf067c 100644
--- a/deps/v8/test/message/super-in-function.out
+++ b/deps/v8/test/message/fail/super-in-function.out
diff --git a/deps/v8/test/message/tonumber-symbol.js b/deps/v8/test/message/fail/tonumber-symbol.js
index 28a6d3cdeb..28a6d3cdeb 100644
--- a/deps/v8/test/message/tonumber-symbol.js
+++ b/deps/v8/test/message/fail/tonumber-symbol.js
diff --git a/deps/v8/test/message/tonumber-symbol.out b/deps/v8/test/message/fail/tonumber-symbol.out
index c09c9a97ec..c09c9a97ec 100644
--- a/deps/v8/test/message/tonumber-symbol.out
+++ b/deps/v8/test/message/fail/tonumber-symbol.out
diff --git a/deps/v8/test/message/try-catch-finally-throw-in-catch-and-finally.js b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch-and-finally.js
index 164ab699bc..164ab699bc 100644
--- a/deps/v8/test/message/try-catch-finally-throw-in-catch-and-finally.js
+++ b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch-and-finally.js
diff --git a/deps/v8/test/message/try-catch-finally-throw-in-catch-and-finally.out b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch-and-finally.out
index e3e2348c24..e3e2348c24 100644
--- a/deps/v8/test/message/try-catch-finally-throw-in-catch-and-finally.out
+++ b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch-and-finally.out
diff --git a/deps/v8/test/message/try-catch-finally-throw-in-catch.js b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch.js
index afba12c71b..afba12c71b 100644
--- a/deps/v8/test/message/try-catch-finally-throw-in-catch.js
+++ b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch.js
diff --git a/deps/v8/test/message/try-catch-finally-throw-in-catch.out b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch.out
index 618c13cdcd..618c13cdcd 100644
--- a/deps/v8/test/message/try-catch-finally-throw-in-catch.out
+++ b/deps/v8/test/message/fail/try-catch-finally-throw-in-catch.out
diff --git a/deps/v8/test/message/try-catch-finally-throw-in-finally.js b/deps/v8/test/message/fail/try-catch-finally-throw-in-finally.js
index 387df7a628..387df7a628 100644
--- a/deps/v8/test/message/try-catch-finally-throw-in-finally.js
+++ b/deps/v8/test/message/fail/try-catch-finally-throw-in-finally.js
diff --git a/deps/v8/test/message/try-catch-finally-throw-in-finally.out b/deps/v8/test/message/fail/try-catch-finally-throw-in-finally.out
index e3e2348c24..e3e2348c24 100644
--- a/deps/v8/test/message/try-catch-finally-throw-in-finally.out
+++ b/deps/v8/test/message/fail/try-catch-finally-throw-in-finally.out
diff --git a/deps/v8/test/message/try-catch-lexical-conflict.js b/deps/v8/test/message/fail/try-catch-lexical-conflict.js
index 48b1a162b1..48b1a162b1 100644
--- a/deps/v8/test/message/try-catch-lexical-conflict.js
+++ b/deps/v8/test/message/fail/try-catch-lexical-conflict.js
diff --git a/deps/v8/test/message/try-catch-lexical-conflict.out b/deps/v8/test/message/fail/try-catch-lexical-conflict.out
index 0a7a0ebc25..0a7a0ebc25 100644
--- a/deps/v8/test/message/try-catch-lexical-conflict.out
+++ b/deps/v8/test/message/fail/try-catch-lexical-conflict.out
diff --git a/deps/v8/test/message/try-catch-variable-conflict.js b/deps/v8/test/message/fail/try-catch-variable-conflict.js
index 49e120bf61..49e120bf61 100644
--- a/deps/v8/test/message/try-catch-variable-conflict.js
+++ b/deps/v8/test/message/fail/try-catch-variable-conflict.js
diff --git a/deps/v8/test/message/try-catch-variable-conflict.out b/deps/v8/test/message/fail/try-catch-variable-conflict.out
index be4858e2fa..be4858e2fa 100644
--- a/deps/v8/test/message/try-catch-variable-conflict.out
+++ b/deps/v8/test/message/fail/try-catch-variable-conflict.out
diff --git a/deps/v8/test/message/try-finally-throw-in-finally.js b/deps/v8/test/message/fail/try-finally-throw-in-finally.js
index fbd57649a3..fbd57649a3 100644
--- a/deps/v8/test/message/try-finally-throw-in-finally.js
+++ b/deps/v8/test/message/fail/try-finally-throw-in-finally.js
diff --git a/deps/v8/test/message/try-finally-throw-in-finally.out b/deps/v8/test/message/fail/try-finally-throw-in-finally.out
index 618c13cdcd..618c13cdcd 100644
--- a/deps/v8/test/message/try-finally-throw-in-finally.out
+++ b/deps/v8/test/message/fail/try-finally-throw-in-finally.out
diff --git a/deps/v8/test/message/try-finally-throw-in-try-and-finally.js b/deps/v8/test/message/fail/try-finally-throw-in-try-and-finally.js
index 1a9660c03a..1a9660c03a 100644
--- a/deps/v8/test/message/try-finally-throw-in-try-and-finally.js
+++ b/deps/v8/test/message/fail/try-finally-throw-in-try-and-finally.js
diff --git a/deps/v8/test/message/try-finally-throw-in-try-and-finally.out b/deps/v8/test/message/fail/try-finally-throw-in-try-and-finally.out
index 618c13cdcd..618c13cdcd 100644
--- a/deps/v8/test/message/try-finally-throw-in-try-and-finally.out
+++ b/deps/v8/test/message/fail/try-finally-throw-in-try-and-finally.out
diff --git a/deps/v8/test/message/try-finally-throw-in-try.js b/deps/v8/test/message/fail/try-finally-throw-in-try.js
index 428fac91d1..428fac91d1 100644
--- a/deps/v8/test/message/try-finally-throw-in-try.js
+++ b/deps/v8/test/message/fail/try-finally-throw-in-try.js
diff --git a/deps/v8/test/message/try-finally-throw-in-try.out b/deps/v8/test/message/fail/try-finally-throw-in-try.out
index 336d4cee51..336d4cee51 100644
--- a/deps/v8/test/message/try-finally-throw-in-try.out
+++ b/deps/v8/test/message/fail/try-finally-throw-in-try.out
diff --git a/deps/v8/test/message/typedarray.js b/deps/v8/test/message/fail/typedarray.js
index 8fb92fbc1b..8fb92fbc1b 100644
--- a/deps/v8/test/message/typedarray.js
+++ b/deps/v8/test/message/fail/typedarray.js
diff --git a/deps/v8/test/message/typedarray.out b/deps/v8/test/message/fail/typedarray.out
index 908dd7fa5e..908dd7fa5e 100644
--- a/deps/v8/test/message/typedarray.out
+++ b/deps/v8/test/message/fail/typedarray.out
diff --git a/deps/v8/test/message/undefined-keyed-property.js b/deps/v8/test/message/fail/undefined-keyed-property.js
index c8ae618c44..c8ae618c44 100644
--- a/deps/v8/test/message/undefined-keyed-property.js
+++ b/deps/v8/test/message/fail/undefined-keyed-property.js
diff --git a/deps/v8/test/message/undefined-keyed-property.out b/deps/v8/test/message/fail/undefined-keyed-property.out
index 84673252eb..84673252eb 100644
--- a/deps/v8/test/message/undefined-keyed-property.out
+++ b/deps/v8/test/message/fail/undefined-keyed-property.out
diff --git a/deps/v8/test/message/unicode-escape-invalid-2.js b/deps/v8/test/message/fail/unicode-escape-invalid-2.js
index b83665b197..b83665b197 100644
--- a/deps/v8/test/message/unicode-escape-invalid-2.js
+++ b/deps/v8/test/message/fail/unicode-escape-invalid-2.js
diff --git a/deps/v8/test/message/unicode-escape-invalid-2.out b/deps/v8/test/message/fail/unicode-escape-invalid-2.out
index 423e79d60e..423e79d60e 100644
--- a/deps/v8/test/message/unicode-escape-invalid-2.out
+++ b/deps/v8/test/message/fail/unicode-escape-invalid-2.out
diff --git a/deps/v8/test/message/unicode-escape-invalid.js b/deps/v8/test/message/fail/unicode-escape-invalid.js
index 5378acf816..5378acf816 100644
--- a/deps/v8/test/message/unicode-escape-invalid.js
+++ b/deps/v8/test/message/fail/unicode-escape-invalid.js
diff --git a/deps/v8/test/message/unicode-escape-invalid.out b/deps/v8/test/message/fail/unicode-escape-invalid.out
index 2bdd53881e..2bdd53881e 100644
--- a/deps/v8/test/message/unicode-escape-invalid.out
+++ b/deps/v8/test/message/fail/unicode-escape-invalid.out
diff --git a/deps/v8/test/message/unicode-escape-undefined.js b/deps/v8/test/message/fail/unicode-escape-undefined.js
index 49de2fb2c8..49de2fb2c8 100644
--- a/deps/v8/test/message/unicode-escape-undefined.js
+++ b/deps/v8/test/message/fail/unicode-escape-undefined.js
diff --git a/deps/v8/test/message/unicode-escape-undefined.out b/deps/v8/test/message/fail/unicode-escape-undefined.out
index 9b0483cdcc..9b0483cdcc 100644
--- a/deps/v8/test/message/unicode-escape-undefined.out
+++ b/deps/v8/test/message/fail/unicode-escape-undefined.out
diff --git a/deps/v8/test/message/unterminated-arg-list.js b/deps/v8/test/message/fail/unterminated-arg-list.js
index b0fd1dd893..b0fd1dd893 100644
--- a/deps/v8/test/message/unterminated-arg-list.js
+++ b/deps/v8/test/message/fail/unterminated-arg-list.js
diff --git a/deps/v8/test/message/unterminated-arg-list.out b/deps/v8/test/message/fail/unterminated-arg-list.out
index 5be2b3d90c..5be2b3d90c 100644
--- a/deps/v8/test/message/unterminated-arg-list.out
+++ b/deps/v8/test/message/fail/unterminated-arg-list.out
diff --git a/deps/v8/test/message/var-conflict-in-with.js b/deps/v8/test/message/fail/var-conflict-in-with.js
index fe58a33a07..fe58a33a07 100644
--- a/deps/v8/test/message/var-conflict-in-with.js
+++ b/deps/v8/test/message/fail/var-conflict-in-with.js
diff --git a/deps/v8/test/message/var-conflict-in-with.out b/deps/v8/test/message/fail/var-conflict-in-with.out
index 896e437107..896e437107 100644
--- a/deps/v8/test/message/var-conflict-in-with.out
+++ b/deps/v8/test/message/fail/var-conflict-in-with.out
diff --git a/deps/v8/test/message/wasm-function-name.js b/deps/v8/test/message/fail/wasm-function-name.js
index 0573db02e4..0573db02e4 100644
--- a/deps/v8/test/message/wasm-function-name.js
+++ b/deps/v8/test/message/fail/wasm-function-name.js
diff --git a/deps/v8/test/message/wasm-function-name.out b/deps/v8/test/message/fail/wasm-function-name.out
index 00626c01f3..00626c01f3 100644
--- a/deps/v8/test/message/wasm-function-name.out
+++ b/deps/v8/test/message/fail/wasm-function-name.out
diff --git a/deps/v8/test/message/wasm-module-and-function-name.js b/deps/v8/test/message/fail/wasm-module-and-function-name.js
index cab3252427..cab3252427 100644
--- a/deps/v8/test/message/wasm-module-and-function-name.js
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.js
diff --git a/deps/v8/test/message/wasm-module-and-function-name.out b/deps/v8/test/message/fail/wasm-module-and-function-name.out
index 42ba7b077c..42ba7b077c 100644
--- a/deps/v8/test/message/wasm-module-and-function-name.out
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.out
diff --git a/deps/v8/test/message/wasm-module-name.js b/deps/v8/test/message/fail/wasm-module-name.js
index 1e32a5d437..1e32a5d437 100644
--- a/deps/v8/test/message/wasm-module-name.js
+++ b/deps/v8/test/message/fail/wasm-module-name.js
diff --git a/deps/v8/test/message/wasm-module-name.out b/deps/v8/test/message/fail/wasm-module-name.out
index bc3a6c01a5..bc3a6c01a5 100644
--- a/deps/v8/test/message/wasm-module-name.out
+++ b/deps/v8/test/message/fail/wasm-module-name.out
diff --git a/deps/v8/test/message/wasm-no-name.js b/deps/v8/test/message/fail/wasm-no-name.js
index 121a7cbfe4..121a7cbfe4 100644
--- a/deps/v8/test/message/wasm-no-name.js
+++ b/deps/v8/test/message/fail/wasm-no-name.js
diff --git a/deps/v8/test/message/wasm-no-name.out b/deps/v8/test/message/fail/wasm-no-name.out
index f6b9f8d032..f6b9f8d032 100644
--- a/deps/v8/test/message/wasm-no-name.out
+++ b/deps/v8/test/message/fail/wasm-no-name.out
diff --git a/deps/v8/test/message/wasm-trap.js b/deps/v8/test/message/fail/wasm-trap.js
index 53013a7d22..53013a7d22 100644
--- a/deps/v8/test/message/wasm-trap.js
+++ b/deps/v8/test/message/fail/wasm-trap.js
diff --git a/deps/v8/test/message/wasm-trap.out b/deps/v8/test/message/fail/wasm-trap.out
index 33d6309d13..33d6309d13 100644
--- a/deps/v8/test/message/wasm-trap.out
+++ b/deps/v8/test/message/fail/wasm-trap.out
diff --git a/deps/v8/test/message/yield-in-arrow-param.js b/deps/v8/test/message/fail/yield-in-arrow-param.js
index c815fe7603..c815fe7603 100644
--- a/deps/v8/test/message/yield-in-arrow-param.js
+++ b/deps/v8/test/message/fail/yield-in-arrow-param.js
diff --git a/deps/v8/test/message/yield-in-arrow-param.out b/deps/v8/test/message/fail/yield-in-arrow-param.out
index 8eeb0df5f6..8eeb0df5f6 100644
--- a/deps/v8/test/message/yield-in-arrow-param.out
+++ b/deps/v8/test/message/fail/yield-in-arrow-param.out
diff --git a/deps/v8/test/message/yield-in-generator-param.js b/deps/v8/test/message/fail/yield-in-generator-param.js
index 1a8f8420f6..1a8f8420f6 100644
--- a/deps/v8/test/message/yield-in-generator-param.js
+++ b/deps/v8/test/message/fail/yield-in-generator-param.js
diff --git a/deps/v8/test/message/yield-in-generator-param.out b/deps/v8/test/message/fail/yield-in-generator-param.out
index ec46f478c9..ec46f478c9 100644
--- a/deps/v8/test/message/yield-in-generator-param.out
+++ b/deps/v8/test/message/fail/yield-in-generator-param.out
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index ebdf76e5e8..1c40a25186 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -34,6 +34,12 @@
# Modules which are only meant to be imported from by other tests, not to be
# tested standalone.
- 'modules-skip*': [SKIP],
+ 'fail/modules-skip*': [SKIP],
}], # ALWAYS
+
+##############################################################################
+# BUG(v8:7138).
+['arch == arm and not simulator_run and variant == wasm_traps', {
+ '*': [SKIP],
+}], # arch == arm and not simulator_run and variant == wasm_traps
]
diff --git a/deps/v8/test/message/regress/regress-1527.js b/deps/v8/test/message/regress/fail/regress-1527.js
index 682e386d3d..682e386d3d 100644
--- a/deps/v8/test/message/regress/regress-1527.js
+++ b/deps/v8/test/message/regress/fail/regress-1527.js
diff --git a/deps/v8/test/message/regress/regress-1527.out b/deps/v8/test/message/regress/fail/regress-1527.out
index dc17fb3517..dc17fb3517 100644
--- a/deps/v8/test/message/regress/regress-1527.out
+++ b/deps/v8/test/message/regress/fail/regress-1527.out
diff --git a/deps/v8/test/message/regress/regress-3995.js b/deps/v8/test/message/regress/fail/regress-3995.js
index ba84bc0965..ba84bc0965 100644
--- a/deps/v8/test/message/regress/regress-3995.js
+++ b/deps/v8/test/message/regress/fail/regress-3995.js
diff --git a/deps/v8/test/message/regress/regress-3995.out b/deps/v8/test/message/regress/fail/regress-3995.out
index e4f5b31d1f..e4f5b31d1f 100644
--- a/deps/v8/test/message/regress/regress-3995.out
+++ b/deps/v8/test/message/regress/fail/regress-3995.out
diff --git a/deps/v8/test/message/regress/regress-4266.js b/deps/v8/test/message/regress/fail/regress-4266.js
index 552176bccf..552176bccf 100644
--- a/deps/v8/test/message/regress/regress-4266.js
+++ b/deps/v8/test/message/regress/fail/regress-4266.js
diff --git a/deps/v8/test/message/regress/regress-4266.out b/deps/v8/test/message/regress/fail/regress-4266.out
index d31541debe..d31541debe 100644
--- a/deps/v8/test/message/regress/regress-4266.out
+++ b/deps/v8/test/message/regress/fail/regress-4266.out
diff --git a/deps/v8/test/message/regress/regress-5727.js b/deps/v8/test/message/regress/fail/regress-5727.js
index ac26d1b910..ac26d1b910 100644
--- a/deps/v8/test/message/regress/regress-5727.js
+++ b/deps/v8/test/message/regress/fail/regress-5727.js
diff --git a/deps/v8/test/message/regress/regress-5727.out b/deps/v8/test/message/regress/fail/regress-5727.out
index 8eb800a3c8..8eb800a3c8 100644
--- a/deps/v8/test/message/regress/regress-5727.out
+++ b/deps/v8/test/message/regress/fail/regress-5727.out
diff --git a/deps/v8/test/message/regress/regress-73.js b/deps/v8/test/message/regress/fail/regress-73.js
index 72652d74a9..137b731fc9 100644
--- a/deps/v8/test/message/regress/regress-73.js
+++ b/deps/v8/test/message/regress/fail/regress-73.js
@@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-try {
- throw 'a';
-} catch (e) {
- throw 'b';
- print('c');
-}
+try {
+ throw 'a';
+} catch (e) {
+ throw 'b';
+ print('c');
+}
diff --git a/deps/v8/test/message/regress/regress-73.out b/deps/v8/test/message/regress/fail/regress-73.out
index 28606dd8fa..28606dd8fa 100644
--- a/deps/v8/test/message/regress/regress-73.out
+++ b/deps/v8/test/message/regress/fail/regress-73.out
diff --git a/deps/v8/test/message/regress/regress-75.js b/deps/v8/test/message/regress/fail/regress-75.js
index 428fac91d1..428fac91d1 100644
--- a/deps/v8/test/message/regress/regress-75.js
+++ b/deps/v8/test/message/regress/fail/regress-75.js
diff --git a/deps/v8/test/message/regress/regress-75.out b/deps/v8/test/message/regress/fail/regress-75.out
index 336d4cee51..336d4cee51 100644
--- a/deps/v8/test/message/regress/regress-75.out
+++ b/deps/v8/test/message/regress/fail/regress-75.out
diff --git a/deps/v8/test/message/regress/regress-crbug-661579.js b/deps/v8/test/message/regress/fail/regress-crbug-661579.js
index d5c574cbda..d5c574cbda 100644
--- a/deps/v8/test/message/regress/regress-crbug-661579.js
+++ b/deps/v8/test/message/regress/fail/regress-crbug-661579.js
diff --git a/deps/v8/test/message/regress/regress-crbug-661579.out b/deps/v8/test/message/regress/fail/regress-crbug-661579.out
index 72cca79bb1..72cca79bb1 100644
--- a/deps/v8/test/message/regress/regress-crbug-661579.out
+++ b/deps/v8/test/message/regress/fail/regress-crbug-661579.out
diff --git a/deps/v8/test/message/regress/regress-crbug-669017.js b/deps/v8/test/message/regress/fail/regress-crbug-669017.js
index a8d76ecacd..a8d76ecacd 100644
--- a/deps/v8/test/message/regress/regress-crbug-669017.js
+++ b/deps/v8/test/message/regress/fail/regress-crbug-669017.js
diff --git a/deps/v8/test/message/regress/regress-crbug-669017.out b/deps/v8/test/message/regress/fail/regress-crbug-669017.out
index c2589e7a15..c2589e7a15 100644
--- a/deps/v8/test/message/regress/regress-crbug-669017.out
+++ b/deps/v8/test/message/regress/fail/regress-crbug-669017.out
diff --git a/deps/v8/test/message/regress/regress-crbug-691194.js b/deps/v8/test/message/regress/fail/regress-crbug-691194.js
index fc7a121e38..fc7a121e38 100644
--- a/deps/v8/test/message/regress/regress-crbug-691194.js
+++ b/deps/v8/test/message/regress/fail/regress-crbug-691194.js
diff --git a/deps/v8/test/message/regress/regress-crbug-691194.out b/deps/v8/test/message/regress/fail/regress-crbug-691194.out
index 43453900db..43453900db 100644
--- a/deps/v8/test/message/regress/regress-crbug-691194.out
+++ b/deps/v8/test/message/regress/fail/regress-crbug-691194.out
diff --git a/deps/v8/test/message/regress/regress-4829-1.out b/deps/v8/test/message/regress/regress-4829-1.out
deleted file mode 100644
index dc0f56666e..0000000000
--- a/deps/v8/test/message/regress/regress-4829-1.out
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-*%(basename)s:9: SyntaxError: Invalid hexadecimal escape sequence
-tag(tag`\xyy`);
- ^^^^
-SyntaxError: Invalid hexadecimal escape sequence
diff --git a/deps/v8/test/message/regress/regress-4829-2.js b/deps/v8/test/message/regress/regress-4829-2.js
deleted file mode 100644
index 2f507e3fb3..0000000000
--- a/deps/v8/test/message/regress/regress-4829-2.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-template-escapes
-
-function tag() {}
-
-`${tag`\xyy`}`;
diff --git a/deps/v8/test/message/regress/regress-4829-2.out b/deps/v8/test/message/regress/regress-4829-2.out
deleted file mode 100644
index 72784245fc..0000000000
--- a/deps/v8/test/message/regress/regress-4829-2.out
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-*%(basename)s:9: SyntaxError: Invalid hexadecimal escape sequence
-`${tag`\xyy`}`;
- ^^^^
-SyntaxError: Invalid hexadecimal escape sequence
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 1bbb2b16d8..28a1e641f6 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -63,18 +63,18 @@ class MessageTestSuite(testsuite.TestSuite):
return super(MessageTestSuite, self).CreateVariantGenerator(
variants + ["preparser"])
- def GetFlagsForTestCase(self, testcase, context):
+ def GetParametersForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
- result = []
+ files = []
+ if MODULE_PATTERN.search(source):
+ files.append("--module")
+ files.append(os.path.join(self.root, testcase.path + ".js"))
+ flags = testcase.flags + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
- result += match.strip().split()
- result += context.mode_flags
- if MODULE_PATTERN.search(source):
- result.append("--module")
- result = [x for x in result if x not in INVALID_FLAGS]
- result.append(os.path.join(self.root, testcase.path + ".js"))
- return testcase.flags + result
+ flags += match.strip().split()
+ flags = [x for x in flags if x not in INVALID_FLAGS]
+ return files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
@@ -88,9 +88,22 @@ class MessageTestSuite(testsuite.TestSuite):
return (string.startswith("==") or string.startswith("**") or
string.startswith("ANDROID"))
+ def _GetExpectedFail(self, testcase):
+ path = testcase.path
+ while path:
+ (head, tail) = os.path.split(path)
+ if tail == "fail":
+ return True
+ path = head
+ return False
+
def IsFailureOutput(self, testcase):
output = testcase.output
testpath = testcase.path
+ expected_fail = self._GetExpectedFail(testcase)
+ fail = testcase.output.exit_code != 0
+ if expected_fail != fail:
+ return True
expected_path = os.path.join(self.root, testpath + ".out")
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
diff --git a/deps/v8/test/mjsunit/array-lastindexof.js b/deps/v8/test/mjsunit/array-lastindexof.js
new file mode 100644
index 0000000000..785bd64727
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-lastindexof.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => {
+ Array.prototype.lastIndexOf.call(null, 42);
+}, TypeError);
+assertThrows(() => {
+ Array.prototype.lastIndexOf.call(undefined, 42);
+}, TypeError);
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index ddfeffe4a2..acc96117be 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -567,3 +567,7 @@ TestSortOnNonExtensible();
})()
})();
+
+assertThrows(() => {
+ Array.prototype.sort.call(undefined);
+}, TypeError);
diff --git a/deps/v8/test/mjsunit/code-coverage-block-noopt.js b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
index 727acea75d..3eba9d3f57 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-noopt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --block-coverage
-// Flags: --harmony-async-iteration --no-opt
+// Flags: --allow-natives-syntax --no-always-opt --harmony-async-iteration
+// Flags: --no-opt
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index 488af8cf96..bc4a3f1010 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --block-coverage
-// Flags: --harmony-async-iteration --opt
+// Flags: --allow-natives-syntax --no-always-opt --harmony-async-iteration --opt
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index c5e7455b1c..3355fd1259 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --block-coverage
-// Flags: --harmony-async-iteration
+// Flags: --allow-natives-syntax --no-always-opt --harmony-async-iteration
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
@@ -524,15 +523,15 @@ var FALSE = false; // 0050
`,
[{"start":0,"end":849,"count":1},
{"start":101,"end":801,"count":1},
- {"start":167,"end":172,"count":0},
- {"start":217,"end":222,"count":0},
- {"start":260,"end":265,"count":0},
- {"start":310,"end":372,"count":0},
- {"start":467,"end":472,"count":0},
- {"start":559,"end":564,"count":0},
- {"start":617,"end":680,"count":0},
- {"start":710,"end":715,"count":0},
- {"start":775,"end":780,"count":0}]
+ {"start":165,"end":172,"count":0},
+ {"start":215,"end":222,"count":0},
+ {"start":258,"end":265,"count":0},
+ {"start":308,"end":372,"count":0},
+ {"start":465,"end":472,"count":0},
+ {"start":557,"end":564,"count":0},
+ {"start":615,"end":680,"count":0},
+ {"start":708,"end":715,"count":0},
+ {"start":773,"end":780,"count":0}]
);
TestCoverage(
@@ -547,9 +546,9 @@ it.next(); it.next(); // 0250
`,
[{"start":0,"end":299,"count":1},
{"start":11,"end":201,"count":3},
- {"start":64,"end":116,"count":1},
- {"start":116,"end":121,"count":0},
- {"start":124,"end":129,"count":1},
+ {"start":64,"end":114,"count":1},
+ {"start":114,"end":121,"count":0},
+ {"start":122,"end":129,"count":1},
{"start":129,"end":200,"count":0}]
);
@@ -625,9 +624,9 @@ it.next(); it.next(); it.next(); // 0300
`,
[{"start":0,"end":349,"count":1},
{"start":11,"end":201,"count":7},
- {"start":65,"end":117,"count":1},
- {"start":117,"end":122,"count":0},
- {"start":125,"end":130,"count":1},
+ {"start":65,"end":115,"count":1},
+ {"start":115,"end":122,"count":0},
+ {"start":123,"end":130,"count":1},
{"start":130,"end":200,"count":0}]
);
@@ -667,4 +666,166 @@ f(); // 0200
{"start":61,"end":150,"count":1}]
);
+TestCoverage(
+"LogicalOrExpression assignment",
+`
+const a = true || 99 // 0000
+function b () { // 0050
+ const b = a || 2 // 0100
+} // 0150
+b() // 0200
+b() // 0250
+`,
+[{"start":0,"end":299,"count":1},
+ {"start":15,"end":20,"count":0},
+ {"start":50,"end":151,"count":2},
+ {"start":114,"end":118,"count":0}]);
+
+TestCoverage(
+"LogicalOrExpression IsTest()",
+`
+true || false // 0000
+const a = 99 // 0050
+a || 50 // 0100
+const b = false // 0150
+if (b || true) {} // 0200
+`,
+[{"start":0,"end":249,"count":1},
+ {"start":5,"end":13,"count":0},
+ {"start":102,"end":107,"count":0}]);
+
+TestCoverage(
+"LogicalAndExpression assignment",
+`
+const a = false && 99 // 0000
+function b () { // 0050
+ const b = a && 2 // 0100
+} // 0150
+b() // 0200
+b() // 0250
+const c = true && 50 // 0300
+`,
+[{"start":0,"end":349,"count":1},
+ {"start":16,"end":21,"count":0},
+ {"start":50,"end":151,"count":2},
+ {"start":114,"end":118,"count":0}]);
+
+TestCoverage(
+"LogicalAndExpression IsTest()",
+`
+false && true // 0000
+const a = 0 // 0050
+a && 50 // 0100
+const b = true // 0150
+if (b && true) {} // 0200
+true && true // 0250
+`,
+[{"start":0,"end":299,"count":1},
+ {"start":6,"end":13,"count":0},
+ {"start":102,"end":107,"count":0}]);
+
+TestCoverage(
+"NaryLogicalOr assignment",
+`
+const a = true // 0000
+const b = false // 0050
+const c = false || false || 99 // 0100
+const d = false || true || 99 // 0150
+const e = true || true || 99 // 0200
+const f = b || b || 99 // 0250
+const g = b || a || 99 // 0300
+const h = a || a || 99 // 0350
+const i = a || (b || c) || d // 0400
+`,
+[{"start":0,"end":449,"count":1},
+ {"start":174,"end":179,"count":0},
+ {"start":215,"end":222,"count":0},
+ {"start":223,"end":228,"count":0},
+ {"start":317,"end":322,"count":0},
+ {"start":362,"end":366,"count":0},
+ {"start":367,"end":372,"count":0},
+ {"start":412,"end":423,"count":0},
+ {"start":424,"end":428,"count":0}]);
+
+TestCoverage(
+"NaryLogicalOr IsTest()",
+`
+const a = true // 0000
+const b = false // 0050
+false || false || 99 // 0100
+false || true || 99 // 0150
+true || true || 99 // 0200
+b || b || 99 // 0250
+b || a || 99 // 0300
+a || a || 99 // 0350
+`,
+[{"start":0,"end":399,"count":1},
+ {"start":164,"end":169,"count":0},
+ {"start":205,"end":212,"count":0},
+ {"start":213,"end":218,"count":0},
+ {"start":307,"end":312,"count":0},
+ {"start":352,"end":356,"count":0},
+ {"start":357,"end":362,"count":0}]);
+
+TestCoverage(
+"NaryLogicalAnd assignment",
+`
+const a = true // 0000
+const b = false // 0050
+const c = false && false && 99 // 0100
+const d = false && true && 99 // 0150
+const e = true && true && 99 // 0200
+const f = true && false || true // 0250
+const g = true || false && true // 0300
+`,
+[{"start":0,"end":349,"count":1},
+ {"start":116,"end":124,"count":0},
+ {"start":125,"end":130,"count":0},
+ {"start":166,"end":173,"count":0},
+ {"start":174,"end":179,"count":0},
+ {"start":315,"end":331,"count":0}
+]);
+
+TestCoverage(
+"NaryLogicalAnd IsTest()",
+`
+const a = true // 0000
+const b = false // 0050
+false && false && 99 // 0100
+false && true && 99 // 0150
+true && true && 99 // 0200
+true && false || true // 0250
+true || false && true // 0300
+false || false || 99 || 55 // 0350
+`,
+[{"start":0,"end":399,"count":1},
+ {"start":106,"end":114,"count":0},
+ {"start":115,"end":120,"count":0},
+ {"start":156,"end":163,"count":0},
+ {"start":164,"end":169,"count":0},
+ {"start":305,"end":321,"count":0},
+ {"start":371,"end":376,"count":0}]);
+
+// see regression: https://bugs.chromium.org/p/chromium/issues/detail?id=785778
+TestCoverage(
+"logical expressions + conditional expressions",
+`
+const a = true // 0000
+const b = 99 // 0050
+const c = false // 0100
+const d = '' // 0150
+const e = a && (b ? 'left' : 'right') // 0200
+const f = a || (b ? 'left' : 'right') // 0250
+const g = c || d ? 'left' : 'right' // 0300
+const h = a && b && (b ? 'left' : 'right')// 0350
+const i = d || c || (c ? 'left' : 'right')// 0400
+`,
+[{"start":0,"end":449,"count":1},
+ {"start":227,"end":236,"count":0},
+ {"start":262,"end":287,"count":0},
+ {"start":317,"end":325,"count":0},
+ {"start":382,"end":391,"count":0},
+ {"start":423,"end":431,"count":0}
+]);
+
%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/compiler-regress-787301.js b/deps/v8/test/mjsunit/compiler-regress-787301.js
new file mode 100644
index 0000000000..851e22a0cb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler-regress-787301.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt(b) {
+ let iterator = new Set().values();
+ iterator.x = 0;
+
+ let arr = [iterator, iterator];
+ if (b)
+ return arr.slice();
+}
+
+opt(false);
+opt(false);
+%OptimizeFunctionOnNextCall(opt);
+
+let res = opt(true);
+let a = res[0];
+let b = res[1];
+
+assertTrue(a === b);
+a.x = 7;
+assertEquals(7, b.x);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
index ebdcc6cce5..e5ec075aa9 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
@@ -29,3 +29,27 @@ var s = "12345";
foo(5);
assertOptimized(foo);
})();
+
+(function() {
+ function foo(s) { return s[5]; }
+
+ foo(s);
+ foo(s);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(s);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(s);
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(s, i) { return s[i]; }
+
+ foo(s, 0);
+ foo(s, 1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(s, 5);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(s, 5);
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/function-bind.js b/deps/v8/test/mjsunit/compiler/function-bind.js
index 11337b4bf9..cc9451e0d5 100644
--- a/deps/v8/test/mjsunit/compiler/function-bind.js
+++ b/deps/v8/test/mjsunit/compiler/function-bind.js
@@ -75,3 +75,212 @@
assertEquals(0, foo(0, 1).length);
assertEquals("bound bar", foo(1, 2).name)
})();
+
+(function() {
+ function bar(f) { return f(1); }
+
+ function foo(g) { return bar(g.bind(null, 2)); }
+
+ assertEquals(3, foo((x, y) => x + y));
+ assertEquals(1, foo((x, y) => x - y));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo((x, y) => x + y));
+ assertEquals(1, foo((x, y) => x - y));
+})();
+
+(function() {
+ function add(x, y) { return x + y; }
+
+ function foo(a) { return a.map(add.bind(null, 1)); }
+
+ assertEquals([1, 2, 3], foo([0, 1, 2]));
+ assertEquals([2, 3, 4], foo([1, 2, 3]));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([1, 2, 3], foo([0, 1, 2]));
+ assertEquals([2, 3, 4], foo([1, 2, 3]));
+})();
+
+(function() {
+ const add = (x, y) => x + y;
+ const inc = add.bind(null, 1);
+
+ function foo(inc) { return inc(1); }
+
+ assertEquals(2, foo(inc));
+ assertEquals(2, foo(inc));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(inc));
+})();
+
+(function() {
+ const A = class A {};
+ const B = A.bind();
+
+ function foo() { return new B; }
+
+ assertInstanceof(foo(), A);
+ assertInstanceof(foo(), B);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertInstanceof(foo(), B);
+})();
+
+(function() {
+ const A = class A {
+ constructor(x, y, z) {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+ };
+ const B = A.bind(null, 1, 2);
+
+ function foo(z) { return new B(z); }
+
+ assertEquals(1, foo(3).x);
+ assertEquals(2, foo(3).y);
+ assertEquals(3, foo(3).z);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(3).x);
+ assertEquals(2, foo(3).y);
+ assertEquals(3, foo(3).z);
+})();
+
+(function() {
+ const A = class A {};
+
+ function foo() {
+ const B = A.bind();
+ return new B;
+ }
+
+ assertInstanceof(foo(), A);
+ assertInstanceof(foo(), A);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+})();
+
+(function() {
+ const A = class A {
+ constructor(x, y, z) {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+ };
+
+ function foo(z) {
+ const B = A.bind(null, 1, 2);
+ return new B(z);
+ }
+
+ assertEquals(1, foo(3).x);
+ assertEquals(2, foo(3).y);
+ assertEquals(3, foo(3).z);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(3).x);
+ assertEquals(2, foo(3).y);
+ assertEquals(3, foo(3).z);
+})();
+
+(function() {
+ const A = class A {};
+ const B = A.bind();
+
+ function foo(B) {
+ return new B;
+ }
+
+ assertInstanceof(foo(B), A);
+ assertInstanceof(foo(B), A);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(B), A);
+})();
+
+(function() {
+ const A = class A {
+ constructor(x, y, z) {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+ };
+ const B = A.bind(null, 1, 2);
+
+ function foo(B, z) {
+ return new B(z);
+ }
+
+ assertEquals(1, foo(B, 3).x);
+ assertEquals(2, foo(B, 3).y);
+ assertEquals(3, foo(B, 3).z);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(B, 3).x);
+ assertEquals(2, foo(B, 3).y);
+ assertEquals(3, foo(B, 3).z);
+})();
+
+(function() {
+ const A = class A {
+ constructor(value) {
+ this.value = value;
+ }
+ };
+ const C = class C extends A {
+ constructor() { super(1); }
+ };
+ const B = C.__proto__ = A.bind(null, 1);
+
+ assertInstanceof(new C(), A);
+ assertInstanceof(new C(), B);
+ assertInstanceof(new C(), C);
+ assertEquals(1, new C().value);
+ %OptimizeFunctionOnNextCall(C);
+ assertInstanceof(new C(), A);
+ assertInstanceof(new C(), B);
+ assertInstanceof(new C(), C);
+ assertEquals(1, new C().value);
+})();
+
+(function() {
+ const A = class A {};
+ const B = A.bind();
+
+ function bar(B, ...args) {
+ return new B(...args);
+ }
+ function foo(B) {
+ return bar(B)
+ }
+
+ assertInstanceof(foo(B), A);
+ assertInstanceof(foo(B), A);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(B), A);
+})();
+
+(function() {
+ const A = class A {
+ constructor(x, y, z) {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+ };
+ const B = A.bind(null, 1, 2);
+
+ function bar(B, ...args) {
+ return new B(...args);
+ }
+ function foo(B, z) {
+ return bar(B, z);
+ }
+
+ assertEquals(1, foo(B, 3).x);
+ assertEquals(2, foo(B, 3).y);
+ assertEquals(3, foo(B, 3).z);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(B, 3).x);
+ assertEquals(2, foo(B, 3).y);
+ assertEquals(3, foo(B, 3).z);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/instanceof.js b/deps/v8/test/mjsunit/compiler/instanceof.js
index f6a364e607..67127cf88e 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof.js
@@ -143,3 +143,21 @@ F.__proto__ = null;
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo());
})();
+
+(function() {
+ class B extends A {};
+
+ function makeFoo() {
+ return function foo(b) {
+ return b instanceof B;
+ }
+ }
+ makeFoo();
+ const foo = makeFoo();
+
+ assertTrue(foo(new B));
+ assertFalse(foo(new A));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(new B));
+ assertFalse(foo(new A));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/math-ceil.js b/deps/v8/test/mjsunit/compiler/math-ceil.js
new file mode 100644
index 0000000000..f91348b4d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-ceil.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure that the typing rule for Math.ceil deals correctly with
+// inputs in the range (-1.0,0.0), which are mapped to -0.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type PlainNumber \/ NaN.
+ x = +x;
+ x = Math.abs(x) - 1.0;
+ return Object.is(-0, Math.ceil(x));
+ }
+
+ assertFalse(foo(1.5));
+ assertTrue(foo(0.5));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1.5));
+ assertTrue(foo(0.5));
+})();
+
+// Ensure that the typing rule for Math.ceil deals correctly with
+// NaN inputs, which are mapped to NaN.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type PlainNumber \/ NaN.
+ x = +x;
+ x = Math.abs(x) - 1.0;
+ return Object.is(NaN, Math.ceil(x));
+ }
+
+ assertFalse(foo(1.5));
+ assertTrue(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1.5));
+ assertTrue(foo(NaN));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/math-round.js b/deps/v8/test/mjsunit/compiler/math-round.js
new file mode 100644
index 0000000000..c42bf8f2a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-round.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure that the typing rule for Math.round deals correctly with
+// inputs in the range [-0.5,0.0), which are mapped to -0.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type PlainNumber \/ NaN.
+ x = +x;
+ x = Math.abs(x) - 1.0;
+ return Object.is(-0, Math.round(x));
+ }
+
+ assertFalse(foo(1.5));
+ assertTrue(foo(0.5));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1.5));
+ assertTrue(foo(0.5));
+})();
+
+// Ensure that the typing rule for Math.round deals correctly with
+// NaN inputs, which are mapped to NaN.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type PlainNumber \/ NaN.
+ x = +x;
+ x = Math.abs(x) - 1.0;
+ return Object.is(NaN, Math.round(x));
+ }
+
+ assertFalse(foo(1.5));
+ assertTrue(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1.5));
+ assertTrue(foo(NaN));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/math-trunc.js b/deps/v8/test/mjsunit/compiler/math-trunc.js
new file mode 100644
index 0000000000..e5cc523bc0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-trunc.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure that the typing rule for Math.trunc deals correctly with
+// inputs in the range (-1.0,0.0), which are mapped to -0.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type PlainNumber \/ NaN.
+ x = +x;
+ x = Math.abs(x) - 1.0;
+ return Object.is(-0, Math.trunc(x));
+ }
+
+ assertFalse(foo(1.5));
+ assertTrue(foo(0.5));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1.5));
+ assertTrue(foo(0.5));
+})();
+
+// Ensure that the typing rule for Math.trunc deals correctly with
+// NaN inputs, which are mapped to NaN.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type PlainNumber \/ NaN.
+ x = +x;
+ x = Math.abs(x) - 1.0;
+ return Object.is(NaN, Math.trunc(x));
+ }
+
+ assertFalse(foo(1.5));
+ assertTrue(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1.5));
+ assertTrue(foo(NaN));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/nary-binary-ops.js b/deps/v8/test/mjsunit/compiler/nary-binary-ops.js
new file mode 100644
index 0000000000..b200b669aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/nary-binary-ops.js
@@ -0,0 +1,150 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that n-ary chains of binary ops give an equal result to individual
+// binary op calls. Also test binop chains inside an if condition return
+// the same branch.
+
+// Generate a function of the form
+//
+// function(init,a0,...,aN) {
+// return init + a0 + ... + aN;
+// }
+//
+// where + can be any binary operation.
+function generate_chained_op(op, num_ops) {
+ let str = "(function(init";
+ for (let i = 0; i < num_ops; i++) {
+ str += ",a"+i;
+ }
+ str += "){return (init";
+ for (let i = 0; i < num_ops; i++) {
+ str += op+"a"+i;
+ }
+ str += ");})";
+ return eval(str);
+}
+
+// Generate a function of the form
+//
+// function(init,a0,...,aN) {
+// var tmp = init;
+// tmp = tmp + a0;
+// ...
+// tmp = tmp + aN;
+// return tmp;
+// }
+//
+// where + can be any binary operation.
+function generate_nonchained_op(op, num_ops) {
+ let str = "(function(init";
+ for (let i = 0; i < num_ops; i++) {
+ str += ",a"+i;
+ }
+ str += "){ var tmp=init; ";
+ for (let i = 0; i < num_ops; i++) {
+ str += "tmp=(tmp"+op+"a"+i+");";
+ }
+ str += "return tmp;})";
+ return eval(str);
+}
+
+// Generate a function of the form
+//
+// function(init,a0,...,aN) {
+// if(init + a0 + ... + aN) return 1;
+// else return 0;
+// }
+//
+// where + can be any binary operation.
+function generate_chained_op_test(op, num_ops) {
+ let str = "(function(init";
+ for (let i = 0; i < num_ops; i++) {
+ str += ",a"+i;
+ }
+ str += "){ if(init";
+ for (let i = 0; i < num_ops; i++) {
+ str += op+"a"+i;
+ }
+ str += ")return 1;else return 0;})";
+ return eval(str);
+}
+
+// Generate a function of the form
+//
+// function(init,a0,...,aN) {
+// var tmp = init;
+// tmp = tmp + a0;
+// ...
+// tmp = tmp + aN;
+// if(tmp) return 1
+// else return 0;
+// }
+//
+// where + can be any binary operation.
+function generate_nonchained_op_test(op, num_ops) {
+ let str = "(function(init";
+ for (let i = 0; i < num_ops; i++) {
+ str += ",a"+i;
+ }
+ str += "){ var tmp=init; ";
+ for (let i = 0; i < num_ops; i++) {
+ str += "tmp=(tmp"+op+"a"+i+");";
+ }
+ str += "if(tmp)return 1;else return 0;})";
+ return eval(str);
+}
+
+const BINOPS = [
+ ",",
+ "||",
+ "&&",
+ "|",
+ "^",
+ "&",
+ "<<",
+ ">>",
+ ">>>",
+ "+",
+ "-",
+ "*",
+ "/",
+ "%",
+];
+
+// Test each binop to see if the chained version is equivalent to the non-
+// chained one.
+for (let op of BINOPS) {
+ let chained = generate_chained_op(op, 4);
+ let nonchained = generate_nonchained_op(op, 4);
+ let chained_test = generate_chained_op_test(op, 4);
+ let nonchained_test = generate_nonchained_op_test(op, 4);
+
+ // With numbers.
+ assertEquals(
+ nonchained(1,2,3,4,5),
+ chained(1,2,3,4,5),
+ "numeric " + op);
+
+ // With numbers and strings.
+ assertEquals(
+ nonchained(1,"2",3,"4",5),
+ chained(1,"2",3,"4",5),
+ "numeric and string " + op);
+
+ // Iterate over all possible combinations of 5 numbers that evaluate
+ // to boolean true or false (for testing logical ops).
+ for (var i = 0; i < 32; i++) {
+ var booleanArray = [i & 1, i & 2, i & 4, i & 8, i & 16];
+ assertEquals(
+ nonchained.apply(this, booleanArray),
+ chained.apply(this, booleanArray),
+ booleanArray.join(" " + op + " "));
+
+ assertEquals(
+ nonchained_test.apply(this, booleanArray),
+ chained_test.apply(this, booleanArray),
+ "if (" + booleanArray.join(" " + op + " ") + ")");
+ }
+}
diff --git a/deps/v8/test/mjsunit/compiler/object-is.js b/deps/v8/test/mjsunit/compiler/object-is.js
index 9537d78e3b..f89b73e9d8 100644
--- a/deps/v8/test/mjsunit/compiler/object-is.js
+++ b/deps/v8/test/mjsunit/compiler/object-is.js
@@ -126,6 +126,15 @@
})();
(function() {
+ function foo(o) { return Object.is(String(o), "foo"); }
+ assertFalse(foo("bar"));
+ assertTrue(foo("foo"));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo("bar"));
+ assertTrue(foo("foo"));
+})();
+
+(function() {
function foo(o) { return Object.is(o, o); }
assertTrue(foo(-0));
assertTrue(foo(0));
@@ -141,3 +150,25 @@
assertTrue(foo([]));
assertTrue(foo({}));
})();
+
+(function() {
+ function foo(o) { return Object.is(o|0, 0); }
+ assertTrue(foo(0));
+ assertTrue(foo(-0));
+ assertTrue(foo(NaN));
+ assertFalse(foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(0));
+ assertTrue(foo(-0));
+ assertTrue(foo(NaN));
+ assertFalse(foo(1));
+})();
+
+(function() {
+ const s = Symbol();
+ function foo() { return Object.is(s, Symbol()); }
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/osr-arguments.js b/deps/v8/test/mjsunit/compiler/osr-arguments.js
new file mode 100644
index 0000000000..14a769fc44
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-arguments.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f1() {
+ var sum = 0;
+ for (var i = 0; i < 1000; i++) {
+ sum += arguments[0] + arguments[1] + arguments[2] + arguments[3];
+ if (i == 18) %OptimizeOsr();
+ }
+ return sum;
+}
+
+let result = f1(1, 1, 2, 3);
+assertEquals(7000, result);
diff --git a/deps/v8/test/mjsunit/compiler/reflect-get.js b/deps/v8/test/mjsunit/compiler/reflect-get.js
new file mode 100644
index 0000000000..0c329e497e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/reflect-get.js
@@ -0,0 +1,68 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test Reflect.get with wrong (number of) arguments.
+(function() {
+ "use strict";
+ function foo() { return Reflect.get(); }
+
+ assertThrows(foo);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+})();
+(function() {
+ "use strict";
+ function foo(o) { return Reflect.get(o); }
+
+ assertEquals(undefined, foo({}));
+ assertEquals(undefined, foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo({}));
+})();
+(function() {
+ "use strict";
+ function foo(o) { return Reflect.get(o); }
+
+ assertThrows(foo.bind(undefined, 1));
+ assertThrows(foo.bind(undefined, undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo.bind(undefined, 'o'));
+})();
+
+// Test Reflect.get within try/catch.
+(function() {
+ const o = {x: 10};
+ "use strict";
+ function foo() {
+ try {
+ return Reflect.get(o, "x");
+ } catch (e) {
+ return 1;
+ }
+ }
+
+ assertEquals(10, foo());
+ assertEquals(10, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(10, foo());
+})();
+(function() {
+ "use strict";
+ const o = {};
+ function foo(n) {
+ try {
+ return Reflect.get(o, n);
+ } catch (e) {
+ return 1;
+ }
+ }
+
+ assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
+ assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/reflect-has.js b/deps/v8/test/mjsunit/compiler/reflect-has.js
new file mode 100644
index 0000000000..2f9ee1b66a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/reflect-has.js
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test Reflect.has with wrong (number of) arguments.
+(function() {
+ "use strict";
+ function foo() { return Reflect.has(); }
+
+ assertThrows(foo);
+ assertThrows(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo);
+})();
+(function() {
+ "use strict";
+ function foo(o) { return Reflect.has(o); }
+
+ assertFalse(foo({}));
+ assertFalse(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo({}));
+})();
+(function() {
+ "use strict";
+ function foo(o) { return Reflect.has(o); }
+
+ assertThrows(foo.bind(undefined, 1));
+ assertThrows(foo.bind(undefined, undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo.bind(undefined, 'o'));
+})();
+
+// Test Reflect.has within try/catch.
+(function() {
+ "use strict";
+ function foo() {
+ try {
+ return Reflect.has();
+ } catch (e) {
+ return 1;
+ }
+ }
+
+ assertEquals(1, foo());
+ assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo());
+})();
+(function() {
+ "use strict";
+ const o = {};
+ function foo(n) {
+ try {
+ return Reflect.has(o, n);
+ } catch (e) {
+ return 1;
+ }
+ }
+
+ assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
+ assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-7121.js b/deps/v8/test/mjsunit/compiler/regress-7121.js
new file mode 100644
index 0000000000..98c1a1ac19
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-7121.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint
+
+function foo() { %_ToLength(42n) }
+assertThrows(foo, TypeError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-772420.js b/deps/v8/test/mjsunit/compiler/regress-772420.js
new file mode 100644
index 0000000000..4b58b10909
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-772420.js
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(arg) {
+ var value;
+ // None of the branches of this switch are ever taken, but
+ // the sequence means value could be the hole.
+ switch (arg) {
+ case 1:
+ let let_var = 1;
+ case 2:
+ value = let_var;
+ }
+ // Speculative number binop with NumberOrOddball feedback.
+ // Shouldn't be optimized to pure operator since value's phi
+ // could theoretically be the hole (we would have already thrown a
+ // reference error in case 2 above if so, but TF typing still
+ // thinks it could be the hole).
+ return value * undefined;
+}
+
+foo(3);
+foo(3);
+%OptimizeFunctionOnNextCall(foo);
+foo(3);
diff --git a/deps/v8/test/mjsunit/compiler/regress-772872.js b/deps/v8/test/mjsunit/compiler/regress-772872.js
new file mode 100644
index 0000000000..345ace82bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-772872.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ for (var x = 10; x > 5; x -= 16) {}
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-773954.js b/deps/v8/test/mjsunit/compiler/regress-773954.js
new file mode 100644
index 0000000000..b78a499d37
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-773954.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+'use strict';
+var a = { x:0 };
+var b = {};
+Object.defineProperty(b, "x", {get: function () {}});
+
+function f(o) {
+ return 5 + o.x++;
+}
+
+try {
+ f(a);
+ f(b);
+} catch (e) {}
+%OptimizeFunctionOnNextCall(f);
+f(a);
diff --git a/deps/v8/test/mjsunit/compiler/regress-788539.js b/deps/v8/test/mjsunit/compiler/regress-788539.js
new file mode 100644
index 0000000000..889090cdd9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-788539.js
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-verify
+
+// This test creates a FrameState node with a DeadValue parent framestate.
+// Ensure that deadness is propagated along FrameState edges.
+
+function f1() {
+ return this;
+}
+
+function f2(x, value, type) {
+ x instanceof type
+}
+
+function f3(a) {
+ a.x = 0;
+ if (a.x === 0) {
+ a[1] = 0.1;
+ }
+ class B {
+ }
+ class C extends B {
+ bar() {
+ return super.foo()
+ }
+ }
+ B.prototype.foo = f1;
+ f2(new C().bar.call(), Object(), String);
+}
+
+f3(new Array(1));
+f3(new Array(1));
+%OptimizeFunctionOnNextCall(f3);
+f3(new Array(1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-791245.js b/deps/v8/test/mjsunit/compiler/regress-791245.js
new file mode 100644
index 0000000000..9018fb7526
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-791245.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var a, b; // Global variables that will end up with number map.
+
+for (var i = 0; i < 100000; i++) {
+ b = 1;
+ a = i + -0; // -0 is a number, so this will make "a" a heap object.
+ b = a;
+}
+
+assertTrue(a === b);
+gc();
+assertTrue(a === b);
diff --git a/deps/v8/test/mjsunit/compiler/regress-799263.js b/deps/v8/test/mjsunit/compiler/regress-799263.js
new file mode 100644
index 0000000000..b6b1165329
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-799263.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt(a, b) {
+ b[0] = 0;
+
+ a.length;
+
+ // TransitionElementsKind
+ for (let i = 0; i < 1; i++)
+ a[0] = 0;
+
+ b[0] = 9.431092e-317;
+}
+
+let arr1 = new Array(1);
+arr1[0] = 'a';
+opt(arr1, [0]);
+
+let arr2 = [0.1];
+opt(arr2, arr2);
+
+%OptimizeFunctionOnNextCall(opt);
+
+opt(arr2, arr2);
+assertEquals(9.431092e-317, arr2[0]);
diff --git a/deps/v8/test/mjsunit/compiler/string-slice.js b/deps/v8/test/mjsunit/compiler/string-slice.js
new file mode 100644
index 0000000000..6c3274753e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/string-slice.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(s) { return s.slice(-1); }
+
+ assertEquals('', foo(''));
+ assertEquals('a', foo('a'));
+ assertEquals('b', foo('ab'));
+ assertEquals('c', foo('abc'));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals('', foo(''));
+ assertEquals('a', foo('a'));
+ assertEquals('b', foo('ab'));
+ assertEquals('c', foo('abc'));
+})();
+
+(function() {
+ function foo(s) { return s.slice(-1, undefined); }
+
+ assertEquals('', foo(''));
+ assertEquals('a', foo('a'));
+ assertEquals('b', foo('ab'));
+ assertEquals('c', foo('abc'));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals('', foo(''));
+ assertEquals('a', foo('a'));
+ assertEquals('b', foo('ab'));
+ assertEquals('c', foo('abc'));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js b/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js
new file mode 100644
index 0000000000..459e2b4202
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function(){
+ function f(x){
+ return 1/(x+x);
+ }
+
+ function forgetAboutMinus0(i) {
+ var x = 0;
+ var y;
+ for(; i > 0; --i) {
+ y = f(x);
+ x = -0;
+ }
+ return y;
+ }
+
+ forgetAboutMinus0(1);
+ assertEquals(Infinity, forgetAboutMinus0(1));
+ %OptimizeFunctionOnNextCall(forgetAboutMinus0);
+ assertEquals(Infinity, forgetAboutMinus0(1));
+ assertEquals(-Infinity, forgetAboutMinus0(2));
+})();
+
+(function(){
+ function f(x){
+ return x+x;
+ }
+
+ function NumberAdd(x,y) {
+ return x + y;
+ }
+ NumberAdd(1,0.5);
+ NumberAdd(0.5, 1);
+ NumberAdd(NaN, Infinity);
+
+ function forgetAboutNaN(b) {
+ var x = b ? NaN : 1;
+ return NumberAdd(f(x), 0);
+ }
+
+ forgetAboutNaN(false);
+ assertEquals(2, forgetAboutNaN(false));
+ %OptimizeFunctionOnNextCall(forgetAboutNaN);
+ assertEquals(2, forgetAboutNaN(false));
+ assertEquals(NaN, forgetAboutNaN(true));
+})();
diff --git a/deps/v8/test/mjsunit/console.js b/deps/v8/test/mjsunit/console.js
new file mode 100644
index 0000000000..f78afc69dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/console.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+console.assert(true);
+console.assert("yes");
+assertThrows(() => console.assert(false), Error);
+assertThrows(() => console.assert(""), Error);
+assertThrows(() => console.assert(0), Error);
+
+let args = ["", {}, [], this, Array, 1, 1.4, true, false];
+
+console.log(...args);
+console.error(...args);
+console.warn(...args);
+console.info(...args);
+console.debug(...args);
+
+console.time();
+console.timeEnd();
+
+console.time("a");
+console.timeEnd("a");
+
+console.timeStamp();
+args.forEach(each => console.timeStamp(each));
+
+console.trace();
diff --git a/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
index b5a2ac995e..4aa816f6cd 100644
--- a/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
+++ b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
@@ -9,6 +9,52 @@ function ID(x) {
return x;
}
+function assertMethodDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertEquals('function', typeof descr.value);
+ assertFalse('prototype' in descr.value);
+ assertEquals("" + name, descr.value.name);
+}
+
+
+function assertGetterDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertEquals('function', typeof descr.get);
+ assertFalse('prototype' in descr.get);
+ assertEquals(undefined, descr.set);
+ assertEquals("get " + name, descr.get.name);
+}
+
+
+function assertSetterDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertEquals(undefined, descr.get);
+ assertEquals('function', typeof descr.set);
+ assertFalse('prototype' in descr.set);
+ assertEquals("set " + name, descr.set.name);
+}
+
+
+function assertAccessorDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertEquals('function', typeof descr.get);
+ assertEquals('function', typeof descr.set);
+ assertFalse('prototype' in descr.get);
+ assertFalse('prototype' in descr.set);
+ assertEquals("get " + name, descr.get.name);
+ assertEquals("set " + name, descr.set.name);
+}
+
+
(function TestComputedMethodSuper() {
class Base {
@@ -21,14 +67,28 @@ function ID(x) {
[ID('b')]() { return 'b' + super.m(); }
[0]() { return '0' + super.m(); }
[ID(1)]() { return '1' + super.m(); }
+ [ID(2147483649)]() { return '2147483649' + super.m(); }
+ [ID(4294967294)]() { return '4294967294' + super.m(); }
+ [ID(4294967295)]() { return '4294967295' + super.m(); }
}
assertSame(Derived.prototype, Derived.prototype.a[%HomeObjectSymbol()]);
+ assertMethodDescriptor(Derived.prototype, "a");
+ assertMethodDescriptor(Derived.prototype, "b");
+ assertMethodDescriptor(Derived.prototype, 0);
+ assertMethodDescriptor(Derived.prototype, 1);
+ assertMethodDescriptor(Derived.prototype, 2147483649);
+ assertMethodDescriptor(Derived.prototype, 4294967294);
+ assertMethodDescriptor(Derived.prototype, 4294967295);
+
assertEquals('a base m', new Derived().a());
assertEquals('b base m', new Derived().b());
assertEquals('0 base m', new Derived()[0]());
assertEquals('1 base m', new Derived()[1]());
+ assertEquals('2147483649 base m', new Derived()[2147483649]());
+ assertEquals('4294967294 base m', new Derived()[4294967294]());
+ assertEquals('4294967295 base m', new Derived()[4294967295]());
})();
@@ -43,11 +103,26 @@ function ID(x) {
get [ID('b')]() { return 'b' + super.m(); }
get [0]() { return '0' + super.m(); }
get [ID(1)]() { return '1' + super.m(); }
+ get [ID(2147483649)]() { return '2147483649' + super.m(); }
+ get [ID(4294967294)]() { return '4294967294' + super.m(); }
+ get [ID(4294967295)]() { return '4294967295' + super.m(); }
}
+
+ assertGetterDescriptor(Derived.prototype, "a");
+ assertGetterDescriptor(Derived.prototype, "b");
+ assertGetterDescriptor(Derived.prototype, 0);
+ assertGetterDescriptor(Derived.prototype, 1);
+ assertGetterDescriptor(Derived.prototype, 2147483649);
+ assertGetterDescriptor(Derived.prototype, 4294967294);
+ assertGetterDescriptor(Derived.prototype, 4294967295);
+
assertEquals('a base m', new Derived().a);
assertEquals('b base m', new Derived().b);
assertEquals('0 base m', new Derived()[0]);
assertEquals('1 base m', new Derived()[1]);
+ assertEquals('2147483649 base m', new Derived()[2147483649]);
+ assertEquals('4294967294 base m', new Derived()[4294967294]);
+ assertEquals('4294967295 base m', new Derived()[4294967295]);
})();
@@ -63,7 +138,18 @@ function ID(x) {
set [ID('b')](v) { super.m('b', v); }
set [0](v) { super.m('0', v); }
set [ID(1)](v) { super.m('1', v); }
+ set [ID(2147483649)](v) { super.m('2147483649', v); }
+ set [ID(4294967294)](v) { super.m('4294967294', v); }
+ set [ID(4294967295)](v) { super.m('4294967295', v); }
}
+ assertSetterDescriptor(Derived.prototype, "a");
+ assertSetterDescriptor(Derived.prototype, "b");
+ assertSetterDescriptor(Derived.prototype, 0);
+ assertSetterDescriptor(Derived.prototype, 1);
+ assertSetterDescriptor(Derived.prototype, 2147483649);
+ assertSetterDescriptor(Derived.prototype, 4294967294);
+ assertSetterDescriptor(Derived.prototype, 4294967295);
+
new Derived().a = 2;
assertEquals('a 2', value);
new Derived().b = 3;
@@ -72,4 +158,10 @@ function ID(x) {
assertEquals('0 4', value);
new Derived()[1] = 5;
assertEquals('1 5', value);
+ new Derived()[2147483649] = 6;
+ assertEquals('2147483649 6', value);
+ new Derived()[4294967294] = 7;
+ assertEquals('4294967294 7', value);
+ new Derived()[4294967295] = 8;
+ assertEquals('4294967295 8', value);
})();
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index f8a1499aef..a123dadc52 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -195,6 +195,7 @@ function assertMethodDescriptor(object, name) {
assertTrue(descr.writable);
assertEquals('function', typeof descr.value);
assertFalse('prototype' in descr.value);
+ assertEquals(name, descr.value.name);
}
@@ -205,6 +206,7 @@ function assertGetterDescriptor(object, name) {
assertEquals('function', typeof descr.get);
assertFalse('prototype' in descr.get);
assertEquals(undefined, descr.set);
+ assertEquals("get " + name, descr.get.name);
}
@@ -215,6 +217,7 @@ function assertSetterDescriptor(object, name) {
assertEquals(undefined, descr.get);
assertEquals('function', typeof descr.set);
assertFalse('prototype' in descr.set);
+ assertEquals("set " + name, descr.set.name);
}
@@ -226,6 +229,8 @@ function assertAccessorDescriptor(object, name) {
assertEquals('function', typeof descr.set);
assertFalse('prototype' in descr.get);
assertFalse('prototype' in descr.set);
+ assertEquals("get " + name, descr.get.name);
+ assertEquals("set " + name, descr.set.name);
}
@@ -590,15 +595,38 @@ function assertAccessorDescriptor(object, name) {
static 4() { return 4; }
static get 5() { return 5; }
static set 6(_) {}
+
+ 2147483649() { return 2147483649; }
+ get 2147483650() { return 2147483650; }
+ set 2147483651(_) {}
+
+ static 2147483652() { return 2147483652; }
+ static get 2147483653() { return 2147483653; }
+ static set 2147483654(_) {}
+
+ 4294967294() { return 4294967294; }
+ 4294967295() { return 4294967295; }
+ static 4294967294() { return 4294967294; }
+ static 4294967295() { return 4294967295; }
}
assertMethodDescriptor(B.prototype, '1');
assertGetterDescriptor(B.prototype, '2');
assertSetterDescriptor(B.prototype, '3');
+ assertMethodDescriptor(B.prototype, '2147483649');
+ assertGetterDescriptor(B.prototype, '2147483650');
+ assertSetterDescriptor(B.prototype, '2147483651');
+ assertMethodDescriptor(B.prototype, '4294967294');
+ assertMethodDescriptor(B.prototype, '4294967295');
assertMethodDescriptor(B, '4');
assertGetterDescriptor(B, '5');
assertSetterDescriptor(B, '6');
+ assertMethodDescriptor(B, '2147483652');
+ assertGetterDescriptor(B, '2147483653');
+ assertSetterDescriptor(B, '2147483654');
+ assertMethodDescriptor(B, '4294967294');
+ assertMethodDescriptor(B, '4294967295');
class C extends B {
1() { return super[1](); }
@@ -606,12 +634,23 @@ function assertAccessorDescriptor(object, name) {
static 4() { return super[4](); }
static get 5() { return super[5]; }
+
+ 2147483649() { return super[2147483649](); }
+ get 2147483650() { return super[2147483650]; }
+
+ static 2147483652() { return super[2147483652](); }
+ static get 2147483653() { return super[2147483653]; }
+
}
assertEquals(1, new C()[1]());
assertEquals(2, new C()[2]);
+ assertEquals(2147483649, new C()[2147483649]());
+ assertEquals(2147483650, new C()[2147483650]);
assertEquals(4, C[4]());
assertEquals(5, C[5]);
+ assertEquals(2147483652, C[2147483652]());
+ assertEquals(2147483653, C[2147483653]);
})();
@@ -1047,3 +1086,103 @@ function testClassRestrictedProperties(C) {
assertEquals(42, usingYieldInExtends());
})();
+
+
+(function testLargeClassesMethods() {
+ const kLimit = 2000;
+ let evalString = "(function(i) { " +
+ "let clazz = class { " +
+ " constructor(i) { this.value = i; } ";
+ for (let i = 0; i < 2000; i++) {
+ evalString += "property"+i+"() { return "+i+"; }; "
+ }
+ evalString += "};" +
+ " return new clazz(i); })";
+
+ let fn = eval(evalString);
+ assertEquals(fn(1).value, 1);
+ assertEquals(fn(2).value, 2);
+ assertEquals(fn(3).value, 3);
+ %OptimizeFunctionOnNextCall(fn);
+ assertEquals(fn(4).value, 4);
+
+ let instance = fn(1);
+ assertEquals(Object.getOwnPropertyNames(instance).length, 1);
+ assertEquals(Object.getOwnPropertyNames(instance.__proto__).length,
+ kLimit + 1);
+
+ // Call all instance functions.
+ for (let i = 0; i < kLimit; i++) {
+ const key = "property" + i;
+ assertEquals(instance[key](), i);
+ }
+})();
+
+
+(function testLargeClassesStaticMethods() {
+ const kLimit = 2000;
+ let evalString = "(function(i) { " +
+ "let clazz = class { " +
+ " constructor(i) { this.value = i; } ";
+ for (let i = 0; i < kLimit; i++) {
+ evalString += "static property"+i+"() { return "+i+" }; "
+ }
+ evalString += "};" +
+ " return new clazz(i); })";
+
+ let fn = eval(evalString);
+
+ assertEquals(fn(1).value, 1);
+ assertEquals(fn(2).value, 2);
+ assertEquals(fn(3).value, 3);
+ %OptimizeFunctionOnNextCall(fn);
+ assertEquals(fn(4).value, 4);
+
+ let instance = fn(1);
+ assertEquals(Object.getOwnPropertyNames(instance).length, 1);
+ assertEquals(instance.value, 1);
+ instance.value = 10;
+ assertEquals(instance.value, 10);
+
+ // kLimit + nof default properties (length, prototype, name).
+ assertEquals(Object.getOwnPropertyNames(instance.constructor).length,
+ kLimit + 3);
+
+ // Call all static properties.
+ for (let i = 0; i < kLimit; i++) {
+ const key = "property" + i;
+ assertEquals(instance.constructor[key](), i);
+ }
+})();
+
+
+(function testLargeClassesProperties(){
+ const kLimit = 2000;
+ let evalString = "(function(i) { " +
+ "let clazz = class { " +
+ " constructor(i) { this.value = i;";
+ for (let i = 0; i < kLimit ; i++) {
+ evalString += "this.property"+i +" = "+i+"; "
+ }
+ evalString += "}};" +
+ " return (new clazz(i)); })";
+
+ let fn = eval(evalString);
+ assertEquals(fn(1).value, 1);
+ assertEquals(fn(2).value, 2);
+ assertEquals(fn(3).value, 3);
+ %OptimizeFunctionOnNextCall(fn);
+ assertEquals(fn(4).value, 4);
+
+ let instance = fn(1);
+ assertEquals(Object.getOwnPropertyNames(instance).length, kLimit+1);
+
+ // Get and set all properties.
+ for (let i = 0; i < kLimit; i++) {
+ const key = "property" + i;
+ assertEquals(instance[key], i);
+ const value = "value"+i;
+ instance[key] = value;
+ assertEquals(instance[key], value);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es6/completion.js b/deps/v8/test/mjsunit/es6/completion.js
index b9d93f41c0..d88fcd054f 100644
--- a/deps/v8/test/mjsunit/es6/completion.js
+++ b/deps/v8/test/mjsunit/es6/completion.js
@@ -169,3 +169,10 @@ assertUndef(eval(
assertUndef(eval("1; try{2; throwOnReturn();} catch(e){}"));
assertUndef(eval("1; twoFunc();"));
assertEquals(2, eval("1; with ( { a: 0 } ) { 2; }"));
+
+// https://bugs.chromium.org/p/chromium/issues/detail?id=787698
+assertEquals(42, eval("try {42} catch (_) {} finally {}"));
+assertEquals(42, eval("try {42} catch (_) {} finally {43}"));
+assertEquals(42, eval("foo: try {42} catch (_) {} finally {}"));
+assertEquals(42, eval("foo: try {42} catch (_) {} finally {43}"));
+assertEquals(43, eval("foo: try {42} catch (_) {} finally {43; break foo}"));
diff --git a/deps/v8/test/mjsunit/es6/regexp-sticky.js b/deps/v8/test/mjsunit/es6/regexp-sticky.js
index df39763694..46006fb4e0 100644
--- a/deps/v8/test/mjsunit/es6/regexp-sticky.js
+++ b/deps/v8/test/mjsunit/es6/regexp-sticky.js
@@ -43,9 +43,11 @@ assertFalse(!!"..foo*bar".match(sticky));
var stickyplain = /foobar/y;
-assertTrue(!!"foobar".match(stickyplain));
+assertTrue(!!"foobarfoobar".match(stickyplain));
assertEquals(6, stickyplain.lastIndex);
-assertFalse(!!"..foobar".match(stickyplain));
+assertTrue(!!"foobarfoobar".match(stickyplain));
+assertEquals(12, stickyplain.lastIndex);
+assertFalse(!!"..foobarfoobar".match(stickyplain));
var global = /foo.bar/g;
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js b/deps/v8/test/mjsunit/es6/sloppy-no-duplicate-generators.js
index de2e461f95..a10f3f539f 100644
--- a/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js
+++ b/deps/v8/test/mjsunit/es6/sloppy-no-duplicate-generators.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-restrictive-generators
-
// Generators don't get sloppy-mode block-scoped function hoisting
// No hoisting to the global scope
diff --git a/deps/v8/test/mjsunit/es6/string-match.js b/deps/v8/test/mjsunit/es6/string-match.js
index 2c7affe454..6fb6a1d072 100644
--- a/deps/v8/test/mjsunit/es6/string-match.js
+++ b/deps/v8/test/mjsunit/es6/string-match.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var pattern = {};
+const pattern = {};
pattern[Symbol.match] = function(string) {
return string.length;
};
@@ -11,6 +11,9 @@ assertThrows(() => String.prototype.match.call(null, pattern),
TypeError);
// Override is called.
assertEquals(5, "abcde".match(pattern));
+// Receiver is not converted to string if pattern has Symbol.match
+const receiver = { toString(){ throw new Error(); }, length: 6 };
+assertEquals(6, String.prototype.match.call(receiver, pattern));
// Non-callable override.
pattern[Symbol.match] = "dumdidum";
assertThrows(() => "abcde".match(pattern), TypeError);
diff --git a/deps/v8/test/mjsunit/es6/string-search.js b/deps/v8/test/mjsunit/es6/string-search.js
index cbdf33d692..aa7105724b 100644
--- a/deps/v8/test/mjsunit/es6/string-search.js
+++ b/deps/v8/test/mjsunit/es6/string-search.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var pattern = {};
+const pattern = {};
pattern[Symbol.search] = function(string) {
return string.length;
};
@@ -11,6 +11,9 @@ assertThrows(() => String.prototype.search.call(null, pattern),
TypeError);
// Override is called.
assertEquals(5, "abcde".search(pattern));
+// Receiver is not converted to string if pattern has Symbol.match
+const receiver = { toString(){ throw new Error(); }, length: 6 };
+assertEquals(6, String.prototype.search.call(receiver, pattern));
// Non-callable override.
pattern[Symbol.search] = "dumdidum";
assertThrows(() => "abcde".search(pattern), TypeError);
diff --git a/deps/v8/test/mjsunit/es6/templates.js b/deps/v8/test/mjsunit/es6/templates.js
index a5157b85f9..3eb73e4d16 100644
--- a/deps/v8/test/mjsunit/es6/templates.js
+++ b/deps/v8/test/mjsunit/es6/templates.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-harmony-template-escapes
-
var num = 5;
var str = "str";
function fn() { return "result"; }
@@ -481,8 +479,9 @@ var obj = {
for (var i = 0; i < 10; i++) {
var code = "`\\0" + i + "`";
assertThrows(code, SyntaxError);
+ // Not an error if tagged.
code = "(function(){})" + code;
- assertThrows(code, SyntaxError);
+ assertDoesNotThrow(code, SyntaxError);
}
assertEquals('\\0', String.raw`\0`);
@@ -495,8 +494,9 @@ var obj = {
for (var i = 1; i < 8; i++) {
var code = "`\\" + i + "`";
assertThrows(code, SyntaxError);
+ // Not an error if tagged.
code = "(function(){})" + code;
- assertThrows(code, SyntaxError);
+ assertDoesNotThrow(code, SyntaxError);
}
})();
@@ -716,3 +716,22 @@ var global = this;
assertEquals(["a", "b"], result);
assertSame(result, f());
})();
+
+(function testTaggedTemplateInvalidAssignmentTargetStrict() {
+ "use strict";
+ function f() {}
+ assertThrows(() => Function("++f`foo`"), ReferenceError);
+ assertThrows(() => Function("f`foo`++"), ReferenceError);
+ assertThrows(() => Function("--f`foo`"), ReferenceError);
+ assertThrows(() => Function("f`foo`--"), ReferenceError);
+ assertThrows(() => Function("f`foo` = 1"), ReferenceError);
+})();
+
+(function testTaggedTemplateInvalidAssignmentTargetSloppy() {
+ function f() {}
+ assertThrows(() => Function("++f`foo`"), ReferenceError);
+ assertThrows(() => Function("f`foo`++"), ReferenceError);
+ assertThrows(() => Function("--f`foo`"), ReferenceError);
+ assertThrows(() => Function("f`foo`--"), ReferenceError);
+ assertThrows(() => Function("f`foo` = 1"), ReferenceError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index 5f4d3f0747..93d92097cd 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -626,6 +626,17 @@ function TestTypedArraySet() {
assertThrows(() => a111.set(evilarr), TypeError);
assertEquals(true, detached);
+ // Check if the target is a typed array before converting offset to integer
+ var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 1;
+ }
+ };
+ assertThrows(() => Int8Array.prototype.set.call(1, tmp), TypeError);
+ assertThrows(() => Int8Array.prototype.set.call([], tmp), TypeError);
+
// Detached array buffer when converting offset.
{
for (const klass of typedArrayConstructors) {
diff --git a/deps/v8/test/mjsunit/es6/unicode-character-ranges.js b/deps/v8/test/mjsunit/es6/unicode-character-ranges.js
index f39004fe97..770d5a5363 100644
--- a/deps/v8/test/mjsunit/es6/unicode-character-ranges.js
+++ b/deps/v8/test/mjsunit/es6/unicode-character-ranges.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-lookbehind
-
function execl(expectation, regexp, subject) {
if (regexp instanceof String) regexp = new RegExp(regexp, "u");
assertEquals(expectation, regexp.exec(subject));
diff --git a/deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js b/deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js
index 56b9c5eb8c..0a0b951bd9 100644
--- a/deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-lookbehind
-
// Back reference does not end in the middle of a surrogate pair.
function replace(string) {
return string.replace(/L/g, "\ud800")
diff --git a/deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js b/deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js
index 67fbac7ef3..714d3d1e54 100644
--- a/deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-lookbehind
-
var r = /./ug;
assertEquals(["\ud800\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
assertEquals(2, r.lastIndex);
diff --git a/deps/v8/test/mjsunit/harmony/object-rest-basic.js b/deps/v8/test/mjsunit/es9/object-rest-basic.js
index e539ccbb3b..caaee6f40c 100644
--- a/deps/v8/test/mjsunit/harmony/object-rest-basic.js
+++ b/deps/v8/test/mjsunit/es9/object-rest-basic.js
@@ -2,7 +2,6 @@
// // Use of this source code is governed by a BSD-style license that can be
// // found in the LICENSE file.
-// Flags: --harmony-object-rest-spread
var { ...x } = { a: 1 };
assertEquals({ a: 1 }, x);
diff --git a/deps/v8/test/mjsunit/harmony/object-spread-basic.js b/deps/v8/test/mjsunit/es9/object-spread-basic.js
index 8724b1af28..8264da47a5 100644
--- a/deps/v8/test/mjsunit/harmony/object-spread-basic.js
+++ b/deps/v8/test/mjsunit/es9/object-spread-basic.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-rest-spread
-
var x = {a: 1};
var y = { ...x};
assertEquals(x, y);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-lookbehind.js b/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
index 5148068c3f..54c975cfdf 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-lookbehind.js
+++ b/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-lookbehind
-
// Simple fixed-length matches.
assertEquals(["a"], "a".match(/^.(?<=a)/));
assertNull("b".match(/^.(?<=a)/));
diff --git a/deps/v8/test/mjsunit/harmony/template-escapes.js b/deps/v8/test/mjsunit/es9/template-escapes.js
index ea019851ff..d7af8bacc1 100644
--- a/deps/v8/test/mjsunit/harmony/template-escapes.js
+++ b/deps/v8/test/mjsunit/es9/template-escapes.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-template-escapes
-
function check({cooked, raw, exprs}) {
return function(strs, ...args) {
assertArrayEquals(cooked, strs);
diff --git a/deps/v8/test/mjsunit/filter-element-kinds.js b/deps/v8/test/mjsunit/filter-element-kinds.js
new file mode 100644
index 0000000000..7853a33b9c
--- /dev/null
+++ b/deps/v8/test/mjsunit/filter-element-kinds.js
@@ -0,0 +1,144 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc --opt --no-always-opt
+
+var elements_kind = {
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ external_byte : 'external byte elements',
+ external_unsigned_byte : 'external unsigned byte elements',
+ external_short : 'external short elements',
+ external_unsigned_short : 'external unsigned short elements',
+ external_int : 'external int elements',
+ external_unsigned_int : 'external unsigned int elements',
+ external_float : 'external float elements',
+ external_double : 'external double elements',
+ external_pixel : 'external pixel elements'
+}
+
+function getKind(obj) {
+ if (%HasSmiElements(obj)) return elements_kind.fast_smi_only;
+ if (%HasObjectElements(obj)) return elements_kind.fast;
+ if (%HasDoubleElements(obj)) return elements_kind.fast_double;
+ if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+}
+
+function isHoley(obj) {
+ if (%HasHoleyElements(obj)) return true;
+ return false;
+}
+
+function assertKind(expected, obj, name_opt) {
+ assertEquals(expected, getKind(obj), name_opt);
+}
+
+function assertHoley(obj, name_opt) {
+ assertEquals(true, isHoley(obj), name_opt);
+}
+
+function assertNotHoley(obj, name_opt) {
+ assertEquals(false, isHoley(obj), name_opt);
+}
+
+// Create a new closure that inlines Array.prototype.filter().
+function create(a) {
+ return function() {
+ return a.filter(x => false);
+ }
+}
+
+function runTest(test, kind, holey_predicate) {
+
+ // Verify built-in implementation produces correct results.
+ let a = test();
+ assertKind(kind, a);
+ holey_predicate(a);
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+
+ // Now for optimized code.
+ a = test();
+ assertKind(kind, a);
+ holey_predicate(a);
+}
+
+function chooseHoleyPredicate(a) {
+ return isHoley(a) ? assertHoley : assertNotHoley;
+}
+
+(function() {
+ let data = [];
+
+ // Packed literal arrays.
+ data.push(() => [1, 2, 3]);
+ data.push(() => [true, true, false]);
+ data.push(() => [1.0, 1.5, 3.5]);
+ // Holey literal arrays.
+ data.push(() => { let obj = [1,, 3]; obj[1] = 2; return obj; });
+ data.push(() => { let obj = [true,, false]; obj[1] = true; return obj; });
+ data.push(() => { let obj = [1.0,, 3.5]; obj[1] = 1.5; return obj; });
+ // Packed constructed arrays.
+ data.push(() => new Array(1, 2, 3));
+ data.push(() => new Array(true, true, false));
+ data.push(() => new Array(1.0, 1.5, 3.5));
+
+ // Holey constructed arrays.
+ data.push(() => {
+ let obj = new Array(3);
+ obj[0] = 1;
+ obj[1] = 2;
+ obj[2] = 3;
+ return obj;
+ });
+
+ data.push(() => {
+ let obj = new Array(3);
+ obj[0] = true;
+ obj[1] = true;
+ obj[2] = false;
+ return obj;
+ });
+
+ data.push(() => {
+ let obj = new Array(3);
+ obj[0] = 1.0;
+ obj[1] = 1.5;
+ obj[2] = 3.5;
+ return obj;
+ });
+
+ for (datum of data) {
+ let a = datum();
+ // runTest(create(a), getKind(a), chooseHoleyPredicate(a));
+ let f = function() { return a.filter(x => false); }
+ runTest(f, getKind(a), chooseHoleyPredicate(a));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/function-call.js b/deps/v8/test/mjsunit/function-call.js
index fb91dcd879..711acce0d7 100644
--- a/deps/v8/test/mjsunit/function-call.js
+++ b/deps/v8/test/mjsunit/function-call.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-var should_throw_on_null_and_undefined =
+const should_throw_on_null_and_undefined =
[Object.prototype.toLocaleString,
Object.prototype.valueOf,
Object.prototype.hasOwnProperty,
@@ -39,7 +39,6 @@ var should_throw_on_null_and_undefined =
Array.prototype.reverse,
Array.prototype.shift,
Array.prototype.slice,
- Array.prototype.sort,
Array.prototype.splice,
Array.prototype.unshift,
Array.prototype.indexOf,
@@ -72,7 +71,7 @@ var should_throw_on_null_and_undefined =
// Non generic natives do not work on any input other than the specific
// type, but since this change will allow call to be invoked with undefined
// or null as this we still explicitly test that we throw on these here.
-var non_generic =
+const non_generic =
[Array.prototype.toString,
Array.prototype.toLocaleString,
Function.prototype.toString,
@@ -137,7 +136,7 @@ var non_generic =
// Mapping functions.
-var mapping_functions =
+const mapping_functions =
[Array.prototype.every,
Array.prototype.some,
Array.prototype.forEach,
@@ -145,27 +144,27 @@ var mapping_functions =
Array.prototype.filter];
// Reduce functions.
-var reducing_functions =
+const reducing_functions =
[Array.prototype.reduce,
Array.prototype.reduceRight];
function checkExpectedMessage(e) {
- assertTrue(e.message.indexOf("called on null or undefined") >= 0 ||
- e.message.indexOf("invoked on undefined or null value") >= 0 ||
- e.message.indexOf("Cannot convert undefined or null to object") >= 0);
+ assertTrue(e.message.includes("called on null or undefined") ||
+ e.message.includes("invoked on undefined or null value") ||
+ e.message.includes("Cannot convert undefined or null to object"));
}
// Test that all natives using the ToObject call throw the right exception.
-for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
+for (const fn of should_throw_on_null_and_undefined) {
// Sanity check that all functions are correct
- assertEquals(typeof(should_throw_on_null_and_undefined[i]), "function");
+ assertEquals(typeof fn, "function");
- var exception = false;
+ let exception = false;
try {
// We need to pass a dummy object argument ({}) to these functions because
// of Object.prototype.isPrototypeOf's special behavior, see issue 3483
// for more details.
- should_throw_on_null_and_undefined[i].call(null, {});
+ fn.call(null, {});
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -174,7 +173,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
exception = false;
try {
- should_throw_on_null_and_undefined[i].call(undefined, {});
+ fn.call(undefined, {});
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -183,7 +182,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
exception = false;
try {
- should_throw_on_null_and_undefined[i].apply(null, [{}]);
+ fn.apply(null, [{}]);
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -192,7 +191,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
exception = false;
try {
- should_throw_on_null_and_undefined[i].apply(undefined, [{}]);
+ fn.apply(undefined, [{}]);
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -201,13 +200,13 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
}
// Test that all natives that are non generic throw on null and undefined.
-for (var i = 0; i < non_generic.length; i++) {
+for (const fn of non_generic) {
// Sanity check that all functions are correct
- assertEquals(typeof(non_generic[i]), "function");
+ assertEquals(typeof fn, "function");
exception = false;
try {
- non_generic[i].call(null);
+ fn.call(null);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -216,7 +215,7 @@ for (var i = 0; i < non_generic.length; i++) {
exception = false;
try {
- non_generic[i].call(null);
+ fn.call(null);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -225,7 +224,7 @@ for (var i = 0; i < non_generic.length; i++) {
exception = false;
try {
- non_generic[i].apply(null);
+ fn.apply(null);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -234,7 +233,7 @@ for (var i = 0; i < non_generic.length; i++) {
exception = false;
try {
- non_generic[i].apply(null);
+ fn.apply(null);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -247,14 +246,14 @@ for (var i = 0; i < non_generic.length; i++) {
// through an array mapping function.
// We need to make sure that the elements of `array` are all object values,
// see issue 3483 for more details.
-var array = [{}, [], new Number, new Map, new WeakSet];
-for (var j = 0; j < mapping_functions.length; j++) {
- for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
+const array = [{}, [], new Number, new Map, new WeakSet];
+for (const mapping_function of mapping_functions) {
+ for (const fn of should_throw_on_null_and_undefined) {
exception = false;
try {
- mapping_functions[j].call(array,
- should_throw_on_null_and_undefined[i],
- null);
+ mapping_function.call(array,
+ fn,
+ null);
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -263,9 +262,9 @@ for (var j = 0; j < mapping_functions.length; j++) {
exception = false;
try {
- mapping_functions[j].call(array,
- should_throw_on_null_and_undefined[i],
- undefined);
+ mapping_function.call(array,
+ fn,
+ undefined);
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -274,13 +273,13 @@ for (var j = 0; j < mapping_functions.length; j++) {
}
}
-for (var j = 0; j < mapping_functions.length; j++) {
- for (var i = 0; i < non_generic.length; i++) {
+for (const mapping_function of mapping_functions) {
+ for (const fn of non_generic) {
exception = false;
try {
- mapping_functions[j].call(array,
- non_generic[i],
- null);
+ mapping_function.call(array,
+ fn,
+ null);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -289,9 +288,9 @@ for (var j = 0; j < mapping_functions.length; j++) {
exception = false;
try {
- mapping_functions[j].call(array,
- non_generic[i],
- undefined);
+ mapping_function.call(array,
+ fn,
+ undefined);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -302,11 +301,11 @@ for (var j = 0; j < mapping_functions.length; j++) {
// Reduce functions do a call with null as this argument.
-for (var j = 0; j < reducing_functions.length; j++) {
- for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
+for (const reducing_function of reducing_functions) {
+ for (const fn of should_throw_on_null_and_undefined) {
exception = false;
try {
- reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
+ reducing_function.call(array, fn);
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -315,7 +314,7 @@ for (var j = 0; j < reducing_functions.length; j++) {
exception = false;
try {
- reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
+ reducing_function.call(array, fn);
} catch (e) {
exception = true;
checkExpectedMessage(e);
@@ -324,11 +323,11 @@ for (var j = 0; j < reducing_functions.length; j++) {
}
}
-for (var j = 0; j < reducing_functions.length; j++) {
- for (var i = 0; i < non_generic.length; i++) {
+for (const reducing_function of reducing_functions) {
+ for (const fn of non_generic) {
exception = false;
try {
- reducing_functions[j].call(array, non_generic[i]);
+ reducing_function.call(array, fn);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
@@ -337,7 +336,7 @@ for (var j = 0; j < reducing_functions.length; j++) {
exception = false;
try {
- reducing_functions[j].call(array, non_generic[i]);
+ reducing_function.call(array, fn);
} catch (e) {
exception = true;
assertTrue(e instanceof TypeError);
diff --git a/deps/v8/test/mjsunit/global-accessors.js b/deps/v8/test/mjsunit/global-accessors.js
index 00658f43a5..a375f96ae8 100644
--- a/deps/v8/test/mjsunit/global-accessors.js
+++ b/deps/v8/test/mjsunit/global-accessors.js
@@ -26,16 +26,14 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test accessors on the global object.
-//
-// Flags: --no-harmony-strict-legacy-accessor-builtins
var x_ = 0;
-__defineSetter__('x', function(x) { x_ = x; });
-__defineGetter__('x', function() { return x_; });
+this.__defineSetter__('x', function(x) { x_ = x; });
+this.__defineGetter__('x', function() { return x_; });
-__defineSetter__('y', function(x) { });
-__defineGetter__('y', function() { return 7; });
+this.__defineSetter__('y', function(x) { });
+this.__defineGetter__('y', function() { return 7; });
function f(a) {
x = x + a;
diff --git a/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js b/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js
index 1ae470a351..9858c1c434 100644
--- a/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js
+++ b/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js
@@ -36,3 +36,13 @@ for (const type of types) {
assertThrows(() => { array.sort({}); }, TypeError);
assertThrows(() => { array.sort(Symbol()); }, TypeError);
}
+
+assertThrows(() => { Array.prototype.sort.call(null, 42); }, TypeError);
+try {
+ Array.prototype.sort.call(null, 42);
+} catch (exception) {
+ assertEquals(
+ 'The comparison function must be either a function or undefined',
+ exception.message
+ );
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/add.js b/deps/v8/test/mjsunit/harmony/bigint/add.js
new file mode 100644
index 0000000000..b57846e7f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/add.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "a2102214b151421124f462d37f843",
+ b: "90f3fa0f2fb9b1481b1a4737586ad6bdf71cb2ae51e06fdcb00fb779163e94ae4237",
+ r: "90f3fa0f2fb9b1481b1a4737586ad6bdf71cb2b872e29127c523d88b6584c1e63a7a"
+}, {
+ a: "35ca28bdd383c1b9ffdb851cc7f385ad370eef3d",
+ b: "-ca2d4dd677f23e005f44ec121303c3c304940eb2fd15e9e88772a3c5ba8515",
+ r: "-ca2d4dd677f23e005f44ebdc48db05ef80d254b32190cd2093ecf68eab95d8"
+}, {
+ a: "-8abb4b6ca534b584fad2f5898dd22ae6",
+ b: "0",
+ r: "-8abb4b6ca534b584fad2f5898dd22ae6"
+}, {
+ a: "b3",
+ b: "4180a0a",
+ r: "4180abd"
+}, {
+ a: "-8de89",
+ b: "c329fbab24d762a9453f90b134fcf5da9777aa1fdb26b74f27583a92a43f0f2c450",
+ r: "c329fbab24d762a9453f90b134fcf5da9777aa1fdb26b74f27583a92a43f0e9e5c7"
+}, {
+ a: "-49af5f350d64c75047dfb107550dae478c983dd520e86c9807b1f5",
+ b: "60a62691669b8c323a29db2eb9cb75ba5811",
+ r: "-49af5f350d64c750477f0ae0c3a712bb5a5e13f9f22ea1224d59e4"
+}, {
+ a: "80bf614aaa1140792099375f7fac9c7046303a8d13086755d505795f38761",
+ b: "-949dc945",
+ r: "80bf614aaa1140792099375f7fac9c7046303a8d13086755d50570155be1c"
+}, {
+ a: "4241d736e6a40",
+ b: "-78e88f5eaeae4ff8b",
+ r: "-78e84d1cd7776954b"
+}, {
+ a: "-8033927bf52210827b99e712fb220631503adfaa4e0045c872b9b",
+ b: "-2f",
+ r: "-8033927bf52210827b99e712fb220631503adfaa4e0045c872bca"
+}, {
+ a: "-3ad8b67efe9",
+ b: "-35586bf43788fd8e313da33c62d9a5",
+ r: "-35586bf43788fd8e314150c7cac98e"
+}, {
+ a: "-a43d8c9af54e8ea545e1af4674613932650c833669c7adc9273b77",
+ b: "-6a4",
+ r: "-a43d8c9af54e8ea545e1af4674613932650c833669c7adc927421b"
+}, {
+ a: "26c178e22dd42280a59b",
+ b: "fba77d85ba082981ce4a1ca21ac8b805b389297dc",
+ r: "fba77d85ba082981ce4a1f0e3256dae2f5b133d77"
+}, {
+ a: "-c9bc2ac82920efc63fa48f63fae105ec432672b50269fad72ee8b44a1",
+ b: "8967d49deeff878f40fa1bf408400b8085820d47b",
+ r: "-c9bc2ac82920efc5b63cbac60be17e5d022c56c0fa29ef56a966a7026"
+}, {
+ a: "815a18c9a2d8c6e5f3fffa958430851c4ea3",
+ b: "59d451c6efad276d3cc393907dda0eca463488958f397bb09",
+ r: "59d451c6efad2f82de502dbe0a486e0a45dde0d8978b409ac"
+}, {
+ a: "8cfc360e8d215045cb5c289a50e5c7fa9da34c0b9d9be9597e6e476efdb121",
+ b: "-482747619f0edd06",
+ r: "8cfc360e8d215045cb5c289a50e5c7fa9da34c0b9d9be9115726e5cfeed41b"
+}, {
+ a: "346337dbb9bbfc08cb815434c50315d32d",
+ b: "-ac569f54f5ea8852463c7542e876a9953",
+ r: "299dcde66a5d5383a71d8ce0967bab39da"
+}, {
+ a: "8bb7c3e56",
+ b: "-c334d52ed6eb903256253e01fc0c5118fe9bc",
+ r: "-c334d52ed6eb903256253e01fc0bc5613ab66"
+}, {
+ a: "b1f444a7a95e6d1d293ff0182e3dd5e945234484a5b47516b5b42627ed54fa8cf1221e",
+ b: "-93b77e906778b7e0a85c07e08babe730edd93ed37adef19da9e76de2add3",
+ r: "b1f444a7a8cab59e98d877604d9579e16497989d74c69bd7e23947364fab131f0e744b"
+}, {
+ a: "9a156524b9",
+ b: "-bce28d1561fc0153b836c6e0969d2674fcb960331cdb55df24e34e4b65136fcb59",
+ r: "-bce28d1561fc0153b836c6e0969d2674fcb960331cdb55df24e34e4acafe0aa6a0"
+}, {
+ a: "5eaf418fbccefb4f53abc413c02cee60eb3880b615c615c2005b3d11c8ee4aaf3b4ded8",
+ b: "-eb8aa4a72cf44f06208",
+ r: "5eaf418fbccefb4f53abc413c02cee60eb3880b615c615c2005a518724471dbaec47cd0"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a + b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: +");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/and.js b/deps/v8/test/mjsunit/harmony/bigint/and.js
new file mode 100644
index 0000000000..e5c3b145d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/and.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "-193b47d0d9a8688b329e80de92195f311825",
+ b: "-2fabed9434bff933e23ea21af0f21a43",
+ r: "-193b6ffbfdbc7cbffbbfe2feb21bfff31a67"
+}, {
+ a: "35979ec99b7ff03f81817ebc9ddd50025d1ccf41565b502f0fc85ec54f630",
+ b: "be96016cc031653c9b1eceb1dd",
+ r: "8c9401648000603c810c44b010"
+}, {
+ a: "-f594f8199c11594681a9c38fd985a03d8c79ce6c8d342809924c89b385af43116ec3a21",
+ b: "-53e7b9738caaecc58fde1b5a4aa9f782f28a04e2bb29d207ccd5d45",
+ r: "-f594f8199c115946d3effbffddafecfd8fffdf7ecfbdff8bf2ce8df3bfafd317eed7f65"
+}, {
+ a: "dd7245d3ca5b360296082e6ca91915179b257f36e45e6e44cf892db875fdcfb19522b3",
+ b: "-dcc83137df3bb234e1144390f6c5bc0772a07f2a4540865554d20ebd37be",
+ r: "dd7245d3ca0336028000044c891801140b013a02e00c4e00c5882d3820a90db1000002"
+}, {
+ a: "-28",
+ b: "eaec4017147fd9741ff3b98f1b6f71d8f3d6869c18b39c6237a6b2d4d2fc3c81e9",
+ r: "eaec4017147fd9741ff3b98f1b6f71d8f3d6869c18b39c6237a6b2d4d2fc3c81c8"
+}, {
+ a: "-223909fc585f36f995d6f72dd9f169df1fad8",
+ b: "b13e919ce59c18c7c0517eecdb2519155cc",
+ r: "80360184a0880042000052240a040000508"
+}, {
+ a: "-fcb4ac9fdc7ee85d03585f944a79b28efffb461e17df2d",
+ b: "13cd27fb49c92d53c567688ab6b9",
+ r: "38023b100492100042160882091"
+}, {
+ a: "-1a16ca8c3725cec0c8a61ce81",
+ b: "-dbf3e",
+ r: "-1a16ca8c3725cec0c8a6dffbe"
+}, {
+ a: "-834db45b67472062091e",
+ b: "5aff66623af6b6cd042a361d5a22aea03152b764a056c71",
+ r: "5aff66623af6b6cd042a361d5a228a2030408304a056460"
+}, {
+ a: "1a8c37cff2e02f5272bc61d60b8301e443c38172446f75d75e01c41f60",
+ b: "e15d12bee18edaca77ad15ff0a567e132bb1b046623858",
+ r: "215012bc61860a8201a401c30052440321911000401840"
+}, {
+ a: "-f463",
+ b: "bb02038e2ff03fa",
+ r: "bb02038e2ff0398"
+}, {
+ a: "3178f92d2eeee1aebc33f085aa96c9046f1133ad6afbd666664ab79625639e001",
+ b: "124d8bd8ea20d8e510ba30d9",
+ r: "20d02406020586010382001"
+}, {
+ a: "fc7aaaa7a52f3604e1e700f01ea6f266912f583bffa78aee08939401056cde",
+ b: "-50e3611d6ada075f432319f10c8192f1de56ead628972",
+ r: "fc7aaaa7a52f3604e0e100e008025202010d4820ef2782c00012900005648e"
+}, {
+ a: "7dea10c67bdf023c00d94643e9f2d38295635b0b2b55a0e40818",
+ b: "8defe4741785c6c2d2ecaf7752a903ed",
+ r: "443e0701380844252082b5500a00008"
+}, {
+ a: "6f837e0ec2d00abb60051299bfd36b58c803f6445f91bb8dded858c6c1c476142",
+ b: "-26746eda5ca5095ab8f315c88b201cfa2affbbb700fc3bba8626b5bfd0a",
+ r: "6f837e08829000a3400400010cc22350c80304440000088d02c04040c14040042"
+}, {
+ a: "ab69c286138358dea4308b60f12f212fcd1e0",
+ b: "-c8735b6ce5513cc661fdae7941055028a2ea768dc13b9c83a79b9bf84e62cdf",
+ r: "aa29c2041181501e84200840602401218d120"
+}, {
+ a: "6c9ec2e7cdb2c1fb630a8e16323138db939c2a21e3576b777d",
+ b: "-51cf93f77a711c00",
+ r: "6c9ec2e7cdb2c1fb630a8e16323138db938c202000050a6400"
+}, {
+ a: "edee34cd0c29ad27fed12e77a42aedbf8b53161716c90d516394b9038a2f125c95",
+ b: "-18a515e3705a582d82f14bd42075b3b",
+ r: "edee34cd0c29ad27fed12e77a42aedbf8b52140600c8085061049003022d100485"
+}, {
+ a: "466fee7dabecbaea71c19892f045d7d196a80c6f",
+ b: "-5c93c7afd552be",
+ r: "466fee7dabecbaea71c19892f001441010280c42"
+}, {
+ a: "-657c587f67a70177797befb96f116c2843",
+ b: "-c3b8e2",
+ r: "-657c587f67a70177797befb96f11efb8e4"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a & b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: &");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
new file mode 100644
index 0000000000..08c94245fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
@@ -0,0 +1,300 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-bigint --noopt
+
+// BigInt.asIntN
+{
+ assertEquals(2, BigInt.asIntN.length);
+}{
+ assertEquals(-1n, BigInt.asIntN(3, 15n));
+ assertEquals(-2n, BigInt.asIntN(3, 14n));
+ assertEquals(-3n, BigInt.asIntN(3, 13n));
+ assertEquals(-4n, BigInt.asIntN(3, 12n));
+ assertEquals(3n, BigInt.asIntN(3, 11n));
+ assertEquals(2n, BigInt.asIntN(3, 10n));
+ assertEquals(1n, BigInt.asIntN(3, 9n));
+ assertEquals(0n, BigInt.asIntN(3, 8n));
+ assertEquals(-1n, BigInt.asIntN(3, 7n));
+ assertEquals(-2n, BigInt.asIntN(3, 6n));
+ assertEquals(-3n, BigInt.asIntN(3, 5n));
+ assertEquals(-4n, BigInt.asIntN(3, 4n));
+ assertEquals(3n, BigInt.asIntN(3, 3n));
+ assertEquals(2n, BigInt.asIntN(3, 2n));
+ assertEquals(1n, BigInt.asIntN(3, 1n));
+ assertEquals(0n, BigInt.asIntN(3, 0n));
+ assertEquals(-1n, BigInt.asIntN(3, -1n));
+ assertEquals(-2n, BigInt.asIntN(3, -2n));
+ assertEquals(-3n, BigInt.asIntN(3, -3n));
+ assertEquals(-4n, BigInt.asIntN(3, -4n));
+ assertEquals(3n, BigInt.asIntN(3, -5n));
+ assertEquals(2n, BigInt.asIntN(3, -6n));
+ assertEquals(1n, BigInt.asIntN(3, -7n));
+ assertEquals(0n, BigInt.asIntN(3, -8n));
+ assertEquals(-1n, BigInt.asIntN(3, -9n));
+ assertEquals(-2n, BigInt.asIntN(3, -10n));
+ assertEquals(-3n, BigInt.asIntN(3, -11n));
+ assertEquals(-4n, BigInt.asIntN(3, -12n));
+ assertEquals(3n, BigInt.asIntN(3, -13n));
+ assertEquals(2n, BigInt.asIntN(3, -14n));
+ assertEquals(1n, BigInt.asIntN(3, -15n));
+}{
+ assertEquals(254n, BigInt.asIntN(10, 254n));
+ assertEquals(255n, BigInt.asIntN(10, 255n));
+ assertEquals(256n, BigInt.asIntN(10, 256n));
+ assertEquals(257n, BigInt.asIntN(10, 257n));
+ assertEquals(510n, BigInt.asIntN(10, 510n));
+ assertEquals(511n, BigInt.asIntN(10, 511n));
+ assertEquals(-512n, BigInt.asIntN(10, 512n));
+ assertEquals(-511n, BigInt.asIntN(10, 513n));
+ assertEquals(-2n, BigInt.asIntN(10, 1022n));
+ assertEquals(-1n, BigInt.asIntN(10, 1023n));
+ assertEquals(0n, BigInt.asIntN(10, 1024n));
+ assertEquals(1n, BigInt.asIntN(10, 1025n));
+}{
+ assertEquals(-254n, BigInt.asIntN(10, -254n));
+ assertEquals(-255n, BigInt.asIntN(10, -255n));
+ assertEquals(-256n, BigInt.asIntN(10, -256n));
+ assertEquals(-257n, BigInt.asIntN(10, -257n));
+ assertEquals(-510n, BigInt.asIntN(10, -510n));
+ assertEquals(-511n, BigInt.asIntN(10, -511n));
+ assertEquals(-512n, BigInt.asIntN(10, -512n));
+ assertEquals(511n, BigInt.asIntN(10, -513n));
+ assertEquals(2n, BigInt.asIntN(10, -1022n));
+ assertEquals(1n, BigInt.asIntN(10, -1023n));
+ assertEquals(0n, BigInt.asIntN(10, -1024n));
+ assertEquals(-1n, BigInt.asIntN(10, -1025n));
+}{
+ assertEquals(0n, BigInt.asIntN(0, 0n));
+ assertEquals(0n, BigInt.asIntN(1, 0n));
+ assertEquals(0n, BigInt.asIntN(16, 0n));
+ assertEquals(0n, BigInt.asIntN(31, 0n));
+ assertEquals(0n, BigInt.asIntN(32, 0n));
+ assertEquals(0n, BigInt.asIntN(33, 0n));
+ assertEquals(0n, BigInt.asIntN(63, 0n));
+ assertEquals(0n, BigInt.asIntN(64, 0n));
+ assertEquals(0n, BigInt.asIntN(65, 0n));
+ assertEquals(0n, BigInt.asIntN(127, 0n));
+ assertEquals(0n, BigInt.asIntN(128, 0n));
+ assertEquals(0n, BigInt.asIntN(129, 0n));
+}{
+ assertEquals(0n, BigInt.asIntN(0, 42n));
+ assertEquals(0n, BigInt.asIntN(1, 42n));
+ assertEquals(42n, BigInt.asIntN(16, 42n));
+ assertEquals(42n, BigInt.asIntN(31, 42n));
+ assertEquals(42n, BigInt.asIntN(32, 42n));
+ assertEquals(42n, BigInt.asIntN(33, 42n));
+ assertEquals(42n, BigInt.asIntN(63, 42n));
+ assertEquals(42n, BigInt.asIntN(64, 42n));
+ assertEquals(42n, BigInt.asIntN(65, 42n));
+ assertEquals(42n, BigInt.asIntN(127, 42n));
+ assertEquals(42n, BigInt.asIntN(128, 42n));
+ assertEquals(42n, BigInt.asIntN(129, 42n));
+}{
+ assertEquals(0n, BigInt.asIntN(0, -42n));
+ assertEquals(0n, BigInt.asIntN(1, -42n));
+ assertEquals(-42n, BigInt.asIntN(16, -42n));
+ assertEquals(-42n, BigInt.asIntN(31, -42n));
+ assertEquals(-42n, BigInt.asIntN(32, -42n));
+ assertEquals(-42n, BigInt.asIntN(33, -42n));
+ assertEquals(-42n, BigInt.asIntN(63, -42n));
+ assertEquals(-42n, BigInt.asIntN(64, -42n));
+ assertEquals(-42n, BigInt.asIntN(65, -42n));
+ assertEquals(-42n, BigInt.asIntN(127, -42n));
+ assertEquals(-42n, BigInt.asIntN(128, -42n));
+ assertEquals(-42n, BigInt.asIntN(129, -42n));
+}{
+ assertEquals(0n, BigInt.asIntN(0, 4294967295n));
+ assertEquals(-1n, BigInt.asIntN(1, 4294967295n));
+ assertEquals(-1n, BigInt.asIntN(16, 4294967295n));
+ assertEquals(-1n, BigInt.asIntN(31, 4294967295n));
+ assertEquals(-1n, BigInt.asIntN(32, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(33, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(63, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(64, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(65, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(127, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(128, 4294967295n));
+ assertEquals(4294967295n, BigInt.asIntN(129, 4294967295n));
+}{
+ assertEquals(0n, BigInt.asIntN(0, -4294967295n));
+ assertEquals(-1n, BigInt.asIntN(1, -4294967295n));
+ assertEquals(1n, BigInt.asIntN(16, -4294967295n));
+ assertEquals(1n, BigInt.asIntN(31, -4294967295n));
+ assertEquals(1n, BigInt.asIntN(32, -4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(33, -4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(63, -4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(64,-4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(65, -4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(127, -4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(128, -4294967295n));
+ assertEquals(-4294967295n, BigInt.asIntN(129, -4294967295n));
+}{
+ assertEquals(42n, BigInt.asIntN(2**32, 42n));
+ assertEquals(4294967295n, BigInt.asIntN(2**32, 4294967295n));
+ assertEquals(4294967296n, BigInt.asIntN(2**32, 4294967296n));
+ assertEquals(4294967297n, BigInt.asIntN(2**32, 4294967297n));
+}{
+ assertThrows(() => BigInt.asIntN(2n, 12n), TypeError);
+ assertThrows(() => BigInt.asIntN(-1, 0n), RangeError);
+ assertThrows(() => BigInt.asIntN(2**53, 0n), RangeError);
+ assertEquals(0n, BigInt.asIntN({}, 12n));
+ assertEquals(0n, BigInt.asIntN(2.9999, 12n));
+ assertEquals(-4n, BigInt.asIntN(3.1234, 12n));
+}{
+ assertThrows(() => BigInt.asIntN(3, 12), TypeError);
+ assertEquals(-4n, BigInt.asIntN(3, "12"));
+}
+
+// BigInt.asUintN
+{
+ assertEquals(2, BigInt.asUintN.length);
+}{
+ assertEquals(7n, BigInt.asUintN(3, 15n));
+ assertEquals(6n, BigInt.asUintN(3, 14n));
+ assertEquals(5n, BigInt.asUintN(3, 13n));
+ assertEquals(4n, BigInt.asUintN(3, 12n));
+ assertEquals(3n, BigInt.asUintN(3, 11n));
+ assertEquals(2n, BigInt.asUintN(3, 10n));
+ assertEquals(1n, BigInt.asUintN(3, 9n));
+ assertEquals(0n, BigInt.asUintN(3, 8n));
+ assertEquals(7n, BigInt.asUintN(3, 7n));
+ assertEquals(6n, BigInt.asUintN(3, 6n));
+ assertEquals(5n, BigInt.asUintN(3, 5n));
+ assertEquals(4n, BigInt.asUintN(3, 4n));
+ assertEquals(3n, BigInt.asUintN(3, 3n));
+ assertEquals(2n, BigInt.asUintN(3, 2n));
+ assertEquals(1n, BigInt.asUintN(3, 1n));
+ assertEquals(0n, BigInt.asUintN(3, 0n));
+ assertEquals(7n, BigInt.asUintN(3, -1n));
+ assertEquals(6n, BigInt.asUintN(3, -2n));
+ assertEquals(5n, BigInt.asUintN(3, -3n));
+ assertEquals(4n, BigInt.asUintN(3, -4n));
+ assertEquals(3n, BigInt.asUintN(3, -5n));
+ assertEquals(2n, BigInt.asUintN(3, -6n));
+ assertEquals(1n, BigInt.asUintN(3, -7n));
+ assertEquals(0n, BigInt.asUintN(3, -8n));
+ assertEquals(7n, BigInt.asUintN(3, -9n));
+ assertEquals(6n, BigInt.asUintN(3, -10n));
+ assertEquals(5n, BigInt.asUintN(3, -11n));
+ assertEquals(4n, BigInt.asUintN(3, -12n));
+ assertEquals(3n, BigInt.asUintN(3, -13n));
+ assertEquals(2n, BigInt.asUintN(3, -14n));
+ assertEquals(1n, BigInt.asUintN(3, -15n));
+}{
+ assertEquals(254n, BigInt.asUintN(10, 254n));
+ assertEquals(255n, BigInt.asUintN(10, 255n));
+ assertEquals(256n, BigInt.asUintN(10, 256n));
+ assertEquals(257n, BigInt.asUintN(10, 257n));
+ assertEquals(510n, BigInt.asUintN(10, 510n));
+ assertEquals(511n, BigInt.asUintN(10, 511n));
+ assertEquals(512n, BigInt.asUintN(10, 512n));
+ assertEquals(513n, BigInt.asUintN(10, 513n));
+ assertEquals(1022n, BigInt.asUintN(10, 1022n));
+ assertEquals(1023n, BigInt.asUintN(10, 1023n));
+ assertEquals(0n, BigInt.asUintN(10, 1024n));
+ assertEquals(1n, BigInt.asUintN(10, 1025n));
+}{
+ assertEquals(1024n - 254n, BigInt.asUintN(10, -254n));
+ assertEquals(1024n - 255n, BigInt.asUintN(10, -255n));
+ assertEquals(1024n - 256n, BigInt.asUintN(10, -256n));
+ assertEquals(1024n - 257n, BigInt.asUintN(10, -257n));
+ assertEquals(1024n - 510n, BigInt.asUintN(10, -510n));
+ assertEquals(1024n - 511n, BigInt.asUintN(10, -511n));
+ assertEquals(1024n - 512n, BigInt.asUintN(10, -512n));
+ assertEquals(1024n - 513n, BigInt.asUintN(10, -513n));
+ assertEquals(1024n - 1022n, BigInt.asUintN(10, -1022n));
+ assertEquals(1024n - 1023n, BigInt.asUintN(10, -1023n));
+ assertEquals(0n, BigInt.asUintN(10, -1024n));
+ assertEquals(1023n, BigInt.asUintN(10, -1025n));
+}{
+ assertEquals(0n, BigInt.asUintN(0, 0n));
+ assertEquals(0n, BigInt.asUintN(1, 0n));
+ assertEquals(0n, BigInt.asUintN(16, 0n));
+ assertEquals(0n, BigInt.asUintN(31, 0n));
+ assertEquals(0n, BigInt.asUintN(32, 0n));
+ assertEquals(0n, BigInt.asUintN(33, 0n));
+ assertEquals(0n, BigInt.asUintN(63, 0n));
+ assertEquals(0n, BigInt.asUintN(64, 0n));
+ assertEquals(0n, BigInt.asUintN(65, 0n));
+ assertEquals(0n, BigInt.asUintN(127, 0n));
+ assertEquals(0n, BigInt.asUintN(128, 0n));
+ assertEquals(0n, BigInt.asUintN(129, 0n));
+}{
+ assertEquals(0n, BigInt.asUintN(0, 42n));
+ assertEquals(0n, BigInt.asUintN(1, 42n));
+ assertEquals(42n, BigInt.asUintN(16, 42n));
+ assertEquals(42n, BigInt.asUintN(31, 42n));
+ assertEquals(42n, BigInt.asUintN(32, 42n));
+ assertEquals(42n, BigInt.asUintN(33, 42n));
+ assertEquals(42n, BigInt.asUintN(63, 42n));
+ assertEquals(42n, BigInt.asUintN(64, 42n));
+ assertEquals(42n, BigInt.asUintN(65, 42n));
+ assertEquals(42n, BigInt.asUintN(127, 42n));
+ assertEquals(42n, BigInt.asUintN(128, 42n));
+ assertEquals(42n, BigInt.asUintN(129, 42n));
+}{
+ assertEquals(0n, BigInt.asUintN(0, -42n));
+ assertEquals(0n, BigInt.asUintN(1, -42n));
+ assertEquals(65536n - 42n, BigInt.asUintN(16, -42n));
+ assertEquals(2147483648n - 42n, BigInt.asUintN(31, -42n));
+ assertEquals(4294967296n - 42n, BigInt.asUintN(32, -42n));
+ assertEquals(8589934592n - 42n, BigInt.asUintN(33, -42n));
+ assertEquals(9223372036854775808n - 42n, BigInt.asUintN(63, -42n));
+ assertEquals(18446744073709551616n - 42n, BigInt.asUintN(64, -42n));
+ assertEquals(36893488147419103232n - 42n, BigInt.asUintN(65, -42n));
+ // TODO(neis): Enable once we have exponentiation.
+ // assertEquals(2n**127n - 42n, BigInt.asUintN(127, -42n));
+ // assertEquals(2n**128n - 42n, BigInt.asUintN(128, -42n));
+ // assertEquals(2n**129n - 42n, BigInt.asUintN(129, -42n));
+}{
+ assertEquals(0n, BigInt.asUintN(0, 4294967295n));
+ assertEquals(1n, BigInt.asUintN(1, 4294967295n));
+ assertEquals(65535n, BigInt.asUintN(16, 4294967295n));
+ assertEquals(2147483647n, BigInt.asUintN(31, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(32, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(33, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(63, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(64, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(65, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(127, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(128, 4294967295n));
+ assertEquals(4294967295n, BigInt.asUintN(129, 4294967295n));
+}{
+ assertEquals(0n, BigInt.asUintN(0, -4294967295n));
+ assertEquals(1n, BigInt.asUintN(1, -4294967295n));
+ assertEquals(1n, BigInt.asUintN(16, -4294967295n));
+ assertEquals(1n, BigInt.asUintN(31, -4294967295n));
+ assertEquals(1n, BigInt.asUintN(32, -4294967295n));
+ assertEquals(8589934592n - 4294967295n, BigInt.asUintN(33, -4294967295n));
+ assertEquals(9223372036854775808n - 4294967295n,
+ BigInt.asUintN(63, -4294967295n));
+ assertEquals(18446744073709551616n - 4294967295n,
+ BigInt.asUintN(64,-4294967295n));
+ assertEquals(36893488147419103232n - 4294967295n,
+ BigInt.asUintN(65, -4294967295n));
+ // TODO(neis): Enable once we have exponentiation.
+ // assertEquals(2n**127n - 4294967295n, BigInt.asUintN(127, -4294967295n));
+ // assertEquals(2n**128n - 4294967295n, BigInt.asUintN(128, -4294967295n));
+ // assertEquals(2n**129n - 4294967295n, BigInt.asUintN(129, -4294967295n));
+}{
+ assertEquals(42n, BigInt.asUintN(2**32, 42n));
+ assertEquals(4294967295n, BigInt.asUintN(2**32, 4294967295n));
+ assertEquals(4294967296n, BigInt.asUintN(2**32, 4294967296n));
+ assertEquals(4294967297n, BigInt.asUintN(2**32, 4294967297n));
+}{
+ assertEquals(
+ BigInt.parseInt("0x7234567812345678"),
+ BigInt.asUintN(63, BigInt.parseInt("0xf234567812345678")));
+}{
+ assertThrows(() => BigInt.asUintN(2n, 12n), TypeError);
+ assertThrows(() => BigInt.asUintN(-1, 0n), RangeError);
+ assertThrows(() => BigInt.asUintN(2**53, 0n), RangeError);
+ assertEquals(0n, BigInt.asUintN({}, 12n));
+ assertEquals(0n, BigInt.asUintN(2.9999, 12n));
+ assertEquals(4n, BigInt.asUintN(3.1234, 12n));
+}{
+ assertThrows(() => BigInt.asUintN(3, 12), TypeError);
+ assertEquals(4n, BigInt.asUintN(3, "12"));
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint.js b/deps/v8/test/mjsunit/harmony/bigint/basics.js
index 4406f12b11..5ea89009a3 100644
--- a/deps/v8/test/mjsunit/harmony/bigint.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/basics.js
@@ -6,6 +6,7 @@
'use strict'
+const minus_one = BigInt(-1);
const zero = BigInt(0);
const another_zero = BigInt(0);
const one = BigInt(1);
@@ -17,14 +18,94 @@ const six = BigInt(6);
// BigInt
{
assertSame(BigInt, BigInt.prototype.constructor)
+}{
+ assertThrows(() => new BigInt, TypeError);
+ assertThrows(() => new BigInt(), TypeError);
+ assertThrows(() => new BigInt(0), TypeError);
+ assertThrows(() => new BigInt(0n), TypeError);
+ assertThrows(() => new BigInt("0"), TypeError);
+}{
+ class C extends BigInt { constructor() { throw 42 } };
+ assertThrowsEquals(() => new C, 42);
+}
+
+// ToBigInt, NumberToBigInt, BigInt
+{
+ assertThrows(() => BigInt(undefined), TypeError);
+ assertThrows(() => BigInt(null), TypeError);
+ assertThrows(() => BigInt({}), SyntaxError);
+ assertThrows(() => BigInt("foo"), SyntaxError);
+
+ assertThrows(() => BigInt("1j"), SyntaxError);
+ assertThrows(() => BigInt("0b1ju"), SyntaxError);
+ assertThrows(() => BigInt("0o1jun"), SyntaxError);
+ assertThrows(() => BigInt("0x1junk"), SyntaxError);
+}{
+ assertSame(BigInt(true), 1n);
+ assertSame(BigInt(false), 0n);
+ assertSame(BigInt(""), 0n);
+ assertSame(BigInt(" 42"), 42n);
+ assertSame(BigInt("0b101010"), 42n);
+ assertSame(BigInt(" 0b101011"), 43n);
+ assertSame(BigInt("0x2a "), 42n);
+ assertSame(BigInt(" 0x2b"), 43n);
+ assertSame(BigInt("0o52"), 42n);
+ assertSame(BigInt(" 0o53\n"), 43n);
+ assertSame(BigInt(-0), 0n);
+ assertSame(BigInt(42), 42n);
+ assertSame(BigInt(42n), 42n);
+ assertSame(BigInt(Object(42n)), 42n);
+ assertSame(BigInt(2**53 - 1), 9007199254740991n);
+ assertSame(BigInt(Object(2**53 - 1)), 9007199254740991n);
+ assertSame(BigInt([]), 0n);
+}{
+ assertThrows(() => BigInt(NaN), RangeError);
+ assertThrows(() => BigInt(-Infinity), RangeError);
+ assertThrows(() => BigInt(+Infinity), RangeError);
+ assertThrows(() => BigInt(4.00000001), RangeError);
+ assertThrows(() => BigInt(Object(4.00000001)), RangeError);
+ assertThrows(() => BigInt(2**53), RangeError);
+ assertThrows(() => BigInt(2**1000), RangeError);
+}
+
+// BigInt.prototype[Symbol.toStringTag]
+{
+ const toStringTag = Object.getOwnPropertyDescriptor(
+ BigInt.prototype, Symbol.toStringTag);
+ assertTrue(toStringTag.configurable);
+ assertFalse(toStringTag.enumerable);
+ assertFalse(toStringTag.writable);
+ assertEquals("BigInt", toStringTag.value);
+}
+
+// Object.prototype.toString
+{
+ const toString = Object.prototype.toString;
+
+ assertEquals("[object BigInt]", toString.call(42n));
+ assertEquals("[object BigInt]", toString.call(Object(42n)));
+
+ delete BigInt.prototype[Symbol.toStringTag];
+ assertEquals("[object Object]", toString.call(42n));
+ assertEquals("[object Object]", toString.call(Object(42n)));
+
+ BigInt.prototype[Symbol.toStringTag] = "foo";
+ assertEquals("[object foo]", toString.call(42n));
+ assertEquals("[object foo]", toString.call(Object(42n)));
}
// typeof
{
assertEquals(typeof zero, "bigint");
assertEquals(typeof one, "bigint");
-}
-{
+}{
+ assertEquals(%Typeof(zero), "bigint");
+ assertEquals(%Typeof(one), "bigint");
+}{
+ assertTrue(typeof 1n === "bigint");
+ assertFalse(typeof 1n === "BigInt");
+ assertFalse(typeof 1 === "bigint");
+}{
// TODO(neis): Enable once --no-opt can be removed.
//
// function Typeof(x) { return typeof x }
@@ -227,8 +308,7 @@ const six = BigInt(6);
assertEquals(Object(zero).valueOf(), another_zero);
assertThrows(() => { return BigInt.prototype.valueOf.call("string"); },
TypeError);
- // TODO(jkummerow): Add tests for (new BigInt(...)).valueOf() when we
- // can construct BigInt wrappers.
+ assertEquals(-42n, Object(-42n).valueOf());
}
// ToBoolean
@@ -240,82 +320,97 @@ const six = BigInt(6);
assertFalse(!one);
assertTrue(!!one);
assertFalse(!!!one);
-}
-
-// Strict equality
-{
- assertTrue(zero === zero);
- assertFalse(zero !== zero);
-
- assertTrue(zero === another_zero);
- assertFalse(zero !== another_zero);
- assertFalse(zero === one);
- assertTrue(zero !== one);
- assertTrue(one !== zero);
- assertFalse(one === zero);
-
- assertFalse(zero === 0);
- assertTrue(zero !== 0);
- assertFalse(0 === zero);
- assertTrue(0 !== zero);
+ // This is a hack to test Object::BooleanValue.
+ assertTrue(%CreateIterResultObject(42, one).done);
+ assertFalse(%CreateIterResultObject(42, zero).done);
}
-// SameValue
+// ToNumber
{
- const obj = Object.defineProperty({}, 'foo',
- {value: zero, writable: false, configurable: false});
+ assertThrows(() => isNaN(zero), TypeError);
+ assertThrows(() => isNaN(one), TypeError);
- assertTrue(Reflect.defineProperty(obj, 'foo', {value: zero}));
- assertTrue(Reflect.defineProperty(obj, 'foo', {value: another_zero}));
- assertFalse(Reflect.defineProperty(obj, 'foo', {value: one}));
+ assertThrows(() => +zero, TypeError);
+ assertThrows(() => +one, TypeError);
}
-
-// SameValueZero
{
- assertTrue([zero].includes(zero));
- assertTrue([zero].includes(another_zero));
-
- assertFalse([zero].includes(+0));
- assertFalse([zero].includes(-0));
+ let Zero = {valueOf() { return zero }};
+ let One = {valueOf() { return one }};
- assertFalse([+0].includes(zero));
- assertFalse([-0].includes(zero));
+ assertThrows(() => isNaN(Zero), TypeError);
+ assertThrows(() => isNaN(One), TypeError);
- assertTrue([one].includes(one));
- assertTrue([one].includes(another_one));
-
- assertFalse([one].includes(1));
- assertFalse([1].includes(one));
+ assertThrows(() => +Zero, TypeError);
+ assertThrows(() => +One, TypeError);
}{
- assertTrue(new Set([zero]).has(zero));
- assertTrue(new Set([zero]).has(another_zero));
+ let Zero = {valueOf() { return Object(NaN) }, toString() { return zero }};
+ let One = {valueOf() { return one }, toString() { return NaN }};
- assertFalse(new Set([zero]).has(+0));
- assertFalse(new Set([zero]).has(-0));
+ assertThrows(() => isNaN(Zero), TypeError);
+ assertThrows(() => isNaN(One), TypeError);
- assertFalse(new Set([+0]).has(zero));
- assertFalse(new Set([-0]).has(zero));
+ assertThrows(() => +Zero, TypeError);
+ assertThrows(() => +One, TypeError);
+}
- assertTrue(new Set([one]).has(one));
- assertTrue(new Set([one]).has(another_one));
-}{
- assertTrue(new Map([[zero, 42]]).has(zero));
- assertTrue(new Map([[zero, 42]]).has(another_zero));
+// ToObject
+{
+ const ToObject = x => (new Function("", "return this")).call(x);
+
+ function test(x) {
+ const X = ToObject(x);
+ assertEquals(typeof x, "bigint");
+ assertEquals(typeof X, 'object');
+ assertEquals(X.constructor, BigInt);
+ assertTrue(X == x);
+ }
- assertFalse(new Map([[zero, 42]]).has(+0));
- assertFalse(new Map([[zero, 42]]).has(-0));
+ test(0n);
+ test(-1n);
+ test(1n);
+ test(2343423423423423423424234234234235234524353453452345324523452345234534n);
+}{
+ function test(x) {
+ const X = Object(x);
+ assertEquals(typeof x, "bigint");
+ assertEquals(typeof X, 'object');
+ assertEquals(X.constructor, BigInt);
+ assertTrue(X == x);
+ }
- assertFalse(new Map([[+0, 42]]).has(zero));
- assertFalse(new Map([[-0, 42]]).has(zero));
+ test(0n);
+ test(-1n);
+ test(1n);
+ test(2343423423423423423424234234234235234524353453452345324523452345234534n);
+}
- assertTrue(new Map([[one, 42]]).has(one));
- assertTrue(new Map([[one, 42]]).has(another_one));
+// Literals
+{
+ // Invalid literals.
+ assertThrows("00n", SyntaxError);
+ assertThrows("01n", SyntaxError);
+ assertThrows("0bn", SyntaxError);
+ assertThrows("0on", SyntaxError);
+ assertThrows("0xn", SyntaxError);
+ assertThrows("1.n", SyntaxError);
+ assertThrows("1.0n", SyntaxError);
+ assertThrows("1e25n", SyntaxError);
+
+ // Various radixes.
+ assertTrue(12345n === BigInt(12345));
+ assertTrue(0xabcden === BigInt(0xabcde));
+ assertTrue(0xAbCdEn === BigInt(0xabcde));
+ assertTrue(0o54321n === BigInt(0o54321));
+ assertTrue(0b1010101n === BigInt(0b1010101));
}
// Binary ops.
{
+ let One = {valueOf() { return one }};
assertTrue(one + two === three);
+ assertTrue(One + two === three);
+ assertTrue(two + One === three);
assertEquals("hello1", "hello" + one);
assertEquals("2hello", two + "hello");
assertThrows("one + 2", TypeError);
@@ -332,6 +427,8 @@ const six = BigInt(6);
assertThrows("2.5 - one", TypeError);
assertTrue(two * three === six);
+ assertTrue(two * One === two);
+ assertTrue(One * two === two);
assertThrows("two * 1", TypeError);
assertThrows("1 * two", TypeError);
assertThrows("two * 1.5", TypeError);
@@ -353,3 +450,146 @@ const six = BigInt(6);
assertThrows("three % zero", RangeError);
assertThrows("three % 0", TypeError);
}
+
+// Bitwise binary ops.
+{
+ let One = {valueOf() { return one }};
+ assertTrue((three & one) === one);
+ assertTrue((BigInt(-2) & zero) === zero);
+ assertTrue((three & One) === one);
+ assertTrue((One & three) === one);
+ assertThrows("three & 1", TypeError);
+ assertThrows("1 & three", TypeError);
+ assertThrows("three & true", TypeError);
+ assertThrows("true & three", TypeError);
+ assertThrows("three & {valueOf: function() { return 1; }}", TypeError);
+ assertThrows("({valueOf: function() { return 1; }}) & three", TypeError);
+
+ assertTrue((two | one) === three);
+ assertThrows("two | 0", TypeError);
+ assertThrows("0 | two", TypeError);
+ assertThrows("two | undefined", TypeError);
+ assertThrows("undefined | two", TypeError);
+
+ assertTrue((three ^ one) === two);
+ assertThrows("three ^ 1", TypeError);
+ assertThrows("1 ^ three", TypeError);
+ assertThrows("three ^ 2.5", TypeError);
+ assertThrows("2.5 ^ three", TypeError);
+}
+
+// Shift ops.
+{
+ assertTrue(one << one === two);
+ assertThrows("one << 1", TypeError);
+ assertThrows("1 << one", TypeError);
+ assertThrows("one << true", TypeError);
+ assertThrows("true << one", TypeError);
+
+ assertTrue(three >> one === one);
+ assertThrows("three >> 1", TypeError);
+ assertThrows("0xbeef >> one", TypeError);
+ assertThrows("three >> 1.5", TypeError);
+ assertThrows("23.45 >> three", TypeError);
+
+ assertThrows("three >>> one", TypeError);
+ assertThrows("three >>> 1", TypeError);
+ assertThrows("0xbeef >>> one", TypeError);
+ assertThrows("three >>> {valueOf: function() { return 1; }}", TypeError);
+ assertThrows("({valueOf: function() { return 1; }}) >>> one", TypeError);
+}
+
+// Unary ops.
+{
+ let One = {valueOf() { return one }};
+ assertTrue(~minus_one === zero);
+ assertTrue(-minus_one === one);
+ assertTrue(-One === minus_one);
+ assertTrue(~~two === two);
+ assertTrue(-(-two) === two);
+ assertTrue(~One === BigInt(-2));
+
+ let a = minus_one;
+ assertTrue(a++ === minus_one);
+ assertTrue(a === zero);
+ assertTrue(a++ === zero);
+ assertTrue(a === one);
+ assertTrue(++a === two);
+ assertTrue(a === two);
+ assertTrue(--a === one);
+ assertTrue(a === one);
+ assertTrue(a-- === one);
+ assertTrue(a === zero);
+ assertTrue(a-- === zero);
+ assertTrue(a === minus_one);
+
+ a = {valueOf() { return minus_one }};
+ assertTrue(a++ === minus_one);
+ assertTrue(a++ === zero);
+ assertTrue(a === one);
+
+ a = {valueOf() { return one }};
+ assertTrue(a-- === one);
+ assertTrue(a-- === zero);
+ assertTrue(a === minus_one);
+}
+
+// ToPropertyKey
+{
+ let obj = {};
+ assertEquals(obj[0n], undefined);
+ assertEquals(obj[0n] = 42, 42);
+ assertEquals(obj[0n], 42);
+ assertEquals(obj[0], 42);
+ obj[0]++;
+ assertEquals(obj[1n - 1n], 43);
+ assertEquals(Reflect.get(obj, -0n), 43);
+ assertEquals(obj[{toString() {return 0n}}], 43);
+ assertEquals(Reflect.ownKeys(obj), ["0"]);
+}{
+ let obj = {};
+ const unsafe = 9007199254740993n;
+ assertEquals(obj[unsafe] = 23, 23);
+ assertEquals(obj[unsafe], 23);
+ assertEquals(Reflect.ownKeys(obj), ["9007199254740993"]);
+ assertEquals(obj[9007199254740993], undefined);
+ delete obj[unsafe];
+ assertEquals(Reflect.ownKeys(obj), []);
+}{
+ let arr = [];
+ assertFalse(4n in arr);
+ arr[4n] = 42;
+ assertTrue(4n in arr);
+ let enumkeys = 0;
+ for (const key in arr) {
+ enumkeys++;
+ assertSame(key, "4");
+ }
+ assertEquals(enumkeys, 1);
+}{
+ let str = "blubb";
+ assertEquals(str[2n], "u");
+ assertThrows(() => str.slice(2n), TypeError);
+}{
+ let obj = {};
+ let key = 0;
+
+ function set_key(x) { obj[key] = x }
+ set_key("aaa");
+ set_key("bbb");
+ key = 0n;
+ set_key("ccc");
+ assertEquals(obj[key], "ccc");
+
+ function get_key() { return obj[key] }
+ assertEquals(get_key(), "ccc");
+ assertEquals(get_key(), "ccc");
+ key = 0;
+ assertEquals(get_key(), "ccc");
+}{
+ assertSame(%ToName(0n), "0");
+ assertSame(%ToName(-0n), "0");
+
+ const unsafe = 9007199254740993n;
+ assertSame(%ToName(unsafe), "9007199254740993");
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
new file mode 100644
index 0000000000..7be5eb7ee5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
@@ -0,0 +1,525 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint --no-opt
+
+'use strict'
+
+const minus_one = BigInt(-1);
+const zero = BigInt(0);
+const another_zero = BigInt(0);
+const one = BigInt(1);
+const another_one = BigInt(1);
+const two = BigInt(2);
+const three = BigInt(3);
+const six = BigInt(6);
+
+
+// Strict equality
+{
+ assertTrue(zero === zero);
+ assertFalse(zero !== zero);
+
+ assertTrue(zero === another_zero);
+ assertFalse(zero !== another_zero);
+
+ assertFalse(zero === one);
+ assertTrue(zero !== one);
+ assertTrue(one !== zero);
+ assertFalse(one === zero);
+
+ assertFalse(zero === 0);
+ assertTrue(zero !== 0);
+ assertFalse(0 === zero);
+ assertTrue(0 !== zero);
+}{
+ assertTrue(%StrictEqual(zero, zero));
+ assertFalse(%StrictNotEqual(zero, zero));
+
+ assertTrue(%StrictEqual(zero, another_zero));
+ assertFalse(%StrictNotEqual(zero, another_zero));
+
+ assertFalse(%StrictEqual(zero, one));
+ assertTrue(%StrictNotEqual(zero, one));
+ assertTrue(%StrictNotEqual(one, zero));
+ assertFalse(%StrictEqual(one, zero));
+
+ assertFalse(%StrictEqual(zero, 0));
+ assertTrue(%StrictNotEqual(zero, 0));
+ assertFalse(%StrictEqual(0, zero));
+ assertTrue(%StrictNotEqual(0, zero));
+}
+
+// Abstract equality
+{
+ assertTrue(%Equal(zero, zero));
+ assertTrue(%Equal(zero, another_zero));
+ assertFalse(%Equal(zero, one));
+ assertFalse(%Equal(one, zero));
+
+ assertTrue(%Equal(zero, +0));
+ assertTrue(%Equal(zero, -0));
+ assertTrue(%Equal(+0, zero));
+ assertTrue(%Equal(-0, zero));
+
+ assertTrue(%Equal(zero, false));
+ assertTrue(%Equal(one, true));
+ assertFalse(%Equal(zero, true));
+ assertFalse(%Equal(one, false));
+ assertTrue(%Equal(false, zero));
+ assertTrue(%Equal(true, one));
+ assertFalse(%Equal(true, zero));
+ assertFalse(%Equal(false, one));
+
+ assertTrue(%Equal(one, 1));
+ assertTrue(%Equal(one, Number(1)));
+ assertTrue(%Equal(1, one));
+ assertTrue(%Equal(Number(1), one));
+
+ assertTrue(%Equal(minus_one, -1));
+ assertTrue(%Equal(minus_one, Number(-1)));
+ assertTrue(%Equal(-1, minus_one));
+ assertTrue(%Equal(Number(-1), minus_one));
+
+ assertFalse(%Equal(one, -1));
+ assertFalse(%Equal(one, Number(-1)));
+ assertFalse(%Equal(-1, one));
+ assertFalse(%Equal(Number(-1), one));
+
+ assertFalse(%Equal(one, 1.0000000000001));
+ assertFalse(%Equal(1.0000000000001, one));
+
+ assertTrue(%Equal(zero, ""));
+ assertTrue(%Equal("", zero));
+ assertTrue(%Equal(one, "1"));
+ assertTrue(%Equal("1", one));
+ assertFalse(%Equal(one, "a"));
+ assertFalse(%Equal("a", one));
+
+ assertTrue(%Equal(one, {valueOf() { return true }}));
+ assertTrue(%Equal({valueOf() { return true }}, one));
+ assertFalse(%Equal(two, {valueOf() { return true }}));
+ assertFalse(%Equal({valueOf() { return true }}, two));
+
+ assertFalse(%Equal(Symbol(), zero));
+ assertFalse(%Equal(zero, Symbol()));
+}{
+ assertTrue(zero == zero);
+ assertTrue(zero == another_zero);
+ assertFalse(zero == one);
+ assertFalse(one == zero);
+
+ assertTrue(zero == +0);
+ assertTrue(zero == -0);
+ assertTrue(+0 == zero);
+ assertTrue(-0 == zero);
+
+ assertTrue(zero == false);
+ assertTrue(one == true);
+ assertFalse(zero == true);
+ assertFalse(one == false);
+ assertTrue(false == zero);
+ assertTrue(true == one);
+ assertFalse(true == zero);
+ assertFalse(false == one);
+
+ assertTrue(one == 1);
+ assertTrue(one == Number(1));
+ assertTrue(1 == one);
+ assertTrue(Number(1) == one);
+
+ assertTrue(minus_one == -1);
+ assertTrue(minus_one == Number(-1));
+ assertTrue(-1 == minus_one);
+ assertTrue(Number(-1) == minus_one);
+
+ assertFalse(one == -1);
+ assertFalse(one == Number(-1));
+ assertFalse(-1 == one);
+ assertFalse(Number(-1) == one);
+
+ assertFalse(one == 1.0000000000001);
+ assertFalse(1.0000000000001 == one);
+
+ assertTrue(zero == "");
+ assertTrue("" == zero);
+ assertTrue(zero == " \t\r\n");
+ assertTrue(one == "1");
+ assertTrue("1" == one);
+ assertFalse(" \t\r\n" == one);
+ assertFalse(one == "a");
+ assertFalse("a" == one);
+
+ assertTrue(zero == "00000000000000" + "0");
+
+ assertTrue(one == {valueOf() { return true }});
+ assertTrue({valueOf() { return true }} == one);
+ assertFalse(two == {valueOf() { return true }});
+ assertFalse({valueOf() { return true }} == two);
+
+ assertFalse(Symbol() == zero);
+ assertFalse(zero == Symbol());
+
+ assertTrue(one == "0b1");
+ assertTrue(" 0b1" == one);
+ assertTrue(one == "0o1");
+ assertTrue("0o1 " == one);
+ assertTrue(one == "\n0x1");
+ assertTrue("0x1" == one);
+
+ assertFalse(one == "1j");
+ assertFalse(one == "0b1ju");
+ assertFalse(one == "0o1jun");
+ assertFalse(one == "0x1junk");
+}{
+ assertFalse(%NotEqual(zero, zero));
+ assertFalse(%NotEqual(zero, another_zero));
+ assertTrue(%NotEqual(zero, one));
+ assertTrue(%NotEqual(one, zero));
+
+ assertFalse(%NotEqual(zero, +0));
+ assertFalse(%NotEqual(zero, -0));
+ assertFalse(%NotEqual(+0, zero));
+ assertFalse(%NotEqual(-0, zero));
+
+ assertFalse(%NotEqual(zero, false));
+ assertFalse(%NotEqual(one, true));
+ assertTrue(%NotEqual(zero, true));
+ assertTrue(%NotEqual(one, false));
+ assertFalse(%NotEqual(false, zero));
+ assertFalse(%NotEqual(true, one));
+ assertTrue(%NotEqual(true, zero));
+ assertTrue(%NotEqual(false, one));
+
+ assertFalse(%NotEqual(one, 1));
+ assertFalse(%NotEqual(one, Number(1)));
+ assertFalse(%NotEqual(1, one));
+ assertFalse(%NotEqual(Number(1), one));
+
+ assertFalse(%NotEqual(minus_one, -1));
+ assertFalse(%NotEqual(minus_one, Number(-1)));
+ assertFalse(%NotEqual(-1, minus_one));
+ assertFalse(%NotEqual(Number(-1), minus_one));
+
+ assertTrue(%NotEqual(one, -1));
+ assertTrue(%NotEqual(one, Number(-1)));
+ assertTrue(%NotEqual(-1, one));
+ assertTrue(%NotEqual(Number(-1), one));
+
+ assertTrue(%NotEqual(one, 1.0000000000001));
+ assertTrue(%NotEqual(1.0000000000001, one));
+
+ assertFalse(%NotEqual(zero, ""));
+ assertFalse(%NotEqual("", zero));
+ assertFalse(%NotEqual(one, "1"));
+ assertFalse(%NotEqual("1", one));
+ assertTrue(%NotEqual(one, "a"));
+ assertTrue(%NotEqual("a", one));
+
+ assertFalse(%NotEqual(one, {valueOf() { return true }}));
+ assertFalse(%NotEqual({valueOf() { return true }}, one));
+ assertTrue(%NotEqual(two, {valueOf() { return true }}));
+ assertTrue(%NotEqual({valueOf() { return true }}, two));
+
+ assertTrue(%NotEqual(Symbol(), zero));
+ assertTrue(%NotEqual(zero, Symbol()));
+}{
+ assertFalse(zero != zero);
+ assertFalse(zero != another_zero);
+ assertTrue(zero != one);
+ assertTrue(one != zero);
+
+ assertFalse(zero != +0);
+ assertFalse(zero != -0);
+ assertFalse(+0 != zero);
+ assertFalse(-0 != zero);
+
+ assertFalse(zero != false);
+ assertFalse(one != true);
+ assertTrue(zero != true);
+ assertTrue(one != false);
+ assertFalse(false != zero);
+ assertFalse(true != one);
+ assertTrue(true != zero);
+ assertTrue(false != one);
+
+ assertFalse(one != 1);
+ assertFalse(one != Number(1));
+ assertFalse(1 != one);
+ assertFalse(Number(1) != one);
+
+ assertFalse(minus_one != -1);
+ assertFalse(minus_one != Number(-1));
+ assertFalse(-1 != minus_one);
+ assertFalse(Number(-1) != minus_one);
+
+ assertTrue(one != -1);
+ assertTrue(one != Number(-1));
+ assertTrue(-1 != one);
+ assertTrue(Number(-1) != one);
+
+ assertTrue(one != 1.0000000000001);
+ assertTrue(1.0000000000001 != one);
+
+ assertFalse(zero != "");
+ assertFalse("" != zero);
+ assertFalse(one != "1");
+ assertFalse("1" != one);
+ assertTrue(one != "a");
+ assertTrue("a" != one);
+
+ assertFalse(one != {valueOf() { return true }});
+ assertFalse({valueOf() { return true }} != one);
+ assertTrue(two != {valueOf() { return true }});
+ assertTrue({valueOf() { return true }} != two);
+
+ assertTrue(Symbol() != zero);
+ assertTrue(zero != Symbol());
+}
+
+// SameValue
+{
+ assertTrue(Object.is(zero, zero));
+ assertTrue(Object.is(zero, another_zero));
+ assertTrue(Object.is(one, one));
+ assertTrue(Object.is(one, another_one));
+ assertFalse(Object.is(zero, +0));
+ assertFalse(Object.is(zero, -0));
+ assertFalse(Object.is(+0, zero));
+ assertFalse(Object.is(-0, zero));
+ assertFalse(Object.is(zero, one));
+ assertFalse(Object.is(one, minus_one));
+}{
+ const obj = Object.defineProperty({}, 'foo',
+ {value: zero, writable: false, configurable: false});
+
+ assertTrue(Reflect.defineProperty(obj, 'foo', {value: zero}));
+ assertTrue(Reflect.defineProperty(obj, 'foo', {value: another_zero}));
+ assertFalse(Reflect.defineProperty(obj, 'foo', {value: one}));
+}{
+ assertTrue(%SameValue(zero, zero));
+ assertTrue(%SameValue(zero, another_zero));
+
+ assertFalse(%SameValue(zero, +0));
+ assertFalse(%SameValue(zero, -0));
+
+ assertFalse(%SameValue(+0, zero));
+ assertFalse(%SameValue(-0, zero));
+
+ assertTrue(%SameValue(one, one));
+ assertTrue(%SameValue(one, another_one));
+}
+
+// SameValueZero
+{
+ assertTrue([zero].includes(zero));
+ assertTrue([zero].includes(another_zero));
+
+ assertFalse([zero].includes(+0));
+ assertFalse([zero].includes(-0));
+
+ assertFalse([+0].includes(zero));
+ assertFalse([-0].includes(zero));
+
+ assertTrue([one].includes(one));
+ assertTrue([one].includes(another_one));
+
+ assertFalse([one].includes(1));
+ assertFalse([1].includes(one));
+}{
+ assertTrue(new Set([zero]).has(zero));
+ assertTrue(new Set([zero]).has(another_zero));
+
+ assertFalse(new Set([zero]).has(+0));
+ assertFalse(new Set([zero]).has(-0));
+
+ assertFalse(new Set([+0]).has(zero));
+ assertFalse(new Set([-0]).has(zero));
+
+ assertTrue(new Set([one]).has(one));
+ assertTrue(new Set([one]).has(another_one));
+}{
+ assertTrue(new Map([[zero, 42]]).has(zero));
+ assertTrue(new Map([[zero, 42]]).has(another_zero));
+
+ assertFalse(new Map([[zero, 42]]).has(+0));
+ assertFalse(new Map([[zero, 42]]).has(-0));
+
+ assertFalse(new Map([[+0, 42]]).has(zero));
+ assertFalse(new Map([[-0, 42]]).has(zero));
+
+ assertTrue(new Map([[one, 42]]).has(one));
+ assertTrue(new Map([[one, 42]]).has(another_one));
+}{
+ assertTrue(%SameValueZero(zero, zero));
+ assertTrue(%SameValueZero(zero, another_zero));
+
+ assertFalse(%SameValueZero(zero, +0));
+ assertFalse(%SameValueZero(zero, -0));
+
+ assertFalse(%SameValueZero(+0, zero));
+ assertFalse(%SameValueZero(-0, zero));
+
+ assertTrue(%SameValueZero(one, one));
+ assertTrue(%SameValueZero(one, another_one));
+}
+
+// Abstract comparison
+{
+ let undef = Symbol();
+
+ assertTrue(%Equal(zero, zero));
+ assertTrue(%GreaterThanOrEqual(zero, zero));
+
+ assertTrue(%LessThan(zero, one));
+ assertTrue(%GreaterThan(one, zero));
+
+ assertTrue(%LessThan(minus_one, one));
+ assertTrue(%GreaterThan(one, minus_one));
+
+ assertTrue(%Equal(zero, -0));
+ assertTrue(%LessThanOrEqual(zero, -0));
+ assertTrue(%GreaterThanOrEqual(zero, -0));
+ assertTrue(%Equal(-0, zero));
+ assertTrue(%LessThanOrEqual(-0, zero));
+ assertTrue(%GreaterThanOrEqual(-0, zero));
+
+ assertTrue(%Equal(zero, 0));
+ assertTrue(%Equal(0, zero));
+
+ assertTrue(%LessThan(minus_one, 1));
+ assertTrue(%GreaterThan(1, minus_one));
+
+ assertFalse(%LessThan(six, NaN));
+ assertFalse(%GreaterThan(six, NaN));
+ assertFalse(%Equal(six, NaN));
+ assertFalse(%LessThan(NaN, six));
+ assertFalse(%GreaterThan(NaN, six));
+ assertFalse(%Equal(NaN, six));
+
+ assertTrue(%LessThan(six, Infinity));
+ assertTrue(%GreaterThan(Infinity, six));
+
+ assertTrue(%GreaterThan(six, -Infinity));
+ assertTrue(%LessThan(-Infinity, six));
+
+ assertTrue(%GreaterThan(six, 5.99999999));
+ assertTrue(%LessThan(5.99999999, six));
+
+ assertTrue(%Equal(zero, ""));
+ assertTrue(%LessThanOrEqual(zero, ""));
+ assertTrue(%GreaterThanOrEqual(zero, ""));
+ assertTrue(%Equal("", zero));
+ assertTrue(%LessThanOrEqual("", zero));
+ assertTrue(%GreaterThanOrEqual("", zero));
+
+ assertTrue(%Equal(minus_one, "\t-1 "));
+ assertTrue(%LessThanOrEqual(minus_one, "\t-1 "));
+ assertTrue(%GreaterThanOrEqual(minus_one, "\t-1 "));
+ assertTrue(%Equal("\t-1 ", minus_one));
+ assertTrue(%LessThanOrEqual("\t-1 ", minus_one));
+ assertTrue(%GreaterThanOrEqual("\t-1 ", minus_one));
+
+ assertFalse(%LessThan(minus_one, "-0x1"));
+ assertFalse(%GreaterThan(minus_one, "-0x1"));
+ assertFalse(%Equal(minus_one, "-0x1"));
+ assertFalse(%LessThan("-0x1", minus_one));
+ assertFalse(%GreaterThan("-0x1", minus_one));
+ assertFalse(%Equal("-0x1", minus_one));
+
+ const unsafe = "9007199254740993"; // 2**53 + 1
+ assertTrue(%GreaterThan(BigInt.parseInt(unsafe), unsafe));
+ assertTrue(%LessThan(unsafe, BigInt.parseInt(unsafe)));
+
+ assertThrows(() => %LessThan(six, Symbol(6)), TypeError);
+ assertThrows(() => %LessThan(Symbol(6), six), TypeError);
+
+ var value_five_string_six = {
+ valueOf() { return Object(5); },
+ toString() { return 6; }
+ };
+ assertTrue(%LessThanOrEqual(six, value_five_string_six));
+ assertTrue(%GreaterThanOrEqual(six, value_five_string_six));
+ assertTrue(%LessThanOrEqual(value_five_string_six, six));
+ assertTrue(%GreaterThanOrEqual(value_five_string_six, six));
+}{
+ assertFalse(zero < zero);
+ assertTrue(zero <= zero);
+
+ assertTrue(zero < one);
+ assertTrue(zero <= one);
+ assertFalse(one < zero);
+ assertFalse(one <= zero);
+
+ assertTrue(minus_one < one);
+ assertTrue(minus_one <= one);
+ assertFalse(one < minus_one);
+ assertFalse(one <= minus_one);
+
+ assertFalse(zero < -0);
+ assertTrue(zero <= -0);
+ assertFalse(-0 < zero);
+ assertTrue(-0 <= zero);
+
+ assertFalse(zero < 0);
+ assertTrue(zero <= 0);
+ assertFalse(0 < zero);
+ assertTrue(0 <= zero);
+
+ assertTrue(minus_one < 1);
+ assertTrue(minus_one <= 1);
+ assertFalse(1 < minus_one);
+ assertFalse(1 <= minus_one);
+
+ assertFalse(six < NaN);
+ assertFalse(six <= NaN);
+ assertFalse(NaN < six);
+ assertFalse(NaN <= six);
+
+ assertTrue(six < Infinity);
+ assertTrue(six <= Infinity);
+ assertFalse(Infinity < six);
+ assertFalse(Infinity <= six);
+
+ assertFalse(six < -Infinity);
+ assertFalse(six <= -Infinity);
+ assertTrue(-Infinity < six);
+ assertTrue(-Infinity <= six);
+
+ assertFalse(six < 5.99999999);
+ assertFalse(six <= 5.99999999);
+ assertTrue(5.99999999 < six);
+ assertTrue(5.99999999 <= six);
+
+ assertFalse(zero < "");
+ assertTrue(zero <= "");
+ assertFalse("" < zero);
+ assertTrue("" <= zero);
+
+ assertFalse(minus_one < "\t-1 ");
+ assertTrue(minus_one <= "\t-1 ");
+ assertFalse("\t-1 " < minus_one);
+ assertTrue("\t-1 " <= minus_one);
+
+ assertFalse(minus_one < "-0x1");
+ assertFalse(minus_one <= "-0x1");
+ assertFalse("-0x1" < minus_one);
+ assertFalse("-0x1" <= minus_one);
+
+ const unsafe = "9007199254740993"; // 2**53 + 1
+ assertFalse(BigInt.parseInt(unsafe) < unsafe);
+ assertFalse(BigInt.parseInt(unsafe) <= unsafe);
+ assertTrue(unsafe < BigInt.parseInt(unsafe));
+ assertTrue(unsafe <= BigInt.parseInt(unsafe));
+
+ assertThrows(() => six < Symbol(6), TypeError);
+ assertThrows(() => six <= Symbol(6), TypeError);
+ assertThrows(() => Symbol(6) < six, TypeError);
+ assertThrows(() => Symbol(6) <= six, TypeError);
+
+ assertFalse(six < {valueOf() {return Object(5)}, toString() {return 6}});
+ assertTrue(six <= {valueOf() {return Object(5)}, toString() {return 6}});
+ assertFalse({valueOf() {return Object(5)}, toString() {return 6}} < six);
+ assertTrue({valueOf() {return Object(5)}, toString() {return 6}} <= six);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/dec.js b/deps/v8/test/mjsunit/harmony/bigint/dec.js
new file mode 100644
index 0000000000..cdf1d96d60
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/dec.js
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
+// Flags: --noopt
+
+var data = [{
+ a: "-609648ccf253976b12f6b6c8e20790c17ef6b89ea9f536267783607cf465b1ca",
+ r: "-609648ccf253976b12f6b6c8e20790c17ef6b89ea9f536267783607cf465b1cb"
+}, {
+ a: "-6e4c39cdd2c666e32cf2fd3c53a20eeb725e7578af97d42",
+ r: "-6e4c39cdd2c666e32cf2fd3c53a20eeb725e7578af97d43"
+}, {
+ a: "34c93e1c",
+ r: "34c93e1b"
+}, {
+ a: "-db3032",
+ r: "-db3033"
+}, {
+ a: "8e658ffacbefbdec5",
+ r: "8e658ffacbefbdec4"
+}, {
+ a: "-d321033ec94d6a75f",
+ r: "-d321033ec94d6a760"
+}, {
+ a: "-286017f718d6118b581ec4357e456ce6d12c01aed9a32ff0cc048d",
+ r: "-286017f718d6118b581ec4357e456ce6d12c01aed9a32ff0cc048e"
+}, {
+ a: "c0",
+ r: "bf"
+}, {
+ a: "9f9577e008a6f46f7709f71362176ebe23d19eb9e58a41de6f2631b18f2ca",
+ r: "9f9577e008a6f46f7709f71362176ebe23d19eb9e58a41de6f2631b18f2c9"
+}, {
+ a: "-9d4294590df0aa8ea46a5c2a3d186a6afcc00c6ebb072752",
+ r: "-9d4294590df0aa8ea46a5c2a3d186a6afcc00c6ebb072753"
+}, {
+ a: "-4bc2aed1641151db908c0eb21aa46d8b406803dc0f71d66671322d59babf10c2",
+ r: "-4bc2aed1641151db908c0eb21aa46d8b406803dc0f71d66671322d59babf10c3"
+}, {
+ a: "-1dfb3929632fbba39f60cabdc27",
+ r: "-1dfb3929632fbba39f60cabdc28"
+}, {
+ a: "c0d409943c093aec43ba99a33ef2bb54574ecdc7cccf6547ab44eafb27",
+ r: "c0d409943c093aec43ba99a33ef2bb54574ecdc7cccf6547ab44eafb26"
+}, {
+ a: "3d148dcffe94f859c80b38c4",
+ r: "3d148dcffe94f859c80b38c3"
+}, {
+ a: "0",
+ r: "-1"
+}, {
+ a: "d659f6507e0ac2e653bdb7c3fb38c1514dd33619a9a0c87fcb69b22",
+ r: "d659f6507e0ac2e653bdb7c3fb38c1514dd33619a9a0c87fcb69b21"
+}, {
+ a: "14efe",
+ r: "14efd"
+}, {
+ a: "-f2df301948cd17ff391a6589a67551c00679687ba5",
+ r: "-f2df301948cd17ff391a6589a67551c00679687ba6"
+}, {
+ a: "-e",
+ r: "-f"
+}, {
+ a: "-a09cf77fea7af1767695c978af13fdb62f4f040b6fb803625fb124cc99139cddadd",
+ r: "-a09cf77fea7af1767695c978af13fdb62f4f040b6fb803625fb124cc99139cddade"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var r = --a;
+ if (d.r !== r.toString(16)) {
+ print("Input: " + a.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/div.js b/deps/v8/test/mjsunit/harmony/bigint/div.js
new file mode 100644
index 0000000000..771a0c7fda
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/div.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "c0bb18527ab19a800932cc14c553c69688372809dde38f095fdb99478a7eba",
+ b: "8137ebc482f361a3e43fb80fe2ba509caa3f0acc62be0d2",
+ r: "17dd3b8ecbdd71a4"
+}, {
+ a: "10c5163e874c786fcfcb48bbb0ccf8d6c66f480b76332194",
+ b: "4e462b2",
+ r: "36d8ca96869a7226d456bff9b5ed3effdcaaf6f83"
+}, {
+ a: "-677f8c8668fcc826129f2724a5b88a6225dd19959810b5254afbc0",
+ b: "-2bf8ff1c49636",
+ r: "25a8bd406d721554f45ad37cc691d58821ad163f3a"
+}, {
+ a: "e80276ba7d416f4f7e5f8917529eb204cc7ed08413261e73aa23f169eb46e7162",
+ b: "-c884f07a",
+ r: "-12834073e7cbc44804a99cb2778fb82b098645946d5e4f5e6c47c49267"
+}, {
+ a: "-f9a51f64ce995e0a1c7f8369a573dae2533bc3df801edbb79235d41502e1ce",
+ b: "-f33bf",
+ r: "106bf522569501704f9700d6bfcd203a606e63725e339e92ff931f568f"
+}, {
+ a: "-6c9ae08dfa5b11c29ae30e53c108d915e0e87f1a8ca82f42d23a53b08895",
+ b: "-43fd79afe1ae6a4994ee7dfc2c89453d6b6",
+ r: "198ecd667e8cca17c2839b38fe"
+}, {
+ a: "-d20a2f7074ecbc776b64c2e04ff007e194b8cdd106b2be2e78d752f2d16e9",
+ b: "43eb8ae7d7f9be6d77e3be696ffef",
+ r: "-317ab1a09014950e1c53b9e3e8f44873c"
+}, {
+ a: "-999e0f75f2f652d403840cea3536b1a522433a331dace7c39d7990b993908fdd6",
+ b: "cea1de74020232c6c4bedca49d63f140c",
+ r: "-be51ac62c073e7d7cb9e43fc12436bfe"
+}, {
+ a: "1b49ac0c323436294f1471a3f00feb197b9b42549c3",
+ b: "5a7e30888a22fbbcecfe2a0b2e5eca30",
+ r: "4d321e8fe1c"
+}, {
+ a: "-689adf0d332ed26c63f8f361d5cb66056321b683ce87c60567df7305396f20d6ab8",
+ b: "82f3b92f5bb57811fdb66f519",
+ r: "-cc7e64b9ef239ac4c31b6ae3687aa181bda7657222"
+}, {
+ a: "-6fecb457f9e05c15b7fe038494c25053938d4747a01577cf2dc939a21e",
+ b: "b7cb6ce62c1d7483d0b675109f5782a1ad19",
+ r: "-9be52d2e94716bb441dbcd"
+}, {
+ a: "f96f8276f314d37657ce7774bc539198ee84fcec8a53cbb3d36ad81040b715",
+ b: "-891c9a0dd99b02",
+ r: "-1d1b803e3484173c0d2e8add5042117eaa27c853c222f5fdd"
+}, {
+ a: "-d2c9a9068d57f2ebc9ad432b48b4135f2a911519d4b791",
+ b: "-a34d124b5b4825d314683098d0",
+ r: "14a715b96671307438926"
+}, {
+ a: "-59593d55f61c1c739601e15624fe2e61592fe6abf1ecb238c9f8e1a7bded9c1bd1c",
+ b: "f067a5a155fc894b0f7f0a939f1772c4d135a",
+ r: "-5f250867331e7eff64dafcd58e9922"
+}, {
+ a: "2c0a3172494623013ba14d01433ad38167f365765b1c0ca610",
+ b: "ffe8",
+ r: "2c0e52ca0c37483000215020c64d6ac36a455bf6fa4382"
+}, {
+ a: "-a9cde7cdfe56eb8bd6777543ae714fadac2d97a394d8e9e8",
+ b: "104b0d7135d3d2",
+ r: "-a6bfb9031e60b5bc4b20e814cabfd80bd27"
+}, {
+ a: "6216ceed0d221476bfba7701f8297af56a4e66d003f8165b",
+ b: "bfcaaf8676ad6a",
+ r: "82ed6efc83669b0bc476bdd717dcfb6f10"
+}, {
+ a: "-707752a899efbe8989d205293535a404a6afb39cf21ce1274",
+ b: "-3a24848be1024a6ea901f",
+ r: "1ef2f2a8c0ad85ff4e6e6afdd3966"
+}, {
+ a: "-e662fb8c46f979ff3b3f576fffd5c51cf70071ab61168b2eafee4708af",
+ b: "964c4dd019b9a543df0cd1830a90428ec84ec04f8165283",
+ r: "-18869d87ce54"
+}, {
+ a: "b4403dee3a192009a5aae85c74efdb52b66dee1be6befa66f708ca9bf6b7",
+ b: "-36db9f9f6e",
+ r: "-34928ce65c2b71a6b6ce351838d4263d91ff2bceecec7a91441"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a / b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: /");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/inc.js b/deps/v8/test/mjsunit/harmony/bigint/inc.js
new file mode 100644
index 0000000000..2773ed9110
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/inc.js
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
+// Flags: --noopt
+
+var data = [{
+ a: "-989c298c6fc3",
+ r: "-989c298c6fc2"
+}, {
+ a: "bff2c86c449a56",
+ r: "bff2c86c449a57"
+}, {
+ a: "-6fb15264369b63e3b92d6f74458140d4b62a56ecbfca000492b6b8a0c56c651ebaecdd1",
+ r: "-6fb15264369b63e3b92d6f74458140d4b62a56ecbfca000492b6b8a0c56c651ebaecdd0"
+}, {
+ a: "e161f6284b91fa2646dc0f162c575bb8a0d0f5bfee26748b7181413567",
+ r: "e161f6284b91fa2646dc0f162c575bb8a0d0f5bfee26748b7181413568"
+}, {
+ a: "-4d073653cc812",
+ r: "-4d073653cc811"
+}, {
+ a: "-ce31549364717dea9d1bf30baed642f",
+ r: "-ce31549364717dea9d1bf30baed642e"
+}, {
+ a: "2ae123a62361f1de2cc5ca9cfd9658f47d",
+ r: "2ae123a62361f1de2cc5ca9cfd9658f47e"
+}, {
+ a: "-4820298153b7bbd86337ad72e0d1ac7448de99bc6ce4c43c2",
+ r: "-4820298153b7bbd86337ad72e0d1ac7448de99bc6ce4c43c1"
+}, {
+ a: "2e",
+ r: "2f"
+}, {
+ a: "-8f3b598ac2ab8a78a2d3e1f7ab1124b05a830aa1261bf57d8de2a",
+ r: "-8f3b598ac2ab8a78a2d3e1f7ab1124b05a830aa1261bf57d8de29"
+}, {
+ a: "-5c070fdee0d3f4a9adc63",
+ r: "-5c070fdee0d3f4a9adc62"
+}, {
+ a: "-3700cd6a6d1e68de1",
+ r: "-3700cd6a6d1e68de0"
+}, {
+ a: "56c68c",
+ r: "56c68d"
+}, {
+ a: "-1ab894376fcf0dab9c",
+ r: "-1ab894376fcf0dab9b"
+}, {
+ a: "-937dcf37c57588e55260c3eea20318",
+ r: "-937dcf37c57588e55260c3eea20317"
+}, {
+ a: "-f8ee63b438580a8915baf84edcfd0688247905e593e153644a88761bab0",
+ r: "-f8ee63b438580a8915baf84edcfd0688247905e593e153644a88761baaf"
+}, {
+ a: "-c63d4353ddf575bf02",
+ r: "-c63d4353ddf575bf01"
+}, {
+ a: "-bef439cc990a8b1d69b80fa7b3c703ba2",
+ r: "-bef439cc990a8b1d69b80fa7b3c703ba1"
+}, {
+ a: "6007edbd20b0ba90d86cf80e871e898c8907bb37",
+ r: "6007edbd20b0ba90d86cf80e871e898c8907bb38"
+}, {
+ a: "-692b36b5e874a448415678ac2b4d0a61c75bdc1674ae14d753cad7d46846",
+ r: "-692b36b5e874a448415678ac2b4d0a61c75bdc1674ae14d753cad7d46845"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var r = ++a;
+ if (d.r !== r.toString(16)) {
+ print("Input: " + a.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/json.js b/deps/v8/test/mjsunit/harmony/bigint/json.js
new file mode 100644
index 0000000000..10afdfce02
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/json.js
@@ -0,0 +1,81 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint --no-opt
+
+'use strict'
+
+
+// Without .toJSON method.
+
+assertEquals(undefined, BigInt.prototype.toJSON);
+assertThrows(() => JSON.stringify(42n), TypeError);
+assertThrows(() => JSON.stringify(Object(42n)), TypeError);
+
+
+// With .toJSON method that returns a string.
+
+BigInt.prototype.toJSON = function() {
+ assertEquals("bigint", typeof this);
+ return String(this);
+}
+assertEquals("\"42\"", JSON.stringify(42n));
+
+BigInt.prototype.toJSON = function() {
+ assertEquals("object", typeof this);
+ return String(this);
+}
+assertEquals("\"42\"", JSON.stringify(Object(42n)));
+
+
+// With .toJSON method that returns a BigInt primitive.
+
+BigInt.prototype.toJSON = function() {return this};
+assertThrows(() => JSON.stringify(42n), TypeError);
+assertThrows(() => JSON.stringify(Object(42n)), TypeError);
+
+
+// With .toJSON method that returns a BigInt object.
+
+BigInt.prototype.toJSON = function() {return Object(this)};
+assertThrows(() => JSON.stringify(42n), TypeError);
+assertThrows(() => JSON.stringify(Object(42n)), TypeError);
+
+
+// Without .toJSON method but with a replacer returning a string.
+
+delete BigInt.prototype.toJSON;
+let replacer;
+
+replacer = function(k, v) {
+ assertEquals("bigint", typeof v);
+ assertTrue(42n == v);
+ return "43";
+}
+assertEquals("\"43\"", JSON.stringify(42n, replacer));
+
+replacer = function(k, v) {
+ assertEquals("object", typeof v);
+ assertTrue(42n == v);
+ return "43";
+}
+assertEquals("\"43\"", JSON.stringify(Object(42n), replacer));
+
+
+// Without .toJSON method but with a replacer returning a BigInt primitive.
+
+assertEquals(undefined, BigInt.prototype.toJSON);
+
+replacer = () => 43n;
+assertThrows(() => JSON.stringify(42n, replacer), TypeError);
+assertThrows(() => JSON.stringify(Object(42n), replacer), TypeError);
+
+
+// Without .toJSON method but with a replacer returning a BigInt object.
+
+assertEquals(undefined, BigInt.prototype.toJSON);
+
+replacer = () => Object(43n);
+assertThrows(() => JSON.stringify(42n, replacer), TypeError);
+assertThrows(() => JSON.stringify(Object(42n), replacer), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/mod.js b/deps/v8/test/mjsunit/harmony/bigint/mod.js
new file mode 100644
index 0000000000..c310e2d3b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/mod.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "-67c2a5ba4221c048b64ecca6dd979391c44296b8b87ce97584ebab03be18",
+ b: "-bebdbd9b278d05704416fd87d09aece9b69f98843",
+ r: "-8794ba5d9204eb8adfb5e44c11a65fe5af057e6a4"
+}, {
+ a: "-843cfbe576106b8e7c0aabec2cde402e7f76a711dbbf6ae79d6d932e22cf",
+ b: "553d800ffcf69f80026",
+ r: "-272250d344908c21525"
+}, {
+ a: "-b5fc9e8e21fbad798bcd3571ab169dd5e2059d1a9aa4d1bf44fe18da725b",
+ b: "-da5d287414fbb2b4b",
+ r: "-2610eb2277e01a758"
+}, {
+ a: "a9c92d42b53a6fbf0824f5b7bd9ed1cab1f9419d7ec2b50f192abfa6d620d53",
+ b: "9de30592e1fe27d26afe06bbb4781c77ef58418",
+ r: "9a67af5868a619a48883aed4c0d0a876e2ce143"
+}, {
+ a: "ffa271b138163a779c89f17aa720490854520120b1307ef",
+ b: "ab27ef49f3a00085981a5bc1c13530aec35e29c",
+ r: "4c49a63b98d2dd7a33a843f98ba82b54909e337"
+}, {
+ a: "d43f739464bcc8643dfaa807cbbe1157189e33368dd19b800db3682bb0dcb73",
+ b: "bf66b1dc93054920039f9b3eba688d9",
+ r: "bb2f3000959d66b3962d755d141c3ac"
+}, {
+ a: "111ae5ba62ec37fd157ef531195363c5c8ace4427ff58811746af94ab8",
+ b: "58112d0d88f3c8722d1e28942e8949c433c4619b451",
+ r: "283276aa2b6316ca2146b1e6bcc8aa8de4b5446847a"
+}, {
+ a: "-4609a1c7d563bbdb49fd01e05031395e6e06b78407e440",
+ b: "-75ee71f85344163bae2ba0e438",
+ r: "-68b517938971cb261babea1cf8"
+}, {
+ a: "292cd2c5eb3e80942066c5af8bfdaf8d1d091f61f3005d226318",
+ b: "5",
+ r: "3"
+}, {
+ a: "-a9f8c03f06200b54959a2ced325090be25417f0bf1274d",
+ b: "13eb3a0c772fc98e537883a3ef72e1ee37b249ee17c0510",
+ r: "-a9f8c03f06200b54959a2ced325090be25417f0bf1274d"
+}, {
+ a: "c4ddadae9abee150068fe6536c6b2fe229070410da61a09abe1a7270b",
+ b: "5f4d55e3345e37fe8887f3ca5e",
+ r: "42eea65dd545aacdcd250b505b"
+}, {
+ a: "-7a73d61e639dacedd207dfe1edc630b1dfda9078489a7f0cf79dcdfcbf3992efc13861f",
+ b: "-c0f2b9045bb3865d89cc0c9920c3ccfae382c250",
+ r: "-6bdc3d5943d7a35e4ecbc5c8deb335ca2c3bbc0f"
+}, {
+ a: "-a9ac4a2e055f22c8ba7956ffca5457a71412eb74d3a180555bb25ce5096e23d6c619",
+ b: "-f3c",
+ r: "-a5d"
+}, {
+ a: "-b16cfd7fbbf820afc77be1590fd9802ecd12059238b98bb96d9d215af4808",
+ b: "2c288a9de167",
+ r: "-2b3901c1b8d1"
+}, {
+ a: "-df12dd3e56dc3c3dd769ad964f8356a5860177f1b4a3b95acc75",
+ b: "4ec6f5474b18",
+ r: "-17bf0980582d"
+}, {
+ a: "1d197bf4aa09a02760cb004e9edf25e6591ae14d92b6cbf1349ea1c040d66",
+ b: "-23ebacc5f380e5649a1234c3ed050472569cbcd056",
+ r: "20e5e588b4861be3ec7b4005a6a50566e60a3a4364"
+}, {
+ a: "fab19b7e774bf33bb42a7af90d8dc75cbc927e3225003610c05b117c25c90944d",
+ b: "-3b433469282a54d46cac",
+ r: "10eda9e98b721b4a2505"
+}, {
+ a: "5b19514660782d3a2429d7791659868abb9d8fc96077247",
+ b: "-59188be60",
+ r: "2340c3607"
+}, {
+ a: "-2884ed1401b5e976be7dc1faf7bffb632c808649fa75ab458cc66ef4e75",
+ b: "bf",
+ r: "-40"
+}, {
+ a: "-4c9cdf26be1797e54480ab20797e35d04941c11ff78b040c00099422cec",
+ b: "ee20f34835529f4a73",
+ r: "-e24db40426d47f968b"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a % b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: %");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/mul.js b/deps/v8/test/mjsunit/harmony/bigint/mul.js
new file mode 100644
index 0000000000..83a6700768
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/mul.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "-be5e349bf5ad48e7a5d",
+ b: "a47a19a48667620a82973579739e08c64651b3ede2c578ff975e",
+ r: "-7a4f25aa8725368922062fc91a110352a660657ffb3a489126ded10ccd29d929120c926"
+}, {
+ a: "912d5d85aec5ec6caf90514f0ec005f711caac82960c0ee911ef30f145107ae236783e6",
+ b: "0",
+ r: "0"
+}, {
+ a: "-87c17f6930bdef7e7c7ae7dab8be3baa2e7d67",
+ b: "285af6503c42d077a0d59558deb170bad",
+ r: "-1566788db7747e33ddb6540190617b57856e5f3924447f9f400f34a9aa75aa7d20f2b9b"
+}, {
+ a: "-3d0fd72abc5fb82d5cf45679812b71",
+ b: "-3c537c742e17c5fac136e33f5e7edf256ef52b6df",
+ r: "e63a0402966391e13e600371183f223379b66912ee706a2b369668b3e33e9a15992d6f"
+}, {
+ a: "dfd77cf4b0d071df2487c9b77959ee027380a159b0",
+ b: "-56300fb05cba28d5150892ff66e77",
+ r: "-4b5c6a1976d3a318cb9f93e154dc362da4e740fa31d82baefead60c355bfea99eaa50d0"
+}, {
+ a: "-bea52261387bb2aaf8c61b5ee7bbf85b3bbbedfe60773ed9873cbceba078d3fecbb",
+ b: "-2ad7",
+ r: "1fe73453d2eabb331a676ede8eb9759c8c6bffe09c76947578e08b0152379841d867f0d"
+}, {
+ a: "5b5a3a047d8f7e1519d6d92d3241c0f32c4d789",
+ b: "-4bd3e8c09b0ba71bc25416877a4c7135",
+ r: "-1b0f0d6ba20fe60049c4a172e8b1fb9824c1e85e21f1ebe08556b7074d8d4f4ac90185d"
+}, {
+ a: "1b2c8263d4bac8cc825657f81fca6196799aff00fa960d5c04",
+ b: "-6b140fca30c8037b18d88",
+ r: "-b5dbba6fba700592408e2ebbba2d4c6557d61d1201e24314f690e77c0b59c68d8b1620"
+}, {
+ a: "dd74f1f92ab5b9e0b447b7fe9076f7",
+ b: "-9dbb0acb24e6336fe4f37f8e942d1d5c22dfe4f34",
+ r: "-88728628dd4ed79514e720448e4f9ad422ba6ca62cd61bf779cfb3cb12afb5bbe20632c"
+}, {
+ a: "-e97d2712832aa20a",
+ b: "-cb98c0fa9b4b35fc7a4ebed6d3d106bb758c244eb756c75587300ad",
+ r: "b9b1904d502a19d20cc7bd8e05670f667db817bb9104ef8acc747f3df6541eede4d80c2"
+}, {
+ a: "828ca2d8981f347f4bec14ba",
+ b: "-8ce5b6b1c329477a0a728ed81331af1f03e3eaa1ccb2cb3",
+ r: "-47da0e06c179e58b3e5d8abc4a2b274ede3a7d73c0b5fcb3690f0b544ed6c0b7120760e"
+}, {
+ a: "aade382f2483a571e12cb1796bd124e21c6014261cef5d733a1b35fb01db7232c5b",
+ b: "-d34a",
+ r: "-8d068c8b18b90f6aaf82ed3260f68bdb06bf64613cdda736fc33395b7d16688edb9d34e"
+}, {
+ a: "34188043594e64ac10b37923ad29b4b536a4098ce76c9133155820b7843de95af",
+ b: "106952",
+ r: "356f6c0aa4a1a0d27dd2e8867420eddcd9fb5614450d6bb85cba0071e03e4563a0b90e"
+}, {
+ a: "96b2a125fc041fe36aebe7dfd02a4f56b90",
+ b: "-a9f9c57a732002abf0764868f297442f61d1",
+ r: "-640ef4605c38f6643d60526833e7a3b7b71a72b7042434abc0ca600e7f79b3aa98e6090"
+}, {
+ a: "-ae8327a77a340d2c90c93bb0ebe02d7a77d7c2a4e8d21a21d0",
+ b: "268a49068f2769f949d95",
+ r: "-1a45bc5e1b636b28bf716dfeaf0599cd932270796233adf7d58b8d72d06ac580c323e10"
+}, {
+ a: "-a0adcb953540a4f25fc97a40a2c9285bfba7e70e4ffc",
+ b: "fff2302a069e8f69d00e529a81b",
+ r: "-a0a5204f09eeb3ecd9fe2c6de34d0e0ed70c6d1589d95fd83de33d921a3152dfe5bcf94"
+}, {
+ a: "-83771467194c5b612539be7228f366a8",
+ b: "9412ce8f98510486287bc15c35883fff04d126e",
+ r: "-4c0a803e9a079742969e01dbb7990566b2f5ac9658653c967b5d295f6a996ba1655ec30"
+}, {
+ a: "12c3327d3d7ed4b3180cc301f98d351804451be431137fa48aa67627db867a2cd",
+ b: "-ee0af6",
+ r: "-11724697fa94a9caafbee6e7b778ecae17ed7ebba5575296b1fc4995a12fe5c9a8872fe"
+}, {
+ a: "-5",
+ b: "-a00c8e774e3d4a6fc2fa384382720700e49e3e4b882bb5c4c0dbe4cdcd92126731128b",
+ r: "3203ec8548732742ecee319518c3a230477173779a8da8cd7c44b780503da5c03f55cb7"
+}, {
+ a: "a0a4dc9610ada50dfa633ad910a02aa20c85",
+ b: "-4d7aa1dc1cc1d1767b4e25a839e7c177652",
+ r: "-309e8a7c10fbc6b50f6ad012099765a35395b9d51112d50e0a8f3ac076942a9e5a0509a"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a * b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: *");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/neg.js b/deps/v8/test/mjsunit/harmony/bigint/neg.js
new file mode 100644
index 0000000000..75548f62c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/neg.js
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
+// Flags: --noopt
+
+var data = [{
+ a: "58ad59aa3aa9d04d4c12493966e204ef0500d5f92ecb31",
+ r: "-58ad59aa3aa9d04d4c12493966e204ef0500d5f92ecb31"
+}, {
+ a: "6dbd19e4b781a8f113ae95738dda4b70ba027755052126c198d20ade97869ff",
+ r: "-6dbd19e4b781a8f113ae95738dda4b70ba027755052126c198d20ade97869ff"
+}, {
+ a: "d02befb1c96364a984664f85",
+ r: "-d02befb1c96364a984664f85"
+}, {
+ a: "86",
+ r: "-86"
+}, {
+ a: "0",
+ r: "0"
+}, {
+ a: "-f8da",
+ r: "f8da"
+}, {
+ a: "2b0f358b54a82fbaddc5a6e61a5d",
+ r: "-2b0f358b54a82fbaddc5a6e61a5d"
+}, {
+ a: "-3d32065b9bbb36ee521ff82da",
+ r: "3d32065b9bbb36ee521ff82da"
+}, {
+ a: "ca3da934e8081c457933c90",
+ r: "-ca3da934e8081c457933c90"
+}, {
+ a: "-e4d2bbdf90affad1d2a",
+ r: "e4d2bbdf90affad1d2a"
+}, {
+ a: "-290845e8f55d467e3",
+ r: "290845e8f55d467e3"
+}, {
+ a: "-771c77d2dd2227c30cf44f1bf3230",
+ r: "771c77d2dd2227c30cf44f1bf3230"
+}, {
+ a: "-77aa11",
+ r: "77aa11"
+}, {
+ a: "d220c8af9c97516bf5ec295585f711e020480d08ac11689726a285930",
+ r: "-d220c8af9c97516bf5ec295585f711e020480d08ac11689726a285930"
+}, {
+ a: "53841e699f994e1a43f7f848f34d418792191de15b78e1a852c430b2f8af6e7",
+ r: "-53841e699f994e1a43f7f848f34d418792191de15b78e1a852c430b2f8af6e7"
+}, {
+ a: "7c30187b8901bd748adc4bc243",
+ r: "-7c30187b8901bd748adc4bc243"
+}, {
+ a: "-e07ac5649eb741a023b0f9928d5982032f6766a479c7fbf26",
+ r: "e07ac5649eb741a023b0f9928d5982032f6766a479c7fbf26"
+}, {
+ a: "5ab3237bb32234bcaf8",
+ r: "-5ab3237bb32234bcaf8"
+}, {
+ a: "7df7",
+ r: "-7df7"
+}, {
+ a: "-518b7b",
+ r: "518b7b"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var r = -a;
+ if (d.r !== r.toString(16)) {
+ print("Input: " + a.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/not.js b/deps/v8/test/mjsunit/harmony/bigint/not.js
new file mode 100644
index 0000000000..fe23c8f965
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/not.js
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
+// Flags: --noopt
+
+var data = [{
+ a: "3d02c87edc77722299f6559ecca038911f864a4e78c20af80f4a6d9",
+ r: "-3d02c87edc77722299f6559ecca038911f864a4e78c20af80f4a6da"
+}, {
+ a: "ac01894aeaf77255ede209897561ec1e3c7e916b9",
+ r: "-ac01894aeaf77255ede209897561ec1e3c7e916ba"
+}, {
+ a: "-7aaab657ab197f26eb6b98fe4c2c79b199a8156129ca04",
+ r: "7aaab657ab197f26eb6b98fe4c2c79b199a8156129ca03"
+}, {
+ a: "9718579cc52befdaff1ec035b5ed03cec5c1d1678c28712cf0c9bec2c807897b74f0",
+ r: "-9718579cc52befdaff1ec035b5ed03cec5c1d1678c28712cf0c9bec2c807897b74f1"
+}, {
+ a: "e614366bc4e67509843254c52e13da5380b00a35aa1d233e70821f7d649ad1957db",
+ r: "-e614366bc4e67509843254c52e13da5380b00a35aa1d233e70821f7d649ad1957dc"
+}, {
+ a: "fb815f78e6952b500226c",
+ r: "-fb815f78e6952b500226d"
+}, {
+ a: "94404df802649cff2ea6c0996f55ec60c14f00ab29b287092389951f6227c4ec7",
+ r: "-94404df802649cff2ea6c0996f55ec60c14f00ab29b287092389951f6227c4ec8"
+}, {
+ a: "-74b42cd7bccd",
+ r: "74b42cd7bccc"
+}, {
+ a: "da",
+ r: "-db"
+}, {
+ a: "3a9ade198",
+ r: "-3a9ade199"
+}, {
+ a: "56e766d24fd18c2241f244dedc426c0b1ae59e7ed4f06def0a75e0a5c8651e2ce87928",
+ r: "-56e766d24fd18c2241f244dedc426c0b1ae59e7ed4f06def0a75e0a5c8651e2ce87929"
+}, {
+ a: "cc430c91347b22ecb1a6f1a2ceea168ffa4a9b80065bd1ec5d",
+ r: "-cc430c91347b22ecb1a6f1a2ceea168ffa4a9b80065bd1ec5e"
+}, {
+ a: "32e4b7f82d8c037d0f562296e21b1e58a",
+ r: "-32e4b7f82d8c037d0f562296e21b1e58b"
+}, {
+ a: "-526d3f1a904561f0cde1f0a2a4",
+ r: "526d3f1a904561f0cde1f0a2a3"
+}, {
+ a: "3de5a9635a40539831c9665577e5eedbf680755e2065a0caa346759e17225",
+ r: "-3de5a9635a40539831c9665577e5eedbf680755e2065a0caa346759e17226"
+}, {
+ a: "-d912828b8d6419900",
+ r: "d912828b8d64198ff"
+}, {
+ a: "-17968ddf93",
+ r: "17968ddf92"
+}, {
+ a: "-c2bfd766e34923d549bbaedb4d9b7bb35a61908e6144462a",
+ r: "c2bfd766e34923d549bbaedb4d9b7bb35a61908e61444629"
+}, {
+ a: "af426ec83aaafc84a94930e51a2899696a3d",
+ r: "-af426ec83aaafc84a94930e51a2899696a3e"
+}, {
+ a: "-283de5b9379a45f065d3b8662ac38faa6492bc0eea6b7e3b51591a5cc27669e",
+ r: "283de5b9379a45f065d3b8662ac38faa6492bc0eea6b7e3b51591a5cc27669d"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var r = ~a;
+ if (d.r !== r.toString(16)) {
+ print("Input: " + a.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/or.js b/deps/v8/test/mjsunit/harmony/bigint/or.js
new file mode 100644
index 0000000000..4ee32657d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/or.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "a66",
+ b: "d0671cd6e4ebd7baf6e48b2529348cfa89fc9513ba30ef3f99aee07f267df163cf8",
+ r: "d0671cd6e4ebd7baf6e48b2529348cfa89fc9513ba30ef3f99aee07f267df163efe"
+}, {
+ a: "a9950e5fc429f0f93d5fa8f306f4e5da88a8c9f9",
+ b: "d1fc1ac3db7ff5547462800923e616727120f74f0a6cb7bf1886dd4f4ac",
+ r: "d1fc1ac3db7ff554746a9959e7fe56ff7fb3f7ff8f7cffff5daedfcfdfd"
+}, {
+ a: "5e277a64b6515ad69ed8935ae8dcdb6dc66f98fcbb462b10bea0db15ad6010d",
+ b: "7df3",
+ r: "5e277a64b6515ad69ed8935ae8dcdb6dc66f98fcbb462b10bea0db15ad67dff"
+}, {
+ a: "3b8368196588e684403965902763d66aa",
+ b: "-edf58c5ab418f49cf9fdb7f3b1c416a03c1dfbe90ba7ea6373c",
+ r: "-edf58c5ab418f49cf9c43493a0801600381dc2880b808821112"
+}, {
+ a: "-5587f5e86137f8ea4d7259acdd0b77a26ea069385501c9985df6a5fcd3c",
+ b: "9878871628ea5cb66",
+ r: "-5587f5e86137f8ea4d7259acdd0b77a26ea069385501811849d605a041a"
+}, {
+ a: "-dc65679b1ea7c86c10890e6d6be3bd069b4c7",
+ b: "83ea9",
+ r: "-dc65679b1ea7c86c10890e6d6be3bd0618047"
+}, {
+ a: "-755f422bfb614b7ed2c8e05cd1e0e0a",
+ b: "-d3185fac5454a495d7b149e67df4436339e060d924d",
+ r: "-24154221496049744240204040c0209"
+}, {
+ a: "-1dfdf84b41ddd069053",
+ b: "f9b2bc80b580311773e9a5d57e8f24ace46bd2a0fce24404db684efa8692638b5d604e6",
+ r: "-1105784900548009011"
+}, {
+ a: "18d1b78380aa9016029417c2ebe77a",
+ b: "-b63b35e6711dcbf00dc02cd936",
+ r: "-3835446109c9600800041806"
+}, {
+ a: "-9981f",
+ b: "-5d876576146a2d5dc8d52d26ea3304287af0922685f8e1a46875e80f24a470",
+ r: "-800f"
+}, {
+ a: "-20f8052991bc5a8f2631c9e4b34aa9073a69913185a539d719",
+ b: "a59fdaa025b42",
+ r: "-20f8052991bc5a8f2631c9e4b34aa9073a6991200005398419"
+}, {
+ a: "-d02620570",
+ b: "-3b14d75fb9d9b95d13f884a82c9f16",
+ r: "-400200506"
+}, {
+ a: "-8",
+ b: "-4",
+ r: "-4"
+}, {
+ a: "e0e8ab319d1f051560e1155ae5789dd4d9b638e07e5a57c3432e6cb9239d",
+ b: "85c9cd1f09436dc45ac783f31a21a1ff4e11ceca00cc164",
+ r: "e0e8ab319d1f0d5dfcf1f5def7fcddfcf9bf39e27e5ff7e35feeecbde3fd"
+}, {
+ a: "8131173cb5597e2ae560cae6d0907f004792b1b1c7",
+ b: "-2ac290724a7c86",
+ r: "-c290604a4c01"
+}, {
+ a: "bdb24fd4d78b01d77e41d95f2c3eedeb2bf919",
+ b: "-97f6ccbd94d64ada501e0f288568",
+ r: "-14f60881940600d2401204000467"
+}, {
+ a: "-d5ad86f9a4808394f6e7dcd2a67bd3e6a9",
+ b: "1a80309934942e4e55fbf2ba4d1d2f8cc4697338097e2c1b7ce013da8a79965974286",
+ r: "-408c066804000010f6c25450261a40a429"
+}, {
+ a: "c7a0086120a1613a28456",
+ b: "b8",
+ r: "c7a0086120a1613a284fe"
+}, {
+ a: "c8480c",
+ b: "0",
+ r: "c8480c"
+}, {
+ a: "ec8913fc89aa7a47672bc0f5269e8629cabf2dba88836cb3a9",
+ b: "-52594e7",
+ r: "-4010447"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a | b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: |");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regressions.js b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
new file mode 100644
index 0000000000..45c8816fe7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-bigint --noopt
+
+var a = 5n;
+var b = a / -1n;
+assertEquals(5n, a);
+assertEquals(-5n, b);
+assertEquals(5n, 5n / 1n);
+assertEquals(5n, -5n / -1n);
+assertEquals(-5n, -5n / 1n);
+
+assertEquals(0n, 5n % 1n);
+assertEquals(0n, -5n % 1n);
+assertEquals(0n, 5n % -1n);
+assertEquals(0n, -5n % -1n);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/sar.js b/deps/v8/test/mjsunit/harmony/bigint/sar.js
new file mode 100644
index 0000000000..7feb8aebcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/sar.js
@@ -0,0 +1,113 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "-4efa0d1f8a127",
+ b: "-66",
+ r: "-13be8347e2849c0000000000000000000000000"
+}, {
+ a: "-100000001",
+ b: "20",
+ r: "-2"
+}, {
+ a: "853cd87b0bd5c046aecbf4b3d",
+ b: "-96",
+ r: "214f361ec2f57011abb2fd2cf40000000000000000000000000000000000000"
+}, {
+ a: "-4bc82dba903fedec0a079f7ae4fa6bd6befa",
+ b: "0",
+ r: "-4bc82dba903fedec0a079f7ae4fa6bd6befa"
+}, {
+ a: "43969b4db0d921d9f0ca68f74e4e4b9073732a7955a5b4571",
+ b: "-91",
+ r: "872d369b61b243b3e194d1ee9c9c9720e6e654f2ab4b68ae2000000000000000000000000000000000000"
+}, {
+ a: "495f57",
+ b: "-a5",
+ r: "92beae00000000000000000000000000000000000000000"
+}, {
+ a: "-22109b99d3025aaef5c3fbd27420a72",
+ b: "45d",
+ r: "-1"
+}, {
+ a: "b3f6b156f4afcf259efd3cd1",
+ b: "c7",
+ r: "0"
+}, {
+ a: "137aeeadc8d1395042e80393cc1b6a1c7b6e526ab1b6fc2f2859fd70e0c29df2802",
+ b: "f49",
+ r: "0"
+}, {
+ a: "70f51026476e43bd7e911d37a4553701",
+ b: "33",
+ r: "e1ea204c8edc877afd2"
+}, {
+ a: "-3f935a89481c85d666498cf64fdc2a57028f7b295621dc665c0442229563",
+ b: "-2",
+ r: "-fe4d6a2520721759992633d93f70a95c0a3deca5588771997011088a558c"
+}, {
+ a: "-c3",
+ b: "-87",
+ r: "-618000000000000000000000000000000000"
+}, {
+ a: "aae225520f630c0dfbb815f121836612d75a1f65a301461cd05ad0a741496",
+ b: "-4",
+ r: "aae225520f630c0dfbb815f121836612d75a1f65a301461cd05ad0a7414960"
+}, {
+ a: "a5348f9af939041cc6ed386c060619a42f30c4aa8",
+ b: "95",
+ r: "529a"
+}, {
+ a: "-4c27fc7e3892a6a5b517",
+ b: "-6c",
+ r: "-4c27fc7e3892a6a5b517000000000000000000000000000"
+}, {
+ a: "98efd35f2239f7efde9aef42ad0acd835e68ad868a2cd8fac260f1c7496e3fd2ada76",
+ b: "0",
+ r: "98efd35f2239f7efde9aef42ad0acd835e68ad868a2cd8fac260f1c7496e3fd2ada76"
+}, {
+ a: "-92f0264c863bdf66d4c83e8bf812123d759b4",
+ b: "-96",
+ r: "-24bc0993218ef7d9b5320fa2fe04848f5d66d00000000000000000000000000000000000000"
+}, {
+ a: "ec6341ff2b0e9cf8721e2eb4ec9c9",
+ b: "74",
+ r: "0"
+}, {
+ a: "-32de8dced947fa55cd0b91332a81f70",
+ b: "-5b",
+ r: "-196f46e76ca3fd2ae685c8999540fb800000000000000000000000"
+}, {
+ a: "-3ef43bf8610f6533526ba734e85eafa04cd50a3",
+ b: "-25",
+ r: "-7de877f0c21eca66a4d74e69d0bd5f4099aa146000000000"
+}, {
+ a: "-9979591a367a32ae0039c54fd0f3d9e0ccc80be52b7e517fc94d9f3587dc54d933bb",
+ b: "0",
+ r: "-9979591a367a32ae0039c54fd0f3d9e0ccc80be52b7e517fc94d9f3587dc54d933bb"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a >> b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: >>");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/shl.js b/deps/v8/test/mjsunit/harmony/bigint/shl.js
new file mode 100644
index 0000000000..1b0f309f88
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/shl.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "-9a6d035348727045f6abf7d59056d30e9ce885e87f5f8438347bfcda0a1f9b",
+ b: "-2",
+ r: "-269b40d4d21c9c117daafdf56415b4c3a73a217a1fd7e10e0d1eff368287e7"
+}, {
+ a: "615f9676062ea7a1b89396ce4208712f279475490829",
+ b: "ff",
+ r: "30afcb3b031753d0dc49cb672104389793ca3aa484148000000000000000000000000000000000000000000000000000000000000000"
+}, {
+ a: "-9b6131d8b806543fce32b4c2ca2038ffa956929848a61b5eb7f",
+ b: "-e7",
+ r: "-1"
+}, {
+ a: "-331d9e",
+ b: "0",
+ r: "-331d9e"
+}, {
+ a: "cb79696d3a6f5d5d034e9d2",
+ b: "-d33",
+ r: "0"
+}, {
+ a: "ca99",
+ b: "10",
+ r: "ca990000"
+}, {
+ a: "6f97833d5",
+ b: "0",
+ r: "6f97833d5"
+}, {
+ a: "67d36e7948d18af35f0823c0d58ba47ca0846cdfaa7a7407f09d44747275532681b343",
+ b: "f",
+ r: "33e9b73ca468c579af8411e06ac5d23e5042366fd53d3a03f84ea23a393aa99340d9a18000"
+}, {
+ a: "f4896",
+ b: "-7",
+ r: "1e91"
+}, {
+ a: "996ce2a9e0f7d65e0523204c9c469bfd14821efe571ac59cdc01",
+ b: "1d",
+ r: "132d9c553c1efacbc0a464099388d37fa29043dfcae358b39b8020000000"
+}, {
+ a: "-f8f",
+ b: "f1",
+ r: "-1f1e000000000000000000000000000000000000000000000000000000000000"
+}, {
+ a: "-b685bbcd953ba9c5973ae523dc81d7b35e0cf2b9b51026d4ba1ac21bd5c3c18f9c13",
+ b: "0",
+ r: "-b685bbcd953ba9c5973ae523dc81d7b35e0cf2b9b51026d4ba1ac21bd5c3c18f9c13"
+}, {
+ a: "e2295b362b7048fb163d1272178ed441517fc689e5ec5ea40f29",
+ b: "-30",
+ r: "e2295b362b7048fb163d1272178ed441517fc689"
+}, {
+ a: "-b322e816b014448f44e60b418582390d2a3ad95",
+ b: "0",
+ r: "-b322e816b014448f44e60b418582390d2a3ad95"
+}, {
+ a: "4c135e4d7",
+ b: "0",
+ r: "4c135e4d7"
+}, {
+ a: "-d5b694",
+ b: "f1",
+ r: "-1ab6d28000000000000000000000000000000000000000000000000000000000000"
+}, {
+ a: "-7994be7",
+ b: "-d",
+ r: "-3ccb"
+}, {
+ a: "a6443add555ea15af90092e8",
+ b: "42",
+ r: "29910eb75557a856be4024ba00000000000000000"
+}, {
+ a: "9385ed",
+ b: "e5",
+ r: "1270bda000000000000000000000000000000000000000000000000000000000"
+}, {
+ a: "-531",
+ b: "7d",
+ r: "-a620000000000000000000000000000000"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a << b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: <<");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/sub.js b/deps/v8/test/mjsunit/harmony/bigint/sub.js
new file mode 100644
index 0000000000..f0af2ca930
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/sub.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "e5e5071838eb1314",
+ b: "3b7f55dce703a25ea14fdea6186156f775dec5d29f6edb3a014",
+ r: "-3b7f55dce703a25ea14fdea6186156f775d067822deb4c88d00"
+}, {
+ a: "-f",
+ b: "22d6805c7201a8ad4b9e6c2a7e8b5ab3bac",
+ r: "-22d6805c7201a8ad4b9e6c2a7e8b5ab3bbb"
+}, {
+ a: "-22",
+ b: "-11a0adfaedd5adb92297af1c3794ef5461dd8bc146db3",
+ r: "11a0adfaedd5adb92297af1c3794ef5461dd8bc146d91"
+}, {
+ a: "-d20c39d0",
+ b: "-46faa9d3eabcbd8b6d07adc2d0ff289d2",
+ r: "46faa9d3eabcbd8b6d07adc2c3de65002"
+}, {
+ a: "-e5b56109a11",
+ b: "211e1dcdf52f020ab0f16e18cc4e46027d05bfa3155b88973e630ae9a75bf2c7fbad269",
+ r: "-211e1dcdf52f020ab0f16e18cc4e46027d05bfa3155b88973e630ae9a75cd87d5cb6c7a"
+}, {
+ a: "-b682aa",
+ b: "-5fa59a6a80d39c0c885c030e9c8c84ec7",
+ r: "5fa59a6a80d39c0c885c030e9c811cc1d"
+}, {
+ a: "-c1325b8ab9fea966f093bbfbc2e611b0e5bf0b13ce047c7133056d4eea",
+ b: "f97d5c4014c5cc87923c344a",
+ r: "-c1325b8ab9fea966f093bbfbc2e611b0e6b888700e19423dba97a98334"
+}, {
+ a: "-1872900ab729911a3c021db53672eda07a9ad623",
+ b: "152d13997090c43551edfc89d4c7ea5e9ffee4a114085858892e67e82edea6384aaaba7",
+ r: "-152d13997090c43551edfc89d4c7ea602727e54c86a169fc4950433b960d803ff4581ca"
+}, {
+ a: "5440388fc10de9",
+ b: "-4b",
+ r: "5440388fc10e34"
+}, {
+ a: "-198dc54795a81722f70acc9cc20505492172c7819ba168e57d",
+ b: "-48f3b40bf850f771d44e423eb266846801d9e4e920c",
+ r: "-198dc543066cd66371fb557f7d20e15dfb0a81017e031a5371"
+}, {
+ a: "c78082429b3163ce243c778",
+ b: "-97afe29",
+ r: "c78082429b3163cebbec5a1"
+}, {
+ a: "-50df",
+ b: "-d5352ec9c1b0e62b97ea1363ce8b72",
+ r: "d5352ec9c1b0e62b97ea1363ce3a93"
+}, {
+ a: "-5c9777f93d64636ff8bcda39125625aa58a49e9a4f29ece2b7afa5d",
+ b: "894bb7aa90b8687e6290c3218a4258dac9400d556caafe02cf8c312c053f2fc73",
+ r: "-894bb7aa9114fff65bce2784fa3b15b50252637b1703a2a169db5b18e7f6df6d0"
+}, {
+ a: "-e15c51f0627e460c477",
+ b: "-dfd13bac43ebe2f8e77f5b31314843",
+ r: "dfd13bac43ddcd33c879334cd083cc"
+}, {
+ a: "0",
+ b: "adbd3e4b06b92771ae25eb52fca5fc86391303ebf7962",
+ r: "-adbd3e4b06b92771ae25eb52fca5fc86391303ebf7962"
+}, {
+ a: "960a8aa627a1c48721f4e0",
+ b: "-9e8742ae61615481cdd12f0728f0b61",
+ r: "9e8742ae6ac1fd2c304b4b4f9b10041"
+}, {
+ a: "-abf5cf9ff3c15b0645009",
+ b: "-e805773176aaa712d144e172db033c64aeaddf3380b2",
+ r: "e805773176aaa712d144e1681ba6426572982ecf30a9"
+}, {
+ a: "349ebb89b13a7149ec1a4e067574c3825f90ec4e4413948b808c6a",
+ b: "-44cdc0e3efa11513525f68163033a59d7b0610070",
+ r: "349ebb89b13a7596c8288d0086c5f8a856124f517e6d6c3be18cda"
+}, {
+ a: "a86c53e8c49b20cff057882c4345c40f5c34a8cb8",
+ b: "-76453703c781511b52e663",
+ r: "a86c53e8c49b20cff05eec7fb3823c246de9d731b"
+}, {
+ a: "-2647d3c",
+ b: "776e5b3a57bd5196be1b9c99ae899d949cb2b94310c53be8910db71b",
+ r: "-776e5b3a57bd5196be1b9c99ae899d949cb2b94310c53be893723457"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a - b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: -");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/bigint/tonumber.js b/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
new file mode 100644
index 0000000000..0061d91d67
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-bigint --no-opt
+
+function Check(bigint, number_string) {
+ var number = Number(bigint);
+ if (number_string.substring(0, 2) === "0x") {
+ assertEquals(number_string.substring(2), number.toString(16));
+ } else {
+ assertEquals(number_string, number.toString());
+ }
+}
+
+// Values in Smi range.
+Check(0n, "0");
+Check(1n, "1");
+Check(-1n, "-1");
+
+// Values in double range.
+Check(12345678912n, "12345678912");
+Check(-12345678912345n, "-12345678912345");
+Check(0xfffffffffffffn, "0xfffffffffffff"); // 52 bits.
+Check(0x1fffffffffffffn, "0x1fffffffffffff"); // 53 bits.
+Check(0x3fffffffffffffn, "0x40000000000000"); // 54 bits, rounding up.
+Check(0x3ffffffffffffen, "0x3ffffffffffffe"); // 54 bits, rounding down.
+Check(0x7ffffffffffffdn, "0x7ffffffffffffc"); // 55 bits, rounding down.
+Check(0x7ffffffffffffen, "0x80000000000000"); // 55 bits, tie to even.
+Check(0x7fffffffffffffn, "0x80000000000000"); // 55 bits, rounding up.
+Check(0x1ffff0000ffff0000n, "0x1ffff0000ffff0000"); // 65 bits.
+
+// Values near infinity.
+Check(1n << 1024n, "Infinity");
+Check(-1n << 1024n, "-Infinity");
+Check(1n << 1023n, "8.98846567431158e+307");
+Check((1n << 1024n) - (1n << 972n), "1.7976931348623155e+308");
+Check((1n << 1024n) - (1n << 971n), "1.7976931348623157e+308");
+Check((1n << 1024n) - (1n << 970n), "Infinity"); // Rounding up overflows.
diff --git a/deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js b/deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js
new file mode 100644
index 0000000000..242700191a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-bigint --no-opt
+
+const MAX_BIGINT_BITS = 1024 * 1024; // Matches BigInt::kMaxLengthBits
+const MAX_BIGINT_CHARS = MAX_BIGINT_BITS / 4;
+
+const TOO_MANY_ONES = Array(MAX_BIGINT_CHARS + 2).join("1") + "n";
+
+const tooBigHex = "0x" + TOO_MANY_ONES;
+
+assertThrows(tooBigHex, SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/xor.js b/deps/v8/test/mjsunit/harmony/bigint/xor.js
new file mode 100644
index 0000000000..d8c9012971
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/xor.js
@@ -0,0 +1,109 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by tools/bigint-tester.py.
+
+// Flags: --harmony-bigint
+
+var data = [{
+ a: "abde23cae3113c95ec7f444c7277658",
+ b: "-65e40fb1",
+ r: "-abde23cae3113c95ec7f444a2c379e9"
+}, {
+ a: "2d0bbdc05059c78b7e9f43689b2f7a9afaefd679212c2a9b990",
+ b: "29fcdb109b54650f9762b494916bc1cf14853430697febe7acf4327983ce0c6c4c183",
+ r: "29fcdb109b54650f974fbf29513b98089ffbab7301e4c49d360eddaffaef2046d7813"
+}, {
+ a: "b958dc77068d01811e031d6320df5e53823697be94f7654340b",
+ b: "-c1f5ca609a658e24fc33fec10a84b18fb745cb7c6",
+ r: "-b958dc77064cf44b7e9978ed04236dad433c130f1b4020883cf"
+}, {
+ a: "cf7319e3fe16912370c830906f88b",
+ b: "98d972f3c",
+ r: "cf7319e3fe16912370c8a8491d7b7"
+}, {
+ a: "aea6d9e7cec74bca19",
+ b: "5abbcd0c5aa1f96fef9db32b3618de782db64b8f6b4",
+ r: "5abbcd0c5aa1f96fef9db32b3cf2b3e6515a3f33cad"
+}, {
+ a: "-b522a022e90fa094f3b729a7a0a914349f5e1fd778829d7576ad36711",
+ b: "-aa00d2fd6a7636",
+ r: "b522a022e90fa094f3b729a7a0a914349f5e1fd778883d78597b91125"
+}, {
+ a: "9c2bc822ec4a590eb8a77ee630009713090",
+ b: "30b13459c1434",
+ r: "9c2bc822ec4a590eb8a77ed68134ced24a4"
+}, {
+ a: "-f14873e1f6121d584d5541073c7ce162873e156b72fb3c943ffd5f212c0d6",
+ b: "f449f0a292048546924d2973626f5441c045d4adbfd00d301791f0db965f",
+ r: "-fe0cecebdf32550c247193900a5a14269b3a4821a9063c473e84402c9568b"
+}, {
+ a: "83d5552fba4213d8dd1ed9bc6c2",
+ b: "4f7ccc10ba9b6880b862f8d5e1c9",
+ r: "47419942413f49bd35b3154e270b"
+}, {
+ a: "9fdb44741177921c8338b70fc7fa362295bfc92f6275fa16492",
+ b: "93676e5ef972",
+ r: "9fdb44741177921c8338b70fc7fa362295bfc92654031ff9de0"
+}, {
+ a: "4355637ed",
+ b: "-7aeb3013cc5eb39d56eed8104407a3e68039944f7673a0c75bd3",
+ r: "-7aeb3013cc5eb39d56eed8104407a3e68039944f767795916c40"
+}, {
+ a: "7fdf50188f716c13feced67a1c33ecf422",
+ b: "-7106cd7b9",
+ r: "-7fdf50188f716c13feced67a1b2380239b"
+}, {
+ a: "368cf8d0f5790a03774b9a1e116f82281ebd9e18de7f54a7d91f50",
+ b: "8bc4e4f24ce2a7d037079552e6c7db2795f15c92a01f4e0d9",
+ r: "368cf06cbb362ecd5d36996e683aac44630fe747cbb67ea62dff89"
+}, {
+ a: "-7466a596078a20cc4eca96953e3",
+ b: "-666328e5437b1475dcfe2f44f1c6a82af82ce7ee7cf229c8398836d2d834f9014",
+ r: "666328e5437b1475dcfe2f44f1c6a82af82ce79a1a57bfcfb3a8fa9c12a26c3f1"
+}, {
+ a: "ad284b70a22d96bdefba53f134c65a1e4958013bb9a31f091fde6fc89",
+ b: "-c89374df2",
+ r: "-ad284b70a22d96bdefba53f134c65a1e4958013bb9a31f09d74d1b179"
+}, {
+ a: "-47df52354db5",
+ b: "-aa7f61aba9ad859e803e964418af30",
+ r: "aa7f61aba9ad859e807949162de29b"
+}, {
+ a: "-f03ea80f22a3dc03f036b13f85faf5fb1",
+ b: "86e9110772d369fdd52b45a8fb22cea26cb73e908408f8a3cdf637f0042c8efdc11",
+ r: "-86e9110772d369fdd52b45a8fb22cea26c4700388b2a5b7fce0601413ba974083a2"
+}, {
+ a: "3603d29c8",
+ b: "f4849ec3ec3c352b",
+ r: "f4849ec08c011ce3"
+}, {
+ a: "e6668ed8eae8b4bb7bdf522d44e9f1bcf66",
+ b: "361cab4f5be1",
+ r: "e6668ed8eae8b4bb7bdf522e25234549487"
+}, {
+ a: "-d0395d",
+ b: "-4a8ee89d006d22a124070241caf5f4343bdfd30d12",
+ r: "4a8ee89d006d22a124070241caf5f4343bdf03344d"
+}];
+
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+ var a = BigInt.parseInt(d.a, 16);
+ var b = BigInt.parseInt(d.b, 16);
+ var r = a ^ b;
+ if (d.r !== r.toString(16)) {
+ print("Input A: " + a.toString(16));
+ print("Input B: " + b.toString(16));
+ print("Result: " + r.toString(16));
+ print("Expected: " + d.r);
+ print("Op: ^");
+ error_count++;
+ }
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js b/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
index a218a8ae6f..3c96031a8f 100644
--- a/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
+++ b/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
@@ -94,3 +94,14 @@
(function TestTruncation() {
assertEquals("ab", "a".padEnd(2, "bc"));
})();
+
+
+(function TestMaxLength() {
+ assertThrows(() => "123".padEnd(Math.pow(2, 40)), RangeError);
+ assertThrows(() => "123".padEnd(1 << 30), RangeError);
+})();
+
+
+(function TestNoArguments() {
+ assertEquals("abc", "abc".padEnd());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js b/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
index 5e1c36b949..d27ad5418f 100644
--- a/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
+++ b/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
@@ -94,3 +94,14 @@
(function TestTruncation() {
assertEquals("ba", "a".padStart(2, "bc"));
})();
+
+
+(function TestMaxLength() {
+ assertThrows(() => "123".padStart(Math.pow(2, 40)), RangeError);
+ assertThrows(() => "123".padStart(1 << 30), RangeError);
+})();
+
+
+(function TestNoArguments() {
+ assertEquals("abc", "abc".padStart());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-meta.js b/deps/v8/test/mjsunit/harmony/modules-import-meta.js
new file mode 100644
index 0000000000..5ea8a686f2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-meta.js
@@ -0,0 +1,44 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --harmony-import-meta
+
+import foreign, { url as otherUrl } from './modules-skip-export-import-meta.js';
+
+assertEquals("object", typeof import.meta);
+assertEquals(null, Object.getPrototypeOf(import.meta));
+assertSame(import.meta, import.meta);
+
+const loadImportMetaArrow = () => import.meta;
+assertSame(loadImportMetaArrow(), import.meta);
+function loadImportMetaFn() {
+ try {
+ throw new Error('force catch code path for nested context');
+ } catch (e) {
+ return import.meta;
+ }
+}
+loadImportMetaFn();
+assertSame(loadImportMetaFn(), import.meta);
+
+// This property isn't part of the spec itself but is mentioned as an example
+assertMatches(/\/modules-import-meta\.js$/, import.meta.url);
+
+import.meta.x = 42;
+assertEquals(42, import.meta.x);
+Object.assign(import.meta, { foo: "bar" })
+assertEquals("bar", import.meta.foo);
+
+// PerformEval parses its argument for the goal symbol Script. So the following
+// should fail just as it does for every other Script context.
+//
+// See:
+// https://github.com/tc39/proposal-import-meta/issues/7#issuecomment-329363083
+assertThrows(() => eval('import.meta'), SyntaxError);
+assertThrows(() => new Function('return import.meta;'), SyntaxError);
+
+assertNotEquals(foreign, import.meta);
+assertMatches(/\/modules-skip-export-import-meta\.js$/, foreign.url);
+assertEquals(foreign.url, otherUrl);
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js b/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js
new file mode 100644
index 0000000000..e58e5018af
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default import.meta;
+const { url } = import.meta;
+export { url };
diff --git a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
new file mode 100644
index 0000000000..acf0f13a99
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
@@ -0,0 +1,676 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-public-fields
+"use strict";
+
+{
+ class C {
+ a;
+ }
+
+ assertEquals(undefined, C.a);
+
+ let c = new C;
+ let descriptor = Object.getOwnPropertyDescriptor(c, 'a');
+ assertTrue(c.hasOwnProperty('a'));
+ assertTrue(descriptor.writable);
+ assertTrue(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+ assertEquals(undefined, c.a);
+}
+
+{
+ class C {
+ x = 1;
+ constructor() {}
+ }
+
+ let c = new C;
+ assertEquals(1, c.x);
+}
+
+{
+ function t() {
+ class X {
+ x = 1;
+ constructor() {}
+ }
+
+ var x = new X;
+ return x.x;
+ }
+
+ assertEquals(1, t());
+}
+
+{
+ let x = 'a';
+ class C {
+ a;
+ b = x;
+ c = 1;
+ hasOwnProperty() { return 1;}
+ static [x] = 2;
+ static b = 3;
+ static d;
+ }
+
+ assertEquals(2, C.a);
+ assertEquals(3, C.b);
+ assertEquals(undefined, C.d);
+ assertEquals(undefined, C.c);
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals('a', c.b);
+ assertEquals(1, c.c);
+ assertEquals(undefined, c.d);
+ assertEquals(1, c.hasOwnProperty());
+}
+
+{
+ class C {
+ x = Object.freeze(this);
+ c = 42;
+ }
+ assertThrows(() => { new C; }, TypeError);
+}
+
+{
+ class C {
+ c = this;
+ d = () => this;
+ }
+
+ let c = new C;
+ assertEquals(c, c.c);
+ assertEquals(c, c.d());
+
+ assertEquals(undefined, C.c);
+ assertEquals(undefined, C.d);
+}
+
+{
+ class C {
+ c = 1;
+ d = this.c;
+ }
+
+ let c = new C;
+ assertEquals(1, c.c);
+ assertEquals(1, c.d);
+
+ assertEquals(undefined, C.c);
+ assertEquals(undefined, C.d);
+}
+
+{
+ class C {
+ b = 1;
+ c = () => this.b;
+ }
+
+ let c = new C;
+ assertEquals(1, c.b);
+ assertEquals(1, c.c());
+
+ assertEquals(undefined, C.c);
+ assertEquals(undefined, C.b);
+}
+
+{
+ let x = 'a';
+ class C {
+ b = 1;
+ c = () => this.b;
+ e = () => x;
+ }
+
+ let c = new C;
+ assertEquals(1, c.b);
+ assertEquals('a', c.e());
+
+ let a = {b : 2 };
+ assertEquals(1, c.c.call(a));
+
+ assertEquals(undefined, C.b);
+ assertEquals(undefined, C.c);
+}
+
+{
+ let x = 'a';
+ class C {
+ c = 1;
+ d = function() { return this.c; };
+ e = function() { return x; };
+ }
+
+ let c = new C;
+ assertEquals(1, c.c);
+ assertEquals(1, c.d());
+ assertEquals('a', c.e());
+
+ c.c = 2;
+ assertEquals(2, c.d());
+
+ let a = {c : 3 };
+ assertEquals(3, c.d.call(a));
+
+ assertThrows(c.d.bind(undefined));
+
+ assertEquals(undefined, C.c);
+ assertEquals(undefined, C.d);
+ assertEquals(undefined, C.e);
+}
+
+{
+ class C {
+ c = function() { return 1 };
+ }
+
+ let c = new C;
+ assertEquals('c', c.c.name);
+}
+
+{
+ let d = function() { return new.target; }
+ class C {
+ c = d;
+ }
+
+ let c = new C;
+ assertEquals(undefined, c.c());
+ assertEquals(new d, new c.c());
+}
+
+{
+ class C {
+ c = () => new.target;
+ }
+
+ let c = new C;
+ assertEquals(undefined, c.c());
+}
+
+{
+ let run = false;
+ class C {
+ c = () => {
+ let b;
+ class A {
+ constructor() {
+ b = new.target;
+ }
+ };
+ new A;
+ run = true;
+ assertEquals(A, b);
+ }
+ }
+
+ let c = new C;
+ c.c();
+ assertTrue(run);
+}
+
+{
+ class C {
+ c = new.target;
+ }
+
+ let c = new C;
+ assertEquals(undefined, c.c);
+}
+
+{
+ class B {
+ c = 1;
+ }
+
+ class C extends B {}
+
+ let c = new C;
+ assertEquals(1, c.c);
+}
+
+{
+ assertThrows(() => {
+ class C {
+ c = new C;
+ }
+ let c = new C;
+ });
+}
+
+(function test() {
+ function makeC() {
+ var x = 1;
+
+ return class {
+ a = () => () => x;
+ }
+ }
+
+ let C = makeC();
+ let c = new C;
+ let f = c.a();
+ assertEquals(1, f());
+})()
+
+{
+ let c1 = "c";
+ class C {
+ ["a"] = 1;
+ ["b"];
+ [c1];
+ }
+
+ let c = new C;
+ assertEquals(1, c.a);
+ assertEquals(undefined, c.b);
+ assertEquals(undefined, c.c1);
+}
+
+{
+ let log = [];
+ function run(i) {
+ log.push(i);
+ return i;
+ }
+
+ class C {
+ [run(1)] = run(7);
+ [run(2)] = run(8);
+ [run(3)]() { run(9);}
+ static [run(4)] = run(6);
+ [run(5)]() { throw new Error('should not execute');};
+ }
+
+ let c = new C;
+ c[3]();
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
+}
+
+function x() {
+ // This tests lazy parsing.
+ return function() {
+ let log = [];
+ function run(i) {
+ log.push(i);
+ return i;
+ }
+
+ class C {
+ [run(1)] = run(7);
+ [run(2)] = run(8);
+ [run(3)]() { run(9);}
+ static [run(4)] = run(6);
+ [run(5)]() { throw new Error('should not execute');};
+ }
+
+ let c = new C;
+ c[3]();
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
+ }
+}
+x();
+
+{
+ class C {}
+ class D {
+ [C];
+ }
+
+ let d = new D;
+ assertThrows(() => { class X { [X] } let x = new X;});
+ assertEquals(undefined, d[C]);
+}
+
+{
+ class B {
+ a = 1;
+ }
+
+ class C extends B {
+ b = 2;
+ constructor() {
+ super();
+ }
+ }
+
+ let c = new C;
+ assertEquals(1, c.a);
+ assertEquals(2, c.b);
+}
+
+{
+ var log = [];
+ function addToLog(item) { log.push(item); }
+
+ class B {
+ a = 1;
+ constructor() {
+ addToLog("base constructor");
+ }
+ }
+
+ function initF() {
+ addToLog("init f");
+ return 1;
+ }
+
+ class C extends B {
+ f = initF();
+
+ constructor() {
+ addToLog("derived constructor");
+ var t = () => {
+ addToLog("t");
+ if (1==-1) {
+ super();
+ } else {
+ super();
+ }
+ }
+ (() => {
+ addToLog("anon");
+ t();
+ })();
+ }
+ }
+
+ let c = new C;
+ assertEquals(1, c.f);
+ assertEquals(1, c.a);
+ assertEquals(["derived constructor","anon","t","base constructor","init f"],
+ log);
+}
+
+{
+ class B {
+ a = 1;
+ returnA = () => this.a;
+ }
+
+ class C extends B {
+ c = this.a;
+ d = 2;
+ returnC = () => this.c;
+ returnD = () => this.d;
+ }
+
+ let c = new C;
+ assertEquals(1, c.a);
+ assertEquals(1, c.returnA());
+ assertEquals(1, c.c);
+ assertEquals(1, c.returnA());
+ assertEquals(1, c.returnC());
+ assertEquals(2, c.d);
+ assertEquals(2, c.returnD());
+
+ let c2 = new C;
+ assertNotEquals(c2.returnA, c.returnA);
+ assertNotEquals(c2.returnC, c.returnC);
+ assertNotEquals(c2.returnD, c.returnD);
+}
+
+{
+ let foo = undefined;
+ class B {
+ set d(x) {
+ foo = x;
+ }
+ }
+
+ class C extends B {
+ d = 2;
+ }
+
+ let c = new C;
+ assertEquals(undefined, foo);
+ assertEquals(2, c.d);
+}
+
+{
+ class B {}
+ class C extends B {
+ constructor() {
+ super();
+ }
+
+ c = 1;
+ }
+
+ let c = new C;
+ assertEquals(1, c.c);
+}
+
+{
+ class B {}
+ class C extends B {
+ constructor() {
+ let t = () => {
+ super();
+ }
+ t();
+ }
+
+ c = 1;
+ }
+
+ let c = new C;
+ assertEquals(1, c.c);
+}
+
+{
+ let log = [];
+
+ class B {}
+
+ class C extends B {
+
+ x = (log.push(1), 1);
+
+ constructor() {
+ let t = () => {
+ class D extends B {
+
+ x = (log.push(2), 2);
+
+ constructor() {
+ let p = () => {
+ super();
+ }
+
+ p();
+ }
+ }
+
+ let d = new D();
+ assertEquals(2, d.x);
+ super();
+ }
+
+ t();
+ }
+ }
+
+
+ let c = new C;
+ assertEquals(1, c.x);
+ assertEquals([2, 1], log);
+}
+
+{
+ let log = [];
+ class C1 extends class {} {
+ x = log.push(1);
+ constructor() {
+ var t = () => super();
+ super();
+ t();
+ }
+ }
+
+ assertThrows(() => new C1, ReferenceError);
+ assertEquals([1,1], log);
+
+ log = [];
+ class C2 extends class {} {
+ x = log.push(1);
+ constructor() {
+ var t = () => super();
+ t();
+ super();
+ }
+ }
+
+ assertThrows(() => new C2, ReferenceError);
+ assertEquals([1,1], log);
+}
+
+{
+ class C1 extends class {} {
+ x = 1
+ constructor() {
+ eval("super()");
+ }
+ }
+
+ let c = new C1;
+ assertEquals(1, c.x);
+
+ class C2 extends class {} {
+ x = 1
+ constructor() {
+ var t = () => {
+ eval("super()");
+ }
+ t();
+ }
+ }
+
+ c = new C2;
+ assertEquals(1, c.x);
+}
+
+{
+ class C {
+ ['x'] = 1;
+ ['y'] = 2;
+ }
+
+ class C1 extends C {
+ ['x'] = 3;
+ ['z'] = 4;
+ }
+
+ let c = new C1;
+ assertEquals(3, c.x);
+ assertEquals(2, c.y);
+ assertEquals(4, c.z);
+}
+
+{
+ class X extends class {} {
+ c = 1;
+
+ constructor() {
+ let t = () => {
+
+ class P extends class {} {
+ constructor() {
+ let t = () => { super(); };
+ t();
+ }
+ }
+
+ let p = new P;
+ assertEquals(undefined, p.c);
+ super();
+ }
+
+ t();
+ }
+ }
+
+ let x = new X;
+ assertEquals(1, x.c);
+}
+
+{
+ class A {
+ a() { return 1; }
+ }
+
+ class C extends A {
+ b = super.a();
+ c = () => super.a;
+ d = () => super.a();
+ e = super.a;
+ f = super.b;
+ }
+
+ let c = new C;
+ assertEquals(1, c.a());
+ assertEquals(1, c.b);
+ assertEquals(1, c.c()());
+ assertEquals(1, c.d());
+ assertEquals(1, c.e());
+ assertFalse(Object.hasOwnProperty(c, 'a'));
+ assertEquals(c.a, c.e);
+ assertEquals(undefined, c.f);
+}
+
+{
+ function t() {
+ return class {
+ ['x'] = 1;
+ }
+ }
+
+ let klass = t();
+ let obj = new klass;
+ assertEquals(1, obj.x);
+}
+
+{
+ function t() {
+ return class {
+ ['x'] = 1;
+ static ['x'] = 2;
+ }
+ }
+
+ let klass = t();
+ let obj = new klass;
+ assertEquals(1, obj.x);
+ assertEquals(2, klass.x);
+}
+
+{
+ new class {
+ t = 1;
+ constructor(t = this.t) {
+ assertEquals(1, t);
+ }
+ }
+
+ new class extends class {} {
+ t = 1;
+ constructor(t = (super(), this.t)) {
+ assertEquals(1, t);
+ }
+ }
+
+ assertThrows(() => {
+ new class extends class {} {
+ t = 1;
+ constructor(t = this.t) {
+ super();
+ }
+ }
+ }, ReferenceError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
new file mode 100644
index 0000000000..0477e3dca7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
@@ -0,0 +1,335 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-public-fields
+
+"use strict";
+
+{
+ class C {
+ static a;
+ }
+
+ assertEquals(undefined, C.a);
+ let descriptor = Object.getOwnPropertyDescriptor(C, 'a');
+ assertTrue(C.hasOwnProperty('a'));
+ assertTrue(descriptor.writable);
+ assertTrue(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+}
+
+{
+ let x = 'a';
+ class C {
+ static a;
+ static hasOwnProperty = function() { return 1; }
+ static b = x;
+ static c = 1;
+ }
+
+ assertEquals(undefined, C.a);
+ assertEquals('a', C.b);
+ assertEquals(1, C.c);
+ assertEquals(1, C.hasOwnProperty());
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals(undefined, c.b);
+ assertEquals(undefined, c.c);
+}
+
+{
+ assertThrows(() => {
+ class C {
+ static x = Object.freeze(this);
+ static c = 42;
+ }
+ }, TypeError);
+}
+
+{
+ class C {
+ static c = this;
+ static d = () => this;
+ }
+
+ assertEquals(C, C.c);
+ assertEquals(C, C.d());
+
+ let c = new C;
+ assertEquals(undefined, c.c);
+ assertEquals(undefined, c.d);
+}
+
+{
+ class C {
+ static c = 1;
+ static d = this.c;
+ }
+
+ assertEquals(1, C.c);
+ assertEquals(1, C.d);
+
+ let c = new C;
+ assertEquals(undefined, c.c);
+ assertEquals(undefined, c.d);
+}
+
+{
+ class C {
+ static b = 1;
+ static c = () => this.b;
+ }
+
+ assertEquals(1, C.b);
+ assertEquals(1, C.c());
+
+ let c = new C;
+ assertEquals(undefined, c.c);
+}
+
+{
+ let x = 'a';
+ class C {
+ static b = 1;
+ static c = () => this.b;
+ static e = () => x;
+ }
+
+ assertEquals(1, C.b);
+ assertEquals('a', C.e());
+
+ let a = {b : 2 };
+ assertEquals(1, C.c.call(a));
+
+ let c = new C;
+ assertEquals(undefined, c.b);
+ assertEquals(undefined, c.c);
+}
+
+{
+ let x = 'a';
+ class C {
+ static c = 1;
+ static d = function() { return this.c; };
+ static e = function() { return x; };
+ }
+
+ assertEquals(1, C.c);
+ assertEquals(1, C.d());
+ assertEquals('a', C.e());
+
+ C.c = 2;
+ assertEquals(2, C.d());
+
+ let a = {c : 3 };
+ assertEquals(3, C.d.call(a));
+
+ assertThrows(C.d.bind(undefined));
+
+ let c = new C;
+ assertEquals(undefined, c.c);
+ assertEquals(undefined, c.d);
+ assertEquals(undefined, c.e);
+}
+
+{
+ class C {
+ static c = function() { return 1 };
+ }
+
+ assertEquals('c', C.c.name);
+}
+
+{
+ let d = function() { return new.target; }
+ class C {
+ static c = d;
+ }
+
+ assertEquals(undefined, C.c());
+ assertEquals(new d, new C.c());
+}
+
+{
+ class C {
+ static c = () => new.target;
+ }
+
+ assertEquals(undefined, C.c());
+}
+
+{
+ class C {
+ static c = () => {
+ let b;
+ class A {
+ constructor() {
+ b = new.target;
+ }
+ };
+ new A;
+ assertEquals(A, b);
+ }
+ }
+
+ C.c();
+}
+
+{
+ class C {
+ static c = new.target;
+ }
+
+ assertEquals(undefined, C.c);
+}
+
+{
+ class B {
+ static d = 1;
+ static b = () => this.d;
+ }
+
+ class C extends B {
+ static c = super.d;
+ static d = () => super.d;
+ static e = () => super.b();
+ }
+
+ assertEquals(1, C.c);
+ assertEquals(1, C.d());
+ assertEquals(1, C.e());
+}
+
+{
+ let foo = undefined;
+ class B {
+ static set d(x) {
+ foo = x;
+ }
+ }
+
+ class C extends B {
+ static d = 2;
+ }
+
+ assertEquals(undefined, foo);
+ assertEquals(2, C.d);
+}
+
+
+{
+ let C = class {
+ static c;
+ };
+
+ assertEquals("C", C.name);
+}
+
+{
+ class C {
+ static c = new C;
+ }
+
+ assertTrue(C.c instanceof C);
+}
+
+(function test() {
+ function makeC() {
+ var x = 1;
+
+ return class {
+ static a = () => () => x;
+ }
+ }
+
+ let C = makeC();
+ let f = C.a();
+ assertEquals(1, f());
+})()
+
+{
+ let c = "c";
+ class C {
+ static ["a"] = 1;
+ static ["b"];
+ static [c];
+ }
+
+ assertEquals(1, C.a);
+ assertEquals(undefined, C.b);
+ assertEquals(undefined, C.c);
+}
+
+{
+ let log = [];
+ function run(i) {
+ log.push(i);
+ return i;
+ }
+
+ class C {
+ static [run(1)] = run(6);
+ static [run(2)] = run(7);
+ [run(3)]() { run(9);}
+ static [run(4)] = run(8);
+ static [run(5)]() { throw new Error('should not execute');};
+ }
+
+ let c = new C;
+ c[3]();
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
+}
+
+
+
+function x() {
+
+ // This tests lazy parsing.
+ return function() {
+ let log = [];
+ function run(i) {
+ log.push(i);
+ return i;
+ }
+
+ class C {
+ static [run(1)] = run(6);
+ static [run(2)] = run(7);
+ [run(3)]() { run(9);}
+ static [run(4)] = run(8);
+ static [run(5)]() { throw new Error('should not execute');};
+ }
+
+ let c = new C;
+ c[3]();
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
+ }
+}
+x();
+
+{
+ class C {}
+ class D {
+ static [C];
+ }
+
+ assertThrows(() => { class X { static [X] } });
+ assertEquals(undefined, D[C]);
+}
+
+{
+ function t() {
+ return class {
+ static ['x'] = 2;
+ }
+ }
+
+ let klass = t();
+ let obj = new klass;
+ assertEquals(2, klass.x);
+}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
index 3ce947d1e5..1ad29f6b49 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-named-captures --harmony-regexp-lookbehind
+// Flags: --harmony-regexp-named-captures
// Malformed named captures.
assertThrows("/(?<>a)/u", SyntaxError); // Empty name.
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-binary.js b/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
index 8ab3f19329..d0c4dc577b 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
@@ -71,8 +71,8 @@ f(/\p{Dia}/u, "1");
t(/\p{Emoji}/u, "\u2603");
f(/\p{Emoji}/u, "x");
-// t(/\p{Emoji_Component}/u, "\u2603");
-// f(/\p{Emoji_Component}/u, "x");
+t(/\p{Emoji_Component}/u, "\u{1F1E6}");
+f(/\p{Emoji_Component}/u, "x");
t(/\p{Emoji_Modifier_Base}/u, "\u{1F6CC}");
f(/\p{Emoji_Modifier_Base}/u, "x");
@@ -137,6 +137,9 @@ f(/\p{QMark}/u, "A");
t(/\p{Radical}/u, "\u2FAD");
f(/\p{Radical}/u, "A");
+t(/\p{Regional_Indicator}/u, "\u{1F1E6}");
+f(/\p{Regional_Indicator}/u, "A");
+
t(/\p{Sentence_Terminal}/u, "!");
f(/\p{STerm}/u, "A");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js
index 115e064005..ae559bac10 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js
@@ -2,12 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-property
-
const regexp = /\P{Lu}/ui;
const regexpu = /[\0-@\[-\xBF\xD7\xDF-\xFF\u0101\u0103\u0105\u0107\u0109\u010B\u010D\u010F\u0111\u0113\u0115\u0117\u0119\u011B\u011D\u011F\u0121\u0123\u0125\u0127\u0129\u012B\u012D\u012F\u0131\u0133\u0135\u0137\u0138\u013A\u013C\u013E\u0140\u0142\u0144\u0146\u0148\u0149\u014B\u014D\u014F\u0151\u0153\u0155\u0157\u0159\u015B\u015D\u015F\u0161\u0163\u0165\u0167\u0169\u016B\u016D\u016F\u0171\u0173\u0175\u0177\u017A\u017C\u017E-\u0180\u0183\u0185\u0188\u018C\u018D\u0192\u0195\u0199-\u019B\u019E\u01A1\u01A3\u01A5\u01A8\u01AA\u01AB\u01AD\u01B0\u01B4\u01B6\u01B9-\u01BB\u01BD-\u01C3\u01C5\u01C6\u01C8\u01C9\u01CB\u01CC\u01CE\u01D0\u01D2\u01D4\u01D6\u01D8\u01DA\u01DC\u01DD\u01DF\u01E1\u01E3\u01E5\u01E7\u01E9\u01EB\u01ED\u01EF\u01F0\u01F2\u01F3\u01F5\u01F9\u01FB\u01FD\u01FF\u0201\u0203\u0205\u0207\u0209\u020B\u020D\u020F\u0211\u0213\u0215\u0217\u0219\u021B\u021D\u021F\u0221\u0223\u0225\u0227\u0229\u022B\u022D\u022F\u0231\u0233-\u0239\u023C\u023F\u0240\u0242\u0247\u0249\u024B\u024D\u024F-\u036F\u0371\u0373-\u0375\u0377-\u037E\u0380-\u0385\u0387\u038B\u038D\u0390\u03A2\u03AC-\u03CE\u03D0\u03D1\u03D5-\u03D7\u03D9\u03DB\u03DD\u03DF\u03E1\u03E3\u03E5\u03E7\u03E9\u03EB\u03ED\u03EF-\u03F3\u03F5\u03F6\u03F8\u03FB\u03FC\u0430-\u045F\u0461\u0463\u0465\u0467\u0469\u046B\u046D\u046F\u0471\u0473\u0475\u0477\u0479\u047B\u047D\u047F\u0481-\u0489\u048B\u048D\u048F\u0491\u0493\u0495\u0497\u0499\u049B\u049D\u049F\u04A1\u04A3\u04A5\u04A7\u04A9\u04AB\u04AD\u04AF\u04B1\u04B3\u04B5\u04B7\u04B9\u04BB\u04BD\u04BF\u04C2\u04C4\u04C6\u04C8\u04CA\u04CC\u04CE\u04CF\u04D1\u04D3\u04D5\u04D7\u04D9\u04DB\u04DD\u04DF\u04E1\u04E3\u04E5\u04E7\u04E9\u04EB\u04ED\u04EF\u04F1\u04F3\u04F5\u04F7\u04F9\u04FB\u04FD\u04FF\u0501\u0503\u0505\u0507\u0509\u050B\u050D\u050F\u0511\u0513\u0515\u0517\u0519\u051B\u051D\u051F\u0521\u0523\u0525\u0527\u0529\u052B\u052D\u052F\u0530\u0557-\u109F\u10C6\u10C8-\u10CC\u10CE-\u139F\u13F6-\u1DFF\u1E01\u1E03\u1E05\u1E07\u1E09\u1E0B\u1E0D\u1E0F\u1E11\u1E13\u1E15\u1E17\u1E19\u1E1B\u1E1D\u1E1F\u
1E21\u1E23\u1E25\u1E27\u1E29\u1E2B\u1E2D\u1E2F\u1E31\u1E33\u1E35\u1E37\u1E39\u1E3B\u1E3D\u1E3F\u1E41\u1E43\u1E45\u1E47\u1E49\u1E4B\u1E4D\u1E4F\u1E51\u1E53\u1E55\u1E57\u1E59\u1E5B\u1E5D\u1E5F\u1E61\u1E63\u1E65\u1E67\u1E69\u1E6B\u1E6D\u1E6F\u1E71\u1E73\u1E75\u1E77\u1E79\u1E7B\u1E7D\u1E7F\u1E81\u1E83\u1E85\u1E87\u1E89\u1E8B\u1E8D\u1E8F\u1E91\u1E93\u1E95-\u1E9D\u1E9F\u1EA1\u1EA3\u1EA5\u1EA7\u1EA9\u1EAB\u1EAD\u1EAF\u1EB1\u1EB3\u1EB5\u1EB7\u1EB9\u1EBB\u1EBD\u1EBF\u1EC1\u1EC3\u1EC5\u1EC7\u1EC9\u1ECB\u1ECD\u1ECF\u1ED1\u1ED3\u1ED5\u1ED7\u1ED9\u1EDB\u1EDD\u1EDF\u1EE1\u1EE3\u1EE5\u1EE7\u1EE9\u1EEB\u1EED\u1EEF\u1EF1\u1EF3\u1EF5\u1EF7\u1EF9\u1EFB\u1EFD\u1EFF-\u1F07\u1F10-\u1F17\u1F1E-\u1F27\u1F30-\u1F37\u1F40-\u1F47\u1F4E-\u1F58\u1F5A\u1F5C\u1F5E\u1F60-\u1F67\u1F70-\u1FB7\u1FBC-\u1FC7\u1FCC-\u1FD7\u1FDC-\u1FE7\u1FED-\u1FF7\u1FFC-\u2101\u2103-\u2106\u2108-\u210A\u210E\u210F\u2113\u2114\u2116-\u2118\u211E-\u2123\u2125\u2127\u2129\u212E\u212F\u2134-\u213D\u2140-\u2144\u2146-\u2182\u2184-\u2BFF\u2C2F-\u2C5F\u2C61\u2C65\u2C66\u2C68\u2C6A\u2C6C\u2C71\u2C73\u2C74\u2C76-\u2C7D\u2C81\u2C83\u2C85\u2C87\u2C89\u2C8B\u2C8D\u2C8F\u2C91\u2C93\u2C95\u2C97\u2C99\u2C9B\u2C9D\u2C9F\u2CA1\u2CA3\u2CA5\u2CA7\u2CA9\u2CAB\u2CAD\u2CAF\u2CB1\u2CB3\u2CB5\u2CB7\u2CB9\u2CBB\u2CBD\u2CBF\u2CC1\u2CC3\u2CC5\u2CC7\u2CC9\u2CCB\u2CCD\u2CCF\u2CD1\u2CD3\u2CD5\u2CD7\u2CD9\u2CDB\u2CDD\u2CDF\u2CE1\u2CE3-\u2CEA\u2CEC\u2CEE-\u2CF1\u2CF3-\uA63F\uA641\uA643\uA645\uA647\uA649\uA64B\uA64D\uA64F\uA651\uA653\uA655\uA657\uA659\uA65B\uA65D\uA65F\uA661\uA663\uA665\uA667\uA669\uA66B\uA66D-\uA67F\uA681\uA683\uA685\uA687\uA689\uA68B\uA68D\uA68F\uA691\uA693\uA695\uA697\uA699\uA69B-\uA721\uA723\uA725\uA727\uA729\uA72B\uA72D\uA72F-\uA731\uA733\uA735\uA737\uA739\uA73B\uA73D\uA73F\uA741\uA743\uA745\uA747\uA749\uA74B\uA74D\uA74F\uA751\uA753\uA755\uA757\uA759\uA75B\uA75D\uA75F\uA761\uA763\uA765\uA767\uA769\uA76B\uA76D\uA76F-\uA778\uA77A\uA77C\uA77F\uA781\uA783\uA785\uA787-\uA78A\uA78C\uA78E\uA78F\uA791\uA793-\uA795\uA797\uA799\uA79B\uA79D\
uA79F\uA7A1\uA7A3\uA7A5\uA7A7\uA7A9\uA7AE\uA7AF\uA7B5\uA7B7-\uFF20\uFF3B-\u{103FF}\u{10428}-\u{10C7F}\u{10CB3}-\u{1189F}\u{118C0}-\u{1D3FF}\u{1D41A}-\u{1D433}\u{1D44E}-\u{1D467}\u{1D482}-\u{1D49B}\u{1D49D}\u{1D4A0}\u{1D4A1}\u{1D4A3}\u{1D4A4}\u{1D4A7}\u{1D4A8}\u{1D4AD}\u{1D4B6}-\u{1D4CF}\u{1D4EA}-\u{1D503}\u{1D506}\u{1D50B}\u{1D50C}\u{1D515}\u{1D51D}-\u{1D537}\u{1D53A}\u{1D53F}\u{1D545}\u{1D547}-\u{1D549}\u{1D551}-\u{1D56B}\u{1D586}-\u{1D59F}\u{1D5BA}-\u{1D5D3}\u{1D5EE}-\u{1D607}\u{1D622}-\u{1D63B}\u{1D656}-\u{1D66F}\u{1D68A}-\u{1D6A7}\u{1D6C1}-\u{1D6E1}\u{1D6FB}-\u{1D71B}\u{1D735}-\u{1D755}\u{1D76F}-\u{1D78F}\u{1D7A9}-\u{1D7C9}\u{1D7CB}-\u{10FFFF}]/ui;
-for (let codePoint = 0; codePoint <= 0x10FFFF; codePoint++) {
- const string = String.fromCodePoint(codePoint);
- assertEquals(regexp.test(string), regexpu.test(string));
+// Test is split into parts to increase parallelism.
+const number_of_tests = 10;
+const max_codepoint = 0x10FFFF;
+
+function firstCodePointOfRange(i) {
+ return Math.floor(i * (max_codepoint / number_of_tests));
+}
+
+function testCodePointRange(i) {
+ assertTrue(i >= 0 && i < number_of_tests);
+
+ const from = firstCodePointOfRange(i);
+ const to = (i == number_of_tests - 1)
+ ? max_codepoint + 1 : firstCodePointOfRange(i + 1);
+
+ for (let codePoint = from; codePoint < to; codePoint++) {
+ const string = String.fromCodePoint(codePoint);
+ assertEquals(regexp.test(string), regexpu.test(string));
+ }
}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui0.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui0.js
new file mode 100644
index 0000000000..27911e7d6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui0.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(0);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui1.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui1.js
new file mode 100644
index 0000000000..e0177b835f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui1.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(1);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui2.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui2.js
new file mode 100644
index 0000000000..7a916b0193
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui2.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(2);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui3.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui3.js
new file mode 100644
index 0000000000..51cf5224a1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui3.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(3);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui4.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui4.js
new file mode 100644
index 0000000000..ab22e7f33d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui4.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(4);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui5.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui5.js
new file mode 100644
index 0000000000..668ddbf8f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui5.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(5);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui6.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui6.js
new file mode 100644
index 0000000000..51669e1afa
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui6.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(6);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui7.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui7.js
new file mode 100644
index 0000000000..68f3e0f66e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui7.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(7);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui8.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui8.js
new file mode 100644
index 0000000000..b38abe2d23
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui8.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(8);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui9.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui9.js
new file mode 100644
index 0000000000..5c9ca06e16
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui9.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+// Files: test/mjsunit/harmony/regexp-property-lu-ui.js
+
+testCodePointRange(9);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-6100.js b/deps/v8/test/mjsunit/harmony/regress/regress-6100.js
index 16f9ed4f32..f301094679 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-6100.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-6100.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-template-escapes
-
// This test is added because harmony-template-escapes were not properly
// handled in the preparser.
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js b/deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js
deleted file mode 100644
index 1fde47507e..0000000000
--- a/deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-restrictive-generators
-
-// In legacy mode, generators get sloppy-mode block-scoped function hoisting
-
-// Hoisting to the global scope
-
-{
- function* foo() {}
- assertEquals('function', typeof foo);
-}
-//assertEquals('function', typeof foo);
-
-// Hoisting within a function scope
-(function() {
- { function* bar() {} }
- assertEquals('function', typeof bar);
-})();
-
-// Lexical shadowing allowed; hoisting happens
-(function() {
- function* x() { yield 1; }
- { function* x() { yield 2 } }
- assertEquals(2, x().next().value);
-})();
-
-// Duplicates allowed
-(function() {
- function* y() { yield 1; }
- function* y() { yield 2 }
- assertEquals(2, y().next().value);
-})();
-
-// Functions and generators may duplicate each other
-(function() {
- function* z() { yield 1; }
- function z() { return 2 }
- assertEquals(2, z());
-
- function a() { return 1; }
- function* a() { yield 2 }
- assertEquals(2, a().next().value);
-})();
-
-// In strict mode, none of this happens
-
-(function() {
- 'use strict';
-
- { function* bar() {} }
- assertEquals('undefined', typeof bar);
-
- // Lexical shadowing allowed; hoisting happens
- function* x() { yield 1; }
- { function* x() { yield 2 } }
- assertEquals(1, x().next().value);
-})();
diff --git a/deps/v8/test/mjsunit/ignition/dynamic-global-inside-block.js b/deps/v8/test/mjsunit/ignition/dynamic-global-inside-block.js
new file mode 100644
index 0000000000..028ad49ae6
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/dynamic-global-inside-block.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Verifies that DYNAMIC_GLOBAL variables walk the correct context-chain length
+// to reach the sloppy-eval calling function context, including block contexts.
+function test() {
+ return eval('var x = 100; { function z() {z}; x }')
+}
+
+test();
diff --git a/deps/v8/test/mjsunit/ignition/print-ast.js b/deps/v8/test/mjsunit/ignition/print-ast.js
index 4a77e9a557..e3e113307e 100644
--- a/deps/v8/test/mjsunit/ignition/print-ast.js
+++ b/deps/v8/test/mjsunit/ignition/print-ast.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --print-ast
+// Flags: --print-ast --no-stress-background-compile
// Ensures that the --print-ast flag doesn't crash.
function foo(a) {
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 934a731e8f..39d7b02e2d 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=100 --harmony
+// Flags: --allow-natives-syntax --stack-size=100 --harmony
function test(f, expected, type) {
try {
@@ -15,6 +15,18 @@ function test(f, expected, type) {
assertUnreachable("Exception expected");
}
+const typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Float32Array,
+ Float64Array,
+ Uint8ClampedArray
+];
+
// === Error ===
// kCyclicProto
@@ -74,8 +86,16 @@ test(function() {
}, "String.prototype.includes called on null or undefined", TypeError);
test(function() {
+ String.prototype.match.call(null);
+}, "String.prototype.match called on null or undefined", TypeError);
+
+test(function() {
+ String.prototype.search.call(null);
+}, "String.prototype.search called on null or undefined", TypeError);
+
+test(function() {
Array.prototype.shift.call(null);
-}, "Array.prototype.shift called on null or undefined", TypeError);
+}, "Cannot convert undefined or null to object", TypeError);
test(function() {
String.prototype.trim.call(null);
@@ -116,9 +136,25 @@ test(function() {
// kConstructorNotFunction
test(function() {
+ Map();
+}, "Constructor Map requires 'new'", TypeError);
+
+test(function() {
+ Set();
+}, "Constructor Set requires 'new'", TypeError);
+
+test(function() {
Uint16Array(1);
}, "Constructor Uint16Array requires 'new'", TypeError);
+test(function() {
+ WeakSet();
+}, "Constructor WeakSet requires 'new'", TypeError);
+
+test(function() {
+ WeakMap();
+}, "Constructor WeakMap requires 'new'", TypeError);
+
// kDataViewNotArrayBuffer
test(function() {
new DataView(1);
@@ -132,6 +168,21 @@ test(function() {
Object.defineProperty(o, "x", { value: 1 });
}, "Cannot define property x, object is not extensible", TypeError);
+// kDetachedOperation
+for (constructor of typedArrayConstructors) {
+ test(() => {
+ const ta = new constructor([1]);
+ %ArrayBufferNeuter(ta.buffer);
+ ta.find(() => {});
+ }, "Cannot perform %TypedArray%.prototype.find on a detached ArrayBuffer", TypeError);
+
+ test(() => {
+ const ta = new constructor([1]);
+ %ArrayBufferNeuter(ta.buffer);
+ ta.findIndex(() => {});
+ }, "Cannot perform %TypedArray%.prototype.findIndex on a detached ArrayBuffer", TypeError);
+}
+
// kFirstArgumentNotRegExp
test(function() {
"a".startsWith(/a/);
@@ -167,6 +218,26 @@ test(function() {
}, "Method Set.prototype.add called on incompatible receiver [object Array]",
TypeError);
+test(function() {
+ WeakSet.prototype.add.call([]);
+}, "Method WeakSet.prototype.add called on incompatible receiver [object Array]",
+TypeError);
+
+test(function() {
+ WeakSet.prototype.delete.call([]);
+}, "Method WeakSet.prototype.delete called on incompatible receiver [object Array]",
+TypeError);
+
+test(function() {
+ WeakMap.prototype.set.call([]);
+}, "Method WeakMap.prototype.set called on incompatible receiver [object Array]",
+TypeError);
+
+test(function() {
+ WeakMap.prototype.delete.call([]);
+}, "Method WeakMap.prototype.delete called on incompatible receiver [object Array]",
+TypeError);
+
// kNonCallableInInstanceOfCheck
test(function() {
1 instanceof {};
@@ -190,6 +261,24 @@ test(function() {
1 in 1;
}, "Cannot use 'in' operator to search for '1' in 1", TypeError);
+// kInvalidWeakMapKey
+test(function() {
+ new WeakMap([[1, 1]]);
+}, "Invalid value used as weak map key", TypeError);
+
+test(function() {
+ new WeakMap().set(1, 1);
+}, "Invalid value used as weak map key", TypeError);
+
+// kInvalidWeakSetValue
+test(function() {
+ new WeakSet([1]);
+}, "Invalid value used in weak set", TypeError);
+
+test(function() {
+ new WeakSet().add(1);
+}, "Invalid value used in weak set", TypeError);
+
// kIteratorResultNotAnObject
test(function() {
var obj = {};
@@ -281,10 +370,25 @@ test(function() {
// kPropertyNotFunction
test(function() {
+ Map.prototype.set = 0;
+ new Map([[1, 2]]);
+}, "'0' returned for property 'set' of object '#<Map>' is not a function", TypeError);
+
+test(function() {
Set.prototype.add = 0;
- new Set(1);
+ new Set([1]);
}, "'0' returned for property 'add' of object '#<Set>' is not a function", TypeError);
+test(function() {
+ WeakMap.prototype.set = 0;
+ new WeakMap([[{}, 1]]);
+}, "'0' returned for property 'set' of object '#<WeakMap>' is not a function", TypeError);
+
+test(function() {
+ WeakSet.prototype.add = 0;
+ new WeakSet([{}]);
+}, "'0' returned for property 'add' of object '#<WeakSet>' is not a function", TypeError);
+
// kProtoObjectOrNull
test(function() {
Object.setPrototypeOf({}, 1);
@@ -442,6 +546,14 @@ test(function() {
// kThrowInvalidStringLength
test(function() {
+ "a".padEnd(1 << 30);
+}, "Invalid string length", RangeError);
+
+test(function() {
+ "a".padStart(1 << 30);
+}, "Invalid string length", RangeError);
+
+test(function() {
"a".repeat(1 << 30);
}, "Invalid string length", RangeError);
diff --git a/deps/v8/test/mjsunit/migrations.js b/deps/v8/test/mjsunit/migrations.js
index 1828a612dc..cc5bf99341 100644
--- a/deps/v8/test/mjsunit/migrations.js
+++ b/deps/v8/test/mjsunit/migrations.js
@@ -244,10 +244,6 @@ var migrations = [
migr: function(o, i) { o.__proto__ = {}; },
},
{
- name: "%FunctionSetPrototype",
- migr: function(o, i) { %FunctionSetPrototype(o, null); },
- },
- {
name: "modify prototype",
migr: function(o, i) { if (i == 0) o.__proto__.__proto1__ = [,,,5,,,]; },
},
diff --git a/deps/v8/test/mjsunit/mjsunit.isolate b/deps/v8/test/mjsunit/mjsunit.isolate
index 2474d65060..a7d151aee6 100644
--- a/deps/v8/test/mjsunit/mjsunit.isolate
+++ b/deps/v8/test/mjsunit/mjsunit.isolate
@@ -9,6 +9,7 @@
'../../tools/consarray.js',
'../../tools/csvparser.js',
'../../tools/logreader.js',
+ '../../tools/arguments.js',
'../../tools/profile.js',
'../../tools/profile_view.js',
'../../tools/profviz/composer.js',
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index d3db2e2e94..10cf527f30 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -192,6 +192,12 @@ var failWithMessage;
var ArrayPrototypeMap = Array.prototype.map;
var ArrayPrototypePush = Array.prototype.push;
+ var BigIntPrototypeValueOf;
+ // TODO(neis): Remove try-catch once BigInts are enabled by default.
+ try {
+ BigIntPrototypeValueOf = BigInt.prototype.valueOf;
+ } catch(e) {}
+
function classOf(object) {
// Argument must not be null or undefined.
var string = ObjectPrototypeToString.call(object);
@@ -204,6 +210,8 @@ var failWithMessage;
switch (classOf(value)) {
case "Number":
return NumberPrototypeValueOf.call(value);
+ case "BigInt":
+ return BigIntPrototypeValueOf.call(value);
case "String":
return StringPrototypeValueOf.call(value);
case "Boolean":
@@ -220,6 +228,8 @@ var failWithMessage;
switch (typeof value) {
case "string":
return JSON.stringify(value);
+ case "bigint":
+ return String(value) + "n";
case "number":
if (value === 0 && (1 / value) < 0) return "-0";
// FALLTHROUGH.
@@ -233,6 +243,7 @@ var failWithMessage;
var objectClass = classOf(value);
switch (objectClass) {
case "Number":
+ case "BigInt":
case "String":
case "Boolean":
case "Date":
@@ -346,7 +357,8 @@ var failWithMessage;
return true;
}
if (objectClass === "String" || objectClass === "Number" ||
- objectClass === "Boolean" || objectClass === "Date") {
+ objectClass === "BigInt" || objectClass === "Boolean" ||
+ objectClass === "Date") {
if (ValueOf(a) !== ValueOf(b)) return false;
}
return deepObjectEquals(a, b);
@@ -582,8 +594,8 @@ var failWithMessage;
return OptimizationStatusImpl(fun, sync_opt);
}
- assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt,
- skip_if_maybe_deopted = true) {
+ assertUnoptimized = function assertUnoptimized(
+ fun, sync_opt, name_opt, skip_if_maybe_deopted = true) {
if (sync_opt === undefined) sync_opt = "";
var opt_status = OptimizationStatus(fun, sync_opt);
// Tests that use assertUnoptimized() do not make sense if --always-opt
@@ -601,8 +613,8 @@ var failWithMessage;
assertFalse((opt_status & V8OptimizationStatus.kOptimized) !== 0, name_opt);
}
- assertOptimized = function assertOptimized(fun, sync_opt, name_opt,
- skip_if_maybe_deopted = true) {
+ assertOptimized = function assertOptimized(
+ fun, sync_opt, name_opt, skip_if_maybe_deopted = true) {
if (sync_opt === undefined) sync_opt = "";
var opt_status = OptimizationStatus(fun, sync_opt);
// Tests that use assertOptimized() do not make sense if --no-opt
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index b1b6f6aeb1..aa59fb680a 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -158,6 +158,8 @@
'es6/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
'regress/regress-5036': [PASS, ['no_i18n == True', FAIL]],
'es7/regexp-ui-word': [PASS, ['no_i18n == True', FAIL]],
+ 'regexp-modifiers-i18n': [PASS, ['no_i18n == True', FAIL]],
+ 'regexp-modifiers-autogenerated-i18n': [PASS, ['no_i18n == True', FAIL]],
# desugaring regexp property class relies on ICU.
'harmony/regexp-property-*': [PASS, ['no_i18n == True', FAIL]],
@@ -168,9 +170,6 @@
'icu-date-to-string': [PASS, ['no_i18n == True', SKIP]],
'icu-date-lord-howe': [PASS, ['no_i18n == True', SKIP]],
- # Allocates a large array buffer, which TSAN sometimes cannot handle.
- 'regress/regress-599717': [PASS, ['tsan', SKIP]],
-
# TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
'unicodelctest': [PASS, NO_VARIANTS],
'unicodelctest-no-optimization': [PASS, NO_VARIANTS],
@@ -255,6 +254,10 @@
# BUG(chromium:751825): Crashes flakily.
'wasm/js-api': [SKIP],
+
+ # BUG(chromium:773631): Crashes flakily.
+ 'wasm/streaming-trap-location': [SKIP],
+
}], # 'gc_stress == True'
##############################################################################
@@ -365,6 +368,13 @@
# Flaky on ASAN builds: https://bugs.chromium.org/p/v8/issues/detail?id=6305
'regress/regress-430201': [SKIP],
'regress/regress-430201b': [SKIP],
+
+ # Stack overflow on windows.
+ 'es8/regress/regress-624300': [PASS, ['system == windows', SKIP]],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7102
+ # Flaky due to huge string allocation.
+ 'regress/regress-748069': [SKIP],
}], # 'asan == True'
##############################################################################
@@ -387,9 +397,33 @@
'compiler/osr-one': [PASS, SLOW],
'compiler/osr-two': [PASS, SLOW],
'wasm/grow-memory': [PASS, SLOW],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7102
+ # Flaky due to huge string allocation.
+ 'regress/regress-748069': [SKIP],
+ # Slow test.
+ 'regress/regress-779407': [PASS, SLOW],
}], # 'msan == True'
##############################################################################
+['tsan == True', {
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7102
+ # Flaky due to huge string allocation.
+ 'regress/regress-748069': [SKIP],
+
+ # Allocates a large array buffer, which TSAN sometimes cannot handle.
+ 'regress/regress-599717': [SKIP],
+
+ # BUG(v8:7042). Uses a lot of memory.
+ 'regress/regress-678917': [SKIP],
+
+ # BUG(v8:6924). The test uses a lot of memory.
+ 'regress/wasm/regress-694433': [SKIP],
+ 'es6/typedarray': [PASS, NO_VARIANTS],
+ 'regress/regress-752764': [PASS, NO_VARIANTS],
+}], # 'tsan == True'
+
+##############################################################################
['arch == arm or arch == android_arm', {
# Slow tests which times out in debug mode.
@@ -478,6 +512,10 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
+
+ # Requires too much memory on MIPS.
+ 'regress/regress-752764': [SKIP],
+ 'regress/regress-779407': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
@@ -526,6 +564,10 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
+
+ # Requires too much memory on MIPS.
+ 'regress/regress-752764': [SKIP],
+ 'regress/regress-779407': [SKIP],
}], # 'arch == mips64el or arch == mips64'
##############################################################################
@@ -559,6 +601,10 @@
'regress/regress-2185-2': [SKIP],
'readonly': [SKIP],
'array-feedback': [SKIP],
+ 'deopt-recursive-eager-once': [SKIP],
+ 'deopt-recursive-lazy-once': [SKIP],
+ 'deopt-recursive-soft-once': [SKIP],
+ 'code-coverage-block-opt': [SKIP],
# Bounds check triggers forced deopt for array constructors.
'array-constructor-feedback': [SKIP],
@@ -574,6 +620,13 @@
}], # 'deopt_fuzzer == True'
##############################################################################
+['gc_fuzzer == True', {
+ 'regress/regress-336820': [SKIP],
+ 'regress/regress-748069': [SKIP],
+ 'regress/regress-778668': [SKIP],
+}], # 'gc_fuzzer == True'
+
+##############################################################################
['predictable == True', {
# Skip tests that are known to be non-deterministic.
@@ -586,7 +639,12 @@
# take too long with the simulator.
'regress/regress-1132': [SKIP],
- 'regress/regress-740784': [PASS, SLOW],
+ 'regress/regress-740784': [SKIP],
+ 'regress/regress-crbug-482998': [PASS, SLOW],
+ 'regress/regress-91008': [PASS, SLOW],
+ 'harmony/regexp-property-lu-ui': [PASS, SLOW],
+ 'whitespaces': [PASS, SLOW],
+ 'wasm/grow-memory': [PASS, SLOW],
}], # 'arch == ppc and simulator_run'
##############################################################################
@@ -650,6 +708,12 @@
'whitespaces': [SKIP],
}], # variant == wasm_traps
+['variant == wasm_traps and gc_stress == True', {
+ # TODO(eholk): these tests are disabled due to address space exhaustion.
+ # Re-enable them once Wasm address space limits are in place.
+ '*': [SKIP],
+}], # variant == wasm_traps and gc_stress == True
+
##############################################################################
['no_harness', {
# skip assertion tests since the stack trace is broken if mjsunit is
@@ -663,4 +727,24 @@
'compiler/stress-deopt-count-*': [SKIP],
}], # arch != x64 or deopt_fuzzer
+##############################################################################
+# Liftoff is currently only sufficiently implemented on x64 and ia32.
+# TODO(clemensh): Implement on all other platforms (crbug.com/v8/6600).
+['arch != x64 and arch != ia32', {
+ 'wasm/liftoff': [SKIP],
+}], # arch != x64 and arch != ia32
+
+##############################################################################
+# BUG(v8:7138).
+['arch == arm and not simulator_run and variant == wasm_traps', {
+ '*': [SKIP],
+}], # arch == arm and not simulator_run and variant == wasm_traps
+
+##############################################################################
+['variant == liftoff', {
+ # In the liftoff variant, liftoff compilation happens even though the test
+ # does not explicitly enable it.
+ 'wasm/default-liftoff-setting': [SKIP],
+}], # variant == liftoff
+
]
diff --git a/deps/v8/test/mjsunit/object-literal-modified-object-prototype.js b/deps/v8/test/mjsunit/object-literal-modified-object-prototype.js
new file mode 100644
index 0000000000..1bf7d3d36c
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-literal-modified-object-prototype.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestModifedPrototypeInObjectLiteral() {
+ // The prototype chain should not be used if the definition
+ // happens in the object literal.
+
+ Object.defineProperty(Object.prototype, 'c', {
+ get: function () {
+ return 21;
+ },
+ set: function () {
+ }
+ });
+
+ var o = {};
+ o.c = 7;
+ assertEquals(21, o.c);
+
+ var l = {c: 7};
+ assertEquals(7, l.c);
+
+ delete Object.prototype.c;
+})();
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index 79c4df1de9..cbc4f5de93 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -27,14 +27,14 @@
// Flags: --allow-natives-syntax
-function runTest(fn) {
+function runLiteralsTest(fn) {
// The first run creates an copy directly from the boilerplate decsription.
fn();
// The second run will create the boilerplate.
fn();
// The third run might copy literals directly in the stub.
fn();
- // Several invocations more to trigger map deprecations.
+ // Several invocations more to trigger potential map deprecations.
fn();
fn();
fn();
@@ -43,7 +43,25 @@ function runTest(fn) {
fn();
}
-function testBasicPrototype() {
+
+runLiteralsTest(function testEmptyObjectLiteral() {
+ let object = {};
+ assertTrue(%HasFastProperties(object));
+ assertTrue(%HasObjectElements(object ));
+ assertTrue(%HasHoleyElements(object));
+ assertEquals([], Object.keys(object));
+});
+
+runLiteralsTest(function testSingleGetter() {
+ let object = { get foo() { return 1 } };
+ // For now getters create dict mode objects.
+ assertFalse(%HasFastProperties(object));
+ assertTrue(%HasObjectElements(object ));
+ assertTrue(%HasHoleyElements(object));
+ assertEquals(['foo'], Object.keys(object));
+});
+
+runLiteralsTest(function testBasicPrototype() {
var obj = {
a: 7,
b: { x: 12, y: 24 },
@@ -56,11 +74,9 @@ function testBasicPrototype() {
assertEquals('Zebra', obj.c);
assertEquals(Object.getPrototypeOf(obj), Object.prototype);
assertEquals(Object.getPrototypeOf(obj.b), Object.prototype);
-};
-testBasicPrototype();
-testBasicPrototype();
+});
-function testDynamicValue() {
+runLiteralsTest(function testDynamicValue() {
var z = 24;
var obj2 = {
@@ -73,11 +89,9 @@ function testDynamicValue() {
assertEquals(12, obj2.b.x);
assertEquals(24, obj2.b.y);
assertEquals('Zebra', obj2.c);
-}
-testDynamicValue();
-testDynamicValue();
+});
-(function testMultipleInstatiations() {
+runLiteralsTest(function testMultipleInstatiations() {
var arr = [];
for (var i = 0; i < 2; i++) {
arr[i] = {
@@ -90,174 +104,182 @@ testDynamicValue();
arr[0].b.x = 2;
assertEquals(2, arr[0].b.x);
assertEquals(12, arr[1].b.x);
-})();
-
-function testSparseElements() {
- let sa1 = {
- '0': { x: 12, y: 24 },
- '1000000': { x: 1, y: 2 }
- };
- %HeapObjectVerify(sa1);
- assertEquals(['0', '1000000'], Object.keys(sa1));
- assertEquals(12, sa1[0].x);
- assertEquals(24, sa1[0].y);
- assertEquals(['x', 'y'], Object.keys(sa1[0]));
- assertEquals(1, sa1[1000000].x);
- assertEquals(2, sa1[1000000].y);
- assertEquals(['x', 'y'], Object.keys(sa1[1000000]));
- assertEquals(Object.prototype, Object.getPrototypeOf(sa1));
- assertEquals(Object.prototype, Object.getPrototypeOf(sa1[0]));
- assertEquals(Object.prototype, Object.getPrototypeOf(sa1[1000000]));
- return sa1;
-}
-
-let object = testSparseElements();
-// modify the object and rerun the test, ensuring the literal didn't change.
-object[1] = "a";
-object[0].x = -12;
-testSparseElements();
-
-// Test that non-constant literals work.
-var n = new Object();
-
-function makeNonConstantArray() { return [ [ n ] ]; }
-
-var a = makeNonConstantArray();
-var b = makeNonConstantArray();
-assertTrue(a[0][0] === n);
-assertTrue(b[0][0] === n);
-assertFalse(a[0] === b[0]);
-a[0][0].foo = "bar";
-assertEquals("bar", n.foo);
-
-function makeNonConstantObject() { return { a: { b: n } }; }
-
-a = makeNonConstantObject();
-b = makeNonConstantObject();
-assertFalse(a.a === b.a);
-assertTrue(a.a.b === b.a.b);
-a.a.b.bar = "foo";
-assertEquals("foo", n.bar);
-
-// Test that exceptions for regexps still hold.
-function makeRegexpInArray() { return [ [ /a*/, {} ] ]; }
-
-a = makeRegexpInArray();
-b = makeRegexpInArray();
-assertFalse(a[0][0] === b[0][0]);
-assertFalse(a[0][1] === b[0][1]);
-assertEquals(Array.prototype, Object.getPrototypeOf(a));
-assertEquals(Array.prototype, Object.getPrototypeOf(b));
-assertEquals(Array.prototype, Object.getPrototypeOf(a[0]));
-assertEquals(Array.prototype, Object.getPrototypeOf(b[0]));
-assertEquals(RegExp.prototype, Object.getPrototypeOf(a[0][0]));
-assertEquals(RegExp.prototype, Object.getPrototypeOf(b[0][0]));
-
-function makeRegexpInObject() { return { a: { b: /b*/, c: {} } }; }
-a = makeRegexpInObject();
-b = makeRegexpInObject();
-assertFalse(a.a.b === b.a.b);
-assertFalse(a.a.c === b.a.c);
-assertEquals(RegExp.prototype, Object.getPrototypeOf(a.a.b));
-assertEquals(RegExp.prototype, Object.getPrototypeOf(b.a.b));
-
-
-// Test keywords are valid as property names in initializers and dot-access.
-var keywords = [
- "break",
- "case",
- "catch",
- "const",
- "continue",
- "debugger",
- "default",
- "delete",
- "do",
- "else",
- "false",
- "finally",
- "for",
- "function",
- "if",
- "in",
- "instanceof",
- "new",
- "null",
- "return",
- "switch",
- "this",
- "throw",
- "true",
- "try",
- "typeof",
- "var",
- "void",
- "while",
- "with"
-];
-
-function testKeywordProperty(keyword) {
- var exception = false;
- try {
- // Sanity check that what we get is a keyword.
- eval("var " + keyword + " = 42;");
- } catch (e) {
- exception = true;
+});
+
+
+runLiteralsTest(function TestSparseElements() {
+ function createSparseElements() {
+ let sa1 = {
+ '0': { x: 12, y: 24 },
+ '1000000': { x: 1, y: 2 }
+ };
+ %HeapObjectVerify(sa1);
+ assertEquals(['0', '1000000'], Object.keys(sa1));
+ assertEquals(12, sa1[0].x);
+ assertEquals(24, sa1[0].y);
+ assertEquals(['x', 'y'], Object.keys(sa1[0]));
+ assertEquals(1, sa1[1000000].x);
+ assertEquals(2, sa1[1000000].y);
+ assertEquals(['x', 'y'], Object.keys(sa1[1000000]));
+ assertEquals(Object.prototype, Object.getPrototypeOf(sa1));
+ assertEquals(Object.prototype, Object.getPrototypeOf(sa1[0]));
+ assertEquals(Object.prototype, Object.getPrototypeOf(sa1[1000000]));
+ return sa1;
}
- assertTrue(exception);
-
- // Simple property, read and write.
- var x = eval("({" + keyword + ": 42})");
- assertEquals(42, x[keyword]);
- assertEquals(42, eval("x." + keyword));
- eval("x." + keyword + " = 37");
- assertEquals(37, x[keyword]);
- assertEquals(37, eval("x." + keyword));
-
- // Getter/setter property, read and write.
- var y = eval("({value : 42, get " + keyword + "(){return this.value}," +
- " set " + keyword + "(v) { this.value = v; }})");
- assertEquals(42, y[keyword]);
- assertEquals(42, eval("y." + keyword));
- eval("y." + keyword + " = 37");
- assertEquals(37, y[keyword]);
- assertEquals(37, eval("y." + keyword));
-
- // Quoted keyword works is read back by unquoted as well.
- var z = eval("({\"" + keyword + "\": 42})");
- assertEquals(42, z[keyword]);
- assertEquals(42, eval("z." + keyword));
- // Function property, called.
- var was_called;
- function test_call() { this.was_called = true; was_called = true; }
- var w = eval("({" + keyword + ": test_call, was_called: false})");
- eval("w." + keyword + "();");
- assertTrue(was_called);
- assertTrue(w.was_called);
-
- // Function property, constructed.
- function construct() { this.constructed = true; }
- var v = eval("({" + keyword + ": construct})");
- var vo = eval("new v." + keyword + "()");
- assertTrue(vo instanceof construct);
- assertTrue(vo.constructed);
-}
+ let object = createSparseElements();
+ // modify the object and rerun the test, ensuring the literal didn't change.
+ object[1] = "a";
+ object[0].x = -12;
+ createSparseElements();
+});
+
+runLiteralsTest(function TestNonConstLiterals() {
+ // Test that non-constant literals work.
+ var n = new Object();
+
+ function makeNonConstantArray() { return [ [ n ] ]; }
+
+ var a = makeNonConstantArray();
+ var b = makeNonConstantArray();
+ assertTrue(a[0][0] === n);
+ assertTrue(b[0][0] === n);
+ assertFalse(a[0] === b[0]);
+ a[0][0].foo = "bar";
+ assertEquals("bar", n.foo);
+
+ function makeNonConstantObject() { return { a: { b: n } }; }
+
+ a = makeNonConstantObject();
+ b = makeNonConstantObject();
+ assertFalse(a.a === b.a);
+ assertTrue(a.a.b === b.a.b);
+ a.a.b.bar = "foo";
+ assertEquals("foo", n.bar);
+});
+
+runLiteralsTest(function TestRegexpInArray() {
+ // Test that exceptions for regexps still hold.
+ function makeRegexpInArray() { return [ [ /a*/, {} ] ]; }
+
+ let a = makeRegexpInArray();
+ let b = makeRegexpInArray();
+ assertFalse(a[0][0] === b[0][0]);
+ assertFalse(a[0][1] === b[0][1]);
+ assertEquals(Array.prototype, Object.getPrototypeOf(a));
+ assertEquals(Array.prototype, Object.getPrototypeOf(b));
+ assertEquals(Array.prototype, Object.getPrototypeOf(a[0]));
+ assertEquals(Array.prototype, Object.getPrototypeOf(b[0]));
+ assertEquals(RegExp.prototype, Object.getPrototypeOf(a[0][0]));
+ assertEquals(RegExp.prototype, Object.getPrototypeOf(b[0][0]));
+});
+
+runLiteralsTest(function TestRegexpInObject() {
+ function makeRegexpInObject() { return { a: { b: /b*/, c: {} } }; }
+ let a = makeRegexpInObject();
+ let b = makeRegexpInObject();
+ assertFalse(a.a.b === b.a.b);
+ assertFalse(a.a.c === b.a.c);
+ assertEquals(RegExp.prototype, Object.getPrototypeOf(a.a.b));
+ assertEquals(RegExp.prototype, Object.getPrototypeOf(b.a.b));
+});
+
+runLiteralsTest(function TestKeywordProperties() {
+ // Test keywords are valid as property names in initializers and dot-access.
+ var keywords = [
+ "break",
+ "case",
+ "catch",
+ "const",
+ "continue",
+ "debugger",
+ "default",
+ "delete",
+ "do",
+ "else",
+ "false",
+ "finally",
+ "for",
+ "function",
+ "if",
+ "in",
+ "instanceof",
+ "new",
+ "null",
+ "return",
+ "switch",
+ "this",
+ "throw",
+ "true",
+ "try",
+ "typeof",
+ "var",
+ "void",
+ "while",
+ "with"
+ ];
+
+ function testKeywordProperty(keyword) {
+ var exception = false;
+ try {
+ // Sanity check that what we get is a keyword.
+ eval("var " + keyword + " = 42;");
+ } catch (e) {
+ exception = true;
+ }
+ assertTrue(exception);
+
+ // Simple property, read and write.
+ var x = eval("({" + keyword + ": 42})");
+ assertEquals(42, x[keyword]);
+ assertEquals(42, eval("x." + keyword));
+ eval("x." + keyword + " = 37");
+ assertEquals(37, x[keyword]);
+ assertEquals(37, eval("x." + keyword));
+
+ // Getter/setter property, read and write.
+ var y = eval("({value : 42, get " + keyword + "(){return this.value}," +
+ " set " + keyword + "(v) { this.value = v; }})");
+ assertEquals(42, y[keyword]);
+ assertEquals(42, eval("y." + keyword));
+ eval("y." + keyword + " = 37");
+ assertEquals(37, y[keyword]);
+ assertEquals(37, eval("y." + keyword));
+
+ // Quoted keyword works is read back by unquoted as well.
+ var z = eval("({\"" + keyword + "\": 42})");
+ assertEquals(42, z[keyword]);
+ assertEquals(42, eval("z." + keyword));
+
+ // Function property, called.
+ var was_called;
+ function test_call() { this.was_called = true; was_called = true; }
+ var w = eval("({" + keyword + ": test_call, was_called: false})");
+ eval("w." + keyword + "();");
+ assertTrue(was_called);
+ assertTrue(w.was_called);
+
+ // Function property, constructed.
+ function construct() { this.constructed = true; }
+ var v = eval("({" + keyword + ": construct})");
+ var vo = eval("new v." + keyword + "()");
+ assertTrue(vo instanceof construct);
+ assertTrue(vo.constructed);
+ }
-for (var i = 0; i < keywords.length; i++) {
- testKeywordProperty(keywords[i]);
-}
+ for (var i = 0; i < keywords.length; i++) {
+ testKeywordProperty(keywords[i]);
+ }
+});
-function TestSimpleElements() {
+runLiteralsTest(function TestSimpleElements() {
var o = { 0:"zero", 1:"one", 2:"two" };
assertEquals({0:"zero", 1:"one", 2:"two"}, o);
o[0] = 0;
assertEquals({0:0, 1:"one", 2:"two"}, o);
-}
-TestSimpleElements();
-TestSimpleElements();
+});
-function TestNumericNames() {
+runLiteralsTest(function TestNumericNames() {
var o = {
1: 1,
2.: 2,
@@ -278,11 +300,9 @@ function TestNumericNames() {
};
%HeapObjectVerify(o);
assertEquals(['1.2', '1.3'], Object.keys(o));
-}
-TestNumericNames();
-TestNumericNames();
+});
-function TestDictionaryElements() {
+runLiteralsTest(function TestDictionaryElements() {
let o = {1024: true};
assertTrue(%HasDictionaryElements(o));
assertEquals(true, o[1024]);
@@ -300,13 +320,9 @@ function TestDictionaryElements() {
%HeapObjectVerify(o2);
o2[1024] = "test";
assertEquals(["test"], Object.values(o2));
-}
-TestDictionaryElements();
-TestDictionaryElements();
-%OptimizeFunctionOnNextCall(TestDictionaryElements);
-TestDictionaryElements();
+});
-function TestLiteralElementsKind() {
+runLiteralsTest(function TestLiteralElementsKind() {
let o = {0:0, 1:1, 2:2};
assertTrue(%HasObjectElements(o));
assertTrue(%HasHoleyElements(o));
@@ -329,13 +345,9 @@ function TestLiteralElementsKind() {
assertTrue(%HasHoleyElements(o));
assertTrue(%HasDictionaryElements({0xFFFFFF:true}));
-}
-TestLiteralElementsKind();
-TestLiteralElementsKind();
-%OptimizeFunctionOnNextCall(TestLiteralElementsKind);
-TestLiteralElementsKind();
+});
-function TestNonNumberElementValues() {
+runLiteralsTest(function TestNonNumberElementValues() {
var o = {
1: true,
2: false,
@@ -387,15 +399,10 @@ function TestNonNumberElementValues() {
};
%HeapObjectVerify(o4);
assertEquals(['1', '2', '3', '4', 'a', 'b'], Object.keys(o4));
-}
-TestNonNumberElementValues();
-TestNonNumberElementValues();
-TestNonNumberElementValues();
-%OptimizeFunctionOnNextCall(TestNonNumberElementValues);
-TestNonNumberElementValues();
+})
-function numericGetters() {
+runLiteralsTest(function numericGetters() {
function TestNumericNamesGetter(expectedKeys, object) {
assertEquals(expectedKeys, Object.keys(object));
expectedKeys.forEach(function(key) {
@@ -418,11 +425,9 @@ function numericGetters() {
get 1.2() {},
get 1.30() {}
});
-}
-numericGetters();
-numericGetters();
+});
-function numericSetters() {
+runLiteralsTest(function numericSetters() {
function TestNumericNamesSetter(expectedKeys, object) {
assertEquals(expectedKeys, Object.keys(object));
expectedKeys.forEach(function(key) {
@@ -445,12 +450,10 @@ function numericSetters() {
set 1.2(_) {; },
set 1.30(_) {; }
});
-};
+});
-numericSetters();
-numericSetters();
-function TestProxyWithDefinitionInObjectLiteral() {
+runLiteralsTest(function TestProxyWithDefinitionInObjectLiteral() {
// Trap for set should not be used if the definition
// happens in the object literal.
var handler = {
@@ -467,11 +470,9 @@ function TestProxyWithDefinitionInObjectLiteral() {
var l = new Proxy({[prop]: 'my value'}, handler);
assertEquals('my value', l[prop]);
-};
-TestProxyWithDefinitionInObjectLiteral();
-TestProxyWithDefinitionInObjectLiteral();
+});
-(function TestLiteralWithNullProto() {
+runLiteralsTest(function TestLiteralWithNullProto() {
// Assume dictionary usage for simple null prototype literal objects,
// this is equivalent to Object.create(null). Note that on the first call
// the literal boilerplate is initialized, and from then on we use a the
@@ -498,9 +499,9 @@ TestProxyWithDefinitionInObjectLiteral();
testDictModeNullProtoLiteral(() => ({a:1, b:2, __proto__:null}));
testDictModeNullProtoLiteral(() => ({["a"]: 1, __proto__: null}));
testDictModeNullProtoLiteral(() => ({a: Object, __proto__: null}));
-})();
+});
-function testNestedNullProtoLiteral() {
+runLiteralsTest(function testNestedNullProtoLiteral() {
let obj;
obj = { foo: { __proto__:Math, bar:"barValue"}};
assertTrue(%HasFastProperties(obj));
@@ -523,12 +524,10 @@ function testNestedNullProtoLiteral() {
assertEquals("barValue", obj.foo.bar);
obj.foo.bar = "barValue2";
assertEquals("barValue2", obj.foo.bar);
-}
-testNestedNullProtoLiteral();
-testNestedNullProtoLiteral();
+});
-function TestSlowLiteralOptimized() {
+runLiteralsTest(function TestSlowLiteralOptimized() {
function f() {
return {__proto__:null, bar:"barValue"};
}
@@ -548,11 +547,9 @@ function TestSlowLiteralOptimized() {
assertEquals("barValue", obj.bar);
obj.bar = "barValue2";
assertEquals("barValue2", obj.bar);
-};
-TestSlowLiteralOptimized();
-TestSlowLiteralOptimized();
+});
-(function TestLargeDictionaryLiteral() {
+runLiteralsTest(function TestLargeDictionaryLiteral() {
// Create potential large-space object literal.
function createObject() {
// This literal has least kMaxRegularHeapObjectSize / 64 number of
@@ -1568,27 +1565,4 @@ TestSlowLiteralOptimized();
assertFalse(%HasFastProperties(object2));
assertEquals(Object.getPrototypeOf(object2), null);
assertEquals(keys, Object.keys(object2));
-})();
-
-
-(function TestPrototypeInObjectLiteral() {
- // The prototype chain should not be used if the definition
- // happens in the object literal.
-
- Object.defineProperty(Object.prototype, 'c', {
- get: function () {
- return 21;
- },
- set: function () {
- }
- });
-
- var o = {};
- o.c = 7;
- assertEquals(21, o.c);
-
- var l = {c: 7};
- assertEquals(7, l.c);
-
- delete Object.prototype.c;
-})();
+});
diff --git a/deps/v8/test/mjsunit/optimized-filter.js b/deps/v8/test/mjsunit/optimized-filter.js
new file mode 100644
index 0000000000..b13edc3b36
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-filter.js
@@ -0,0 +1,440 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
+// Flags: --opt --no-always-opt
+
+// Unknown field access leads to soft-deopt unrelated to filter, should still
+// lead to correct result.
+(function() {
+ var a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
+ var result = 0;
+ var eagerDeoptInCalled = function(deopt) {
+ var callback = function(v,i,o) {
+ if (i == 13 && deopt) {
+ a.abc = 25;
+ }
+
+ // Ensure that the output array is smaller by shaving off the first
+ // item.
+ if (i === 0) return false;
+ result += v;
+ return true;
+ }
+ return a.filter(callback);
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ var deopt_result = eagerDeoptInCalled(true);
+ assertEquals(a.slice(1), deopt_result);
+ eagerDeoptInCalled();
+ assertEquals(1620, result);
+})();
+
+// Length change detected during loop, must cause properly handled eager deopt.
+(function() {
+ var eagerDeoptInCalled = function(deopt) {
+ var a = [1,2,3,4,5,6,7,8,9,10];
+ var callback = function(v,i,o) {
+ a.length = (i == 5 && deopt) ? 8 : 10;
+ return i == 0 ? false : true;
+ }
+ return a.filter(callback);
+ }
+ var like_a = [1,2,3,4,5,6,7,8,9,10];
+ assertEquals(like_a.slice(1), eagerDeoptInCalled());
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ assertEquals(like_a.slice(1), eagerDeoptInCalled());
+ assertEquals(like_a.slice(1).slice(0, 7), eagerDeoptInCalled(true));
+ eagerDeoptInCalled();
+})();
+
+// Lazy deopt from a callback that changes the input array. Ensure that
+// the value stored in the output array is from the original read.
+(function() {
+ var a = [1, 2, 3, 4, 5];
+ var lazyChanger = function(deopt) {
+ var callback = function(v,i,o) {
+ if (i === 2 && deopt) {
+ a[2] = 100;
+ %DeoptimizeNow();
+ }
+ return true;
+ }
+ return a.filter(callback);
+ }
+ assertEquals(a, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ var deopt_result = lazyChanger(true);
+ assertEquals([1, 2, 3, 4, 5], deopt_result);
+ assertEquals([1, 2, 100, 4, 5], lazyChanger());
+})();
+
+// Lazy deopt from a callback that returns false at the deopt point.
+// Ensure the non-selection is respected in the output array.
+(function() {
+ var a = [1, 2, 3, 4, 5];
+ var lazyDeselection = function(deopt) {
+ var callback = function(v,i,o) {
+ if (i === 2 && deopt) {
+ %DeoptimizeNow();
+ return false;
+ }
+ return true;
+ }
+ return a.filter(callback);
+ }
+ assertEquals(a, lazyDeselection());
+ lazyDeselection();
+ %OptimizeFunctionOnNextCall(lazyDeselection);
+ var deopt_result = lazyDeselection(true);
+ assertEquals([1, 2, 4, 5], deopt_result);
+ assertEquals([1, 2, 3, 4, 5], lazyDeselection());
+})();
+
+
+// Escape analyzed array
+(function() {
+ var result = 0;
+ var eagerDeoptInCalled = function(deopt) {
+ var a_noescape = [0,1,2,3,4,5];
+ var callback = function(v,i,o) {
+ result += v;
+ if (i == 13 && deopt) {
+ a_noescape.length = 25;
+ }
+ return true;
+ }
+ a_noescape.filter(callback);
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ eagerDeoptInCalled(true);
+ eagerDeoptInCalled();
+ assertEquals(75, result);
+})();
+
+// Escape analyzed array where callback function isn't inlined, forcing a lazy
+// deopt with GC that relies on the stashed-away return result fro the lazy
+// deopt being properly stored in a place on the stack that gets GC'ed.
+(function() {
+ var result = 0;
+ var lazyDeopt = function(deopt) {
+ var b = [1,2,3];
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ }
+ gc(); gc();
+ return true;
+ };
+ %NeverOptimizeFunction(callback);
+ b.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+})();
+
+// Lazy deopt from runtime call from inlined callback function.
+(function() {
+ var a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
+ var result = 0;
+ var lazyDeopt = function(deopt) {
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return true;
+ }
+ a.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Lazy deopt from runtime call from non-inline callback function.
+(function() {
+ var a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
+ var result = 0;
+ var lazyDeopt = function(deopt) {
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return true;
+ };
+ %NeverOptimizeFunction(callback);
+ a.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+(function() {
+ var a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
+ var result = 0;
+ var lazyDeopt = function(deopt) {
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 13 && deopt) {
+ %DeoptimizeNow();
+ gc();
+ gc();
+ gc();
+ }
+ return true;
+ }
+ a.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Call to a.filter is done inside a try-catch block and the callback function
+// being called actually throws.
+(function() {
+ var a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
+ var caught = false;
+ var result = 0;
+ var lazyDeopt = function(deopt) {
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 1 && deopt) {
+ throw("a");
+ }
+ return true;
+ }
+ try {
+ a.filter(callback);
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.filter is done inside a try-catch block and the callback function
+// being called actually throws, but the callback is not inlined.
+(function() {
+ var a = [1,2,3,4,5,6,7,8,9,10];
+ var caught = false;
+ var result = 0;
+ var lazyDeopt = function(deopt) {
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 1 && deopt) {
+ throw("a");
+ }
+ return true;
+ };
+ %NeverOptimizeFunction(callback);
+ try {
+ a.filter(callback);
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.filter is done inside a try-catch block and the callback function
+// being called throws into a deoptimized caller function.
+(function TestThrowIntoDeoptimizedOuter() {
+ var a = [1,2,3,4];
+ var lazyDeopt = function(deopt) {
+ var callback = function(v,i,o) {
+ if (i == 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw "some exception";
+ }
+ return true;
+ };
+ %NeverOptimizeFunction(callback);
+ var result = 0;
+ try {
+ result = a.filter(callback);
+ } catch (e) {
+ assertEquals("some exception", e)
+ result = "nope";
+ }
+ return result;
+ }
+ assertEquals([1,2,3,4], lazyDeopt(false));
+ assertEquals([1,2,3,4], lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+ assertEquals("nope", lazyDeopt(true));
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertEquals([1,2,3,4], lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+})();
+
+// An error generated inside the callback includes filter in it's
+// stack trace.
+(function() {
+ var re = /Array\.filter/;
+ var lazyDeopt = function(deopt) {
+ var b = [1,2,3];
+ var result = 0;
+ var callback = function(v,i,o) {
+ result += v;
+ if (i == 1) {
+ var e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return true;
+ };
+ var o = [1,2,3];
+ b.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// An error generated inside a non-inlined callback function also
+// includes filter in it's stack trace.
+(function() {
+ var re = /Array\.filter/;
+ var lazyDeopt = function(deopt) {
+ var b = [1,2,3];
+ var result = 0;
+ var callback = function(v,i,o) {
+ result += v;
+ if (i == 1) {
+ var e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return true;
+ };
+ %NeverOptimizeFunction(callback);
+ var o = [1,2,3];
+ b.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// An error generated inside a recently deoptimized callback function
+// includes filter in it's stack trace.
+(function() {
+ var re = /Array\.filter/;
+ var lazyDeopt = function(deopt) {
+ var b = [1,2,3];
+ var result = 0;
+ var callback = function(v,i,o) {
+ result += v;
+ if (i == 1) {
+ %DeoptimizeNow();
+ } else if (i == 2) {
+ var e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return true;
+ };
+ var o = [1,2,3];
+ b.filter(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// Verify that various exception edges are handled appropriately.
+// The thrown Error object should always indicate it was created from
+// a filter call stack.
+(function() {
+ var re = /Array\.filter/;
+ var a = [1,2,3];
+ var result = 0;
+ var lazyDeopt = function() {
+ var callback = function(v,i,o) {
+ result += i;
+ if (i == 1) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw new Error();
+ }
+ return true;
+ };
+ a.filter(callback);
+ }
+ assertThrows(() => lazyDeopt());
+ assertThrows(() => lazyDeopt());
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
+
+// Messing with the Array species constructor causes deoptimization.
+(function() {
+ var result = 0;
+ var a = [1,2,3];
+ var species_breakage = function() {
+ var callback = function(v,i,o) {
+ result += v;
+ return true;
+ }
+ a.filter(callback);
+ }
+ species_breakage();
+ species_breakage();
+ %OptimizeFunctionOnNextCall(species_breakage);
+ species_breakage();
+ a.constructor = {};
+ a.constructor[Symbol.species] = function() {};
+ species_breakage();
+ assertUnoptimized(species_breakage);
+ assertEquals(24, result);
+})();
diff --git a/deps/v8/test/mjsunit/optimized-map.js b/deps/v8/test/mjsunit/optimized-map.js
index 55a87bb5df..d8613e0300 100644
--- a/deps/v8/test/mjsunit/optimized-map.js
+++ b/deps/v8/test/mjsunit/optimized-map.js
@@ -101,6 +101,27 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
lazyDeopt();
})();
+// Escape analyzed array where callback function isn't inlined, forcing a lazy
+// deopt. Check that the result of the callback function is passed correctly
+// to the lazy deopt and that the final result of map is as expected.
+(function() {
+ var lazyDeopt = function(deopt) {
+ var b = [1,2,3];
+ var callback = function(v,i,o) {
+ if (i == 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ }
+ return 2 * v;
+ };
+ %NeverOptimizeFunction(callback);
+ return b.map(callback);
+ }
+ assertEquals([2,4,6], lazyDeopt());
+ assertEquals([2,4,6], lazyDeopt());
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertEquals([2,4,6], lazyDeopt(true));
+})();
+
// Lazy deopt from runtime call from inlined callback function.
(function() {
var result = 0;
@@ -415,6 +436,38 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
assertOptimized(to_fast);
})();
+// TurboFan specializes on number results, ensure the code path is
+// tested.
+(function() {
+ var a = [1, 2, 3];
+ function double_results() {
+ // TurboFan recognizes the result is a double.
+ var callback = v => v + 0.5;
+ return a.map(callback);
+ }
+ double_results();
+ double_results();
+ %OptimizeFunctionOnNextCall(double_results);
+ double_results();
+ assertEquals(1.5, double_results()[0]);
+})();
+
+// TurboFan specializes on non-number results, ensure the code path is
+// tested.
+(function() {
+ var a = [1, 2, 3];
+ function string_results() {
+ // TurboFan recognizes the result is a string.
+ var callback = v => "hello" + v.toString();
+ return a.map(callback);
+ }
+ string_results();
+ string_results();
+ %OptimizeFunctionOnNextCall(string_results);
+ string_results();
+ assertEquals("hello1", string_results()[0]);
+})();
+
// Messing with the Array species constructor causes deoptimization.
(function() {
var result = 0;
@@ -436,3 +489,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
assertUnoptimized(species_breakage);
assertEquals(24, result);
})();
+
+/////////////////////////////////////////////////////////////////////////
+//
+// Any tests added below species_breakage won't test optimized map calls
+// because the array species constructor change disables inlining of
+// Array.prototype.map across the isolate.
+//
+/////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js b/deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js
new file mode 100644
index 0000000000..18e086c339
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js
@@ -0,0 +1,81 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --regexp-mode-modifiers --harmony-regexp-property
+
+// These regexps are just grepped out of the other tests we already have
+// and the syntax changed from out-of-line i flag to inline i flag.
+
+// These tests won't all run on the noi18n build of V8.
+
+assertTrue(/(?i)\u00e5/u.test("\u00c5"));
+assertTrue(/(?i)\u00e5/u.test("\u00e5"));
+assertTrue(/(?i)\u00c5/u.test("\u00e5"));
+assertTrue(/(?i)\u00c5/u.test("\u00c5"));
+assertTrue(/(?i)\u212b/u.test("\u212b"));
+assertFalse(/(?i)\u00df/u.test("SS"));
+assertFalse(/(?i)\u1f8d/u.test("\u1f05\u03b9"));
+assertTrue(/(?i)\u1f6b/u.test("\u1f63"));
+assertTrue(/(?i)\u00e5/u.test("\u212b"));
+assertTrue(/(?i)\u00e5/u.test("\u00c5"));
+assertTrue(/(?i)\u00e5/u.test("\u00e5"));
+assertTrue(/(?i)\u00e5/u.test("\u212b"));
+assertTrue(/(?i)\u00c5/u.test("\u00e5"));
+assertTrue(/(?i)\u00c5/u.test("\u212b"));
+assertTrue(/(?i)\u00c5/u.test("\u00c5"));
+assertTrue(/(?i)\u212b/u.test("\u00c5"));
+assertTrue(/(?i)\u212b/u.test("\u00e5"));
+assertTrue(/(?i)\u212b/u.test("\u212b"));
+assertTrue(/(?i)\u{10400}/u.test("\u{10428}"));
+assertTrue(/(?i)\ud801\udc00/u.test("\u{10428}"));
+assertTrue(/(?i)[\u{10428}]/u.test("\u{10400}"));
+assertTrue(/(?i)[\ud801\udc28]/u.test("\u{10400}"));
+assertFalse(/(?i)\u00df/u.test("SS"));
+assertFalse(/(?i)\u1f8d/u.test("\u1f05\u03b9"));
+assertTrue(/(?i)\u1f8d/u.test("\u1f85"));
+assertTrue(/(?i)\u1f6b/u.test("\u1f63"));
+assertTrue(/(?i)\u00e5\u00e5\u00e5/u.test("\u212b\u00e5\u00c5"));
+assertTrue(/(?i)AB\u{10400}/u.test("ab\u{10428}"));
+assertTrue(/(?i)\w/u.test('\u017F'));
+assertTrue(/(?i)\w/u.test('\u212A'));
+assertFalse(/(?i)\W/u.test('\u017F'));
+assertFalse(/(?i)\W/u.test('\u212A'));
+assertFalse(/(?i)\W/u.test('s'));
+assertFalse(/(?i)\W/u.test('S'));
+assertFalse(/(?i)\W/u.test('K'));
+assertFalse(/(?i)\W/u.test('k'));
+assertTrue(/(?i)[\w]/u.test('\u017F'));
+assertTrue(/(?i)[\w]/u.test('\u212A'));
+assertFalse(/(?i)[\W]/u.test('\u017F'));
+assertFalse(/(?i)[\W]/u.test('\u212A'));
+assertFalse(/(?i)[\W]/u.test('s'));
+assertFalse(/(?i)[\W]/u.test('S'));
+assertFalse(/(?i)[\W]/u.test('K'));
+assertFalse(/(?i)[\W]/u.test('k'));
+assertTrue(/(?i)\b/u.test('\u017F'));
+assertTrue(/(?i)\b/u.test('\u212A'));
+assertTrue(/(?i)\b/u.test('s'));
+assertTrue(/(?i)\b/u.test('S'));
+assertFalse(/(?i)\B/u.test('\u017F'));
+assertFalse(/(?i)\B/u.test('\u212A'));
+assertFalse(/(?i)\B/u.test('s'));
+assertFalse(/(?i)\B/u.test('S'));
+assertFalse(/(?i)\B/u.test('K'));
+assertFalse(/(?i)\B/u.test('k'));
+assertTrue(/(?i)\p{Ll}/u.test("a"));
+assertTrue(/(?i)\p{Ll}/u.test("\u{118D4}"));
+assertTrue(/(?i)\p{Ll}/u.test("A"));
+assertTrue(/(?i)\p{Ll}/u.test("\u{118B4}"));
+assertTrue(/(?i)\P{Ll}/u.test("a"));
+assertTrue(/(?i)\P{Ll}/u.test("\u{118D4}"));
+assertTrue(/(?i)\P{Ll}/u.test("A"));
+assertTrue(/(?i)\P{Ll}/u.test("\u{118B4}"));
+assertTrue(/(?i)\p{Lu}/u.test("a"));
+assertTrue(/(?i)\p{Lu}/u.test("\u{118D4}"));
+assertTrue(/(?i)\p{Lu}/u.test("A"));
+assertTrue(/(?i)\p{Lu}/u.test("\u{118B4}"));
+assertTrue(/(?i)\P{Lu}/u.test("a"));
+assertTrue(/(?i)\P{Lu}/u.test("\u{118D4}"));
+assertTrue(/(?i)\P{Lu}/u.test("A"));
+assertTrue(/(?i)\P{Lu}/u.test("\u{118B4}"));
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js b/deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js
new file mode 100644
index 0000000000..e74ea8b384
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js
@@ -0,0 +1,74 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --regexp-mode-modifiers --harmony-regexp-property
+
+// These regexps are just grepped out of the other tests we already have
+// and the syntax changed from out-of-line i flag to inline i flag.
+
+assertFalse(/(?i)x(...)\1/.test("x\u03a3\u03c2\u03c3\u03c2\u03c3"));
+assertTrue(/(?i)\u03a3((?:))\1\1x/.test("\u03c2x"), "backref-UC16-empty");
+assertTrue(/(?i)x(?:...|(...))\1x/.test("x\u03a3\u03c2\u03c3x"));
+assertTrue(/(?i)x(?:...|(...))\1x/.test("x\u03c2\u03c3\u039b\u03a3\u03c2\u03bbx"));
+assertFalse(/(?i)\xc1/.test('fooA'), "quickcheck-uc16-pattern-ascii-subject");
+assertFalse(/(?i)x(...)\1/.test("xaaaaa"), "backref-ASCII-short");
+assertTrue(/(?i)x((?:))\1\1x/.test("xx"), "backref-ASCII-empty");
+assertTrue(/(?i)x(?:...|(...))\1x/.test("xabcx"), "backref-ASCII-uncaptured");
+assertTrue(/(?i)x(?:...|(...))\1x/.test("xabcABCx"), "backref-ASCII-backtrack");
+assertFalse(/(?i)f/.test('b'));
+assertFalse(/(?i)[abc]f/.test('x'));
+assertFalse(/(?i)[abc]f/.test('xa'));
+assertFalse(/(?i)[abc]</.test('x'));
+assertFalse(/(?i)[abc]</.test('xa'));
+assertFalse(/(?i)f[abc]/.test('x'));
+assertFalse(/(?i)f[abc]/.test('xa'));
+assertFalse(/(?i)<[abc]/.test('x'));
+assertFalse(/(?i)<[abc]/.test('xa'));
+assertFalse(/(?i)[\u00e5]/.test("\u212b"));
+assertFalse(/(?i)[\u212b]/.test("\u00e5\u1234"));
+assertFalse(/(?i)[\u212b]/.test("\u00e5"));
+assertFalse(/(?i)\u{10400}/.test("\u{10428}"));
+assertFalse(/(?i)[\u00e5]/.test("\u212b"));
+assertFalse(/(?i)[\u212b]/.test("\u00e5\u1234"));
+assertFalse(/(?i)[\u212b]/.test("\u00e5"));
+assertFalse(/(?i)\u{10400}/.test("\u{10428}"));
+assertTrue(/(?i)[@-A]/.test("a"));
+assertTrue(/(?i)[@-A]/.test("A"));
+assertTrue(/(?i)[@-A]/.test("@"));
+assertFalse(/(?i)[¿-À]/.test('¾'));
+assertTrue(/(?i)[¿-À]/.test('¿'));
+assertTrue(/(?i)[¿-À]/.test('À'));
+assertTrue(/(?i)[¿-À]/.test('à'));
+assertFalse(/(?i)[¿-À]/.test('á'));
+assertFalse(/(?i)[¿-À]/.test('Á'));
+assertFalse(/(?i)[¿-À]/.test('Á'));
+assertFalse(/(?i)[Ö-×]/.test('Õ'));
+assertTrue(/(?i)[Ö-×]/.test('Ö'));
+assertTrue(/(?i)[Ö-×]/.test('ö'));
+assertTrue(/(?i)[Ö-×]/.test('×'));
+assertFalse(/(?i)[Ö-×]/.test('Ø'));
+assertTrue(/(?i)(a[\u1000A])+/.test('aa'));
+assertTrue(/(?i)\u0178/.test('\u00ff'));
+assertTrue(/(?i)\u039c/.test('\u00b5'));
+assertTrue(/(?i)\u039c/.test('\u03bc'));
+assertTrue(/(?i)\u00b5/.test('\u03bc'));
+assertTrue(/(?i)[\u039b-\u039d]/.test('\u00b5'));
+assertFalse(/(?i)[^\u039b-\u039d]/.test('\u00b5'));
+
+assertTrue(/(?m)^bar/.test("bar"));
+assertTrue(/(?m)^bar/.test("bar\nfoo"));
+assertTrue(/(?m)^bar/.test("foo\nbar"));
+assertTrue(/(?m)bar$/.test("bar"));
+assertTrue(/(?m)bar$/.test("bar\nfoo"));
+assertTrue(/(?m)bar$/.test("foo\nbar"));
+assertFalse(/(?m)^bxr/.test("bar"));
+assertFalse(/(?m)^bxr/.test("bar\nfoo"));
+assertFalse(/(?m)^bxr/.test("foo\nbar"));
+assertFalse(/(?m)bxr$/.test("bar"));
+assertFalse(/(?m)bxr$/.test("bar\nfoo"));
+assertFalse(/(?m)bxr$/.test("foo\nbar"));
+assertTrue(/(?m)^.*$/.test("\n"));
+assertTrue(/(?m)^([()]|.)*$/.test("()\n()"));
+assertTrue(/(?m)^([()]|.)*$/.test("()\n"));
+assertTrue(/(?m)^[()]*$/.test("()\n."));
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-dotall.js b/deps/v8/test/mjsunit/regexp-modifiers-dotall.js
new file mode 100644
index 0000000000..70c379c2e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-modifiers-dotall.js
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --regexp-mode-modifiers
+
+// S flag switches dotall mode on and off. Combine with i flag changes to test
+// the parser.
+test(/.(?s).(?i-s).a(?-i)a/);
+test(/.(?s:.)(?i:.a)a/);
+test(/.(?s).(?i-s).a(?-i)a/u);
+test(/.(?s:.)(?i:.a)a/u);
+
+// m flag makes no difference
+test(/.(?sm).(?i-s).a(?-i)a/);
+test(/.(?s:.)(?i:.a)a/);
+test(/.(?sm).(?im-s).a(?m-i)a/u);
+test(/.(?s:.)(?i:.a)a/u);
+
+function test(re) {
+ assertTrue(re.test("...aa"));
+ assertTrue(re.test(".\n.aa"));
+ assertTrue(re.test(".\n.Aa"));
+ assertFalse(re.test("\n\n.Aa"));
+ assertFalse(re.test(".\n\nAa"));
+ assertFalse(re.test(".\n.AA"));
+}
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-i18n.js b/deps/v8/test/mjsunit/regexp-modifiers-i18n.js
new file mode 100644
index 0000000000..e9ffe05ac9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-modifiers-i18n.js
@@ -0,0 +1,138 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --regexp-mode-modifiers
+
+// These tests won't all run on the noi18n build of V8.
+
+aa(/(a)(?i)\1/u);
+aa(/([az])(?i)\1/u);
+
+function aa(re) {
+ assertTrue(re.test("aa"));
+ assertTrue(re.test("aA"));
+ assertFalse(re.test("Aa"));
+ assertFalse(re.test("AA"));
+}
+
+aai(/(a)(?-i)\1/iu);
+aai(/([az])(?-i)\1/iu);
+
+function aai(re) {
+ assertTrue(re.test("aa"));
+ assertFalse(re.test("aA"));
+ assertFalse(re.test("Aa"));
+ assertTrue(re.test("AA"));
+}
+
+abcd(/a(b(?i)c)d/u);
+abcd(/[aw]([bx](?i)[cy])[dz]/u);
+
+function abcd(re) {
+ assertTrue(re.test("abcd"));
+ assertFalse(re.test("abcD"));
+ assertTrue(re.test("abCd"));
+ assertFalse(re.test("abCD"));
+ assertFalse(re.test("aBcd"));
+ assertFalse(re.test("aBcD"));
+ assertFalse(re.test("aBCd"));
+ assertFalse(re.test("aBCD"));
+ assertFalse(re.test("Abcd"));
+ assertFalse(re.test("AbcD"));
+ assertFalse(re.test("AbCd"));
+ assertFalse(re.test("AbCD"));
+ assertFalse(re.test("ABcd"));
+ assertFalse(re.test("ABcD"));
+ assertFalse(re.test("ABCd"));
+ assertFalse(re.test("ABCD"));
+}
+
+abcdei(/a(b(?-i)c)d/iu);
+abcdei(/[aw]([bx](?-i)[cy])[dz]/iu);
+
+function abcdei(re) {
+ assertTrue(re.test("abcd"));
+ assertTrue(re.test("abcD"));
+ assertFalse(re.test("abCd"));
+ assertFalse(re.test("abCD"));
+ assertTrue(re.test("aBcd"));
+ assertTrue(re.test("aBcD"));
+ assertFalse(re.test("aBCd"));
+ assertFalse(re.test("aBCD"));
+ assertTrue(re.test("Abcd"));
+ assertTrue(re.test("AbcD"));
+ assertFalse(re.test("AbCd"));
+ assertFalse(re.test("AbCD"));
+ assertTrue(re.test("ABcd"));
+ assertTrue(re.test("ABcD"));
+ assertFalse(re.test("ABCd"));
+ assertFalse(re.test("ABCD"));
+}
+
+abc(/a(?i:b)c/u);
+abc(/[ax](?i:[by])[cz]/u);
+
+function abc(re) {
+ assertTrue(re.test("abc"));
+ assertFalse(re.test("abC"));
+ assertTrue(re.test("aBc"));
+ assertFalse(re.test("aBC"));
+ assertFalse(re.test("Abc"));
+ assertFalse(re.test("AbC"));
+ assertFalse(re.test("ABc"));
+ assertFalse(re.test("ABC"));
+}
+
+abci(/a(?-i:b)c/iu);
+abci(/[ax](?-i:[by])[cz]/iu);
+
+function abci(re) {
+ assertTrue(re.test("abc"));
+ assertTrue(re.test("abC"));
+ assertFalse(re.test("aBc"));
+ assertFalse(re.test("aBC"));
+ assertTrue(re.test("Abc"));
+ assertTrue(re.test("AbC"));
+ assertFalse(re.test("ABc"));
+ assertFalse(re.test("ABC"));
+}
+
+// The following tests are taken from test/mjsunit/es7/regexp-ui-word.js but
+// using inline syntax instead of the global /i flag.
+assertTrue(/(?i)\w/u.test('\u017F'));
+assertTrue(/(?i)\w/u.test('\u212A'));
+assertFalse(/(?i)\W/u.test('\u017F'));
+assertFalse(/(?i)\W/u.test('\u212A'));
+assertFalse(/(?i)\W/u.test('s'));
+assertFalse(/(?i)\W/u.test('S'));
+assertFalse(/(?i)\W/u.test('K'));
+assertFalse(/(?i)\W/u.test('k'));
+
+assertTrue(/(?i)[\w]/u.test('\u017F'));
+assertTrue(/(?i)[\w]/u.test('\u212A'));
+assertFalse(/(?i)[\W]/u.test('\u017F'));
+assertFalse(/(?i)[\W]/u.test('\u212A'));
+assertFalse(/(?i)[\W]/u.test('s'));
+assertFalse(/(?i)[\W]/u.test('S'));
+assertFalse(/(?i)[\W]/u.test('K'));
+assertFalse(/(?i)[\W]/u.test('k'));
+
+assertTrue(/(?i)\b/u.test('\u017F'));
+assertFalse(/(?i:)\b/u.test('\u017F'));
+assertTrue(/(?i)\b/u.test('\u212A'));
+assertFalse(/(?i:)\b/u.test('\u212A'));
+assertTrue(/(?i)\b/u.test('s'));
+assertTrue(/(?i)\b/u.test('S'));
+assertFalse(/(?i)\B/u.test('\u017F'));
+assertFalse(/(?i)\B/u.test('\u212A'));
+assertFalse(/(?i)\B/u.test('s'));
+assertFalse(/(?i)\B/u.test('S'));
+assertFalse(/(?i)\B/u.test('K'));
+assertFalse(/(?i)\B/u.test('k'));
+
+assertEquals(["abcd\u017F", "\u017F"], /a.*?(.)(?i)\b/u.exec('abcd\u017F cd'));
+assertEquals(["abcd\u212A", "\u212A"], /a.*?(.)(?i)\b/u.exec('abcd\u212A cd'));
+
+assertEquals(["a\u017F", "\u017F"], /a.*?(?i:\B)(.)/u.exec('a\u017F '));
+assertEquals(["a\u212A", "\u212A"], /a.*?(?i:\B)(.)/u.exec('a\u212A '));
diff --git a/deps/v8/test/mjsunit/regexp-modifiers.js b/deps/v8/test/mjsunit/regexp-modifiers.js
new file mode 100644
index 0000000000..7e76717912
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-modifiers.js
@@ -0,0 +1,146 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --regexp-mode-modifiers
+
+aa(/(a)(?i)\1/);
+aa(/([az])(?i)\1/);
+
+function aa(re) {
+ assertTrue(re.test("aa"));
+ assertTrue(re.test("aA"));
+ assertFalse(re.test("Aa"));
+ assertFalse(re.test("AA"));
+}
+
+aai(/(a)(?-i)\1/i);
+aai(/([az])(?-i)\1/i);
+
+function aai(re) {
+ assertTrue(re.test("aa"));
+ assertFalse(re.test("aA"));
+ assertFalse(re.test("Aa"));
+ assertTrue(re.test("AA"));
+}
+
+abcd(/a(b(?i)c)d/);
+abcd(/[aw]([bx](?i)[cy])[dz]/);
+
+function abcd(re) {
+ assertTrue(re.test("abcd"));
+ assertFalse(re.test("abcD"));
+ assertTrue(re.test("abCd"));
+ assertFalse(re.test("abCD"));
+ assertFalse(re.test("aBcd"));
+ assertFalse(re.test("aBcD"));
+ assertFalse(re.test("aBCd"));
+ assertFalse(re.test("aBCD"));
+ assertFalse(re.test("Abcd"));
+ assertFalse(re.test("AbcD"));
+ assertFalse(re.test("AbCd"));
+ assertFalse(re.test("AbCD"));
+ assertFalse(re.test("ABcd"));
+ assertFalse(re.test("ABcD"));
+ assertFalse(re.test("ABCd"));
+ assertFalse(re.test("ABCD"));
+}
+
+abcdei(/a(b(?-i)c)d/i);
+abcdei(/[aw]([bx](?-i)[cy])[dz]/i);
+
+function abcdei(re) {
+ assertTrue(re.test("abcd"));
+ assertTrue(re.test("abcD"));
+ assertFalse(re.test("abCd"));
+ assertFalse(re.test("abCD"));
+ assertTrue(re.test("aBcd"));
+ assertTrue(re.test("aBcD"));
+ assertFalse(re.test("aBCd"));
+ assertFalse(re.test("aBCD"));
+ assertTrue(re.test("Abcd"));
+ assertTrue(re.test("AbcD"));
+ assertFalse(re.test("AbCd"));
+ assertFalse(re.test("AbCD"));
+ assertTrue(re.test("ABcd"));
+ assertTrue(re.test("ABcD"));
+ assertFalse(re.test("ABCd"));
+ assertFalse(re.test("ABCD"));
+}
+
+abc(/a(?i:b)c/);
+abc(/[ax](?i:[by])[cz]/);
+
+function abc(re) {
+ assertTrue(re.test("abc"));
+ assertFalse(re.test("abC"));
+ assertTrue(re.test("aBc"));
+ assertFalse(re.test("aBC"));
+ assertFalse(re.test("Abc"));
+ assertFalse(re.test("AbC"));
+ assertFalse(re.test("ABc"));
+ assertFalse(re.test("ABC"));
+}
+
+abci(/a(?-i:b)c/i);
+abci(/[ax](?-i:[by])[cz]/i);
+
+function abci(re) {
+ assertTrue(re.test("abc"));
+ assertTrue(re.test("abC"));
+ assertFalse(re.test("aBc"));
+ assertFalse(re.test("aBC"));
+ assertTrue(re.test("Abc"));
+ assertTrue(re.test("AbC"));
+ assertFalse(re.test("ABc"));
+ assertFalse(re.test("ABC"));
+}
+
+assertThrows(() => new RegExp("foo(?i:"));
+assertThrows(() => new RegExp("foo(?--i)"));
+assertThrows(() => new RegExp("foo(?i-i)"));
+
+assertThrows(() => new RegExp("foo(?m:"));
+assertThrows(() => new RegExp("foo(?--m)"));
+assertThrows(() => new RegExp("foo(?m-m)"));
+
+var re = /^\s(?m)^.$\s(?-m)$/;
+assertTrue(re.test("\n.\n"));
+assertFalse(re.test(" .\n"));
+assertFalse(re.test("\n. "));
+assertFalse(re.test(" . "));
+assertFalse(re.test("_\n.\n"));
+assertFalse(re.test("\n.\n_"));
+assertFalse(re.test("_\n.\n_"));
+
+assertEquals(["abcd", "d"], /a.*?(.)(?i)\b/.exec('abcd\u017F cd'));
+assertEquals(["abcd", "d"], /a.*?(.)(?i)\b/.exec('abcd\u212A cd'));
+
+assertEquals(["a\u017F ", " "], /a.*?(?i)\B(.)/.exec('a\u017F '));
+assertEquals(["a\u212A ", " "], /a.*?(?i)\B(.)/.exec('a\u212A '));
+
+// Nested flags.
+var res = [
+ /^a(?i:b(?-i:c(?i:d)e)f)g$/,
+ /^a(?i:b(?-i)c(?i)d(?-i)e(?i)f)g$/,
+ /^(?-i:a(?i:b(?-i:c(?i:d)e)f)g)$/i,
+ /^(?-i:a(?i:b(?-i)c(?i)d(?-i)e(?i)f)g)$/i,
+];
+
+for (var idx = 0; idx < res.length; idx++) {
+ var re = res[idx];
+ for (var i = 0; i < 128; i++) {
+ var s = (i & 1) ? "A" : "a";
+ s += (i & 2) ? "B" : "b";
+ s += (i & 4) ? "C" : "c";
+ s += (i & 8) ? "D" : "d";
+ s += (i & 16) ? "E" : "e";
+ s += (i & 32) ? "F" : "f";
+ s += (i & 64) ? "G" : "g";
+ if ((i & (1 | 4 | 16 | 64)) != 0) {
+ assertFalse(re.test(s), s);
+ } else {
+ assertTrue(re.test(s), s);
+ }
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1257.js b/deps/v8/test/mjsunit/regress/regress-1257.js
index c20fb86068..c5ed14dd3b 100644
--- a/deps/v8/test/mjsunit/regress/regress-1257.js
+++ b/deps/v8/test/mjsunit/regress/regress-1257.js
@@ -1,29 +1,8 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
function g(y) { assertEquals(y, 12); }
@@ -39,14 +18,11 @@ function foo () {
l = 0;
break;
case 0:
- // Loop for to hit OSR.
- if (cnt++ < 10000000) {
- l = 0;
- break;
- } else {
+ if (cnt++ == 5) {
+ %OptimizeOsr();
l = 1;
- break;
}
+ break;
case 1:
// This case will contain deoptimization
// because it has no type feedback.
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 437401e487..8e539fffa4 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -40,6 +40,7 @@ function f() {
// feedback.
var opt_status = %GetOptimizationStatus(f);
assertTrue(
+ (opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0 ||
(opt_status & V8OptimizationStatus.kTopmostFrameIsTurboFanned) !== 0);
} while (false);
} while (false);
@@ -66,8 +67,10 @@ function g() {
do {
for (var i = 0; i < 10; i++) %OptimizeOsr();
var opt_status = %GetOptimizationStatus(g);
- assertTrue((opt_status
- & V8OptimizationStatus.kTopmostFrameIsTurboFanned) !== 0);
+ assertTrue(
+ (opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0 ||
+ (opt_status &
+ V8OptimizationStatus.kTopmostFrameIsTurboFanned) !== 0);
} while (false);
} while (false);
} while (false);
diff --git a/deps/v8/test/mjsunit/regress/regress-353004.js b/deps/v8/test/mjsunit/regress/regress-353004.js
index 233a0f11d1..fe19354d8b 100644
--- a/deps/v8/test/mjsunit/regress/regress-353004.js
+++ b/deps/v8/test/mjsunit/regress/regress-353004.js
@@ -6,12 +6,12 @@
var buffer1 = new ArrayBuffer(100 * 1024);
-var array1 = new Uint8Array(buffer1, {valueOf : function() {
- %ArrayBufferNeuter(buffer1);
- return 0;
-}});
-
-assertEquals(0, array1.length);
+assertThrows(function() {
+ var array1 = new Uint8Array(buffer1, {valueOf : function() {
+ %ArrayBufferNeuter(buffer1);
+ return 0;
+ }});
+}, TypeError);
var buffer2 = new ArrayBuffer(100 * 1024);
@@ -20,8 +20,21 @@ assertThrows(function() {
%ArrayBufferNeuter(buffer2);
return 100 * 1024;
}});
-}, RangeError);
+}, TypeError);
+let convertedOffset = false;
+let convertedLength = false;
+assertThrows(() =>
+ new Uint8Array(buffer1, {valueOf : function() {
+ convertedOffset = true;
+ return 0;
+ }}, {valueOf : function() {
+ convertedLength = true;
+ %ArrayBufferNeuter(buffer1);
+ return 0;
+ }}), TypeError);
+assertTrue(convertedOffset);
+assertTrue(convertedLength);
var buffer3 = new ArrayBuffer(100 * 1024 * 1024);
var dataView1 = new DataView(buffer3, {valueOf : function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-5902.js b/deps/v8/test/mjsunit/regress/regress-5902.js
index 54cb90ee8a..6054104570 100644
--- a/deps/v8/test/mjsunit/regress/regress-5902.js
+++ b/deps/v8/test/mjsunit/regress/regress-5902.js
@@ -51,12 +51,5 @@ Object.getOwnPropertyNames(global).forEach(function(name) {
`${name}.prototype.constructor`);
});
-// This is the current set of dictionary mode objects.
-// Remove items as we fix them. See issue 5902.
-assertEquals(
- [
- 'Error.prototype',
- 'EvalError.prototype', 'RangeError.prototype', 'ReferenceError.prototype',
- 'SyntaxError.prototype', 'TypeError.prototype', 'URIError.prototype'
- ],
- log);
+// There should be no dictionary mode builtin objects.
+assertEquals([], log);
diff --git a/deps/v8/test/mjsunit/regress/regress-599717.js b/deps/v8/test/mjsunit/regress/regress-599717.js
index 51831860e9..94a41ce4d3 100644
--- a/deps/v8/test/mjsunit/regress/regress-599717.js
+++ b/deps/v8/test/mjsunit/regress/regress-599717.js
@@ -15,7 +15,7 @@ function __f_61(stdlib, foreign, buffer) {
}
var ok = false;
try {
- var __v_12 = new ArrayBuffer(2147483648);
+ var __v_12 = new ArrayBuffer(1 << 30);
ok = true;
} catch (e) {
// Can happen on 32 bit systems.
diff --git a/deps/v8/test/mjsunit/regress/regress-678917.js b/deps/v8/test/mjsunit/regress/regress-678917.js
index 0e302e58d2..a8ce04212b 100644
--- a/deps/v8/test/mjsunit/regress/regress-678917.js
+++ b/deps/v8/test/mjsunit/regress/regress-678917.js
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --max-old-space-size=1600
+// Flags: --max-old-space-size=1600 --nostress-incremental-marking
+
+// This test uses a lot of memory and fails with flaky OOM when run
+// with --stress-incremental-marking on TSAN.
s1 = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx';
s1 += s1;
diff --git a/deps/v8/test/mjsunit/regress/regress-6941.js b/deps/v8/test/mjsunit/regress/regress-6941.js
new file mode 100644
index 0000000000..a8370831f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6941.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function foo(x) {
+ return Symbol.iterator == x;
+}
+
+function main() {
+ foo(Symbol());
+ foo({valueOf() { return Symbol.toPrimitive}});
+}
+
+%NeverOptimizeFunction(main);
+main();
+%OptimizeFunctionOnNextCall(foo);
+main();
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-6948.js b/deps/v8/test/mjsunit/regress/regress-6948.js
new file mode 100644
index 0000000000..c7e0fae28f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6948.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+var o = {};
+
+function foo(s) { return o[s]; }
+
+var s = 'c' + 'c';
+foo(s);
+foo(s);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo(s));
+assertOptimized(foo);
+assertEquals(undefined, foo('c' + 'c'));
+assertOptimized(foo);
+assertEquals(undefined, foo('ccddeeffgg'.slice(0, 2)));
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-6970.js b/deps/v8/test/mjsunit/regress/regress-6970.js
new file mode 100644
index 0000000000..64f8caf102
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6970.js
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(42, (({a = {b} = {b: 42}}) => a.b)({}));
+assertEquals(42, b);
diff --git a/deps/v8/test/mjsunit/regress/regress-6989.js b/deps/v8/test/mjsunit/regress/regress-6989.js
new file mode 100644
index 0000000000..b4a33c59c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6989.js
@@ -0,0 +1,85 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+(function() {
+ function foo(o) { o["x"] = 1; }
+
+ assertThrows(() => foo(undefined));
+ assertThrows(() => foo(undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(undefined));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { o["x"] = 1; }
+
+ assertThrows(() => foo(null));
+ assertThrows(() => foo(null));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(null));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { return o["x"]; }
+
+ assertThrows(() => foo(undefined));
+ assertThrows(() => foo(undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(undefined));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { return o["x"]; }
+
+ assertThrows(() => foo(null));
+ assertThrows(() => foo(null));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(null));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { o.x = 1; }
+
+ assertThrows(() => foo(undefined));
+ assertThrows(() => foo(undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(undefined));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { o.x = 1; }
+
+ assertThrows(() => foo(null));
+ assertThrows(() => foo(null));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(null));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { return o.x; }
+
+ assertThrows(() => foo(undefined));
+ assertThrows(() => foo(undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(undefined));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(o) { return o.x; }
+
+ assertThrows(() => foo(null));
+ assertThrows(() => foo(null));
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(null));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-6991.js b/deps/v8/test/mjsunit/regress/regress-6991.js
new file mode 100644
index 0000000000..1c6b976977
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6991.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function foo(o) { return o.x; }
+
+assertEquals(undefined, foo({}));
+assertEquals(undefined, foo(1));
+assertEquals(undefined, foo({}));
+assertEquals(undefined, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo({}));
+assertOptimized(foo);
+assertEquals(undefined, foo(1));
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-7014-1.js b/deps/v8/test/mjsunit/regress/regress-7014-1.js
new file mode 100644
index 0000000000..6aadf91aa2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7014-1.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+function foo(s) {
+ return s[5];
+}
+
+assertEquals("f", foo("abcdef"));
+assertEquals(undefined, foo("a"));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("f", foo("abcdef"));
+assertEquals(undefined, foo("a"));
+assertOptimized(foo);
+
+// Now mess with the String.prototype.
+String.prototype[5] = "5";
+
+assertEquals("f", foo("abcdef"));
+assertEquals("5", foo("a"));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("f", foo("abcdef"));
+assertEquals("5", foo("a"));
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-7014-2.js b/deps/v8/test/mjsunit/regress/regress-7014-2.js
new file mode 100644
index 0000000000..057e170d90
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7014-2.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+function foo(s) {
+ return s[5];
+}
+
+assertEquals("f", foo("abcdef"));
+assertEquals(undefined, foo("a"));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("f", foo("abcdef"));
+assertEquals(undefined, foo("a"));
+assertOptimized(foo);
+
+// Now mess with the String.prototype.
+String.prototype.__proto__ = new Proxy(String.prototype.__proto__, {
+ get(target, property) {
+ return "5";
+ }
+});
+
+assertEquals("f", foo("abcdef"));
+assertEquals("5", foo("a"));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("f", foo("abcdef"));
+assertEquals("5", foo("a"));
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-7026.js b/deps/v8/test/mjsunit/regress/regress-7026.js
new file mode 100644
index 0000000000..f02d2f3505
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7026.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(o, k) { return o[k]; }
+
+const a = "a";
+foo([1], 0);
+foo({a:1}, a);
+
+const p = new Proxy({}, {
+ get(target, name) {
+ return name;
+ }
+});
+
+assertEquals(a + "b", foo(p, a + "b"));
diff --git a/deps/v8/test/mjsunit/regress/regress-707187.js b/deps/v8/test/mjsunit/regress/regress-707187.js
index 31a73e2ba0..3c4d768d4b 100644
--- a/deps/v8/test/mjsunit/regress/regress-707187.js
+++ b/deps/v8/test/mjsunit/regress/regress-707187.js
@@ -5,7 +5,7 @@
let i = 0;
let re = /./g;
re.exec = () => {
- if (i++ == 0) return { length: 2147483648 };
+ if (i++ == 0) return { length: 2 ** 16 };
return null;
};
diff --git a/deps/v8/test/mjsunit/regress/regress-7115.js b/deps/v8/test/mjsunit/regress/regress-7115.js
new file mode 100644
index 0000000000..837c11e930
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7115.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function TestBuiltinSubclassing(Builtin) {
+ assertTrue(%HasFastProperties(Builtin));
+ assertTrue(%HasFastProperties(Builtin.prototype));
+ assertTrue(%HasFastProperties(Builtin.prototype.__proto__));
+
+ class SubClass extends Builtin {}
+
+ assertTrue(%HasFastProperties(Builtin));
+ assertTrue(%HasFastProperties(Builtin.prototype));
+ assertTrue(%HasFastProperties(Builtin.prototype.__proto__));
+}
+
+let TypedArray = Uint8Array.__proto__;
+
+TestBuiltinSubclassing(RegExp);
+TestBuiltinSubclassing(Promise);
+TestBuiltinSubclassing(Array);
+TestBuiltinSubclassing(TypedArray);
+TestBuiltinSubclassing(Uint8Array);
+TestBuiltinSubclassing(Int8Array);
+TestBuiltinSubclassing(Uint16Array);
+TestBuiltinSubclassing(Int16Array);
+TestBuiltinSubclassing(Uint32Array);
+TestBuiltinSubclassing(Int32Array);
+TestBuiltinSubclassing(Float32Array);
+TestBuiltinSubclassing(Float64Array);
+TestBuiltinSubclassing(Uint8ClampedArray);
diff --git a/deps/v8/test/mjsunit/regress/regress-7135.js b/deps/v8/test/mjsunit/regress/regress-7135.js
new file mode 100644
index 0000000000..2387241eee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7135.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function foo() { return -"0" }
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+assertOptimized(foo);
+
+function bar() { return -"1" }
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
+assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-752764.js b/deps/v8/test/mjsunit/regress/regress-752764.js
index 4963089d0c..30ab7b2a6d 100644
--- a/deps/v8/test/mjsunit/regress/regress-752764.js
+++ b/deps/v8/test/mjsunit/regress/regress-752764.js
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --nostress-incremental-marking
+
+// This test uses a lot of memory and fails with flaky OOM when run
+// with --stress-incremental-marking on TSAN.
a = "a".repeat(%StringMaxLength() - 3);
assertThrows(() => new RegExp("a" + a), SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-774824.js b/deps/v8/test/mjsunit/regress/regress-774824.js
new file mode 100644
index 0000000000..ca2deccad9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-774824.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var a = new Set().values();
+ a.outOfObjectProperty = undefined;
+ %DeoptimizeNow();
+ return !a;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-775888.js b/deps/v8/test/mjsunit/regress/regress-775888.js
new file mode 100644
index 0000000000..8aa809e812
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-775888.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function __f_7586(__v_27535) {
+ let a = __v_27535.shift();
+ return a;
+}
+
+function __f_7587() {
+ var __v_27536 = [ 1, 15, 16];
+ __f_7586(__v_27536);
+ __v_27536.unshift(__v_27536);
+}
+__f_7587();
+__f_7587();
+
+%OptimizeFunctionOnNextCall(__f_7586);
+__f_7587();
diff --git a/deps/v8/test/mjsunit/regress/regress-776309.js b/deps/v8/test/mjsunit/regress/regress-776309.js
new file mode 100644
index 0000000000..98a38c05e5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-776309.js
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function C() { }
+
+function f(b) {
+ var o = new C();
+ // Create out-of-object properties only on one branch so that escape
+ // analysis does not analyze the property array away.
+ if (b) o.t = 1.1;
+ %_DeoptimizeNow();
+ return o.t;
+}
+
+// Finish slack tracking for C.
+for (var i = 0; i < 1000; i++) new C();
+
+f(true);
+f(true);
+f(false);
+
+%OptimizeFunctionOnNextCall(f);
+
+assertEquals(1.1, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-778574.js b/deps/v8/test/mjsunit/regress/regress-778574.js
new file mode 100644
index 0000000000..862418e503
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-778574.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function () {
+ arguments.length = 7;
+ Array.prototype.slice.call(arguments);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-778668.js b/deps/v8/test/mjsunit/regress/regress-778668.js
new file mode 100644
index 0000000000..cb6a359fd9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-778668.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function () {
+ function f( __v_59960) {
+ arguments.length = -5;
+ Array.prototype.slice.call(arguments);
+ }
+ f('a')
+})();
+
+(function () {
+ function f( __v_59960) {
+ arguments.length = 2.3;
+ print(arguments.length);
+ Array.prototype.slice.call(arguments);
+ }
+ f('a')
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-779407.js b/deps/v8/test/mjsunit/regress/regress-779407.js
new file mode 100644
index 0000000000..140f7bdd74
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-779407.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = '\u1234-------';
+for (var i = 0; i < 17; i++) {
+ try {
+ s += s;
+ s += s;
+ } catch (e) {
+ }
+}
+s.replace(/[a]/g);
diff --git a/deps/v8/test/mjsunit/regress/regress-781218.js b/deps/v8/test/mjsunit/regress/regress-781218.js
index ae00cc5c08..f51e99f32b 100644
--- a/deps/v8/test/mjsunit/regress/regress-781218.js
+++ b/deps/v8/test/mjsunit/regress/regress-781218.js
@@ -24,7 +24,10 @@ f(new C());
var o = new C();
%HeapObjectVerify(o);
-m.set(o, 1); // This creates hash code on o.
+// We need at least 2 elements in the Map.
+m.set({}, 3);
+// This creates hash code on o.
+m.set(o, 1);
// Add an out-of-object property.
o.x = true;
@@ -41,3 +44,10 @@ f(o);
%HeapObjectVerify(o);
assertEquals(1, m.get(o));
+
+// Grow the Map and ensure the object is still found.
+for (let i = 0; i < 1000; i++) {
+ let object = {};
+ m.set(object, object);
+ assertEquals(1, m.get(o));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-782754.js b/deps/v8/test/mjsunit/regress/regress-782754.js
new file mode 100644
index 0000000000..608c48ad9e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-782754.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let a = [1,2];
+function f(skip) { g(undefined, skip) }
+function g(x, skip) {
+ if (skip) return;
+ return a[x+1];
+}
+g(0, false);
+g(0, false);
+f(true);
+f(true);
+%OptimizeFunctionOnNextCall(f);
+f(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-783051.js b/deps/v8/test/mjsunit/regress/regress-783051.js
new file mode 100644
index 0000000000..0e5af37a21
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-783051.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() { return Math.abs([][0]); }
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-783119.js b/deps/v8/test/mjsunit/regress/regress-783119.js
new file mode 100644
index 0000000000..68200a837f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-783119.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let a = [,,,,,,,,,,,,,,,,,,,,,,,11,12,13,14,15,16,17,18,19];
+%NormalizeElements(a);
+let b = a.slice(19);
+assertEquals(11, b[4]);
diff --git a/deps/v8/test/mjsunit/regress/regress-784080.js b/deps/v8/test/mjsunit/regress/regress-784080.js
new file mode 100644
index 0000000000..83b32b678f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-784080.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(function() {
+ function f(a, b, a) {
+ return Array.prototype.slice.call(arguments);
+ }
+ let result = f(456, 789, 111112);
+ assertEquals(result[0], 456);
+ assertEquals(result[1], 789);
+ assertEquals(result[2], 111112);
+ assertEquals(result.length, 3);
+})();
+
+(function() {
+ function f(a, b, a) {
+ return Array.prototype.slice.call(arguments);
+ }
+ let result = f(456, 789, 111112, 543, 654);
+ assertEquals(result[0], 456);
+ assertEquals(result[1], 789);
+ assertEquals(result[2], 111112);
+ assertEquals(result[3], 543);
+ assertEquals(result[4], 654);
+ assertEquals(result.length, 5);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-784862.js b/deps/v8/test/mjsunit/regress/regress-784862.js
new file mode 100644
index 0000000000..5471002015
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-784862.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This test triggers table allocation in large object space. We don't care
+// about the result as long as we don't crash.
+const array = new Array();
+array[0x80000] = 1;
+array.unshift({});
+assertThrows(() => new WeakMap(array));
diff --git a/deps/v8/test/mjsunit/regress/regress-784863.js b/deps/v8/test/mjsunit/regress/regress-784863.js
new file mode 100644
index 0000000000..1612197af2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-784863.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+var __v_18522 = [ 4.2, true, false];
+Object.defineProperty(__v_18522, 2, {
+ get: function () {
+ return false;
+ },
+});
+__v_18522.shift();
+__v_18522.slice();
diff --git a/deps/v8/test/mjsunit/regress/regress-784990.js b/deps/v8/test/mjsunit/regress/regress-784990.js
new file mode 100644
index 0000000000..7a57253078
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-784990.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const key1 = {};
+const key2 = {};
+
+const set = new Set([, 1]);
+assertEquals(set.has(undefined), true);
+assertEquals(set.has(1), true);
+
+const doubleSet = new Set([,1.234]);
+assertEquals(doubleSet.has(undefined), true);
+assertEquals(doubleSet.has(1.234), true);
+
+const map = new Map([[, key1], [key2, ]]);
+assertEquals(map.get(undefined), key1);
+assertEquals(map.get(key2), undefined);
+
+const doublesMap = new Map([[, 1.234]]);
+assertEquals(doublesMap.get(undefined), 1.234);
+
+const weakmap = new WeakMap([[key1, ]]);
+assertEquals(weakmap.get(key1), undefined);
+
+assertThrows(() => new WeakSet([, {}]));
+assertThrows(() => new WeakSet([, 1.234]));
+assertThrows(() => new Map([, [, key1]]));
+assertThrows(() => new WeakMap([[, key1]]));
+assertThrows(() => new WeakMap([, [, key1]]));
diff --git a/deps/v8/test/mjsunit/regress/regress-785804.js b/deps/v8/test/mjsunit/regress/regress-785804.js
new file mode 100644
index 0000000000..2dce3be73e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-785804.js
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let __v_25059 = {
+ valueOf: function () {
+ let __v_25062 = __v_25055.length;
+ __v_25055.length = 1;
+ return __v_25062;
+ }
+};
+let __v_25060 = [];
+for (let __v_25063 = 0; __v_25063 < 1500; __v_25063++) {
+ __v_25060.push("" + 0.1);
+}
+for (let __v_25064 = 0; __v_25064 < 75; __v_25064++) {
+ __v_25055 = __v_25060.slice();
+ __v_25056 = __v_25055.slice(0, __v_25059);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-786573.js b/deps/v8/test/mjsunit/regress/regress-786573.js
new file mode 100644
index 0000000000..a7d5f37e75
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-786573.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let cnt = 0;
+let reg = /./g;
+reg.exec = () => {
+ // Note: it's still possible to trigger OOM by passing huge values here, since
+ // the spec requires building a list of all captures in
+ // https://tc39.github.io/ecma262/#sec-regexp.prototype-@@replace
+ if (cnt++ == 0) return {length: 2 ** 16};
+ cnt = 0;
+ return null;
+};
+
+assertThrows(() => ''.replace(reg, () => {}), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-791345.js b/deps/v8/test/mjsunit/regress/regress-791345.js
new file mode 100644
index 0000000000..3a92dcde98
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-791345.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(a) {
+ var len = 0x80000000;
+ arguments.length = len;
+ Array.prototype.slice.call(arguments, len - 1, len);
+}('a'));
+
+(function(a) {
+ var len = 0x40000000;
+ arguments.length = len;
+ Array.prototype.slice.call(arguments, len - 1, len);
+}('a'));
diff --git a/deps/v8/test/mjsunit/regress/regress-793793.js b/deps/v8/test/mjsunit/regress/regress-793793.js
new file mode 100644
index 0000000000..e89c5e5f34
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-793793.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+
+assertThrows(() => new RegExp("\\1(\\P{P\0[}()/", "u"), SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-794822.js b/deps/v8/test/mjsunit/regress/regress-794822.js
new file mode 100644
index 0000000000..c9b46001d1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-794822.js
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function* opt(arg = () => arg) {
+ let tmp = opt.x; // LdaNamedProperty
+ for (;;) {
+ arg;
+ yield;
+ function inner() { tmp }
+ break;
+ }
+}
+
+opt();
+%OptimizeFunctionOnNextCall(opt);
+opt();
diff --git a/deps/v8/test/mjsunit/harmony/global-accessors-strict.js b/deps/v8/test/mjsunit/regress/regress-794825.js
index 15a581e795..3709e8eabc 100644
--- a/deps/v8/test/mjsunit/harmony/global-accessors-strict.js
+++ b/deps/v8/test/mjsunit/regress/regress-794825.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,30 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test accessors on the global object.
-//
-// Flags: --harmony-strict-legacy-accessor-builtins
-
-var x_ = 0;
-
-this.__defineSetter__('x', function(x) { x_ = x; });
-this.__defineGetter__('x', function() { return x_; });
-
-this.__defineSetter__('y', function(x) { });
-this.__defineGetter__('y', function() { return 7; });
-
-function f(a) {
- x = x + a;
- return x;
-}
-
-function g(a) {
- y = y + a;
- return y;
+// Flags: --allow-natives-syntax --opt
+
+
+function* opt() {
+ // The for loop to generate a SwitchOnSmiNoFeedback with holes
+ // at the end since yield will be eliminated.
+ for (;;)
+ if (true) {
+ } else {
+ yield;
+ }
+
+ // Another loop to force more holes in the constant pool to
+ // verify if bounds checks works when iterating over the jump
+ // table.
+ for (;;)
+ if (true) {
+ } else {
+ yield;
+ }
}
-assertEquals(1, f(1));
-assertEquals(3, f(2));
-
-assertEquals(7, g(1));
-assertEquals(7, g(2));
+opt();
+// Optimize function to trigger the iteration over jump
+// table.
+%OptimizeFunctionOnNextCall(opt);
+opt();
+assertOptimized(opt);
diff --git a/deps/v8/test/mjsunit/regress/regress-799690.js b/deps/v8/test/mjsunit/regress/regress-799690.js
new file mode 100644
index 0000000000..e7bf143bb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-799690.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=500
+
+function asm() {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ while (1) return 1;
+ return 0;
+ }
+ return { f: f};
+}
+const mod = asm();
+function call_f() {
+ mod.f();
+ call_f();
+}
+assertThrows(call_f, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-799813.js b/deps/v8/test/mjsunit/regress/regress-799813.js
new file mode 100644
index 0000000000..e965ede5bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-799813.js
@@ -0,0 +1,42 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function testAdvanceLastIndex(initial_last_index_value,
+ expected_final_last_index_value) {
+ let exec_call_count = 0;
+ let last_index_setter_call_count = 0;
+ let final_last_index_value;
+
+ var customRegexp = {
+ get global() { return true; },
+ get unicode() { return true; },
+ get lastIndex() {
+ return initial_last_index_value;
+ },
+ set lastIndex(v) {
+ last_index_setter_call_count++;
+ final_last_index_value = v;
+ },
+ exec() {
+ return (exec_call_count++ == 0) ? [""] : null;
+ }
+ };
+
+ RegExp.prototype[Symbol.replace].call(customRegexp);
+
+ assertEquals(2, exec_call_count);
+ assertEquals(2, last_index_setter_call_count);
+ assertEquals(expected_final_last_index_value, final_last_index_value);
+}
+
+testAdvanceLastIndex(-1, 1);
+testAdvanceLastIndex( 0, 1);
+testAdvanceLastIndex(2**31 - 2, 2**31 - 1);
+testAdvanceLastIndex(2**31 - 1, 2**31 - 0);
+testAdvanceLastIndex(2**32 - 3, 2**32 - 2);
+testAdvanceLastIndex(2**32 - 2, 2**32 - 1);
+testAdvanceLastIndex(2**32 - 1, 2**32 - 0);
+testAdvanceLastIndex(2**53 - 2, 2**53 - 1);
+testAdvanceLastIndex(2**53 - 1, 2**53 - 0);
+testAdvanceLastIndex(2**53 - 0, 2**53 - 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-465564.js b/deps/v8/test/mjsunit/regress/regress-crbug-465564.js
index ea0c8dcf66..383070be9e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-465564.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-465564.js
@@ -4,4 +4,4 @@
// Flags: --allow-natives-syntax --cache=code
-assertEquals(-1, %StringCompare("a", "b"));
+assertTrue(%StringLessThan("a", "b"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-570241.js b/deps/v8/test/mjsunit/regress/regress-crbug-570241.js
index 4fecba57b5..1b52f1b347 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-570241.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-570241.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-lookbehind
-
assertTrue(/(?<=12345123451234512345)/.test("12345123451234512345"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-747062.js b/deps/v8/test/mjsunit/regress/regress-crbug-747062.js
index 4fe99d39c1..7e0e92ad7e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-747062.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-747062.js
@@ -35,3 +35,19 @@
%OptimizeFunctionOnNextCall(foo);
assertInstanceof(foo(), TypeError);
})();
+
+(function TestNonCallableFilter() {
+ function foo() { [].filter(undefined); }
+ assertThrows(foo, TypeError);
+ assertThrows(foo, TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo, TypeError);
+})();
+
+(function TestNonCallableFilterCaught() {
+ function foo() { try { [].filter(undefined) } catch(e) { return e } }
+ assertInstanceof(foo(), TypeError);
+ assertInstanceof(foo(), TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-766635.js b/deps/v8/test/mjsunit/regress/regress-crbug-766635.js
new file mode 100644
index 0000000000..ae0de0a600
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-766635.js
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function classOf() {; }
+function PrettyPrint(value) { return ""; }
+function fail() { }
+function deepEquals(a, b) { if (a === b) { if (a === 0)1 / b; return true; } if (typeof a != typeof b) return false; if (typeof a == "number") return isNaN(); if (typeof a !== "object" && typeof a !== "function") return false; var objectClass = classOf(); if (b) return false; if (objectClass === "RegExp") {; } if (objectClass === "Function") return false; if (objectClass === "Array") { var elementCount = 0; if (a.length != b.length) { return false; } for (var i = 0; i < a.length; i++) { if (a[i][i]) return false; } return true; } if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") { if (a.valueOf()) return false; }; }
+assertSame = function assertSame() { if (found === expected) { if (1 / found) return; } else if ((expected !== expected) && (found !== found)) { return; }; }; assertEquals = function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(PrettyPrint(expected),); } };
+var __v_3 = {};
+function __f_0() {
+ assertEquals();
+}
+try {
+ __f_0();
+} catch(e) {; }
+__v_2 = 0;
+o2 = {y:1.5};
+o2.y = 0;
+o3 = o2.y;
+function __f_1() {
+ for (var __v_1 = 0; __v_1 < 10; __v_1++) {
+ __v_2 += __v_3.x + o3.foo;
+ [ 3].filter(__f_9);
+ }
+}
+__f_1();
+%OptimizeFunctionOnNextCall(__f_1);
+__f_1();
+function __f_9(){ "use __f_9"; assertEquals( this); }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-768875.js b/deps/v8/test/mjsunit/regress/regress-crbug-768875.js
new file mode 100644
index 0000000000..4b5ecdee1c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-768875.js
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+this.__defineGetter__('x', function() { return 0; });
+function store_x() {
+ x = 23;
+}
+store_x();
+store_x();
+assertEquals(0, x);
+Realm.eval(Realm.current(), "let x = 42");
+assertEquals(42, x);
+store_x();
+assertEquals(23, x);
+
+
+this.__defineGetter__('y', function() { return 0; });
+function store_y() {
+ y = 23;
+}
+store_y();
+store_y();
+assertEquals(0, y);
+Realm.eval(Realm.current(), "const y = 42");
+assertEquals(42, y);
+assertThrows(store_y, TypeError);
+assertEquals(42, y);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-774459.js b/deps/v8/test/mjsunit/regress/regress-crbug-774459.js
new file mode 100644
index 0000000000..4263c3252d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-774459.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ const m = new Map();
+ const k = Math.pow(2, 31) - 1;
+ m.set(k, 1);
+
+ function foo(m, k) {
+ return m.get(k | 0);
+ }
+
+ assertEquals(1, foo(m, k));
+ assertEquals(1, foo(m, k));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(m, k));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-774860.js b/deps/v8/test/mjsunit/regress/regress-crbug-774860.js
new file mode 100644
index 0000000000..e0ba5e3d8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-774860.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --verify-heap
+
+(function () {
+ class F extends Function {}
+ let f = new F("'use strict';");
+ // Create enough objects to complete slack tracking.
+ for (let i = 0; i < 20; i++) {
+ new F();
+ }
+ gc();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-776511.js b/deps/v8/test/mjsunit/regress/regress-crbug-776511.js
new file mode 100644
index 0000000000..f757bc2cc2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-776511.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts --expose-gc --allow-natives-syntax
+
+function __getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ properties.push(name);
+ }
+ return properties;
+}
+function __getRandomProperty(obj, seed) {
+ let properties = __getProperties(obj);
+ return properties[seed % properties.length];
+}
+(function() {
+ var __v_59904 = [12, 13, 14, 16, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25];
+ var __v_59906 = function(__v_59908) {
+ var __v_59909 = function(__v_59910, __v_59911) {
+ if (__v_59911 == 13 && __v_59908) {
+ __v_59904.abc = 25;
+ }
+ return true;
+ };
+ return __v_59904.filter(__v_59909);
+ };
+ print(__v_59906());
+ __v_59904[__getRandomProperty(__v_59904, 366855)] = this, gc();
+ print(__v_59906());
+ %OptimizeFunctionOnNextCall(__v_59906);
+ var __v_59907 = __v_59906(true);
+ print(__v_59907);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-778952.js b/deps/v8/test/mjsunit/regress/regress-crbug-778952.js
new file mode 100644
index 0000000000..d07c478ecf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-778952.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(function() {
+ const p = new Proxy({}, {});
+ (new Set).add(p); // Compute the hash code for p.
+ null[p] = 0;
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-779344.js b/deps/v8/test/mjsunit/regress/regress-crbug-779344.js
new file mode 100644
index 0000000000..5198a67be5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-779344.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = {};
+var proxy = new Proxy(() => {}, o);
+o.apply = proxy;
+assertThrows(
+ () => Function.prototype.call.call(proxy)
+);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-779367.js b/deps/v8/test/mjsunit/regress/regress-crbug-779367.js
new file mode 100644
index 0000000000..3836b34fc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-779367.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(o) {
+ return o.x;
+}
+
+Object.defineProperty(g, 'x', {set(v) {}});
+
+g.prototype = 1;
+g(g);
+g(g);
+%OptimizeFunctionOnNextCall(g);
+g(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-779457.js b/deps/v8/test/mjsunit/regress/regress-crbug-779457.js
new file mode 100644
index 0000000000..0e0504023b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-779457.js
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testEager() {
+ (function({name = [foo] = eval("[]")}) {})({});
+ (function([name = [foo] = eval("[]")]) {})([]);
+})();
+
+(function testLazy() {
+ function f({name = [foo] = eval("[]")}) {}
+ function g([name = [foo] = eval("[]")]) {}
+ f({});
+ g([]);
+})();
+
+(function testEagerArrow() {
+ (({name = [foo] = eval("[]")}) => {})({});
+ (([name = [foo] = eval("[]")]) => {})([]);
+})();
+
+(function testLazyArrow() {
+ var f = ({name = [foo] = eval("[]")}) => {};
+ var g = ([name = [foo] = eval("[]")]) => {};
+ f({});
+ g([]);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
new file mode 100644
index 0000000000..83af7a8b98
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function baz(obj, store) {
+ if (store === true) obj[0] = 1;
+}
+function bar(store) {
+ baz(Array.prototype, store);
+ baz(this.arguments, true);
+}
+bar(false);
+bar(false);
+%OptimizeFunctionOnNextCall(bar);
+bar(true);
+
+function foo() { [].push(); }
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
new file mode 100644
index 0000000000..f8ffbe8ff5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function baz(obj, store) {
+ if (store === true) obj[0] = 1;
+}
+function bar(store) {
+ baz(Object.prototype, store);
+ baz(this.arguments, true);
+}
+bar(false);
+bar(false);
+%OptimizeFunctionOnNextCall(bar);
+bar(true);
+
+function foo() { [].push(); }
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js
new file mode 100644
index 0000000000..6048fb9250
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) { return a[0]; }
+
+assertEquals(undefined, foo(x => x));
+assertEquals(undefined, foo({}));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo(x => x));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js
new file mode 100644
index 0000000000..71801df14a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(o) { return o[0]; }
+
+assertEquals(undefined, foo({}));
+Array.prototype[0] = 0;
+assertEquals(undefined, foo({}));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo({}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js
new file mode 100644
index 0000000000..70b29896f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a, i) { return a[i] + 0.5; }
+
+foo({}, 1);
+Array.prototype.unshift(1.5);
+assertTrue(Number.isNaN(foo({}, 1)));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(Number.isNaN(foo({}, 1)));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781583.js b/deps/v8/test/mjsunit/regress/regress-crbug-781583.js
new file mode 100644
index 0000000000..fd14ad7bce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781583.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function* generator(a) {
+ a.pop().next();
+}
+
+function prepareGenerators(n) {
+ var a = [{ next: () => 0 }];
+ for (var i = 0; i < n; ++i) {
+ a.push(generator(a));
+ }
+ return a;
+}
+
+var gens1 = prepareGenerators(10);
+assertDoesNotThrow(() => gens1.pop().next());
+
+%OptimizeFunctionOnNextCall(generator);
+
+var gens2 = prepareGenerators(200000);
+assertThrows(() => gens2.pop().next(), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-783902.js b/deps/v8/test/mjsunit/regress/regress-crbug-783902.js
new file mode 100644
index 0000000000..6739704a2a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-783902.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {}
+
+class B extends A {
+ *gf() {
+ yield super.f();
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-784835.js b/deps/v8/test/mjsunit/regress/regress-crbug-784835.js
new file mode 100644
index 0000000000..340e3cf221
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-784835.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(o, k) { return o[k]; }
+
+var a = [1,2];
+a["-1"] = 42;
+
+assertEquals(1, foo(a, 0));
+assertEquals(2, foo(a, 1));
+assertEquals(undefined, foo(a, 3));
+assertEquals(42, foo(a, -1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-786020.js b/deps/v8/test/mjsunit/regress/regress-crbug-786020.js
new file mode 100644
index 0000000000..fbad074bf8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-786020.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+%SetAllocationTimeout(1000, 90);
+(new constructor)[0x40000000] = null;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-786723.js b/deps/v8/test/mjsunit/regress/regress-crbug-786723.js
new file mode 100644
index 0000000000..d4e0957c5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-786723.js
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --function-context-specialization
+
+function f() {
+ var o = {};
+ function g() {
+ o.x = 1;
+ return Object.create(o);
+ };
+ gc();
+ o.x = 10;
+ %OptimizeFunctionOnNextCall(g);
+ g();
+}
+f();
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-791256.js b/deps/v8/test/mjsunit/regress/regress-crbug-791256.js
new file mode 100644
index 0000000000..bf9fc25977
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-791256.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original repro. A DCHECK used to fire.
+(function* (name = (eval(foo), foo, prototype)) { });
+
+// Simpler repro.
+(function (name = (foo, bar, baz) ) { });
+
+// A test which uses the value of the n-ary operation.
+(function (param = (0, 1, 2)) { assertEquals(2, param); })();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-798026.js b/deps/v8/test/mjsunit/regress/regress-crbug-798026.js
new file mode 100644
index 0000000000..46cd150e3f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-798026.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc
+
+array = new Array(4 * 1024 * 1024);
+Set.prototype.add = value => {
+ if (array.length != 1) {
+ array.length = 1;
+ gc();
+ }
+}
+new Set(array);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-801627.js b/deps/v8/test/mjsunit/regress/regress-crbug-801627.js
new file mode 100644
index 0000000000..0e51fff2b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-801627.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --enable-slow-asserts
+
+class Base {
+ constructor() {
+ this.x = 1;
+ }
+}
+
+class Derived extends Base {
+ constructor() {
+ super();
+ }
+}
+
+// Feed a bound function as new.target
+// to the profiler, so HeapObjectMatcher
+// can find it.
+Reflect.construct(Derived, [], Object.bind());
+%OptimizeFunctionOnNextCall(Derived);
+new Derived();
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-6906.js b/deps/v8/test/mjsunit/regress/regress-v8-6906.js
new file mode 100644
index 0000000000..72aa9858d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-6906.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+%DeoptimizeFunction(f);
+
+%DisassembleFunction(f);
diff --git a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
index 45f47343ee..aedffcaebf 100644
--- a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
+++ b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
@@ -4,4 +4,4 @@
// Flags: --allow-natives-syntax
-assertEquals(-1, %StringCompare("abc\u0102", "abc\u0201"));
+assertTrue(%StringLessThan("abc\u0102", "abc\u0201"));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02256.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
index 3b9b76b5a6..3b9b76b5a6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-02256.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256b.js
index 120643896d..120643896d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256b.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02862.js b/deps/v8/test/mjsunit/regress/wasm/regress-02862.js
index 92ed1cd6c9..92ed1cd6c9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-02862.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02862.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-5531.js b/deps/v8/test/mjsunit/regress/wasm/regress-5531.js
index 1363f96264..1363f96264 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-5531.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5531.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-5800.js b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
index 2e56da853d..2e56da853d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-5800.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-5884.js b/deps/v8/test/mjsunit/regress/wasm/regress-5884.js
index 8677f105ee..8677f105ee 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-5884.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5884.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-6054.js b/deps/v8/test/mjsunit/regress/wasm/regress-6054.js
index 7b309b6f82..7b309b6f82 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-6054.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-6054.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-6164.js b/deps/v8/test/mjsunit/regress/wasm/regress-6164.js
index 3035ea5249..3035ea5249 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-6164.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-6164.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-643595.js b/deps/v8/test/mjsunit/regress/wasm/regress-643595.js
index 9da074b689..9da074b689 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-643595.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-643595.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-644682.js b/deps/v8/test/mjsunit/regress/wasm/regress-644682.js
index b58c0d9b10..b58c0d9b10 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-644682.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-644682.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-647649.js b/deps/v8/test/mjsunit/regress/wasm/regress-647649.js
index dc89ebd845..dc89ebd845 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-647649.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-647649.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-648079.js b/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
index acc6146ef5..acc6146ef5 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-648079.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-651961.js b/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
index 30f6565d32..30f6565d32 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-651961.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-654377.js b/deps/v8/test/mjsunit/regress/wasm/regress-654377.js
index 871da72114..871da72114 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-654377.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-654377.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-663994.js b/deps/v8/test/mjsunit/regress/wasm/regress-663994.js
index da3d7c7771..da3d7c7771 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-663994.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-663994.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-666741.js b/deps/v8/test/mjsunit/regress/wasm/regress-666741.js
index 9531fc8fd2..9531fc8fd2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-666741.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-666741.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-667745.js b/deps/v8/test/mjsunit/regress/wasm/regress-667745.js
index 68c880303b..68c880303b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-667745.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-667745.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-670683.js b/deps/v8/test/mjsunit/regress/wasm/regress-670683.js
index 7306e117f4..7306e117f4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-670683.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-670683.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-674447.js b/deps/v8/test/mjsunit/regress/wasm/regress-674447.js
index 228b038334..228b038334 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-674447.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-674447.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-680938.js b/deps/v8/test/mjsunit/regress/wasm/regress-680938.js
index 75c8a457bb..75c8a457bb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-680938.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-680938.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-684858.js b/deps/v8/test/mjsunit/regress/wasm/regress-684858.js
index bfef7fcc8e..bfef7fcc8e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-684858.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-684858.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-688876.js b/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
index 83bebbb802..83bebbb802 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-688876.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-689450.js b/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
index 9a4989c633..9a4989c633 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-689450.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-6931.js b/deps/v8/test/mjsunit/regress/wasm/regress-6931.js
new file mode 100644
index 0000000000..364e95a680
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-6931.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+
+// This test checks for accidental sign extension. The Wasm spec says we do
+// arbitrary precision unsigned arithmetic to compute the memory address,
+// meaning this test should do 0xfffffffc + 8, which is 0x100000004 and out of
+// bounds. However, if we interpret 0xfffffffc as -4, then the result is 4 and
+// succeeds erroneously.
+
+
+(function() {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction('test', kSig_v_v)
+ .addBody([
+ kExprI32Const, 0x7c, // address = -4
+ kExprI32Const, 0,
+ kExprI32StoreMem, 0, 8, // align = 0, offset = 8
+ ])
+ .exportFunc();
+ let module = builder.instantiate();
+
+ assertTraps(kTrapMemOutOfBounds, module.exports.test);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-694433.js b/deps/v8/test/mjsunit/regress/wasm/regress-694433.js
new file mode 100644
index 0000000000..b63f390c62
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-694433.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var size = Math.floor(0xFFFFFFFF / 4) + 1;
+(function() {
+ // Note: On 32 bit, this throws in the Uint16Array constructor (size does not
+ // fit in a Smi). On 64 bit, it throws in WebAssembly.validate, because the
+ // size exceeds the internal module size limit.
+ assertThrows(() => WebAssembly.validate(new Uint16Array(size)), RangeError);
+})();
+gc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-698587.js b/deps/v8/test/mjsunit/regress/wasm/regress-698587.js
index 8e0be882b4..8e0be882b4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-698587.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-698587.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-699485.js b/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
index 7f4560789e..7f4560789e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-699485.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-702460.js b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
index 73c01e13a0..73c01e13a0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-702460.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-702839.js b/deps/v8/test/mjsunit/regress/wasm/regress-702839.js
index 859d268d12..859d268d12 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-702839.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-702839.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7033.js b/deps/v8/test/mjsunit/regress/wasm/regress-7033.js
new file mode 100644
index 0000000000..17d79c896f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7033.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_i_iii)
+ .addBodyWithEnd([
+ kExprI32Const, 0x07, // i32.const 7
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32And, // i32.and
+ kExprI32And, // i32.and
+ kExprEnd, // -
+ ])
+ .exportFunc();
+var module = builder.instantiate();
+assertEquals(0, module.exports.test());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7035.js b/deps/v8/test/mjsunit/regress/wasm/regress-7035.js
new file mode 100644
index 0000000000..cd69c7d1b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7035.js
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_i_iii)
+ .addBodyWithEnd([
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32Add, // i32.add -> 0
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32Add, // i32.add -> 0
+ kExprI32Add, // i32.add -> 0
+ kExprI32Const, 0x01, // i32.const 1
+ kExprI32Const, 0x00, // i32.const 0
+ kExprI32Add, // i32.add -> 1
+ kExprBlock, 0x7f, // @39 i32
+ kExprI32Const, 0x00, // i32.const 0
+ kExprBr, 0x00, // depth=0
+ kExprEnd, // @90
+ kExprI32Add, // i32.add -> 1
+ kExprI32Add, // i32.add -> 1
+ kExprEnd
+ ])
+ .exportFunc();
+var module = builder.instantiate();
+assertEquals(1, module.exports.test());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-703568.js b/deps/v8/test/mjsunit/regress/wasm/regress-703568.js
index c7d716bffd..c7d716bffd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-703568.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-703568.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
new file mode 100644
index 0000000000..b9ad1a0be4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Build two instances, instance 2 is interpreted, and calls instance 1 (via
+// C_WASM_ENTRY), instance 1 then calls JS, which triggers GC.
+
+let builder1 = new WasmModuleBuilder();
+
+function call_gc() {
+ print('Triggering GC.');
+ gc();
+ print('Survived GC.');
+}
+let func1_sig = makeSig(new Array(8).fill(kWasmI32), [kWasmI32]);
+let imp = builder1.addImport('q', 'gc', kSig_v_v);
+let func1 = builder1.addFunction('func1', func1_sig)
+ .addBody([
+ kExprGetLocal, 0, // -
+ kExprCallFunction, imp
+ ])
+ .exportFunc();
+let instance1 = builder1.instantiate({q: {gc: call_gc}});
+
+let builder2 = new WasmModuleBuilder();
+
+let func1_imp = builder2.addImport('q', 'func1', func1_sig);
+let func2 = builder2.addFunction('func2', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // 1
+ kExprGetLocal, 0, // 2
+ kExprGetLocal, 0, // 3
+ kExprGetLocal, 0, // 4
+ kExprGetLocal, 0, // 5
+ kExprGetLocal, 0, // 6
+ kExprGetLocal, 0, // 7
+ kExprGetLocal, 0, // 8
+ kExprCallFunction, func1_imp
+ ])
+ .exportFunc();
+
+let instance2 = builder2.instantiate({q: {func1: instance1.exports.func1}});
+
+%RedirectToWasmInterpreter(
+ instance2, parseInt(instance2.exports.func2.name));
+
+// Call with 1. This will be passed by the C_WASM_ENTRY via the stack, and the
+// GC will try to dereference it (before the bug fix).
+instance2.exports.func2(1);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-708714.js b/deps/v8/test/mjsunit/regress/wasm/regress-708714.js
index 10cd67ad8d..10cd67ad8d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-708714.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-708714.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-710844.js b/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
index a45e953574..a45e953574 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-710844.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-711203.js b/deps/v8/test/mjsunit/regress/wasm/regress-711203.js
index 46f274a8b0..46f274a8b0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-711203.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-711203.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js b/deps/v8/test/mjsunit/regress/wasm/regress-715216a.js
index 56253414c9..56253414c9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-715216a.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js b/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
index 0954f807dd..0954f807dd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-717056.js b/deps/v8/test/mjsunit/regress/wasm/regress-717056.js
index 534cf74eb7..534cf74eb7 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-717056.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-717056.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-717194.js b/deps/v8/test/mjsunit/regress/wasm/regress-717194.js
index 074b2e4bca..074b2e4bca 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-717194.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-717194.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-719175.js b/deps/v8/test/mjsunit/regress/wasm/regress-719175.js
index c6217b0b01..c6217b0b01 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-719175.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-719175.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-722445.js b/deps/v8/test/mjsunit/regress/wasm/regress-722445.js
index f6a96dc60d..f6a96dc60d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-722445.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-722445.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-724846.js b/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
index 628d58f294..628d58f294 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-724846.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-724851.js b/deps/v8/test/mjsunit/regress/wasm/regress-724851.js
index 18834795d2..18834795d2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-724851.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724851.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-724972.js b/deps/v8/test/mjsunit/regress/wasm/regress-724972.js
index 2af403ce20..2af403ce20 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-724972.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724972.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-727219.js b/deps/v8/test/mjsunit/regress/wasm/regress-727219.js
index af0d8725bc..af0d8725bc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-727219.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-727219.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-727222.js b/deps/v8/test/mjsunit/regress/wasm/regress-727222.js
index 6b3f2faf5f..6b3f2faf5f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-727222.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-727222.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-727560.js b/deps/v8/test/mjsunit/regress/wasm/regress-727560.js
index f92d879a2e..f92d879a2e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-727560.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-727560.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-729991.js b/deps/v8/test/mjsunit/regress/wasm/regress-729991.js
index 85a9ae7231..85a9ae7231 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-729991.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-729991.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-731351.js b/deps/v8/test/mjsunit/regress/wasm/regress-731351.js
index 238223ac2c..238223ac2c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-731351.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-731351.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-734108.js b/deps/v8/test/mjsunit/regress/wasm/regress-734108.js
index d8774f4a84..d8774f4a84 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-734108.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-734108.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-734246.js b/deps/v8/test/mjsunit/regress/wasm/regress-734246.js
index 57f98949f8..57f98949f8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-734246.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-734246.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-734345.js b/deps/v8/test/mjsunit/regress/wasm/regress-734345.js
index f55a06288e..f55a06288e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-734345.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-734345.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-736584.js b/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
index 39f03c1072..39f03c1072 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-736584.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-737069.js b/deps/v8/test/mjsunit/regress/wasm/regress-737069.js
index c68d10f06d..c68d10f06d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-737069.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-737069.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-739768.js b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
index bcf3ceeca2..bcf3ceeca2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-739768.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-753496.js b/deps/v8/test/mjsunit/regress/wasm/regress-753496.js
index a056a9fd8e..a056a9fd8e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-753496.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-753496.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-757217.js b/deps/v8/test/mjsunit/regress/wasm/regress-757217.js
index 218b090c45..218b090c45 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-757217.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-757217.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-763439.js b/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
index 0f9d2b24d8..0f9d2b24d8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-763439.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-763697.js b/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
index faf74e1cff..faf74e1cff 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-763697.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-766003.js b/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
new file mode 100644
index 0000000000..d8a1ea1ebf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-interpret-all
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+ __v_6 = new WasmModuleBuilder();
+__v_6.addFunction('exp1', kSig_i_i).addBody([kExprUnreachable]).exportFunc();
+ __v_7 = new WasmModuleBuilder();
+ __v_7.addImport('__v_11', '__v_11', kSig_i_i);
+try {
+; } catch(e) {; }
+ __v_8 = __v_6.instantiate().exports.exp1;
+ __v_9 = __v_7.instantiate({__v_11: {__v_11: __v_8}}).exports.call_imp;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-769846.js b/deps/v8/test/mjsunit/regress/wasm/regress-769846.js
index 297da84f5f..297da84f5f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-769846.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-769846.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
new file mode 100644
index 0000000000..e1581fcdd8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-interpret-all
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+assertThrows(() => {
+ __v_29 = 0;
+function __f_1() {
+ __v_19 = new WasmModuleBuilder();
+ if (__v_25) {
+ __v_23 = __v_19.addImport('__v_24', '__v_30', __v_25);
+ }
+ if (__v_18) {
+ __v_19.addMemory();
+ __v_19.addFunction('load', kSig_i_i)
+ .addBody([ 0])
+ .exportFunc();
+ }
+ return __v_19;
+}
+ (function TestExternalCallBetweenTwoWasmModulesWithoutAndWithMemory() {
+ __v_21 = __f_1(__v_18 = false, __v_25 = kSig_i_i);
+ __v_21.addFunction('plus_one', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // -
+ kExprCallFunction, __v_29 ])
+ .exportFunc();
+ __v_32 =
+ __f_1(__v_18 = true, __v_25 = undefined);
+ __v_31 = __v_32.instantiate(); try { __v_32[__getRandomProperty()] = __v_0; delete __v_18[__getRandomProperty()]; delete __v_34[__getRandomProperty()]; } catch(e) {; };
+ __v_20 = __v_21.instantiate(
+ {__v_24: {__v_30: __v_31.exports.load}});
+ __v_20.exports.plus_one(); __v_33 = __v_43;
+})();
+});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
new file mode 100644
index 0000000000..56e6f2ceb8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-interpret-all
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+assertThrows(() => {
+let __v_50315 = 0;
+function __f_15356(__v_50316, __v_50317) {
+ let __v_50318 = new WasmModuleBuilder();
+ if (__v_50317) {
+ let __v_50319 = __v_50318.addImport('import_module', 'other_module_fn', kSig_i_i);
+ }
+ __v_50318.addMemory();
+ __v_50318.addFunction('load', kSig_i_i).addBody([ 0, 0, 0]).exportFunc();
+ return __v_50318;
+}
+ (function __f_15357() {
+ let __v_50320 = __f_15356(__v_50350 = false, __v_50351 = kSig_i_i);
+ __v_50320.addFunction('plus_one', kSig_i_i).addBody([kExprGetLocal, 0, kExprCallFunction, __v_50315, kExprI32Const, kExprI32Add, kExprReturn]).exportFunc();
+ let __v_50321 = __f_15356();
+ let __v_50324 = __v_50321.instantiate();
+ let __v_50325 = __v_50320.instantiate({
+ import_module: {
+ other_module_fn: __v_50324.exports.load
+ }
+ });
+ __v_50325.exports.plus_one();
+ })();
+});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-775366.js b/deps/v8/test/mjsunit/regress/wasm/regress-775366.js
new file mode 100644
index 0000000000..e8db923896
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-775366.js
@@ -0,0 +1,29 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+
+(function BadTypeSection() {
+ var data = bytes(
+ kWasmH0,
+ kWasmH1,
+ kWasmH2,
+ kWasmH3,
+
+ kWasmV0,
+ kWasmV1,
+ kWasmV2,
+ kWasmV3,
+
+ kTypeSectionCode,
+ 5,
+ 2,
+ 0x60,
+ 0,
+ 0,
+ 13
+ );
+
+ assertFalse(WebAssembly.validate(data));
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-778917.js b/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
new file mode 100644
index 0000000000..083f1d12e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-interpret-all
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+
+const builder = new WasmModuleBuilder();
+
+const index = builder.addFunction("huge_frame", kSig_v_v)
+ .addBody([kExprCallFunction, 0])
+ .addLocals({f64_count: 49555}).exportFunc().index;
+// We assume above that the function we added has index 0.
+assertEquals(0, index);
+
+const module = builder.instantiate();
+assertThrows(module.exports.huge_frame, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
new file mode 100644
index 0000000000..a94f061c2b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_i_iii)
+ .addBodyWithEnd([
+ kExprI32Const, 0, // 0
+ kExprI32Const, 0, // 0, 0
+ kExprI32Add, // 0 + 0 -> 0
+ kExprI32Const, 0, // 0, 0
+ kExprI32Const, 0, // 0, 0, 0
+ kExprI32Add, // 0, 0 + 0 -> 0
+ kExprDrop, // 0
+ kExprDrop, // -
+ kExprI32Const, 0, // 0
+ kExprI32Const, 0, // 0, 0
+ kExprI32Add, // 0 + 0 -> 0
+ kExprI32Const, 0, // 0, 0
+ kExprI32Const, 1, // 0, 0, 1
+ kExprI32Add, // 0, 0 + 1 -> 1
+ kExprBlock, kWasmStmt, // 0, 1
+ kExprBr, 0, // 0, 1
+ kExprEnd, // 0, 1
+ kExprI32Add, // 0 + 1 -> 1
+ kExprEnd
+ ])
+ .exportFunc();
+var module = builder.instantiate();
+assertEquals(1, module.exports.test());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-784050.js b/deps/v8/test/mjsunit/regress/wasm/regress-784050.js
new file mode 100644
index 0000000000..8f1a79002c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-784050.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_v_v)
+ .addBodyWithEnd([
+ kExprI32Const, 0x0, // const 0
+ kExprI32Const, 0x0, // const 0
+ kExprBrIf, 0x00, // br depth=0
+ kExprLoop, 0x7f, // loop i32
+ kExprBlock, 0x7f, // block i32
+ kExprI32Const, 0x0, // const 0
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end
+ kExprUnreachable, // unreachable
+ kExprEnd, // end
+ ])
+ .exportFunc();
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-789952.js b/deps/v8/test/mjsunit/regress/wasm/regress-789952.js
new file mode 100644
index 0000000000..f73d8dc471
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-789952.js
@@ -0,0 +1,40 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let module_size = 19;
+let string_len = 0x00fffff0 - module_size;
+
+print("Allocating backing store: " + (string_len + module_size));
+let backing = new ArrayBuffer(string_len + module_size);
+
+print("Allocating typed array buffer");
+let buffer = new Uint8Array(backing);
+
+print("Filling...");
+buffer.fill(0x41);
+
+print("Setting up array buffer");
+// Magic
+buffer.set([0x00, 0x61, 0x73, 0x6D], 0);
+// Version
+buffer.set([0x01, 0x00, 0x00, 0x00], 4);
+// kUnknownSection (0)
+buffer.set([0], 8);
+// Section length
+buffer.set([0x80, 0x80, 0x80, 0x80, 0x00], 9);
+// Name length
+let x = string_len + 1;
+let b1 = ((x >> 0) & 0x7F) | 0x80;
+let b2 = ((x >> 7) & 0x7F) | 0x80;
+let b3 = ((x >> 14) & 0x7F) | 0x80;
+let b4 = ((x >> 21) & 0x7F);
+//buffer.set([0xDE, 0xFF, 0xFF, 0x7F], 14);
+ buffer.set([b1, b2, b3, b4], 14);
+
+print("Parsing module...");
+let m = new WebAssembly.Module(buffer);
+
+print("Triggering!");
+let c = WebAssembly.Module.customSections(m, "A".repeat(string_len + 1));
+assertEquals(0, c.length);
diff --git a/deps/v8/test/mjsunit/splice-proxy.js b/deps/v8/test/mjsunit/splice-proxy.js
new file mode 100644
index 0000000000..d33a2efeb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/splice-proxy.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var array = [];
+var proxy = new Proxy(new Proxy(array, {}), {});
+var Ctor = function() {};
+var result;
+
+array.constructor = function() {};
+array.constructor[Symbol.species] = Ctor;
+
+Array.prototype.slice.call(proxy);
diff --git a/deps/v8/test/mjsunit/string-equal.js b/deps/v8/test/mjsunit/string-equal.js
new file mode 100644
index 0000000000..26aac23e07
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-equal.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ const s = '\u8765abc';
+
+ assertTrue(s === s);
+ assertFalse(s === 'abc');
+ assertFalse('abc' === s);
+ assertTrue(s.slice(-3) === 'abc');
+ assertTrue('abc' === s.slice(-3));
+ assertTrue(s.slice(0, 1) === '\u8765');
+ assertTrue('\u8765' === s.slice(0, 1));
+ assertTrue(s === '' + s);
+ assertTrue('' + s === s);
+})();
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 869ab26b5b..ff84bc3be5 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -60,9 +60,12 @@ class MjsunitTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
- def GetFlagsForTestCase(self, testcase, context):
+ def GetParametersForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
- flags = [] + context.mode_flags
+
+ flags = testcase.flags + context.mode_flags
+ env = self._get_env(source)
+
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += match.strip().split()
@@ -80,8 +83,9 @@ class MjsunitTestSuite(testsuite.TestSuite):
for f in files_list ]
testfilename = os.path.join(self.root, testcase.path + self.suffix())
if SELF_SCRIPT_PATTERN.search(source):
- env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")]
- files = env + files
+ files = (
+ ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")] +
+ files)
if not context.no_harness and not NO_HARNESS_PATTERN.search(source):
files.append(os.path.join(self.root, "mjsunit.js"))
@@ -90,18 +94,20 @@ class MjsunitTestSuite(testsuite.TestSuite):
files.append("--module")
files.append(testfilename)
- flags += files
+ all_files = list(files)
if context.isolates:
- flags.append("--isolate")
- flags += files
+ all_files += ["--isolate"] + files
+ return all_files, flags, env
+
+ def _get_env(self, source):
env_match = ENV_PATTERN.search(source)
+ env = {}
if env_match:
for env_pair in env_match.group(1).strip().split():
var, value = env_pair.split('=')
- testcase.env[var] = value
-
- return testcase.flags + flags
+ env[var] = value
+ return env
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
diff --git a/deps/v8/test/mjsunit/tools/csvparser.js b/deps/v8/test/mjsunit/tools/csvparser.js
index f1449f6ba4..ffca9dd463 100644
--- a/deps/v8/test/mjsunit/tools/csvparser.js
+++ b/deps/v8/test/mjsunit/tools/csvparser.js
@@ -43,41 +43,44 @@ assertEquals(
parser.parseLine('1997,Ford,E350'));
assertEquals(
- ['1997','Ford','E350'],
- parser.parseLine('"1997","Ford","E350"'));
+ ['"', '\'', ',', '\n'],
+ parser.parseLine('",\',\\x2c,\\x0a'));
assertEquals(
- ['1997','Ford','E350','Super, luxurious truck'],
- parser.parseLine('1997,Ford,E350,"Super, luxurious truck"'));
+ ['"1997"','Ford','E350'],
+ parser.parseLine('"1997",Ford,E350'));
+assertEquals(
+ ['1997', 'Ford', 'E350', 'Super', ' luxurious truck'],
+ parser.parseLine('1997,Ford,E350,Super, luxurious truck'));
assertEquals(
['1997','Ford','E350','Super "luxurious" truck'],
- parser.parseLine('1997,Ford,E350,"Super ""luxurious"" truck"'));
+ parser.parseLine('1997,Ford,E350,Super "luxurious" truck'));
assertEquals(
['1997','Ford','E350','Super "luxurious" "truck"'],
- parser.parseLine('1997,Ford,E350,"Super ""luxurious"" ""truck"""'));
+ parser.parseLine('1997,Ford,E350,Super "luxurious" "truck"'));
assertEquals(
['1997','Ford','E350','Super "luxurious""truck"'],
- parser.parseLine('1997,Ford,E350,"Super ""luxurious""""truck"""'));
+ parser.parseLine('1997,Ford,E350,Super "luxurious""truck"'));
assertEquals(
['shared-library','/lib/ld-2.3.6.so','0x489a2000','0x489b7000'],
- parser.parseLine('shared-library,"/lib/ld-2.3.6.so",0x489a2000,0x489b7000'));
+ parser.parseLine('shared-library,/lib/ld-2.3.6.so,0x489a2000,0x489b7000'));
assertEquals(
['code-creation','LazyCompile','0xf6fe2d20','1201','APPLY_PREPARE native runtime.js:165'],
- parser.parseLine('code-creation,LazyCompile,0xf6fe2d20,1201,"APPLY_PREPARE native runtime.js:165"'));
+ parser.parseLine('code-creation,LazyCompile,0xf6fe2d20,1201,APPLY_PREPARE native runtime.js:165'));
assertEquals(
['code-creation','LazyCompile','0xf6fe4bc0','282',' native v8natives.js:69'],
- parser.parseLine('code-creation,LazyCompile,0xf6fe4bc0,282," native v8natives.js:69"'));
+ parser.parseLine('code-creation,LazyCompile,0xf6fe4bc0,282, native v8natives.js:69'));
assertEquals(
['code-creation','RegExp','0xf6c21c00','826','NccyrJroXvg\\/([^,]*)'],
- parser.parseLine('code-creation,RegExp,0xf6c21c00,826,"NccyrJroXvg\\/([^,]*)"'));
+ parser.parseLine('code-creation,RegExp,0xf6c21c00,826,NccyrJroXvg\\x5C/([^\\x2C]*)'));
assertEquals(
- ['code-creation','Function','0x42f0a0','163',''],
+ ['code-creation','Function','0x42f0a0','163','""'],
parser.parseLine('code-creation,Function,0x42f0a0,163,""'));
diff --git a/deps/v8/test/mjsunit/tools/dumpcpp.js b/deps/v8/test/mjsunit/tools/dumpcpp.js
index 49b4675bf1..2d9f17971f 100644
--- a/deps/v8/test/mjsunit/tools/dumpcpp.js
+++ b/deps/v8/test/mjsunit/tools/dumpcpp.js
@@ -5,7 +5,7 @@
// Load implementations from <project root>/tools.
// Files: tools/splaytree.js tools/codemap.js tools/csvparser.js
// Files: tools/consarray.js tools/profile.js tools/profile_view.js
-// Files: tools/logreader.js tools/tickprocessor.js
+// Files: tools/logreader.js tools/arguments.js tools/tickprocessor.js
// Files: tools/dumpcpp.js
// Env: TEST_FILE_NAME
diff --git a/deps/v8/test/mjsunit/tools/profviz-test.log b/deps/v8/test/mjsunit/tools/profviz-test.log
index f7cbe5b1b0..720def9d5a 100644
--- a/deps/v8/test/mjsunit/tools/profviz-test.log
+++ b/deps/v8/test/mjsunit/tools/profviz-test.log
@@ -1,1302 +1,1302 @@
-shared-library,"/usr/local/google/home/yangguo/v8/out/ia32.release/d8",0x08048000,0x08557000
-shared-library,"2506f000-25070000",0x2506f000,0x25070000
-shared-library,"31e60000-31e61000",0x31e60000,0x31e61000
-shared-library,"35dff000-35e00000",0x35dff000,0x35e00000
-shared-library,"48218000-48219000",0x48218000,0x48219000
-shared-library,"4af7d000-4af7e000",0x4af7d000,0x4af7e000
-shared-library,"55bf2000-55bf3000",0x55bf2000,0x55bf3000
-shared-library,"/lib/i386-linux-gnu/libc-2.15.so",0xf7450000,0xf75f3000
-shared-library,"/lib/i386-linux-gnu/libpthread-2.15.so",0xf75f9000,0xf7610000
-shared-library,"/lib/i386-linux-gnu/libgcc_s.so.1",0xf7614000,0xf7630000
-shared-library,"/lib/i386-linux-gnu/libm-2.15.so",0xf7633000,0xf765d000
-shared-library,"/usr/lib/i386-linux-gnu/libstdc++.so.6.0.16",0xf765f000,0xf7737000
-shared-library,"[vdso]",0xf776d000,0xf776e000
-shared-library,"/lib/i386-linux-gnu/ld-2.15.so",0xf776e000,0xf778e000
-profiler,"begin",1
-timer-event-start,"V8.GCCompactor",2425
-timer-event-start,"V8.External",2458
-timer-event-end,"V8.External",2468
-timer-event-start,"V8.External",3810
-timer-event-end,"V8.External",3830
-timer-event-end,"V8.GCCompactor",3840
-code-creation,Stub,2,0x2b80a000,484,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80a200,622,"CEntryStub"
-code-creation,Stub,2,0x2b80a480,540,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,13,0x2b80a6a0,116,"CompareICStub"
-code-creation,Stub,2,0x2b80a720,1428,"RecordWriteStub"
-code-creation,Stub,2,0x2b80acc0,97,"StoreBufferOverflowStub"
-code-creation,Stub,2,0x2b80ad40,611,"RecordWriteStub"
-code-creation,Stub,2,0x2b80afc0,76,"InterruptStub"
-code-creation,Stub,13,0x2b80b020,104,"CompareICStub"
-code-creation,Stub,2,0x2b80b0a0,130,"ArgumentsAccessStub"
-code-creation,Stub,2,0x2b80b140,160,"FastNewContextStub"
-code-creation,Stub,2,0x2b80b1e0,79,"StubFailureTrampolineStub"
-code-creation,Stub,2,0x2b80b240,704,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,14,0x2b80b500,93,"CompareNilICStub"
-code-creation,Stub,2,0x2b80b560,289,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80b6a0,664,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80b940,740,"NameDictionaryLookupStub"
-code-creation,Stub,13,0x2b80bc40,156,"CompareICStub"
-code-creation,Stub,2,0x2b80bce0,611,"RecordWriteStub"
-code-creation,Stub,13,0x2b80bf60,122,"CompareICStub"
-code-creation,Stub,2,0x2b80bfe0,217,"CreateAllocationSiteStub"
-code-creation,Stub,2,0x2b80c0c0,1456,"RecordWriteStub"
-code-creation,Stub,2,0x2b80c680,245,"StoreArrayLiteralElementStub"
-code-creation,Stub,2,0x2b80c780,1448,"RecordWriteStub"
-code-creation,Stub,2,0x2b80cd40,1471,"StringAddStub"
-code-creation,Stub,2,0x2b80d300,1448,"RecordWriteStub"
-code-creation,Stub,2,0x2b80d8c0,1453,"RecordWriteStub"
-code-creation,Stub,12,0x2b80de80,146,"BinaryOpStub"
-code-creation,Stub,2,0x2b80df20,640,"InternalArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e1a0,517,"ArrayConstructorStub"
-code-creation,Stub,2,0x2b80e3c0,305,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e500,305,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e640,349,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e7a0,349,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e900,289,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80ea40,680,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80ed00,692,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80efc0,704,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80f280,664,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80f520,488,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80f720,540,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80f940,432,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80fb00,432,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80fcc0,1453,"RecordWriteStub"
-code-creation,Stub,2,0x2b810280,400,"InternalArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b810420,611,"RecordWriteStub"
-code-creation,Stub,2,0x2b8106a0,213,"JSEntryStub"
-code-creation,Stub,13,0x2b810780,104,"CompareICStub"
-code-creation,Stub,12,0x2b810800,124,"BinaryOpStub"
-code-creation,Stub,2,0x2b810880,1447,"StringAddStub"
-code-creation,Stub,2,0x2b810e40,640,"InternalArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b8110c0,400,"InternalArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b811260,261,"FastCloneShallowArrayStub"
-code-creation,Stub,12,0x2b811380,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b8113e0,76,"StackCheckStub"
-code-creation,Stub,2,0x2b811440,1437,"RecordWriteStub"
-code-creation,Stub,2,0x2b8119e0,289,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b811b20,331,"CallFunctionStub"
-code-creation,Builtin,3,0x2b811c80,174,"A builtin from the snapshot"
-code-creation,Stub,14,0x2b811d40,124,"CompareNilICStub"
-code-creation,Stub,2,0x2b811dc0,1420,"RecordWriteStub"
-code-creation,Stub,13,0x2b812360,104,"CompareICStub"
-code-creation,Stub,2,0x2b8123e0,76,"LoadFieldStub"
-code-creation,Stub,13,0x2b812440,104,"CompareICStub"
-code-creation,Stub,2,0x2b8124c0,195,"NumberToStringStub"
-code-creation,Stub,15,0x2b8125a0,148,"ToBooleanStub"
-code-creation,Stub,2,0x2b812640,351,"ArgumentsAccessStub"
-code-creation,Stub,2,0x2b8127a0,664,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b812a40,1420,"RecordWriteStub"
-code-creation,Stub,12,0x2b812fe0,133,"BinaryOpStub"
-code-creation,Stub,2,0x2b813080,1664,"StringAddStub"
-code-creation,Stub,2,0x2b813700,1661,"StringAddStub"
-code-creation,Stub,2,0x2b813d80,472,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b813f60,80,"StubFailureTrampolineStub"
-code-creation,Stub,13,0x2b813fc0,104,"CompareICStub"
-code-creation,Stub,2,0x2b814040,331,"CallFunctionStub"
-code-creation,Stub,2,0x2b8141a0,660,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b814440,1433,"RecordWriteStub"
-code-creation,Stub,12,0x2b8149e0,146,"BinaryOpStub"
-code-creation,Stub,2,0x2b814a80,271,"CallConstructStub"
-code-creation,Stub,15,0x2b814ba0,136,"ToBooleanStub"
-code-creation,Stub,2,0x2b814c40,468,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,15,0x2b814e20,128,"ToBooleanStub"
-code-creation,Stub,2,0x2b814ea0,163,"FastNewContextStub"
-code-creation,Stub,2,0x2b814f60,1425,"RecordWriteStub"
-code-creation,LoadIC,5,0x2b815500,145,"A load IC from the snapshot"
-code-creation,Builtin,3,0x2b8155a0,83,"A builtin from the snapshot"
-code-creation,Stub,12,0x2b815600,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b815660,1433,"RecordWriteStub"
-code-creation,Stub,2,0x2b815c00,331,"CallFunctionStub"
-code-creation,Stub,13,0x2b815d60,104,"CompareICStub"
-code-creation,Stub,2,0x2b815de0,304,"FastNewClosureStub"
-code-creation,Stub,2,0x2b815f20,285,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b816040,1433,"RecordWriteStub"
-code-creation,Stub,2,0x2b8165e0,233,"InternalArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b8166e0,740,"NameDictionaryLookupStub"
-code-creation,Stub,2,0x2b8169e0,740,"NameDictionaryLookupStub"
-code-creation,Stub,12,0x2b816ce0,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b816d40,216,"StringCompareStub"
-code-creation,Stub,15,0x2b816e20,93,"ToBooleanStub"
-code-creation,Stub,12,0x2b816e80,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b816ee0,1433,"RecordWriteStub"
-code-creation,Stub,12,0x2b817480,155,"BinaryOpStub"
-code-creation,Stub,2,0x2b817520,169,"InternalArrayConstructorStub"
-code-creation,Stub,2,0x2b8175e0,233,"InternalArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b8176e0,1433,"RecordWriteStub"
-code-creation,Stub,12,0x2b817c80,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b817ce0,328,"KeyedLoadElementStub"
-code-creation,Stub,2,0x2b817e40,1461,"RecordWriteStub"
-code-creation,Stub,2,0x2b818400,98,"ToNumberStub"
-code-creation,Stub,13,0x2b818480,122,"CompareICStub"
-code-creation,Stub,12,0x2b818500,124,"BinaryOpStub"
-code-creation,Stub,2,0x2b818580,148,"CallConstructStub"
-code-creation,Stub,13,0x2b818620,491,"CompareICStub"
-code-creation,Stub,2,0x2b818820,213,"JSEntryStub"
-code-creation,CallIC,7,0x2b818900,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8189c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818a80,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818b40,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818c00,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818cc0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818d80,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818e40,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818f00,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818fc0,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819080,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819140,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819200,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8192c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819380,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819440,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819500,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8195c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819680,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819740,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819800,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8198c0,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819980,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819a40,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819b00,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819bc0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819c80,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819d40,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819e00,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819ec0,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819f80,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a040,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a100,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a1c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a280,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a340,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a400,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a4c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a580,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a640,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a700,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a7c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a880,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a940,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81aa00,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81aac0,180,"A call IC from the snapshot"
-code-creation,Builtin,3,0x2b81ab80,107,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81ac00,105,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81ac80,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81ace0,432,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81afc0,101,"A builtin from the snapshot"
-code-creation,LoadIC,5,0x2b81b1a0,83,"A load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b81bf00,83,"A keyed load IC from the snapshot"
-code-creation,StoreIC,9,0x2b81c680,84,"A store IC from the snapshot"
-code-creation,Builtin,3,0x2b8262e0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826340,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8263a0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826400,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826460,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8264c0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826520,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826580,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8265e0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826640,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8266a0,80,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826700,80,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826760,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8267c0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826820,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826880,75,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8268e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826960,491,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826b60,406,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826d00,157,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826da0,131,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826e40,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826ec0,107,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826f40,143,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826fe0,143,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827080,143,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827120,94,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827180,91,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8271e0,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827240,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8272a0,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827300,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827360,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8273c0,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827420,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827480,84,"A builtin from the snapshot"
-code-creation,LoadIC,5,0x2b8274e0,83,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b827540,313,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b827680,266,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b8277a0,80,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b827800,83,"A load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827860,83,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b8278c0,896,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827c40,499,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827e40,144,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827ee0,216,"A keyed load IC from the snapshot"
-code-creation,StoreIC,9,0x2b827fc0,365,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828140,293,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828280,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b8282e0,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828340,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b8283a0,84,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828400,365,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828580,293,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b8286c0,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828720,82,"A store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b828780,84,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b8287e0,2082,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b829020,84,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b829080,2082,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b8298c0,286,"A keyed store IC from the snapshot"
-code-creation,Builtin,3,0x2b8299e0,355,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b829b60,416,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b829d00,376,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b829e80,388,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a020,78,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a080,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a0e0,357,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a260,359,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a3e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a460,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a4e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a560,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a5e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a660,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a6e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a760,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a7e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a860,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a8e0,104,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a960,106,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a9e0,110,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82aa60,112,"A builtin from the snapshot"
-code-creation,LoadIC,5,0x2b82aae0,106,"A load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b82ab60,106,"A keyed load IC from the snapshot"
-code-creation,StoreIC,9,0x2b82abe0,108,"A store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b82ac60,108,"A keyed store IC from the snapshot"
-code-creation,Stub,14,0x2b82ace0,104,"CallFunctionStub"
-code-creation,Builtin,3,0x2b82ad60,65,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82adc0,93,"A builtin from the snapshot"
-timer-event-start,"V8.GCCompactor",6301
-timer-event-start,"V8.External",6312
-timer-event-end,"V8.External",6321
-timer-event-start,"V8.External",7418
-timer-event-end,"V8.External",7436
-timer-event-end,"V8.GCCompactor",7446
-code-creation,LazyCompile,3,0x2b81ac80,77,"Empty :1",0x4420e5cc,
-code-creation,LazyCompile,0,0x2b81aea0,264," native uri.js:1",0x4420e668,
-code-creation,LazyCompile,0,0x2b81b040,336,"SetUpUri native uri.js:442",0x4420f5b4,
-code-creation,LazyCompile,0,0x2b81b200,1880," native messages.js:1",0x4420f670,
-code-creation,LazyCompile,0,0x2b81b960,1429,"FormatString native messages.js:187",0x4420f918,
-code-creation,LazyCompile,0,0x2b81bf60,280,"MakeGenericError native messages.js:282",0x44211088,
-code-creation,LazyCompile,0,0x2b81c080,264,"FormatMessage native messages.js:301",0x4421113c,
-code-creation,LazyCompile,0,0x2b81c1a0,200,"MakeRangeError native messages.js:335",0x442112f0,
-code-creation,LazyCompile,0,0x2b81c280,1012,"captureStackTrace native messages.js:1123",0x44212280,
-code-creation,LazyCompile,0,0x2b81c6e0,460,"SetUpError native messages.js:1173",0x44212410,
-code-creation,LazyCompile,0,0x2b81c8c0,692,"SetUpError.a native messages.js:1176",0x442124c0,
-code-creation,LazyCompile,0,0x2b81cb80,164,"d native messages.js:1192",0x44212548,
-code-creation,LazyCompile,0,0x2b81cc40,360," native messages.js:1202",0x442125d0,
-code-creation,LazyCompile,0,0x2b81cc40,360,"Error",0x44212698,
-code-creation,LazyCompile,0,0x2b81cc40,360,"TypeError",0x442126f8,
-code-creation,LazyCompile,0,0x2b81cc40,360,"RangeError",0x44212758,
-code-creation,LazyCompile,0,0x2b81cc40,360,"SyntaxError",0x442127b8,
-code-creation,LazyCompile,0,0x2b81cc40,360,"ReferenceError",0x44212818,
-code-creation,LazyCompile,0,0x2b81cc40,360,"EvalError",0x44212878,
-code-creation,LazyCompile,0,0x2b81cc40,360,"URIError",0x442128d8,
-code-creation,LazyCompile,0,0x2b81cdc0,424,"SetUpStackOverflowBoilerplate native messages.js:1301",0x44212a74,
-code-creation,LazyCompile,0,0x2b81cf80,216," native messages.js:294",0x44214b3c,
-code-creation,LazyCompile,0,0x2b81d060,408," native string.js:1",0x44214c2c,
-code-creation,LazyCompile,0,0x2b81d200,380,"StringConstructor native string.js:35",0x44214e2c,
-code-creation,LazyCompile,0,0x2b81d380,1132,"SetUpString native string.js:962",0x44216ea8,
-code-creation,LazyCompile,0,0x2b81d800,616," native date.js:1",0x44216fa0,
-code-creation,LazyCompile,0,0x2b81da80,1392,"DateConstructor native date.js:141",0x442182bc,
-code-creation,LazyCompile,0,0x2b81e000,1396,"SetUpDate native date.js:761",0x44219944,
-code-creation,LazyCompile,0,0x2b81e580,268," native array.js:1",0x44219b20,
-code-creation,LazyCompile,0,0x2b81e6a0,2272,"SetUpArray native array.js:1591",0x4421c6ac,
-code-creation,LazyCompile,0,0x2b81ef80,292,"SetUpArray.b native array.js:1605",0x4421c814,
-code-creation,LazyCompile,0,0x2b81f0c0,1084," native v8natives.js:1",0x4421c904,
-code-creation,LazyCompile,0,0x2b81f500,561,"InstallFunctions native v8natives.js:46",0x4421cc1c,
-code-creation,LazyCompile,0,0x2b81f740,304,"InstallGetterSetter native v8natives.js:72",0x4421ea1c,
-code-creation,LazyCompile,0,0x2b81f880,814,"SetUpLockedPrototype native v8natives.js:87",0x4421eab4,
-code-creation,LazyCompile,0,0x2b81fbc0,452,"SetUpGlobal native v8natives.js:197",0x4421ed3c,
-code-creation,LazyCompile,0,0x2b81fda0,404,"hasOwnProperty native v8natives.js:251",0x4421eee4,
-code-creation,LazyCompile,0,0x2b81ff40,308,"ObjectConstructor native v8natives.js:1371",0x442200b4,
-code-creation,LazyCompile,0,0x2b820080,1044,"SetUpObject native v8natives.js:1385",0x44220140,
-code-creation,LazyCompile,0,0x2b8204a0,292,"BooleanConstructor native v8natives.js:1437",0x442201c8,
-code-creation,LazyCompile,0,0x2b8205e0,448,"SetUpBoolean native v8natives.js:1472",0x44220314,
-code-creation,LazyCompile,0,0x2b8207a0,336,"NumberConstructor native v8natives.js:1491",0x442203ac,
-code-creation,LazyCompile,0,0x2b820900,924,"SetUpNumber native v8natives.js:1635",0x4422073c,
-code-creation,LazyCompile,0,0x2b820ca0,440,"FunctionConstructor native v8natives.js:1813",0x44220954,
-code-creation,LazyCompile,0,0x2b820e60,380,"SetUpFunction native v8natives.js:1826",0x442209f8,
-code-creation,LazyCompile,0,0x2b820fe0,264," native json.js:1",0x44221238,
-code-creation,LazyCompile,0,0x2b821100,260,"SetUpJSON native json.js:219",0x44221940,
-code-creation,LazyCompile,0,0x2b821220,340," native math.js:1",0x44221a5c,
-code-creation,LazyCompile,0,0x2b821380,164,"MathConstructor native math.js:40",0x44221ba4,
-code-creation,LazyCompile,0,0x2b821440,1112,"SetUpMath native math.js:226",0x4422283c,
-code-creation,LazyCompile,0,0x2b8218a0,404," native regexp.js:1",0x442228f8,
-code-creation,LazyCompile,0,0x2b821a40,324,"RegExpConstructor native regexp.js:90",0x44223264,
-code-creation,LazyCompile,0,0x2b821ba0,224,"RegExpMakeCaptureGetter native regexp.js:360",0x44223784,
-code-creation,LazyCompile,0,0x2b821c80,1561,"SetUpRegExp native regexp.js:400",0x44223878,
-code-creation,LazyCompile,0,0x2b8222a0,280," native apinatives.js:1",0x44223b98,
-code-creation,LazyCompile,0,0x2b8223c0,612," native runtime.js:1",0x44223e30,
-code-creation,LazyCompile,0,0x2b822640,1728,"EQUALS native runtime.js:54",0x44224078,
-code-creation,LazyCompile,0,0x2b822d00,376,"STRICT_EQUALS native runtime.js:108",0x44224c18,
-code-creation,LazyCompile,0,0x2b822e80,924,"COMPARE native runtime.js:128",0x44224ca4,
-code-creation,LazyCompile,0,0x2b823220,596,"ADD native runtime.js:171",0x44224d44,
-code-creation,LazyCompile,0,0x2b823480,572,"STRING_ADD_LEFT native runtime.js:191",0x44224dd8,
-code-creation,LazyCompile,0,0x2b8236c0,580,"STRING_ADD_RIGHT native runtime.js:206",0x44224e64,
-code-creation,LazyCompile,0,0x2b823920,296,"SUB native runtime.js:222",0x44224ef4,
-code-creation,LazyCompile,0,0x2b823a60,296,"MUL native runtime.js:230",0x44224f84,
-code-creation,LazyCompile,0,0x2b823ba0,296,"DIV native runtime.js:238",0x44225014,
-code-creation,LazyCompile,0,0x2b823ce0,296,"MOD native runtime.js:246",0x442250a4,
-code-creation,LazyCompile,0,0x2b823e20,296,"BIT_OR native runtime.js:260",0x44225134,
-code-creation,LazyCompile,0,0x2b823f60,384,"BIT_AND native runtime.js:268",0x442251c4,
-code-creation,LazyCompile,0,0x2b8240e0,296,"BIT_XOR native runtime.js:290",0x44225254,
-code-creation,LazyCompile,0,0x2b824220,244,"UNARY_MINUS native runtime.js:298",0x442252e4,
-code-creation,LazyCompile,0,0x2b824320,244,"BIT_NOT native runtime.js:305",0x44225370,
-code-creation,LazyCompile,0,0x2b824420,296,"SHL native runtime.js:312",0x442253fc,
-code-creation,LazyCompile,0,0x2b824560,384,"SAR native runtime.js:320",0x4422548c,
-code-creation,LazyCompile,0,0x2b8246e0,296,"SHR native runtime.js:342",0x4422551c,
-code-creation,LazyCompile,0,0x2b824820,228,"DELETE native runtime.js:356",0x442255ac,
-code-creation,LazyCompile,0,0x2b824920,368,"IN native runtime.js:362",0x4422563c,
-code-creation,LazyCompile,0,0x2b824aa0,644,"INSTANCE_OF native runtime.js:375",0x442256e8,
-code-creation,LazyCompile,0,0x2b824d40,236,"FILTER_KEY native runtime.js:406",0x442257b8,
-code-creation,LazyCompile,0,0x2b824e40,380,"CALL_NON_FUNCTION native runtime.js:413",0x44225848,
-code-creation,LazyCompile,0,0x2b824fc0,380,"CALL_NON_FUNCTION_AS_CONSTRUCTOR native runtime.js:422",0x442258f4,
-code-creation,LazyCompile,0,0x2b825140,288,"CALL_FUNCTION_PROXY native runtime.js:431",0x442259a0,
-code-creation,LazyCompile,0,0x2b825260,260,"CALL_FUNCTION_PROXY_AS_CONSTRUCTOR native runtime.js:439",0x44225a38,
-code-creation,LazyCompile,0,0x2b825380,912,"APPLY_PREPARE native runtime.js:446",0x44225acc,
-code-creation,LazyCompile,0,0x2b825720,232,"APPLY_OVERFLOW native runtime.js:484",0x44225b9c,
-code-creation,LazyCompile,0,0x2b825820,188,"TO_OBJECT native runtime.js:490",0x44225c38,
-code-creation,LazyCompile,0,0x2b8258e0,188,"TO_NUMBER native runtime.js:496",0x44225cc0,
-code-creation,LazyCompile,0,0x2b8259a0,188,"TO_STRING native runtime.js:502",0x44225d48,
-code-creation,LazyCompile,0,0x2b825a60,600,"ToPrimitive native runtime.js:514",0x44225dd0,
-code-creation,LazyCompile,0,0x2b825cc0,404,"ToBoolean native runtime.js:526",0x44225e60,
-code-creation,LazyCompile,0,0x2b825e60,504,"ToNumber native runtime.js:536",0x44225eec,
-code-creation,LazyCompile,0,0x2b826060,416,"ToString native runtime.js:561",0x44225fd8,
-code-creation,LazyCompile,0,0x2b826200,220,"ToName native runtime.js:578",0x442260c4,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227108,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227168,
-code-creation,LazyCompile,3,0x2b8262e0,77,"OpaqueReference",0x442271c8,
-code-creation,LazyCompile,3,0x2b8262e0,77,"JSON",0x44227228,
-code-creation,LazyCompile,0,0x2b8204a0,292,"Boolean",0x44227288,
-code-creation,LazyCompile,3,0x2b82a080,83,"Array",0x442272e8,
-code-creation,LazyCompile,3,0x2b826460,77,"pop",0x44227348,
-code-creation,LazyCompile,3,0x2b826400,77,"push",0x442273a8,
-code-creation,LazyCompile,3,0x2b826640,77,"concat",0x44227408,
-code-creation,LazyCompile,3,0x2b8264c0,77,"shift",0x44227468,
-code-creation,LazyCompile,3,0x2b826520,77,"unshift",0x442274c8,
-code-creation,LazyCompile,3,0x2b826580,77,"slice",0x44227528,
-code-creation,LazyCompile,3,0x2b8265e0,77,"splice",0x44227588,
-code-creation,LazyCompile,0,0x2b8207a0,336,"Number",0x442275e8,
-code-creation,LazyCompile,3,0x2b82a020,78,"InternalArray",0x44227648,
-code-creation,LazyCompile,3,0x2b82a020,78,"InternalPackedArray",0x442276b4,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227714,
-code-creation,LazyCompile,0,0x2b821a40,324,"RegExp",0x44227774,
-code-creation,LazyCompile,0,0x2b81da80,1392,"Date",0x442277d4,
-code-creation,LazyCompile,0,0x2b820ca0,440,"Function",0x44227834,
-code-creation,LazyCompile,0,0x2b81d200,380,"String",0x44227894,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x442278f4,
-code-creation,LazyCompile,0,0x2b81cf80,216,"Script",0x44227960,
-code-creation,LazyCompile,0,0x2b81ff40,308,"Object",0x44227a00,
-code-creation,LazyCompile,3,0x2b829d00,376,"call",0x44227a60,
-code-creation,LazyCompile,3,0x2b829e80,388,"apply",0x44227ac0,
-code-creation,LazyCompile,3,0x2b8262e0,77,"Arguments",0x44227b20,
-code-creation,LazyCompile,3,0x2b826820,77,"ThrowTypeError",0x44227b80,
-code-creation,LazyCompile,3,0x2b826760,77,"",0x44227be0,
-code-creation,LazyCompile,3,0x2b8267c0,77,"",0x44227c40,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227ca0,
-timer-event-start,"V8.GCCompactor",9350
-timer-event-start,"V8.External",9362
-timer-event-end,"V8.External",9370
-timer-event-start,"V8.External",10477
-timer-event-end,"V8.External",10500
-timer-event-end,"V8.GCCompactor",10511
-code-creation,Stub,2,0x2b80a000,484,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80a200,622,"CEntryStub"
-code-creation,Stub,2,0x2b80a480,540,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,13,0x2b80a6a0,116,"CompareICStub"
-code-creation,Stub,2,0x2b80a720,1428,"RecordWriteStub"
-code-creation,Stub,2,0x2b80acc0,97,"StoreBufferOverflowStub"
-code-creation,Stub,2,0x2b80ad40,611,"RecordWriteStub"
-code-creation,Stub,2,0x2b80afc0,76,"InterruptStub"
-code-creation,Stub,13,0x2b80b020,104,"CompareICStub"
-code-creation,Stub,2,0x2b80b0a0,130,"ArgumentsAccessStub"
-code-creation,Stub,2,0x2b80b140,160,"FastNewContextStub"
-code-creation,Stub,2,0x2b80b1e0,79,"StubFailureTrampolineStub"
-code-creation,Stub,2,0x2b80b240,704,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,14,0x2b80b500,93,"CompareNilICStub"
-code-creation,Stub,2,0x2b80b560,289,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80b6a0,664,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80b940,740,"NameDictionaryLookupStub"
-code-creation,Stub,13,0x2b80bc40,156,"CompareICStub"
-code-creation,Stub,2,0x2b80bce0,611,"RecordWriteStub"
-code-creation,Stub,13,0x2b80bf60,122,"CompareICStub"
-code-creation,Stub,2,0x2b80bfe0,217,"CreateAllocationSiteStub"
-code-creation,Stub,2,0x2b80c0c0,1456,"RecordWriteStub"
-code-creation,Stub,2,0x2b80c680,245,"StoreArrayLiteralElementStub"
-code-creation,Stub,2,0x2b80c780,1448,"RecordWriteStub"
-code-creation,Stub,2,0x2b80cd40,1471,"StringAddStub"
-code-creation,Stub,2,0x2b80d300,1448,"RecordWriteStub"
-code-creation,Stub,2,0x2b80d8c0,1453,"RecordWriteStub"
-code-creation,Stub,12,0x2b80de80,146,"BinaryOpStub"
-code-creation,Stub,2,0x2b80df20,640,"InternalArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e1a0,517,"ArrayConstructorStub"
-code-creation,Stub,2,0x2b80e3c0,305,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e500,305,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e640,349,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e7a0,349,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80e900,289,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b80ea40,680,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80ed00,692,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80efc0,704,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80f280,664,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b80f520,488,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80f720,540,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80f940,432,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80fb00,432,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b80fcc0,1453,"RecordWriteStub"
-code-creation,Stub,2,0x2b810280,400,"InternalArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b810420,611,"RecordWriteStub"
-code-creation,Stub,2,0x2b8106a0,213,"JSEntryStub"
-code-creation,Stub,13,0x2b810780,104,"CompareICStub"
-code-creation,Stub,12,0x2b810800,124,"BinaryOpStub"
-code-creation,Stub,2,0x2b810880,1447,"StringAddStub"
-code-creation,Stub,2,0x2b810e40,640,"InternalArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b8110c0,400,"InternalArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b811260,261,"FastCloneShallowArrayStub"
-code-creation,Stub,12,0x2b811380,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b8113e0,76,"StackCheckStub"
-code-creation,Stub,2,0x2b811440,1437,"RecordWriteStub"
-code-creation,Stub,2,0x2b8119e0,289,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b811b20,331,"CallFunctionStub"
-code-creation,Builtin,3,0x2b811c80,174,"A builtin from the snapshot"
-code-creation,Stub,14,0x2b811d40,124,"CompareNilICStub"
-code-creation,Stub,2,0x2b811dc0,1420,"RecordWriteStub"
-code-creation,Stub,13,0x2b812360,104,"CompareICStub"
-code-creation,Stub,2,0x2b8123e0,76,"LoadFieldStub"
-code-creation,Stub,13,0x2b812440,104,"CompareICStub"
-code-creation,Stub,2,0x2b8124c0,195,"NumberToStringStub"
-code-creation,Stub,15,0x2b8125a0,148,"ToBooleanStub"
-code-creation,Stub,2,0x2b812640,351,"ArgumentsAccessStub"
-code-creation,Stub,2,0x2b8127a0,664,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b812a40,1420,"RecordWriteStub"
-code-creation,Stub,12,0x2b812fe0,133,"BinaryOpStub"
-code-creation,Stub,2,0x2b813080,1664,"StringAddStub"
-code-creation,Stub,2,0x2b813700,1661,"StringAddStub"
-code-creation,Stub,2,0x2b813d80,472,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,2,0x2b813f60,80,"StubFailureTrampolineStub"
-code-creation,Stub,13,0x2b813fc0,104,"CompareICStub"
-code-creation,Stub,2,0x2b814040,331,"CallFunctionStub"
-code-creation,Stub,2,0x2b8141a0,660,"ArraySingleArgumentConstructorStub"
-code-creation,Stub,2,0x2b814440,1433,"RecordWriteStub"
-code-creation,Stub,12,0x2b8149e0,146,"BinaryOpStub"
-code-creation,Stub,2,0x2b814a80,271,"CallConstructStub"
-code-creation,Stub,15,0x2b814ba0,136,"ToBooleanStub"
-code-creation,Stub,2,0x2b814c40,468,"ArrayNArgumentsConstructorStub"
-code-creation,Stub,15,0x2b814e20,128,"ToBooleanStub"
-code-creation,Stub,2,0x2b814ea0,163,"FastNewContextStub"
-code-creation,Stub,2,0x2b814f60,1425,"RecordWriteStub"
-code-creation,LoadIC,5,0x2b815500,145,"A load IC from the snapshot"
-code-creation,Builtin,3,0x2b8155a0,83,"A builtin from the snapshot"
-code-creation,Stub,12,0x2b815600,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b815660,1433,"RecordWriteStub"
-code-creation,Stub,2,0x2b815c00,331,"CallFunctionStub"
-code-creation,Stub,13,0x2b815d60,104,"CompareICStub"
-code-creation,Stub,2,0x2b815de0,304,"FastNewClosureStub"
-code-creation,Stub,2,0x2b815f20,285,"ArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b816040,1433,"RecordWriteStub"
-code-creation,Stub,2,0x2b8165e0,233,"InternalArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b8166e0,740,"NameDictionaryLookupStub"
-code-creation,Stub,2,0x2b8169e0,740,"NameDictionaryLookupStub"
-code-creation,Stub,12,0x2b816ce0,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b816d40,216,"StringCompareStub"
-code-creation,Stub,15,0x2b816e20,93,"ToBooleanStub"
-code-creation,Stub,12,0x2b816e80,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b816ee0,1433,"RecordWriteStub"
-code-creation,Stub,12,0x2b817480,155,"BinaryOpStub"
-code-creation,Stub,2,0x2b817520,169,"InternalArrayConstructorStub"
-code-creation,Stub,2,0x2b8175e0,233,"InternalArrayNoArgumentConstructorStub"
-code-creation,Stub,2,0x2b8176e0,1433,"RecordWriteStub"
-code-creation,Stub,12,0x2b817c80,88,"BinaryOpStub"
-code-creation,Stub,2,0x2b817ce0,328,"KeyedLoadElementStub"
-code-creation,Stub,2,0x2b817e40,1461,"RecordWriteStub"
-code-creation,Stub,2,0x2b818400,98,"ToNumberStub"
-code-creation,Stub,13,0x2b818480,122,"CompareICStub"
-code-creation,Stub,12,0x2b818500,124,"BinaryOpStub"
-code-creation,Stub,2,0x2b818580,148,"CallConstructStub"
-code-creation,Stub,13,0x2b818620,491,"CompareICStub"
-code-creation,Stub,2,0x2b818820,213,"JSEntryStub"
-code-creation,CallIC,7,0x2b818900,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8189c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818a80,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818b40,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818c00,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818cc0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818d80,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818e40,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818f00,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b818fc0,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819080,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819140,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819200,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8192c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819380,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819440,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819500,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8195c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819680,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819740,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819800,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b8198c0,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819980,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819a40,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819b00,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819bc0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819c80,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819d40,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819e00,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819ec0,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b819f80,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a040,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a100,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a1c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a280,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a340,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a400,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a4c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a580,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a640,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a700,189,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a7c0,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a880,178,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81a940,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81aa00,180,"A call IC from the snapshot"
-code-creation,CallIC,7,0x2b81aac0,180,"A call IC from the snapshot"
-code-creation,Builtin,3,0x2b81ab80,107,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81ac00,105,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81ac80,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81ace0,432,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b81afc0,101,"A builtin from the snapshot"
-code-creation,LoadIC,5,0x2b81b1a0,83,"A load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b81bf00,83,"A keyed load IC from the snapshot"
-code-creation,StoreIC,9,0x2b81c680,84,"A store IC from the snapshot"
-code-creation,Builtin,3,0x2b8262e0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826340,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8263a0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826400,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826460,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8264c0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826520,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826580,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8265e0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826640,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8266a0,80,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826700,80,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826760,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8267c0,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826820,77,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826880,75,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8268e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826960,491,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826b60,406,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826d00,157,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826da0,131,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826e40,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826ec0,107,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826f40,143,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b826fe0,143,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827080,143,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827120,94,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827180,91,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8271e0,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827240,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8272a0,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827300,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827360,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b8273c0,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827420,84,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b827480,84,"A builtin from the snapshot"
-code-creation,LoadIC,5,0x2b8274e0,83,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b827540,313,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b827680,266,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b8277a0,80,"A load IC from the snapshot"
-code-creation,LoadIC,5,0x2b827800,83,"A load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827860,83,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b8278c0,896,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827c40,499,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827e40,144,"A keyed load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b827ee0,216,"A keyed load IC from the snapshot"
-code-creation,StoreIC,9,0x2b827fc0,365,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828140,293,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828280,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b8282e0,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828340,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b8283a0,84,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828400,365,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828580,293,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b8286c0,88,"A store IC from the snapshot"
-code-creation,StoreIC,9,0x2b828720,82,"A store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b828780,84,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b8287e0,2082,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b829020,84,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b829080,2082,"A keyed store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b8298c0,286,"A keyed store IC from the snapshot"
-code-creation,Builtin,3,0x2b8299e0,355,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b829b60,416,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b829d00,376,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b829e80,388,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a020,78,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a080,83,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a0e0,357,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a260,359,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a3e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a460,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a4e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a560,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a5e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a660,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a6e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a760,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a7e0,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a860,101,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a8e0,104,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a960,106,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82a9e0,110,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82aa60,112,"A builtin from the snapshot"
-code-creation,LoadIC,5,0x2b82aae0,106,"A load IC from the snapshot"
-code-creation,KeyedLoadIC,6,0x2b82ab60,106,"A keyed load IC from the snapshot"
-code-creation,StoreIC,9,0x2b82abe0,108,"A store IC from the snapshot"
-code-creation,KeyedStoreIC,10,0x2b82ac60,108,"A keyed store IC from the snapshot"
-code-creation,Stub,14,0x2b82ace0,104,"CallFunctionStub"
-code-creation,Builtin,3,0x2b82ad60,65,"A builtin from the snapshot"
-code-creation,Builtin,3,0x2b82adc0,93,"A builtin from the snapshot"
-timer-event-start,"V8.GCCompactor",12962
-timer-event-start,"V8.External",12972
-timer-event-end,"V8.External",12981
-timer-event-start,"V8.External",13996
-timer-event-end,"V8.External",14014
-timer-event-end,"V8.GCCompactor",14024
-code-creation,LazyCompile,3,0x2b81ac80,77,"Empty :1",0x4420e5cc,
-code-creation,LazyCompile,0,0x2b81b040,336,"SetUpUri native uri.js:442",0x4420f5b4,
-code-creation,LazyCompile,0,0x2b81b960,1429,"FormatString native messages.js:187",0x4420f918,
-code-creation,LazyCompile,0,0x2b81bf60,280,"MakeGenericError native messages.js:282",0x44211088,
-code-creation,LazyCompile,0,0x2b81c080,264,"FormatMessage native messages.js:301",0x4421113c,
-code-creation,LazyCompile,0,0x2b81c1a0,200,"MakeRangeError native messages.js:335",0x442112f0,
-code-creation,LazyCompile,0,0x2b81c280,1012,"captureStackTrace native messages.js:1123",0x44212280,
-code-creation,LazyCompile,0,0x2b81c6e0,460,"SetUpError native messages.js:1173",0x44212410,
-code-creation,LazyCompile,0,0x2b81c8c0,692,"SetUpError.a native messages.js:1176",0x442124c0,
-code-creation,LazyCompile,0,0x2b81cb80,164,"d native messages.js:1192",0x44212548,
-code-creation,LazyCompile,0,0x2b81cc40,360," native messages.js:1202",0x442125d0,
-code-creation,LazyCompile,0,0x2b81cc40,360,"Error",0x44212698,
-code-creation,LazyCompile,0,0x2b81cc40,360,"TypeError",0x442126f8,
-code-creation,LazyCompile,0,0x2b81cc40,360,"RangeError",0x44212758,
-code-creation,LazyCompile,0,0x2b81cc40,360,"SyntaxError",0x442127b8,
-code-creation,LazyCompile,0,0x2b81cc40,360,"ReferenceError",0x44212818,
-code-creation,LazyCompile,0,0x2b81cc40,360,"EvalError",0x44212878,
-code-creation,LazyCompile,0,0x2b81cc40,360,"URIError",0x442128d8,
-code-creation,LazyCompile,0,0x2b81cdc0,424,"SetUpStackOverflowBoilerplate native messages.js:1301",0x44212a74,
-code-creation,LazyCompile,0,0x2b81d200,380,"StringConstructor native string.js:35",0x44214e2c,
-code-creation,LazyCompile,0,0x2b81d380,1132,"SetUpString native string.js:962",0x44216ea8,
-code-creation,LazyCompile,0,0x2b81da80,1392,"DateConstructor native date.js:141",0x442182bc,
-code-creation,LazyCompile,0,0x2b81e000,1396,"SetUpDate native date.js:761",0x44219944,
-code-creation,LazyCompile,0,0x2b81e6a0,2272,"SetUpArray native array.js:1591",0x4421c6ac,
-code-creation,LazyCompile,0,0x2b81f500,561,"InstallFunctions native v8natives.js:46",0x4421cc1c,
-code-creation,LazyCompile,0,0x2b81f740,304,"InstallGetterSetter native v8natives.js:72",0x4421ea1c,
-code-creation,LazyCompile,0,0x2b81f880,814,"SetUpLockedPrototype native v8natives.js:87",0x4421eab4,
-code-creation,LazyCompile,0,0x2b81fbc0,452,"SetUpGlobal native v8natives.js:197",0x4421ed3c,
-code-creation,LazyCompile,0,0x2b81fda0,404,"hasOwnProperty native v8natives.js:251",0x4421eee4,
-code-creation,LazyCompile,0,0x2b81ff40,308,"ObjectConstructor native v8natives.js:1371",0x442200b4,
-code-creation,LazyCompile,0,0x2b820080,1044,"SetUpObject native v8natives.js:1385",0x44220140,
-code-creation,LazyCompile,0,0x2b8204a0,292,"BooleanConstructor native v8natives.js:1437",0x442201c8,
-code-creation,LazyCompile,0,0x2b8205e0,448,"SetUpBoolean native v8natives.js:1472",0x44220314,
-code-creation,LazyCompile,0,0x2b8207a0,336,"NumberConstructor native v8natives.js:1491",0x442203ac,
-code-creation,LazyCompile,0,0x2b820900,924,"SetUpNumber native v8natives.js:1635",0x4422073c,
-code-creation,LazyCompile,0,0x2b820ca0,440,"FunctionConstructor native v8natives.js:1813",0x44220954,
-code-creation,LazyCompile,0,0x2b820e60,380,"SetUpFunction native v8natives.js:1826",0x442209f8,
-code-creation,LazyCompile,0,0x2b821100,260,"SetUpJSON native json.js:219",0x44221940,
-code-creation,LazyCompile,0,0x2b821380,164,"MathConstructor native math.js:40",0x44221ba4,
-code-creation,LazyCompile,0,0x2b821440,1112,"SetUpMath native math.js:226",0x4422283c,
-code-creation,LazyCompile,0,0x2b821a40,324,"RegExpConstructor native regexp.js:90",0x44223264,
-code-creation,LazyCompile,0,0x2b821ba0,224,"RegExpMakeCaptureGetter native regexp.js:360",0x44223784,
-code-creation,LazyCompile,0,0x2b821c80,1561,"SetUpRegExp native regexp.js:400",0x44223878,
-code-creation,LazyCompile,0,0x2b822640,1728,"EQUALS native runtime.js:54",0x44224078,
-code-creation,LazyCompile,0,0x2b822d00,376,"STRICT_EQUALS native runtime.js:108",0x44224c18,
-code-creation,LazyCompile,0,0x2b822e80,924,"COMPARE native runtime.js:128",0x44224ca4,
-code-creation,LazyCompile,0,0x2b823220,596,"ADD native runtime.js:171",0x44224d44,
-code-creation,LazyCompile,0,0x2b823480,572,"STRING_ADD_LEFT native runtime.js:191",0x44224dd8,
-code-creation,LazyCompile,0,0x2b8236c0,580,"STRING_ADD_RIGHT native runtime.js:206",0x44224e64,
-code-creation,LazyCompile,0,0x2b823920,296,"SUB native runtime.js:222",0x44224ef4,
-code-creation,LazyCompile,0,0x2b823a60,296,"MUL native runtime.js:230",0x44224f84,
-code-creation,LazyCompile,0,0x2b823ba0,296,"DIV native runtime.js:238",0x44225014,
-code-creation,LazyCompile,0,0x2b823ce0,296,"MOD native runtime.js:246",0x442250a4,
-code-creation,LazyCompile,0,0x2b823e20,296,"BIT_OR native runtime.js:260",0x44225134,
-code-creation,LazyCompile,0,0x2b823f60,384,"BIT_AND native runtime.js:268",0x442251c4,
-code-creation,LazyCompile,0,0x2b8240e0,296,"BIT_XOR native runtime.js:290",0x44225254,
-code-creation,LazyCompile,0,0x2b824220,244,"UNARY_MINUS native runtime.js:298",0x442252e4,
-code-creation,LazyCompile,0,0x2b824320,244,"BIT_NOT native runtime.js:305",0x44225370,
-code-creation,LazyCompile,0,0x2b824420,296,"SHL native runtime.js:312",0x442253fc,
-code-creation,LazyCompile,0,0x2b824560,384,"SAR native runtime.js:320",0x4422548c,
-code-creation,LazyCompile,0,0x2b8246e0,296,"SHR native runtime.js:342",0x4422551c,
-code-creation,LazyCompile,0,0x2b824820,228,"DELETE native runtime.js:356",0x442255ac,
-code-creation,LazyCompile,0,0x2b824920,368,"IN native runtime.js:362",0x4422563c,
-code-creation,LazyCompile,0,0x2b824aa0,644,"INSTANCE_OF native runtime.js:375",0x442256e8,
-code-creation,LazyCompile,0,0x2b824d40,236,"FILTER_KEY native runtime.js:406",0x442257b8,
-code-creation,LazyCompile,0,0x2b824e40,380,"CALL_NON_FUNCTION native runtime.js:413",0x44225848,
-code-creation,LazyCompile,0,0x2b824fc0,380,"CALL_NON_FUNCTION_AS_CONSTRUCTOR native runtime.js:422",0x442258f4,
-code-creation,LazyCompile,0,0x2b825140,288,"CALL_FUNCTION_PROXY native runtime.js:431",0x442259a0,
-code-creation,LazyCompile,0,0x2b825260,260,"CALL_FUNCTION_PROXY_AS_CONSTRUCTOR native runtime.js:439",0x44225a38,
-code-creation,LazyCompile,0,0x2b825380,912,"APPLY_PREPARE native runtime.js:446",0x44225acc,
-code-creation,LazyCompile,0,0x2b825720,232,"APPLY_OVERFLOW native runtime.js:484",0x44225b9c,
-code-creation,LazyCompile,0,0x2b825820,188,"TO_OBJECT native runtime.js:490",0x44225c38,
-code-creation,LazyCompile,0,0x2b8258e0,188,"TO_NUMBER native runtime.js:496",0x44225cc0,
-code-creation,LazyCompile,0,0x2b8259a0,188,"TO_STRING native runtime.js:502",0x44225d48,
-code-creation,LazyCompile,0,0x2b825a60,600,"ToPrimitive native runtime.js:514",0x44225dd0,
-code-creation,LazyCompile,0,0x2b825cc0,404,"ToBoolean native runtime.js:526",0x44225e60,
-code-creation,LazyCompile,0,0x2b825e60,504,"ToNumber native runtime.js:536",0x44225eec,
-code-creation,LazyCompile,0,0x2b826060,416,"ToString native runtime.js:561",0x44225fd8,
-code-creation,LazyCompile,0,0x2b826200,220,"ToName native runtime.js:578",0x442260c4,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227108,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227168,
-code-creation,LazyCompile,3,0x2b8262e0,77,"OpaqueReference",0x442271c8,
-code-creation,LazyCompile,3,0x2b8262e0,77,"JSON",0x44227228,
-code-creation,LazyCompile,0,0x2b8204a0,292,"Boolean",0x44227288,
-code-creation,LazyCompile,3,0x2b82a080,83,"Array",0x442272e8,
-code-creation,LazyCompile,3,0x2b826460,77,"pop",0x44227348,
-code-creation,LazyCompile,3,0x2b826400,77,"push",0x442273a8,
-code-creation,LazyCompile,3,0x2b826640,77,"concat",0x44227408,
-code-creation,LazyCompile,3,0x2b8264c0,77,"shift",0x44227468,
-code-creation,LazyCompile,3,0x2b826520,77,"unshift",0x442274c8,
-code-creation,LazyCompile,3,0x2b826580,77,"slice",0x44227528,
-code-creation,LazyCompile,3,0x2b8265e0,77,"splice",0x44227588,
-code-creation,LazyCompile,0,0x2b8207a0,336,"Number",0x442275e8,
-code-creation,LazyCompile,3,0x2b82a020,78,"InternalArray",0x44227648,
-code-creation,LazyCompile,3,0x2b82a020,78,"InternalPackedArray",0x442276b4,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227714,
-code-creation,LazyCompile,0,0x2b821a40,324,"RegExp",0x44227774,
-code-creation,LazyCompile,0,0x2b81da80,1392,"Date",0x442277d4,
-code-creation,LazyCompile,0,0x2b820ca0,440,"Function",0x44227834,
-code-creation,LazyCompile,0,0x2b81d200,380,"String",0x44227894,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x442278f4,
-code-creation,LazyCompile,0,0x2b81cf80,216,"Script",0x44227960,
-code-creation,LazyCompile,0,0x2b81ff40,308,"Object",0x44227a00,
-code-creation,LazyCompile,3,0x2b829d00,376,"call",0x44227a60,
-code-creation,LazyCompile,3,0x2b829e80,388,"apply",0x44227ac0,
-code-creation,LazyCompile,3,0x2b8262e0,77,"Arguments",0x44227b20,
-code-creation,LazyCompile,3,0x2b826820,77,"ThrowTypeError",0x44227b80,
-code-creation,LazyCompile,3,0x2b826760,77,"",0x44227be0,
-code-creation,LazyCompile,3,0x2b8267c0,77,"",0x44227c40,
-code-creation,LazyCompile,3,0x2b8262e0,77,"",0x44227ca0,
-code-creation,Stub,2,0x2b81ef80,782,"CEntryStub"
-code-creation,Stub,2,0x2b81f2a0,197,"StoreBufferOverflowStub"
-code-creation,Stub,2,0x2b81f380,79,"StubFailureTrampolineStub"
-code-creation,Stub,2,0x2b81f3e0,80,"StubFailureTrampolineStub"
+shared-library,/usr/local/google/home/yangguo/v8/out/ia32.release/d8,0x08048000,0x08557000
+shared-library,2506f000-25070000,0x2506f000,0x25070000
+shared-library,31e60000-31e61000,0x31e60000,0x31e61000
+shared-library,35dff000-35e00000,0x35dff000,0x35e00000
+shared-library,48218000-48219000,0x48218000,0x48219000
+shared-library,4af7d000-4af7e000,0x4af7d000,0x4af7e000
+shared-library,55bf2000-55bf3000,0x55bf2000,0x55bf3000
+shared-library,/lib/i386-linux-gnu/libc-2.15.so,0xf7450000,0xf75f3000
+shared-library,/lib/i386-linux-gnu/libpthread-2.15.so,0xf75f9000,0xf7610000
+shared-library,/lib/i386-linux-gnu/libgcc_s.so.1,0xf7614000,0xf7630000
+shared-library,/lib/i386-linux-gnu/libm-2.15.so,0xf7633000,0xf765d000
+shared-library,/usr/lib/i386-linux-gnu/libstdc++.so.6.0.16,0xf765f000,0xf7737000
+shared-library,[vdso],0xf776d000,0xf776e000
+shared-library,/lib/i386-linux-gnu/ld-2.15.so,0xf776e000,0xf778e000
+profiler,begin,1
+timer-event-start,V8.GCCompactor,2425
+timer-event-start,V8.External,2458
+timer-event-end,V8.External,2468
+timer-event-start,V8.External,3810
+timer-event-end,V8.External,3830
+timer-event-end,V8.GCCompactor,3840
+code-creation,Stub,2,0x2b80a000,484,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80a200,622,CEntryStub
+code-creation,Stub,2,0x2b80a480,540,ArrayNArgumentsConstructorStub
+code-creation,Stub,13,0x2b80a6a0,116,CompareICStub
+code-creation,Stub,2,0x2b80a720,1428,RecordWriteStub
+code-creation,Stub,2,0x2b80acc0,97,StoreBufferOverflowStub
+code-creation,Stub,2,0x2b80ad40,611,RecordWriteStub
+code-creation,Stub,2,0x2b80afc0,76,InterruptStub
+code-creation,Stub,13,0x2b80b020,104,CompareICStub
+code-creation,Stub,2,0x2b80b0a0,130,ArgumentsAccessStub
+code-creation,Stub,2,0x2b80b140,160,FastNewContextStub
+code-creation,Stub,2,0x2b80b1e0,79,StubFailureTrampolineStub
+code-creation,Stub,2,0x2b80b240,704,ArraySingleArgumentConstructorStub
+code-creation,Stub,14,0x2b80b500,93,CompareNilICStub
+code-creation,Stub,2,0x2b80b560,289,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80b6a0,664,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80b940,740,NameDictionaryLookupStub
+code-creation,Stub,13,0x2b80bc40,156,CompareICStub
+code-creation,Stub,2,0x2b80bce0,611,RecordWriteStub
+code-creation,Stub,13,0x2b80bf60,122,CompareICStub
+code-creation,Stub,2,0x2b80bfe0,217,CreateAllocationSiteStub
+code-creation,Stub,2,0x2b80c0c0,1456,RecordWriteStub
+code-creation,Stub,2,0x2b80c680,245,StoreArrayLiteralElementStub
+code-creation,Stub,2,0x2b80c780,1448,RecordWriteStub
+code-creation,Stub,2,0x2b80cd40,1471,StringAddStub
+code-creation,Stub,2,0x2b80d300,1448,RecordWriteStub
+code-creation,Stub,2,0x2b80d8c0,1453,RecordWriteStub
+code-creation,Stub,12,0x2b80de80,146,BinaryOpStub
+code-creation,Stub,2,0x2b80df20,640,InternalArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80e1a0,517,ArrayConstructorStub
+code-creation,Stub,2,0x2b80e3c0,305,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e500,305,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e640,349,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e7a0,349,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e900,289,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80ea40,680,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80ed00,692,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80efc0,704,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80f280,664,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80f520,488,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80f720,540,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80f940,432,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80fb00,432,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80fcc0,1453,RecordWriteStub
+code-creation,Stub,2,0x2b810280,400,InternalArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b810420,611,RecordWriteStub
+code-creation,Stub,2,0x2b8106a0,213,JSEntryStub
+code-creation,Stub,13,0x2b810780,104,CompareICStub
+code-creation,Stub,12,0x2b810800,124,BinaryOpStub
+code-creation,Stub,2,0x2b810880,1447,StringAddStub
+code-creation,Stub,2,0x2b810e40,640,InternalArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b8110c0,400,InternalArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b811260,261,FastCloneShallowArrayStub
+code-creation,Stub,12,0x2b811380,88,BinaryOpStub
+code-creation,Stub,2,0x2b8113e0,76,StackCheckStub
+code-creation,Stub,2,0x2b811440,1437,RecordWriteStub
+code-creation,Stub,2,0x2b8119e0,289,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b811b20,331,CallFunctionStub
+code-creation,Builtin,3,0x2b811c80,174,A builtin from the snapshot
+code-creation,Stub,14,0x2b811d40,124,CompareNilICStub
+code-creation,Stub,2,0x2b811dc0,1420,RecordWriteStub
+code-creation,Stub,13,0x2b812360,104,CompareICStub
+code-creation,Stub,2,0x2b8123e0,76,LoadFieldStub
+code-creation,Stub,13,0x2b812440,104,CompareICStub
+code-creation,Stub,2,0x2b8124c0,195,NumberToStringStub
+code-creation,Stub,15,0x2b8125a0,148,ToBooleanStub
+code-creation,Stub,2,0x2b812640,351,ArgumentsAccessStub
+code-creation,Stub,2,0x2b8127a0,664,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b812a40,1420,RecordWriteStub
+code-creation,Stub,12,0x2b812fe0,133,BinaryOpStub
+code-creation,Stub,2,0x2b813080,1664,StringAddStub
+code-creation,Stub,2,0x2b813700,1661,StringAddStub
+code-creation,Stub,2,0x2b813d80,472,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b813f60,80,StubFailureTrampolineStub
+code-creation,Stub,13,0x2b813fc0,104,CompareICStub
+code-creation,Stub,2,0x2b814040,331,CallFunctionStub
+code-creation,Stub,2,0x2b8141a0,660,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b814440,1433,RecordWriteStub
+code-creation,Stub,12,0x2b8149e0,146,BinaryOpStub
+code-creation,Stub,2,0x2b814a80,271,CallConstructStub
+code-creation,Stub,15,0x2b814ba0,136,ToBooleanStub
+code-creation,Stub,2,0x2b814c40,468,ArrayNArgumentsConstructorStub
+code-creation,Stub,15,0x2b814e20,128,ToBooleanStub
+code-creation,Stub,2,0x2b814ea0,163,FastNewContextStub
+code-creation,Stub,2,0x2b814f60,1425,RecordWriteStub
+code-creation,LoadIC,5,0x2b815500,145,A load IC from the snapshot
+code-creation,Builtin,3,0x2b8155a0,83,A builtin from the snapshot
+code-creation,Stub,12,0x2b815600,88,BinaryOpStub
+code-creation,Stub,2,0x2b815660,1433,RecordWriteStub
+code-creation,Stub,2,0x2b815c00,331,CallFunctionStub
+code-creation,Stub,13,0x2b815d60,104,CompareICStub
+code-creation,Stub,2,0x2b815de0,304,FastNewClosureStub
+code-creation,Stub,2,0x2b815f20,285,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b816040,1433,RecordWriteStub
+code-creation,Stub,2,0x2b8165e0,233,InternalArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b8166e0,740,NameDictionaryLookupStub
+code-creation,Stub,2,0x2b8169e0,740,NameDictionaryLookupStub
+code-creation,Stub,12,0x2b816ce0,88,BinaryOpStub
+code-creation,Stub,2,0x2b816d40,216,StringCompareStub
+code-creation,Stub,15,0x2b816e20,93,ToBooleanStub
+code-creation,Stub,12,0x2b816e80,88,BinaryOpStub
+code-creation,Stub,2,0x2b816ee0,1433,RecordWriteStub
+code-creation,Stub,12,0x2b817480,155,BinaryOpStub
+code-creation,Stub,2,0x2b817520,169,InternalArrayConstructorStub
+code-creation,Stub,2,0x2b8175e0,233,InternalArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b8176e0,1433,RecordWriteStub
+code-creation,Stub,12,0x2b817c80,88,BinaryOpStub
+code-creation,Stub,2,0x2b817ce0,328,KeyedLoadElementStub
+code-creation,Stub,2,0x2b817e40,1461,RecordWriteStub
+code-creation,Stub,2,0x2b818400,98,ToNumberStub
+code-creation,Stub,13,0x2b818480,122,CompareICStub
+code-creation,Stub,12,0x2b818500,124,BinaryOpStub
+code-creation,Stub,2,0x2b818580,148,CallConstructStub
+code-creation,Stub,13,0x2b818620,491,CompareICStub
+code-creation,Stub,2,0x2b818820,213,JSEntryStub
+code-creation,CallIC,7,0x2b818900,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8189c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818a80,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818b40,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818c00,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818cc0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818d80,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818e40,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818f00,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818fc0,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819080,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819140,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819200,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8192c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819380,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819440,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819500,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8195c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819680,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819740,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819800,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8198c0,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819980,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819a40,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819b00,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819bc0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819c80,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819d40,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819e00,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819ec0,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819f80,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a040,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a100,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a1c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a280,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a340,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a400,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a4c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a580,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a640,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a700,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a7c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a880,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a940,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81aa00,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81aac0,180,A call IC from the snapshot
+code-creation,Builtin,3,0x2b81ab80,107,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81ac00,105,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81ac80,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81ace0,432,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81afc0,101,A builtin from the snapshot
+code-creation,LoadIC,5,0x2b81b1a0,83,A load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b81bf00,83,A keyed load IC from the snapshot
+code-creation,StoreIC,9,0x2b81c680,84,A store IC from the snapshot
+code-creation,Builtin,3,0x2b8262e0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826340,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8263a0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826400,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826460,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8264c0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826520,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826580,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8265e0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826640,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8266a0,80,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826700,80,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826760,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8267c0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826820,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826880,75,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8268e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826960,491,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826b60,406,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826d00,157,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826da0,131,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826e40,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826ec0,107,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826f40,143,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826fe0,143,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827080,143,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827120,94,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827180,91,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8271e0,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827240,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8272a0,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827300,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827360,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8273c0,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827420,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827480,84,A builtin from the snapshot
+code-creation,LoadIC,5,0x2b8274e0,83,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b827540,313,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b827680,266,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b8277a0,80,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b827800,83,A load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827860,83,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b8278c0,896,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827c40,499,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827e40,144,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827ee0,216,A keyed load IC from the snapshot
+code-creation,StoreIC,9,0x2b827fc0,365,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828140,293,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828280,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b8282e0,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828340,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b8283a0,84,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828400,365,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828580,293,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b8286c0,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828720,82,A store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b828780,84,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b8287e0,2082,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b829020,84,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b829080,2082,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b8298c0,286,A keyed store IC from the snapshot
+code-creation,Builtin,3,0x2b8299e0,355,A builtin from the snapshot
+code-creation,Builtin,3,0x2b829b60,416,A builtin from the snapshot
+code-creation,Builtin,3,0x2b829d00,376,A builtin from the snapshot
+code-creation,Builtin,3,0x2b829e80,388,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a020,78,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a080,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a0e0,357,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a260,359,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a3e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a460,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a4e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a560,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a5e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a660,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a6e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a760,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a7e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a860,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a8e0,104,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a960,106,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a9e0,110,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82aa60,112,A builtin from the snapshot
+code-creation,LoadIC,5,0x2b82aae0,106,A load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b82ab60,106,A keyed load IC from the snapshot
+code-creation,StoreIC,9,0x2b82abe0,108,A store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b82ac60,108,A keyed store IC from the snapshot
+code-creation,Stub,14,0x2b82ace0,104,CallFunctionStub
+code-creation,Builtin,3,0x2b82ad60,65,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82adc0,93,A builtin from the snapshot
+timer-event-start,V8.GCCompactor,6301
+timer-event-start,V8.External,6312
+timer-event-end,V8.External,6321
+timer-event-start,V8.External,7418
+timer-event-end,V8.External,7436
+timer-event-end,V8.GCCompactor,7446
+code-creation,LazyCompile,3,0x2b81ac80,77,Empty :1,0x4420e5cc,
+code-creation,LazyCompile,0,0x2b81aea0,264, native uri.js:1,0x4420e668,
+code-creation,LazyCompile,0,0x2b81b040,336,SetUpUri native uri.js:442,0x4420f5b4,
+code-creation,LazyCompile,0,0x2b81b200,1880, native messages.js:1,0x4420f670,
+code-creation,LazyCompile,0,0x2b81b960,1429,FormatString native messages.js:187,0x4420f918,
+code-creation,LazyCompile,0,0x2b81bf60,280,MakeGenericError native messages.js:282,0x44211088,
+code-creation,LazyCompile,0,0x2b81c080,264,FormatMessage native messages.js:301,0x4421113c,
+code-creation,LazyCompile,0,0x2b81c1a0,200,MakeRangeError native messages.js:335,0x442112f0,
+code-creation,LazyCompile,0,0x2b81c280,1012,captureStackTrace native messages.js:1123,0x44212280,
+code-creation,LazyCompile,0,0x2b81c6e0,460,SetUpError native messages.js:1173,0x44212410,
+code-creation,LazyCompile,0,0x2b81c8c0,692,SetUpError.a native messages.js:1176,0x442124c0,
+code-creation,LazyCompile,0,0x2b81cb80,164,d native messages.js:1192,0x44212548,
+code-creation,LazyCompile,0,0x2b81cc40,360, native messages.js:1202,0x442125d0,
+code-creation,LazyCompile,0,0x2b81cc40,360,Error,0x44212698,
+code-creation,LazyCompile,0,0x2b81cc40,360,TypeError,0x442126f8,
+code-creation,LazyCompile,0,0x2b81cc40,360,RangeError,0x44212758,
+code-creation,LazyCompile,0,0x2b81cc40,360,SyntaxError,0x442127b8,
+code-creation,LazyCompile,0,0x2b81cc40,360,ReferenceError,0x44212818,
+code-creation,LazyCompile,0,0x2b81cc40,360,EvalError,0x44212878,
+code-creation,LazyCompile,0,0x2b81cc40,360,URIError,0x442128d8,
+code-creation,LazyCompile,0,0x2b81cdc0,424,SetUpStackOverflowBoilerplate native messages.js:1301,0x44212a74,
+code-creation,LazyCompile,0,0x2b81cf80,216, native messages.js:294,0x44214b3c,
+code-creation,LazyCompile,0,0x2b81d060,408, native string.js:1,0x44214c2c,
+code-creation,LazyCompile,0,0x2b81d200,380,StringConstructor native string.js:35,0x44214e2c,
+code-creation,LazyCompile,0,0x2b81d380,1132,SetUpString native string.js:962,0x44216ea8,
+code-creation,LazyCompile,0,0x2b81d800,616, native date.js:1,0x44216fa0,
+code-creation,LazyCompile,0,0x2b81da80,1392,DateConstructor native date.js:141,0x442182bc,
+code-creation,LazyCompile,0,0x2b81e000,1396,SetUpDate native date.js:761,0x44219944,
+code-creation,LazyCompile,0,0x2b81e580,268, native array.js:1,0x44219b20,
+code-creation,LazyCompile,0,0x2b81e6a0,2272,SetUpArray native array.js:1591,0x4421c6ac,
+code-creation,LazyCompile,0,0x2b81ef80,292,SetUpArray.b native array.js:1605,0x4421c814,
+code-creation,LazyCompile,0,0x2b81f0c0,1084, native v8natives.js:1,0x4421c904,
+code-creation,LazyCompile,0,0x2b81f500,561,InstallFunctions native v8natives.js:46,0x4421cc1c,
+code-creation,LazyCompile,0,0x2b81f740,304,InstallGetterSetter native v8natives.js:72,0x4421ea1c,
+code-creation,LazyCompile,0,0x2b81f880,814,SetUpLockedPrototype native v8natives.js:87,0x4421eab4,
+code-creation,LazyCompile,0,0x2b81fbc0,452,SetUpGlobal native v8natives.js:197,0x4421ed3c,
+code-creation,LazyCompile,0,0x2b81fda0,404,hasOwnProperty native v8natives.js:251,0x4421eee4,
+code-creation,LazyCompile,0,0x2b81ff40,308,ObjectConstructor native v8natives.js:1371,0x442200b4,
+code-creation,LazyCompile,0,0x2b820080,1044,SetUpObject native v8natives.js:1385,0x44220140,
+code-creation,LazyCompile,0,0x2b8204a0,292,BooleanConstructor native v8natives.js:1437,0x442201c8,
+code-creation,LazyCompile,0,0x2b8205e0,448,SetUpBoolean native v8natives.js:1472,0x44220314,
+code-creation,LazyCompile,0,0x2b8207a0,336,NumberConstructor native v8natives.js:1491,0x442203ac,
+code-creation,LazyCompile,0,0x2b820900,924,SetUpNumber native v8natives.js:1635,0x4422073c,
+code-creation,LazyCompile,0,0x2b820ca0,440,FunctionConstructor native v8natives.js:1813,0x44220954,
+code-creation,LazyCompile,0,0x2b820e60,380,SetUpFunction native v8natives.js:1826,0x442209f8,
+code-creation,LazyCompile,0,0x2b820fe0,264, native json.js:1,0x44221238,
+code-creation,LazyCompile,0,0x2b821100,260,SetUpJSON native json.js:219,0x44221940,
+code-creation,LazyCompile,0,0x2b821220,340, native math.js:1,0x44221a5c,
+code-creation,LazyCompile,0,0x2b821380,164,MathConstructor native math.js:40,0x44221ba4,
+code-creation,LazyCompile,0,0x2b821440,1112,SetUpMath native math.js:226,0x4422283c,
+code-creation,LazyCompile,0,0x2b8218a0,404, native regexp.js:1,0x442228f8,
+code-creation,LazyCompile,0,0x2b821a40,324,RegExpConstructor native regexp.js:90,0x44223264,
+code-creation,LazyCompile,0,0x2b821ba0,224,RegExpMakeCaptureGetter native regexp.js:360,0x44223784,
+code-creation,LazyCompile,0,0x2b821c80,1561,SetUpRegExp native regexp.js:400,0x44223878,
+code-creation,LazyCompile,0,0x2b8222a0,280, native apinatives.js:1,0x44223b98,
+code-creation,LazyCompile,0,0x2b8223c0,612, native runtime.js:1,0x44223e30,
+code-creation,LazyCompile,0,0x2b822640,1728,EQUALS native runtime.js:54,0x44224078,
+code-creation,LazyCompile,0,0x2b822d00,376,STRICT_EQUALS native runtime.js:108,0x44224c18,
+code-creation,LazyCompile,0,0x2b822e80,924,COMPARE native runtime.js:128,0x44224ca4,
+code-creation,LazyCompile,0,0x2b823220,596,ADD native runtime.js:171,0x44224d44,
+code-creation,LazyCompile,0,0x2b823480,572,STRING_ADD_LEFT native runtime.js:191,0x44224dd8,
+code-creation,LazyCompile,0,0x2b8236c0,580,STRING_ADD_RIGHT native runtime.js:206,0x44224e64,
+code-creation,LazyCompile,0,0x2b823920,296,SUB native runtime.js:222,0x44224ef4,
+code-creation,LazyCompile,0,0x2b823a60,296,MUL native runtime.js:230,0x44224f84,
+code-creation,LazyCompile,0,0x2b823ba0,296,DIV native runtime.js:238,0x44225014,
+code-creation,LazyCompile,0,0x2b823ce0,296,MOD native runtime.js:246,0x442250a4,
+code-creation,LazyCompile,0,0x2b823e20,296,BIT_OR native runtime.js:260,0x44225134,
+code-creation,LazyCompile,0,0x2b823f60,384,BIT_AND native runtime.js:268,0x442251c4,
+code-creation,LazyCompile,0,0x2b8240e0,296,BIT_XOR native runtime.js:290,0x44225254,
+code-creation,LazyCompile,0,0x2b824220,244,UNARY_MINUS native runtime.js:298,0x442252e4,
+code-creation,LazyCompile,0,0x2b824320,244,BIT_NOT native runtime.js:305,0x44225370,
+code-creation,LazyCompile,0,0x2b824420,296,SHL native runtime.js:312,0x442253fc,
+code-creation,LazyCompile,0,0x2b824560,384,SAR native runtime.js:320,0x4422548c,
+code-creation,LazyCompile,0,0x2b8246e0,296,SHR native runtime.js:342,0x4422551c,
+code-creation,LazyCompile,0,0x2b824820,228,DELETE native runtime.js:356,0x442255ac,
+code-creation,LazyCompile,0,0x2b824920,368,IN native runtime.js:362,0x4422563c,
+code-creation,LazyCompile,0,0x2b824aa0,644,INSTANCE_OF native runtime.js:375,0x442256e8,
+code-creation,LazyCompile,0,0x2b824d40,236,FILTER_KEY native runtime.js:406,0x442257b8,
+code-creation,LazyCompile,0,0x2b824e40,380,CALL_NON_FUNCTION native runtime.js:413,0x44225848,
+code-creation,LazyCompile,0,0x2b824fc0,380,CALL_NON_FUNCTION_AS_CONSTRUCTOR native runtime.js:422,0x442258f4,
+code-creation,LazyCompile,0,0x2b825140,288,CALL_FUNCTION_PROXY native runtime.js:431,0x442259a0,
+code-creation,LazyCompile,0,0x2b825260,260,CALL_FUNCTION_PROXY_AS_CONSTRUCTOR native runtime.js:439,0x44225a38,
+code-creation,LazyCompile,0,0x2b825380,912,APPLY_PREPARE native runtime.js:446,0x44225acc,
+code-creation,LazyCompile,0,0x2b825720,232,APPLY_OVERFLOW native runtime.js:484,0x44225b9c,
+code-creation,LazyCompile,0,0x2b825820,188,TO_OBJECT native runtime.js:490,0x44225c38,
+code-creation,LazyCompile,0,0x2b8258e0,188,TO_NUMBER native runtime.js:496,0x44225cc0,
+code-creation,LazyCompile,0,0x2b8259a0,188,TO_STRING native runtime.js:502,0x44225d48,
+code-creation,LazyCompile,0,0x2b825a60,600,ToPrimitive native runtime.js:514,0x44225dd0,
+code-creation,LazyCompile,0,0x2b825cc0,404,ToBoolean native runtime.js:526,0x44225e60,
+code-creation,LazyCompile,0,0x2b825e60,504,ToNumber native runtime.js:536,0x44225eec,
+code-creation,LazyCompile,0,0x2b826060,416,ToString native runtime.js:561,0x44225fd8,
+code-creation,LazyCompile,0,0x2b826200,220,ToName native runtime.js:578,0x442260c4,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227108,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227168,
+code-creation,LazyCompile,3,0x2b8262e0,77,OpaqueReference,0x442271c8,
+code-creation,LazyCompile,3,0x2b8262e0,77,JSON,0x44227228,
+code-creation,LazyCompile,0,0x2b8204a0,292,Boolean,0x44227288,
+code-creation,LazyCompile,3,0x2b82a080,83,Array,0x442272e8,
+code-creation,LazyCompile,3,0x2b826460,77,pop,0x44227348,
+code-creation,LazyCompile,3,0x2b826400,77,push,0x442273a8,
+code-creation,LazyCompile,3,0x2b826640,77,concat,0x44227408,
+code-creation,LazyCompile,3,0x2b8264c0,77,shift,0x44227468,
+code-creation,LazyCompile,3,0x2b826520,77,unshift,0x442274c8,
+code-creation,LazyCompile,3,0x2b826580,77,slice,0x44227528,
+code-creation,LazyCompile,3,0x2b8265e0,77,splice,0x44227588,
+code-creation,LazyCompile,0,0x2b8207a0,336,Number,0x442275e8,
+code-creation,LazyCompile,3,0x2b82a020,78,InternalArray,0x44227648,
+code-creation,LazyCompile,3,0x2b82a020,78,InternalPackedArray,0x442276b4,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227714,
+code-creation,LazyCompile,0,0x2b821a40,324,RegExp,0x44227774,
+code-creation,LazyCompile,0,0x2b81da80,1392,Date,0x442277d4,
+code-creation,LazyCompile,0,0x2b820ca0,440,Function,0x44227834,
+code-creation,LazyCompile,0,0x2b81d200,380,String,0x44227894,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x442278f4,
+code-creation,LazyCompile,0,0x2b81cf80,216,Script,0x44227960,
+code-creation,LazyCompile,0,0x2b81ff40,308,Object,0x44227a00,
+code-creation,LazyCompile,3,0x2b829d00,376,call,0x44227a60,
+code-creation,LazyCompile,3,0x2b829e80,388,apply,0x44227ac0,
+code-creation,LazyCompile,3,0x2b8262e0,77,Arguments,0x44227b20,
+code-creation,LazyCompile,3,0x2b826820,77,ThrowTypeError,0x44227b80,
+code-creation,LazyCompile,3,0x2b826760,77,,0x44227be0,
+code-creation,LazyCompile,3,0x2b8267c0,77,,0x44227c40,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227ca0,
+timer-event-start,V8.GCCompactor,9350
+timer-event-start,V8.External,9362
+timer-event-end,V8.External,9370
+timer-event-start,V8.External,10477
+timer-event-end,V8.External,10500
+timer-event-end,V8.GCCompactor,10511
+code-creation,Stub,2,0x2b80a000,484,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80a200,622,CEntryStub
+code-creation,Stub,2,0x2b80a480,540,ArrayNArgumentsConstructorStub
+code-creation,Stub,13,0x2b80a6a0,116,CompareICStub
+code-creation,Stub,2,0x2b80a720,1428,RecordWriteStub
+code-creation,Stub,2,0x2b80acc0,97,StoreBufferOverflowStub
+code-creation,Stub,2,0x2b80ad40,611,RecordWriteStub
+code-creation,Stub,2,0x2b80afc0,76,InterruptStub
+code-creation,Stub,13,0x2b80b020,104,CompareICStub
+code-creation,Stub,2,0x2b80b0a0,130,ArgumentsAccessStub
+code-creation,Stub,2,0x2b80b140,160,FastNewContextStub
+code-creation,Stub,2,0x2b80b1e0,79,StubFailureTrampolineStub
+code-creation,Stub,2,0x2b80b240,704,ArraySingleArgumentConstructorStub
+code-creation,Stub,14,0x2b80b500,93,CompareNilICStub
+code-creation,Stub,2,0x2b80b560,289,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80b6a0,664,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80b940,740,NameDictionaryLookupStub
+code-creation,Stub,13,0x2b80bc40,156,CompareICStub
+code-creation,Stub,2,0x2b80bce0,611,RecordWriteStub
+code-creation,Stub,13,0x2b80bf60,122,CompareICStub
+code-creation,Stub,2,0x2b80bfe0,217,CreateAllocationSiteStub
+code-creation,Stub,2,0x2b80c0c0,1456,RecordWriteStub
+code-creation,Stub,2,0x2b80c680,245,StoreArrayLiteralElementStub
+code-creation,Stub,2,0x2b80c780,1448,RecordWriteStub
+code-creation,Stub,2,0x2b80cd40,1471,StringAddStub
+code-creation,Stub,2,0x2b80d300,1448,RecordWriteStub
+code-creation,Stub,2,0x2b80d8c0,1453,RecordWriteStub
+code-creation,Stub,12,0x2b80de80,146,BinaryOpStub
+code-creation,Stub,2,0x2b80df20,640,InternalArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80e1a0,517,ArrayConstructorStub
+code-creation,Stub,2,0x2b80e3c0,305,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e500,305,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e640,349,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e7a0,349,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80e900,289,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b80ea40,680,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80ed00,692,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80efc0,704,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80f280,664,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b80f520,488,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80f720,540,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80f940,432,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80fb00,432,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b80fcc0,1453,RecordWriteStub
+code-creation,Stub,2,0x2b810280,400,InternalArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b810420,611,RecordWriteStub
+code-creation,Stub,2,0x2b8106a0,213,JSEntryStub
+code-creation,Stub,13,0x2b810780,104,CompareICStub
+code-creation,Stub,12,0x2b810800,124,BinaryOpStub
+code-creation,Stub,2,0x2b810880,1447,StringAddStub
+code-creation,Stub,2,0x2b810e40,640,InternalArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b8110c0,400,InternalArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b811260,261,FastCloneShallowArrayStub
+code-creation,Stub,12,0x2b811380,88,BinaryOpStub
+code-creation,Stub,2,0x2b8113e0,76,StackCheckStub
+code-creation,Stub,2,0x2b811440,1437,RecordWriteStub
+code-creation,Stub,2,0x2b8119e0,289,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b811b20,331,CallFunctionStub
+code-creation,Builtin,3,0x2b811c80,174,A builtin from the snapshot
+code-creation,Stub,14,0x2b811d40,124,CompareNilICStub
+code-creation,Stub,2,0x2b811dc0,1420,RecordWriteStub
+code-creation,Stub,13,0x2b812360,104,CompareICStub
+code-creation,Stub,2,0x2b8123e0,76,LoadFieldStub
+code-creation,Stub,13,0x2b812440,104,CompareICStub
+code-creation,Stub,2,0x2b8124c0,195,NumberToStringStub
+code-creation,Stub,15,0x2b8125a0,148,ToBooleanStub
+code-creation,Stub,2,0x2b812640,351,ArgumentsAccessStub
+code-creation,Stub,2,0x2b8127a0,664,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b812a40,1420,RecordWriteStub
+code-creation,Stub,12,0x2b812fe0,133,BinaryOpStub
+code-creation,Stub,2,0x2b813080,1664,StringAddStub
+code-creation,Stub,2,0x2b813700,1661,StringAddStub
+code-creation,Stub,2,0x2b813d80,472,ArrayNArgumentsConstructorStub
+code-creation,Stub,2,0x2b813f60,80,StubFailureTrampolineStub
+code-creation,Stub,13,0x2b813fc0,104,CompareICStub
+code-creation,Stub,2,0x2b814040,331,CallFunctionStub
+code-creation,Stub,2,0x2b8141a0,660,ArraySingleArgumentConstructorStub
+code-creation,Stub,2,0x2b814440,1433,RecordWriteStub
+code-creation,Stub,12,0x2b8149e0,146,BinaryOpStub
+code-creation,Stub,2,0x2b814a80,271,CallConstructStub
+code-creation,Stub,15,0x2b814ba0,136,ToBooleanStub
+code-creation,Stub,2,0x2b814c40,468,ArrayNArgumentsConstructorStub
+code-creation,Stub,15,0x2b814e20,128,ToBooleanStub
+code-creation,Stub,2,0x2b814ea0,163,FastNewContextStub
+code-creation,Stub,2,0x2b814f60,1425,RecordWriteStub
+code-creation,LoadIC,5,0x2b815500,145,A load IC from the snapshot
+code-creation,Builtin,3,0x2b8155a0,83,A builtin from the snapshot
+code-creation,Stub,12,0x2b815600,88,BinaryOpStub
+code-creation,Stub,2,0x2b815660,1433,RecordWriteStub
+code-creation,Stub,2,0x2b815c00,331,CallFunctionStub
+code-creation,Stub,13,0x2b815d60,104,CompareICStub
+code-creation,Stub,2,0x2b815de0,304,FastNewClosureStub
+code-creation,Stub,2,0x2b815f20,285,ArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b816040,1433,RecordWriteStub
+code-creation,Stub,2,0x2b8165e0,233,InternalArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b8166e0,740,NameDictionaryLookupStub
+code-creation,Stub,2,0x2b8169e0,740,NameDictionaryLookupStub
+code-creation,Stub,12,0x2b816ce0,88,BinaryOpStub
+code-creation,Stub,2,0x2b816d40,216,StringCompareStub
+code-creation,Stub,15,0x2b816e20,93,ToBooleanStub
+code-creation,Stub,12,0x2b816e80,88,BinaryOpStub
+code-creation,Stub,2,0x2b816ee0,1433,RecordWriteStub
+code-creation,Stub,12,0x2b817480,155,BinaryOpStub
+code-creation,Stub,2,0x2b817520,169,InternalArrayConstructorStub
+code-creation,Stub,2,0x2b8175e0,233,InternalArrayNoArgumentConstructorStub
+code-creation,Stub,2,0x2b8176e0,1433,RecordWriteStub
+code-creation,Stub,12,0x2b817c80,88,BinaryOpStub
+code-creation,Stub,2,0x2b817ce0,328,KeyedLoadElementStub
+code-creation,Stub,2,0x2b817e40,1461,RecordWriteStub
+code-creation,Stub,2,0x2b818400,98,ToNumberStub
+code-creation,Stub,13,0x2b818480,122,CompareICStub
+code-creation,Stub,12,0x2b818500,124,BinaryOpStub
+code-creation,Stub,2,0x2b818580,148,CallConstructStub
+code-creation,Stub,13,0x2b818620,491,CompareICStub
+code-creation,Stub,2,0x2b818820,213,JSEntryStub
+code-creation,CallIC,7,0x2b818900,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8189c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818a80,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818b40,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818c00,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818cc0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818d80,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818e40,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818f00,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b818fc0,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819080,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819140,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819200,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8192c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819380,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819440,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819500,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8195c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819680,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819740,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819800,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b8198c0,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819980,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819a40,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819b00,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819bc0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819c80,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819d40,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819e00,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819ec0,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b819f80,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a040,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a100,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a1c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a280,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a340,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a400,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a4c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a580,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a640,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a700,189,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a7c0,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a880,178,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81a940,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81aa00,180,A call IC from the snapshot
+code-creation,CallIC,7,0x2b81aac0,180,A call IC from the snapshot
+code-creation,Builtin,3,0x2b81ab80,107,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81ac00,105,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81ac80,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81ace0,432,A builtin from the snapshot
+code-creation,Builtin,3,0x2b81afc0,101,A builtin from the snapshot
+code-creation,LoadIC,5,0x2b81b1a0,83,A load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b81bf00,83,A keyed load IC from the snapshot
+code-creation,StoreIC,9,0x2b81c680,84,A store IC from the snapshot
+code-creation,Builtin,3,0x2b8262e0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826340,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8263a0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826400,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826460,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8264c0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826520,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826580,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8265e0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826640,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8266a0,80,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826700,80,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826760,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8267c0,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826820,77,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826880,75,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8268e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826960,491,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826b60,406,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826d00,157,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826da0,131,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826e40,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826ec0,107,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826f40,143,A builtin from the snapshot
+code-creation,Builtin,3,0x2b826fe0,143,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827080,143,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827120,94,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827180,91,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8271e0,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827240,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8272a0,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827300,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827360,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b8273c0,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827420,84,A builtin from the snapshot
+code-creation,Builtin,3,0x2b827480,84,A builtin from the snapshot
+code-creation,LoadIC,5,0x2b8274e0,83,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b827540,313,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b827680,266,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b8277a0,80,A load IC from the snapshot
+code-creation,LoadIC,5,0x2b827800,83,A load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827860,83,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b8278c0,896,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827c40,499,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827e40,144,A keyed load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b827ee0,216,A keyed load IC from the snapshot
+code-creation,StoreIC,9,0x2b827fc0,365,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828140,293,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828280,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b8282e0,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828340,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b8283a0,84,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828400,365,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828580,293,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b8286c0,88,A store IC from the snapshot
+code-creation,StoreIC,9,0x2b828720,82,A store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b828780,84,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b8287e0,2082,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b829020,84,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b829080,2082,A keyed store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b8298c0,286,A keyed store IC from the snapshot
+code-creation,Builtin,3,0x2b8299e0,355,A builtin from the snapshot
+code-creation,Builtin,3,0x2b829b60,416,A builtin from the snapshot
+code-creation,Builtin,3,0x2b829d00,376,A builtin from the snapshot
+code-creation,Builtin,3,0x2b829e80,388,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a020,78,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a080,83,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a0e0,357,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a260,359,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a3e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a460,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a4e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a560,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a5e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a660,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a6e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a760,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a7e0,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a860,101,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a8e0,104,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a960,106,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82a9e0,110,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82aa60,112,A builtin from the snapshot
+code-creation,LoadIC,5,0x2b82aae0,106,A load IC from the snapshot
+code-creation,KeyedLoadIC,6,0x2b82ab60,106,A keyed load IC from the snapshot
+code-creation,StoreIC,9,0x2b82abe0,108,A store IC from the snapshot
+code-creation,KeyedStoreIC,10,0x2b82ac60,108,A keyed store IC from the snapshot
+code-creation,Stub,14,0x2b82ace0,104,CallFunctionStub
+code-creation,Builtin,3,0x2b82ad60,65,A builtin from the snapshot
+code-creation,Builtin,3,0x2b82adc0,93,A builtin from the snapshot
+timer-event-start,V8.GCCompactor,12962
+timer-event-start,V8.External,12972
+timer-event-end,V8.External,12981
+timer-event-start,V8.External,13996
+timer-event-end,V8.External,14014
+timer-event-end,V8.GCCompactor,14024
+code-creation,LazyCompile,3,0x2b81ac80,77,Empty :1,0x4420e5cc,
+code-creation,LazyCompile,0,0x2b81b040,336,SetUpUri native uri.js:442,0x4420f5b4,
+code-creation,LazyCompile,0,0x2b81b960,1429,FormatString native messages.js:187,0x4420f918,
+code-creation,LazyCompile,0,0x2b81bf60,280,MakeGenericError native messages.js:282,0x44211088,
+code-creation,LazyCompile,0,0x2b81c080,264,FormatMessage native messages.js:301,0x4421113c,
+code-creation,LazyCompile,0,0x2b81c1a0,200,MakeRangeError native messages.js:335,0x442112f0,
+code-creation,LazyCompile,0,0x2b81c280,1012,captureStackTrace native messages.js:1123,0x44212280,
+code-creation,LazyCompile,0,0x2b81c6e0,460,SetUpError native messages.js:1173,0x44212410,
+code-creation,LazyCompile,0,0x2b81c8c0,692,SetUpError.a native messages.js:1176,0x442124c0,
+code-creation,LazyCompile,0,0x2b81cb80,164,d native messages.js:1192,0x44212548,
+code-creation,LazyCompile,0,0x2b81cc40,360, native messages.js:1202,0x442125d0,
+code-creation,LazyCompile,0,0x2b81cc40,360,Error,0x44212698,
+code-creation,LazyCompile,0,0x2b81cc40,360,TypeError,0x442126f8,
+code-creation,LazyCompile,0,0x2b81cc40,360,RangeError,0x44212758,
+code-creation,LazyCompile,0,0x2b81cc40,360,SyntaxError,0x442127b8,
+code-creation,LazyCompile,0,0x2b81cc40,360,ReferenceError,0x44212818,
+code-creation,LazyCompile,0,0x2b81cc40,360,EvalError,0x44212878,
+code-creation,LazyCompile,0,0x2b81cc40,360,URIError,0x442128d8,
+code-creation,LazyCompile,0,0x2b81cdc0,424,SetUpStackOverflowBoilerplate native messages.js:1301,0x44212a74,
+code-creation,LazyCompile,0,0x2b81d200,380,StringConstructor native string.js:35,0x44214e2c,
+code-creation,LazyCompile,0,0x2b81d380,1132,SetUpString native string.js:962,0x44216ea8,
+code-creation,LazyCompile,0,0x2b81da80,1392,DateConstructor native date.js:141,0x442182bc,
+code-creation,LazyCompile,0,0x2b81e000,1396,SetUpDate native date.js:761,0x44219944,
+code-creation,LazyCompile,0,0x2b81e6a0,2272,SetUpArray native array.js:1591,0x4421c6ac,
+code-creation,LazyCompile,0,0x2b81f500,561,InstallFunctions native v8natives.js:46,0x4421cc1c,
+code-creation,LazyCompile,0,0x2b81f740,304,InstallGetterSetter native v8natives.js:72,0x4421ea1c,
+code-creation,LazyCompile,0,0x2b81f880,814,SetUpLockedPrototype native v8natives.js:87,0x4421eab4,
+code-creation,LazyCompile,0,0x2b81fbc0,452,SetUpGlobal native v8natives.js:197,0x4421ed3c,
+code-creation,LazyCompile,0,0x2b81fda0,404,hasOwnProperty native v8natives.js:251,0x4421eee4,
+code-creation,LazyCompile,0,0x2b81ff40,308,ObjectConstructor native v8natives.js:1371,0x442200b4,
+code-creation,LazyCompile,0,0x2b820080,1044,SetUpObject native v8natives.js:1385,0x44220140,
+code-creation,LazyCompile,0,0x2b8204a0,292,BooleanConstructor native v8natives.js:1437,0x442201c8,
+code-creation,LazyCompile,0,0x2b8205e0,448,SetUpBoolean native v8natives.js:1472,0x44220314,
+code-creation,LazyCompile,0,0x2b8207a0,336,NumberConstructor native v8natives.js:1491,0x442203ac,
+code-creation,LazyCompile,0,0x2b820900,924,SetUpNumber native v8natives.js:1635,0x4422073c,
+code-creation,LazyCompile,0,0x2b820ca0,440,FunctionConstructor native v8natives.js:1813,0x44220954,
+code-creation,LazyCompile,0,0x2b820e60,380,SetUpFunction native v8natives.js:1826,0x442209f8,
+code-creation,LazyCompile,0,0x2b821100,260,SetUpJSON native json.js:219,0x44221940,
+code-creation,LazyCompile,0,0x2b821380,164,MathConstructor native math.js:40,0x44221ba4,
+code-creation,LazyCompile,0,0x2b821440,1112,SetUpMath native math.js:226,0x4422283c,
+code-creation,LazyCompile,0,0x2b821a40,324,RegExpConstructor native regexp.js:90,0x44223264,
+code-creation,LazyCompile,0,0x2b821ba0,224,RegExpMakeCaptureGetter native regexp.js:360,0x44223784,
+code-creation,LazyCompile,0,0x2b821c80,1561,SetUpRegExp native regexp.js:400,0x44223878,
+code-creation,LazyCompile,0,0x2b822640,1728,EQUALS native runtime.js:54,0x44224078,
+code-creation,LazyCompile,0,0x2b822d00,376,STRICT_EQUALS native runtime.js:108,0x44224c18,
+code-creation,LazyCompile,0,0x2b822e80,924,COMPARE native runtime.js:128,0x44224ca4,
+code-creation,LazyCompile,0,0x2b823220,596,ADD native runtime.js:171,0x44224d44,
+code-creation,LazyCompile,0,0x2b823480,572,STRING_ADD_LEFT native runtime.js:191,0x44224dd8,
+code-creation,LazyCompile,0,0x2b8236c0,580,STRING_ADD_RIGHT native runtime.js:206,0x44224e64,
+code-creation,LazyCompile,0,0x2b823920,296,SUB native runtime.js:222,0x44224ef4,
+code-creation,LazyCompile,0,0x2b823a60,296,MUL native runtime.js:230,0x44224f84,
+code-creation,LazyCompile,0,0x2b823ba0,296,DIV native runtime.js:238,0x44225014,
+code-creation,LazyCompile,0,0x2b823ce0,296,MOD native runtime.js:246,0x442250a4,
+code-creation,LazyCompile,0,0x2b823e20,296,BIT_OR native runtime.js:260,0x44225134,
+code-creation,LazyCompile,0,0x2b823f60,384,BIT_AND native runtime.js:268,0x442251c4,
+code-creation,LazyCompile,0,0x2b8240e0,296,BIT_XOR native runtime.js:290,0x44225254,
+code-creation,LazyCompile,0,0x2b824220,244,UNARY_MINUS native runtime.js:298,0x442252e4,
+code-creation,LazyCompile,0,0x2b824320,244,BIT_NOT native runtime.js:305,0x44225370,
+code-creation,LazyCompile,0,0x2b824420,296,SHL native runtime.js:312,0x442253fc,
+code-creation,LazyCompile,0,0x2b824560,384,SAR native runtime.js:320,0x4422548c,
+code-creation,LazyCompile,0,0x2b8246e0,296,SHR native runtime.js:342,0x4422551c,
+code-creation,LazyCompile,0,0x2b824820,228,DELETE native runtime.js:356,0x442255ac,
+code-creation,LazyCompile,0,0x2b824920,368,IN native runtime.js:362,0x4422563c,
+code-creation,LazyCompile,0,0x2b824aa0,644,INSTANCE_OF native runtime.js:375,0x442256e8,
+code-creation,LazyCompile,0,0x2b824d40,236,FILTER_KEY native runtime.js:406,0x442257b8,
+code-creation,LazyCompile,0,0x2b824e40,380,CALL_NON_FUNCTION native runtime.js:413,0x44225848,
+code-creation,LazyCompile,0,0x2b824fc0,380,CALL_NON_FUNCTION_AS_CONSTRUCTOR native runtime.js:422,0x442258f4,
+code-creation,LazyCompile,0,0x2b825140,288,CALL_FUNCTION_PROXY native runtime.js:431,0x442259a0,
+code-creation,LazyCompile,0,0x2b825260,260,CALL_FUNCTION_PROXY_AS_CONSTRUCTOR native runtime.js:439,0x44225a38,
+code-creation,LazyCompile,0,0x2b825380,912,APPLY_PREPARE native runtime.js:446,0x44225acc,
+code-creation,LazyCompile,0,0x2b825720,232,APPLY_OVERFLOW native runtime.js:484,0x44225b9c,
+code-creation,LazyCompile,0,0x2b825820,188,TO_OBJECT native runtime.js:490,0x44225c38,
+code-creation,LazyCompile,0,0x2b8258e0,188,TO_NUMBER native runtime.js:496,0x44225cc0,
+code-creation,LazyCompile,0,0x2b8259a0,188,TO_STRING native runtime.js:502,0x44225d48,
+code-creation,LazyCompile,0,0x2b825a60,600,ToPrimitive native runtime.js:514,0x44225dd0,
+code-creation,LazyCompile,0,0x2b825cc0,404,ToBoolean native runtime.js:526,0x44225e60,
+code-creation,LazyCompile,0,0x2b825e60,504,ToNumber native runtime.js:536,0x44225eec,
+code-creation,LazyCompile,0,0x2b826060,416,ToString native runtime.js:561,0x44225fd8,
+code-creation,LazyCompile,0,0x2b826200,220,ToName native runtime.js:578,0x442260c4,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227108,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227168,
+code-creation,LazyCompile,3,0x2b8262e0,77,OpaqueReference,0x442271c8,
+code-creation,LazyCompile,3,0x2b8262e0,77,JSON,0x44227228,
+code-creation,LazyCompile,0,0x2b8204a0,292,Boolean,0x44227288,
+code-creation,LazyCompile,3,0x2b82a080,83,Array,0x442272e8,
+code-creation,LazyCompile,3,0x2b826460,77,pop,0x44227348,
+code-creation,LazyCompile,3,0x2b826400,77,push,0x442273a8,
+code-creation,LazyCompile,3,0x2b826640,77,concat,0x44227408,
+code-creation,LazyCompile,3,0x2b8264c0,77,shift,0x44227468,
+code-creation,LazyCompile,3,0x2b826520,77,unshift,0x442274c8,
+code-creation,LazyCompile,3,0x2b826580,77,slice,0x44227528,
+code-creation,LazyCompile,3,0x2b8265e0,77,splice,0x44227588,
+code-creation,LazyCompile,0,0x2b8207a0,336,Number,0x442275e8,
+code-creation,LazyCompile,3,0x2b82a020,78,InternalArray,0x44227648,
+code-creation,LazyCompile,3,0x2b82a020,78,InternalPackedArray,0x442276b4,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227714,
+code-creation,LazyCompile,0,0x2b821a40,324,RegExp,0x44227774,
+code-creation,LazyCompile,0,0x2b81da80,1392,Date,0x442277d4,
+code-creation,LazyCompile,0,0x2b820ca0,440,Function,0x44227834,
+code-creation,LazyCompile,0,0x2b81d200,380,String,0x44227894,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x442278f4,
+code-creation,LazyCompile,0,0x2b81cf80,216,Script,0x44227960,
+code-creation,LazyCompile,0,0x2b81ff40,308,Object,0x44227a00,
+code-creation,LazyCompile,3,0x2b829d00,376,call,0x44227a60,
+code-creation,LazyCompile,3,0x2b829e80,388,apply,0x44227ac0,
+code-creation,LazyCompile,3,0x2b8262e0,77,Arguments,0x44227b20,
+code-creation,LazyCompile,3,0x2b826820,77,ThrowTypeError,0x44227b80,
+code-creation,LazyCompile,3,0x2b826760,77,,0x44227be0,
+code-creation,LazyCompile,3,0x2b8267c0,77,,0x44227c40,
+code-creation,LazyCompile,3,0x2b8262e0,77,,0x44227ca0,
+code-creation,Stub,2,0x2b81ef80,782,CEntryStub
+code-creation,Stub,2,0x2b81f2a0,197,StoreBufferOverflowStub
+code-creation,Stub,2,0x2b81f380,79,StubFailureTrampolineStub
+code-creation,Stub,2,0x2b81f3e0,80,StubFailureTrampolineStub
tick,0xf776d430,16272,0,0x0,3
-timer-event-start,"V8.ParseLazyMicroSeconds",16854
-timer-event-end,"V8.ParseLazyMicroSeconds",17081
-timer-event-start,"V8.CompileLazy",17098
-timer-event-start,"V8.CompileFullCode",17125
+timer-event-start,V8.ParseLazyMicroSeconds,16854
+timer-event-end,V8.ParseLazyMicroSeconds,17081
+timer-event-start,V8.CompileLazy,17098
+timer-event-start,V8.CompileFullCode,17125
tick,0xf74c79de,17348,0,0xff820034,2
-code-creation,Stub,2,0x2b81b200,246,"FastCloneShallowObjectStub"
-code-creation,Stub,12,0x2b81b300,88,"BinaryOpStub_ADD_Alloc_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b81b360,88,"BinaryOpStub_ADD_OverwriteLeft_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",17910
-code-creation,LazyCompile,0,0x2b81b3c0,572,"Instantiate native apinatives.js:44",0x44223cdc,~
-timer-event-end,"V8.CompileLazy",17948
-code-creation,Stub,13,0x2b81b600,116,"CompareICStub"
-timer-event-start,"V8.ParseLazyMicroSeconds",18020
-timer-event-end,"V8.ParseLazyMicroSeconds",18170
-timer-event-start,"V8.CompileLazy",18187
-timer-event-start,"V8.CompileFullCode",18208
-code-creation,Stub,12,0x2b81b680,88,"BinaryOpStub_BIT_AND_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",18340
-code-creation,LazyCompile,0,0x2b82ae20,1008,"InstantiateFunction native apinatives.js:65",0x44223d3c,
-timer-event-end,"V8.CompileLazy",18396
+code-creation,Stub,2,0x2b81b200,246,FastCloneShallowObjectStub
+code-creation,Stub,12,0x2b81b300,88,BinaryOpStub_ADD_Alloc_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b81b360,88,BinaryOpStub_ADD_OverwriteLeft_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,17910
+code-creation,LazyCompile,0,0x2b81b3c0,572,Instantiate native apinatives.js:44,0x44223cdc,~
+timer-event-end,V8.CompileLazy,17948
+code-creation,Stub,13,0x2b81b600,116,CompareICStub
+timer-event-start,V8.ParseLazyMicroSeconds,18020
+timer-event-end,V8.ParseLazyMicroSeconds,18170
+timer-event-start,V8.CompileLazy,18187
+timer-event-start,V8.CompileFullCode,18208
+code-creation,Stub,12,0x2b81b680,88,BinaryOpStub_BIT_AND_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,18340
+code-creation,LazyCompile,0,0x2b82ae20,1008,InstantiateFunction native apinatives.js:65,0x44223d3c,
+timer-event-end,V8.CompileLazy,18396
tick,0xf776d430,18420,0,0x90d68fc,2,0x2b81b4f0
-code-creation,Stub,2,0x2b82b220,1800,"RecordWriteStub"
-code-creation,Stub,2,0x2b82b940,236,"KeyedStoreElementStub"
-code-creation,KeyedStoreIC,10,0x2b82ba40,91,""
-code-creation,CallIC,7,0x2b82baa0,129,"InstantiateFunction"
-code-creation,LoadIC,5,0x2b82bb40,103,"kApiFunctionCache"
-code-creation,Stub,12,0x2b82bbc0,146,"BinaryOpStub_BIT_AND_Alloc_Smi+Smi"
-code-creation,Stub,15,0x2b82bc60,132,"ToBooleanStub(Smi)"
-timer-event-start,"V8.ParseLazyMicroSeconds",19172
-timer-event-end,"V8.ParseLazyMicroSeconds",19253
-timer-event-start,"V8.CompileLazy",19268
-timer-event-start,"V8.CompileFullCode",19285
-timer-event-end,"V8.CompileFullCode",19350
-code-creation,LazyCompile,0,0x2b82bd00,753,"ConfigureTemplateInstance native apinatives.js:105",0x44223d9c,
-timer-event-end,"V8.CompileLazy",19384
+code-creation,Stub,2,0x2b82b220,1800,RecordWriteStub
+code-creation,Stub,2,0x2b82b940,236,KeyedStoreElementStub
+code-creation,KeyedStoreIC,10,0x2b82ba40,91,
+code-creation,CallIC,7,0x2b82baa0,129,InstantiateFunction
+code-creation,LoadIC,5,0x2b82bb40,103,kApiFunctionCache
+code-creation,Stub,12,0x2b82bbc0,146,BinaryOpStub_BIT_AND_Alloc_Smi+Smi
+code-creation,Stub,15,0x2b82bc60,132,ToBooleanStub(Smi)
+timer-event-start,V8.ParseLazyMicroSeconds,19172
+timer-event-end,V8.ParseLazyMicroSeconds,19253
+timer-event-start,V8.CompileLazy,19268
+timer-event-start,V8.CompileFullCode,19285
+timer-event-end,V8.CompileFullCode,19350
+code-creation,LazyCompile,0,0x2b82bd00,753,ConfigureTemplateInstance native apinatives.js:105,0x44223d9c,
+timer-event-end,V8.CompileLazy,19384
tick,0x83c1620,19510,0,0xff81f92c,0,0x2b82b1de,0x2b81b4f0,0x2b81b576,0x2b82b0b8,0x2b81b4f0
-code-creation,Stub,2,0x2b82c000,208,"KeyedLoadElementStub"
-code-creation,KeyedLoadIC,6,0x2b82c0e0,91,""
-code-creation,Stub,15,0x2b82c140,156,"ToBooleanStub(Undefined,SpecObject)"
-code-creation,KeyedLoadIC,6,0x2b82c1e0,91,""
-code-creation,Stub,12,0x2b82c240,146,"BinaryOpStub_ADD_Alloc_Smi+Smi"
-code-creation,Stub,15,0x2b82c2e0,168,"ToBooleanStub(Undefined,String)"
-code-creation,CallIC,7,0x2b82c3a0,129,"ConfigureTemplateInstance"
-code-creation,CallIC,7,0x2b82c440,129,"Instantiate"
-code-creation,CallIC,7,0x2b82c4e0,144,"Instantiate"
-code-creation,Stub,13,0x2b82c580,469,"CompareICStub"
-code-creation,Stub,14,0x2b82c760,144,"CompareNilICStub(NullValue)(MonomorphicMap)"
-code-creation,Stub,14,0x2b82c800,144,"CompareNilICStub(NullValue)(MonomorphicMap)"
+code-creation,Stub,2,0x2b82c000,208,KeyedLoadElementStub
+code-creation,KeyedLoadIC,6,0x2b82c0e0,91,
+code-creation,Stub,15,0x2b82c140,156,ToBooleanStub(Undefined,SpecObject)
+code-creation,KeyedLoadIC,6,0x2b82c1e0,91,
+code-creation,Stub,12,0x2b82c240,146,BinaryOpStub_ADD_Alloc_Smi+Smi
+code-creation,Stub,15,0x2b82c2e0,168,ToBooleanStub(Undefined,String)
+code-creation,CallIC,7,0x2b82c3a0,129,ConfigureTemplateInstance
+code-creation,CallIC,7,0x2b82c440,129,Instantiate
+code-creation,CallIC,7,0x2b82c4e0,144,Instantiate
+code-creation,Stub,13,0x2b82c580,469,CompareICStub
+code-creation,Stub,14,0x2b82c760,144,CompareNilICStub(NullValue)(MonomorphicMap)
+code-creation,Stub,14,0x2b82c800,144,CompareNilICStub(NullValue)(MonomorphicMap)
tick,0x8132a60,20593,0,0x8141e5e,0,0x2b822c4e,0x2b82af24,0x2b81b4f0,0x2b82beff,0x2b81b59f,0x2b82beff,0x2b81b589,0x2b82b0b8,0x2b81b4f0
-code-creation,Stub,14,0x2b82c8a0,124,"CompareNilICStub(NullValue)(Undefined,Null,Undetectable,Generic)"
-code-creation,Stub,13,0x2b82c920,156,"CompareICStub"
-timer-event-start,"V8.ParseLazyMicroSeconds",20736
-timer-event-end,"V8.ParseLazyMicroSeconds",20818
-timer-event-start,"V8.CompileLazy",20838
-timer-event-start,"V8.CompileFullCode",20854
-code-creation,Stub,2,0x2b82c9c0,587,"FastCloneShallowArrayStub"
-timer-event-end,"V8.CompileFullCode",21298
-code-creation,LazyCompile,0,0x2b82cc20,812,"DefaultNumber native runtime.js:645",0x44226390,~
-timer-event-end,"V8.CompileLazy",21330
-timer-event-start,"V8.ParseLazyMicroSeconds",21352
-timer-event-end,"V8.ParseLazyMicroSeconds",21381
-timer-event-start,"V8.CompileLazy",21393
-timer-event-start,"V8.CompileFullCode",21405
-timer-event-end,"V8.CompileFullCode",21436
-code-creation,LazyCompile,0,0x2b82cf60,184,"valueOf native v8natives.js:245",0x4421ee84,~
-timer-event-end,"V8.CompileLazy",21465
-timer-event-start,"V8.ParseLazyMicroSeconds",21482
-timer-event-end,"V8.ParseLazyMicroSeconds",21544
-timer-event-start,"V8.CompileLazy",21557
-timer-event-start,"V8.CompileFullCode",21571
-timer-event-end,"V8.CompileFullCode",21651
-code-creation,LazyCompile,0,0x2b82d020,652,"ToObject native runtime.js:584",0x44226150,~
-timer-event-end,"V8.CompileLazy",21690
+code-creation,Stub,14,0x2b82c8a0,124,CompareNilICStub(NullValue)(Undefined,Null,Undetectable,Generic)
+code-creation,Stub,13,0x2b82c920,156,CompareICStub
+timer-event-start,V8.ParseLazyMicroSeconds,20736
+timer-event-end,V8.ParseLazyMicroSeconds,20818
+timer-event-start,V8.CompileLazy,20838
+timer-event-start,V8.CompileFullCode,20854
+code-creation,Stub,2,0x2b82c9c0,587,FastCloneShallowArrayStub
+timer-event-end,V8.CompileFullCode,21298
+code-creation,LazyCompile,0,0x2b82cc20,812,DefaultNumber native runtime.js:645,0x44226390,~
+timer-event-end,V8.CompileLazy,21330
+timer-event-start,V8.ParseLazyMicroSeconds,21352
+timer-event-end,V8.ParseLazyMicroSeconds,21381
+timer-event-start,V8.CompileLazy,21393
+timer-event-start,V8.CompileFullCode,21405
+timer-event-end,V8.CompileFullCode,21436
+code-creation,LazyCompile,0,0x2b82cf60,184,valueOf native v8natives.js:245,0x4421ee84,~
+timer-event-end,V8.CompileLazy,21465
+timer-event-start,V8.ParseLazyMicroSeconds,21482
+timer-event-end,V8.ParseLazyMicroSeconds,21544
+timer-event-start,V8.CompileLazy,21557
+timer-event-start,V8.CompileFullCode,21571
+timer-event-end,V8.CompileFullCode,21651
+code-creation,LazyCompile,0,0x2b82d020,652,ToObject native runtime.js:584,0x44226150,~
+timer-event-end,V8.CompileLazy,21690
tick,0x80eabe2,21708,0,0xff81f7a8,2,0x2b82cfe4,0x2b82cd79,0x2b825c84,0x2b822ca7,0x2b82af24,0x2b81b4f0,0x2b82beff,0x2b81b59f,0x2b82beff,0x2b81b589,0x2b82b0b8,0x2b81b4f0
-timer-event-start,"V8.ParseLazyMicroSeconds",21761
-timer-event-end,"V8.ParseLazyMicroSeconds",21796
-timer-event-start,"V8.CompileLazy",21808
-timer-event-start,"V8.CompileFullCode",21820
-timer-event-end,"V8.CompileFullCode",21845
-code-creation,LazyCompile,0,0x2b82d2c0,220,"IsPrimitive native runtime.js:636",0x44226330,~
-timer-event-end,"V8.CompileLazy",21873
-timer-event-start,"V8.ParseLazyMicroSeconds",21895
-timer-event-end,"V8.ParseLazyMicroSeconds",21921
-timer-event-start,"V8.CompileLazy",21932
-timer-event-start,"V8.CompileFullCode",21946
-timer-event-end,"V8.CompileFullCode",21966
-code-creation,LazyCompile,0,0x2b82d3a0,184,"toString native v8natives.js:1721",0x44220834,~
-timer-event-end,"V8.CompileLazy",21994
-timer-event-start,"V8.ParseLazyMicroSeconds",22009
-timer-event-end,"V8.ParseLazyMicroSeconds",22087
-timer-event-start,"V8.CompileLazy",22101
-timer-event-start,"V8.CompileFullCode",22116
-timer-event-end,"V8.CompileFullCode",22221
-code-creation,LazyCompile,0,0x2b82d460,681,"FunctionSourceString native v8natives.js:1693",0x442207d4,~
-timer-event-end,"V8.CompileLazy",22237
-code-creation,Stub,15,0x2b82d720,156,"ToBooleanStub(String)"
-code-creation,Stub,12,0x2b82d7c0,124,"BinaryOpStub_ADD_Alloc_String+String"
-code-creation,Stub,12,0x2b82d840,124,"BinaryOpStub_ADD_OverwriteLeft_String+String"
-code-creation,CallMiss,7,0x2b82d8c0,178,"args_count: 2"
-code-creation,CallIC,7,0x2b82d980,128,"ToPrimitive"
-code-creation,CallIC,7,0x2b82da00,128,"DefaultNumber"
-code-creation,Stub,2,0x2b82da80,116,"valueOf"
-code-creation,LoadIC,5,0x2b82db00,93,"valueOf"
-code-creation,CallIC,7,0x2b82db60,129,"ToObject"
-code-creation,CallIC,7,0x2b82dc00,128,"IsPrimitive"
-code-creation,Stub,2,0x2b82dc80,98,"toString"
-code-creation,LoadIC,5,0x2b82dd00,93,"toString"
-code-creation,CallIC,7,0x2b82dd60,129,"FunctionSourceString"
-code-creation,CallIC,7,0x2b82de00,128,"ToNumber"
-timer-event-start,"V8.ParseMicroSeconds",22650
+timer-event-start,V8.ParseLazyMicroSeconds,21761
+timer-event-end,V8.ParseLazyMicroSeconds,21796
+timer-event-start,V8.CompileLazy,21808
+timer-event-start,V8.CompileFullCode,21820
+timer-event-end,V8.CompileFullCode,21845
+code-creation,LazyCompile,0,0x2b82d2c0,220,IsPrimitive native runtime.js:636,0x44226330,~
+timer-event-end,V8.CompileLazy,21873
+timer-event-start,V8.ParseLazyMicroSeconds,21895
+timer-event-end,V8.ParseLazyMicroSeconds,21921
+timer-event-start,V8.CompileLazy,21932
+timer-event-start,V8.CompileFullCode,21946
+timer-event-end,V8.CompileFullCode,21966
+code-creation,LazyCompile,0,0x2b82d3a0,184,toString native v8natives.js:1721,0x44220834,~
+timer-event-end,V8.CompileLazy,21994
+timer-event-start,V8.ParseLazyMicroSeconds,22009
+timer-event-end,V8.ParseLazyMicroSeconds,22087
+timer-event-start,V8.CompileLazy,22101
+timer-event-start,V8.CompileFullCode,22116
+timer-event-end,V8.CompileFullCode,22221
+code-creation,LazyCompile,0,0x2b82d460,681,FunctionSourceString native v8natives.js:1693,0x442207d4,~
+timer-event-end,V8.CompileLazy,22237
+code-creation,Stub,15,0x2b82d720,156,ToBooleanStub(String)
+code-creation,Stub,12,0x2b82d7c0,124,BinaryOpStub_ADD_Alloc_String+String
+code-creation,Stub,12,0x2b82d840,124,BinaryOpStub_ADD_OverwriteLeft_String+String
+code-creation,CallMiss,7,0x2b82d8c0,178,args_count: 2
+code-creation,CallIC,7,0x2b82d980,128,ToPrimitive
+code-creation,CallIC,7,0x2b82da00,128,DefaultNumber
+code-creation,Stub,2,0x2b82da80,116,valueOf
+code-creation,LoadIC,5,0x2b82db00,93,valueOf
+code-creation,CallIC,7,0x2b82db60,129,ToObject
+code-creation,CallIC,7,0x2b82dc00,128,IsPrimitive
+code-creation,Stub,2,0x2b82dc80,98,toString
+code-creation,LoadIC,5,0x2b82dd00,93,toString
+code-creation,CallIC,7,0x2b82dd60,129,FunctionSourceString
+code-creation,CallIC,7,0x2b82de00,128,ToNumber
+timer-event-start,V8.ParseMicroSeconds,22650
tick,0xf776d430,22726,0,0x0,2
-timer-event-end,"V8.ParseMicroSeconds",22773
-timer-event-start,"V8.Compile",22785
-timer-event-start,"V8.CompileFullCode",22801
-timer-event-end,"V8.CompileFullCode",22822
-code-creation,Script,0,0x2b82de80,264,"native arraybuffer.js",0x4423ab7c,~
-timer-event-end,"V8.Compile",22836
-timer-event-start,"V8.ParseLazyMicroSeconds",22859
-timer-event-end,"V8.ParseLazyMicroSeconds",22881
-timer-event-start,"V8.CompileLazy",22887
-timer-event-start,"V8.CompileFullCode",22899
-timer-event-end,"V8.CompileFullCode",22918
-code-creation,LazyCompile,0,0x2b82dfa0,480,"SetUpArrayBuffer native arraybuffer.js:84",0x4423aac0,~
-timer-event-end,"V8.CompileLazy",22934
-timer-event-start,"V8.ParseLazyMicroSeconds",22943
-timer-event-end,"V8.ParseLazyMicroSeconds",22962
-timer-event-start,"V8.CompileLazy",22967
-timer-event-start,"V8.CompileFullCode",22972
-timer-event-end,"V8.CompileFullCode",22987
-code-creation,LazyCompile,0,0x2b82e180,324,"ArrayBufferConstructor native arraybuffer.js:34",0x4423a9a0,~
-timer-event-end,"V8.CompileLazy",23000
-code-creation,LazyCompile,0,0x2b82e180,324,"ArrayBufferConstructor native arraybuffer.js:34",0x4423a9a0,
-timer-event-start,"V8.ParseLazyMicroSeconds",23021
-timer-event-end,"V8.ParseLazyMicroSeconds",23037
-timer-event-start,"V8.CompileLazy",23042
-timer-event-start,"V8.CompileFullCode",23047
-timer-event-end,"V8.CompileFullCode",23057
-code-creation,LazyCompile,0,0x2b82e2e0,252,"InstallGetter native v8natives.js:63",0x4421e9bc,~
-timer-event-end,"V8.CompileLazy",23069
-code-creation,KeyedLoadIC,6,0x2b82e3e0,91,""
-code-creation,LoadIC,5,0x2b82e440,93,"length"
-timer-event-start,"V8.ParseMicroSeconds",23160
-timer-event-end,"V8.ParseMicroSeconds",23613
-timer-event-start,"V8.Compile",23621
-timer-event-start,"V8.CompileFullCode",23666
-timer-event-end,"V8.CompileFullCode",23702
-code-creation,Script,0,0x2b82e4a0,720,"native typedarray.js",0x4423bc04,~
-timer-event-end,"V8.Compile",23724
-timer-event-start,"V8.ParseLazyMicroSeconds",23755
+timer-event-end,V8.ParseMicroSeconds,22773
+timer-event-start,V8.Compile,22785
+timer-event-start,V8.CompileFullCode,22801
+timer-event-end,V8.CompileFullCode,22822
+code-creation,Script,0,0x2b82de80,264,native arraybuffer.js,0x4423ab7c,~
+timer-event-end,V8.Compile,22836
+timer-event-start,V8.ParseLazyMicroSeconds,22859
+timer-event-end,V8.ParseLazyMicroSeconds,22881
+timer-event-start,V8.CompileLazy,22887
+timer-event-start,V8.CompileFullCode,22899
+timer-event-end,V8.CompileFullCode,22918
+code-creation,LazyCompile,0,0x2b82dfa0,480,SetUpArrayBuffer native arraybuffer.js:84,0x4423aac0,~
+timer-event-end,V8.CompileLazy,22934
+timer-event-start,V8.ParseLazyMicroSeconds,22943
+timer-event-end,V8.ParseLazyMicroSeconds,22962
+timer-event-start,V8.CompileLazy,22967
+timer-event-start,V8.CompileFullCode,22972
+timer-event-end,V8.CompileFullCode,22987
+code-creation,LazyCompile,0,0x2b82e180,324,ArrayBufferConstructor native arraybuffer.js:34,0x4423a9a0,~
+timer-event-end,V8.CompileLazy,23000
+code-creation,LazyCompile,0,0x2b82e180,324,ArrayBufferConstructor native arraybuffer.js:34,0x4423a9a0,
+timer-event-start,V8.ParseLazyMicroSeconds,23021
+timer-event-end,V8.ParseLazyMicroSeconds,23037
+timer-event-start,V8.CompileLazy,23042
+timer-event-start,V8.CompileFullCode,23047
+timer-event-end,V8.CompileFullCode,23057
+code-creation,LazyCompile,0,0x2b82e2e0,252,InstallGetter native v8natives.js:63,0x4421e9bc,~
+timer-event-end,V8.CompileLazy,23069
+code-creation,KeyedLoadIC,6,0x2b82e3e0,91,
+code-creation,LoadIC,5,0x2b82e440,93,length
+timer-event-start,V8.ParseMicroSeconds,23160
+timer-event-end,V8.ParseMicroSeconds,23613
+timer-event-start,V8.Compile,23621
+timer-event-start,V8.CompileFullCode,23666
+timer-event-end,V8.CompileFullCode,23702
+code-creation,Script,0,0x2b82e4a0,720,native typedarray.js,0x4423bc04,~
+timer-event-end,V8.Compile,23724
+timer-event-start,V8.ParseLazyMicroSeconds,23755
tick,0xf776d430,23782,0,0x0,2
-timer-event-end,"V8.ParseLazyMicroSeconds",23867
-timer-event-start,"V8.CompileLazy",23905
-timer-event-start,"V8.CompileFullCode",23916
-timer-event-end,"V8.CompileFullCode",23939
-code-creation,LazyCompile,0,0x2b82e780,664,"SetupTypedArray native typedarray.js:170",0x4423b238,~
-timer-event-end,"V8.CompileLazy",23971
-timer-event-start,"V8.ParseLazyMicroSeconds",23979
-timer-event-end,"V8.ParseLazyMicroSeconds",24064
-timer-event-start,"V8.CompileLazy",24071
-timer-event-start,"V8.CompileFullCode",24085
-code-creation,Stub,2,0x2b82ea20,175,"FastNewContextStub"
-code-creation,Stub,2,0x2b82eae0,304,"FastNewClosureStub"
-code-creation,Stub,2,0x2b82ec20,1448,"RecordWriteStub"
-timer-event-end,"V8.CompileFullCode",24149
-code-creation,LazyCompile,0,0x2b82f1e0,460,"CreateTypedArrayConstructor native typedarray.js:38",0x4423af98,~
-timer-event-end,"V8.CompileLazy",24163
-timer-event-start,"V8.ParseLazyMicroSeconds",24170
-timer-event-end,"V8.ParseLazyMicroSeconds",24198
-timer-event-start,"V8.CompileLazy",24203
-timer-event-start,"V8.CompileFullCode",24211
-code-creation,Stub,2,0x2b82f3c0,331,"CallFunctionStub_Args4_Recording"
-code-creation,Stub,2,0x2b82f520,631,"FastCloneShallowArrayStub"
-code-creation,Stub,2,0x2b82f7a0,245,"StoreArrayLiteralElementStub"
-timer-event-end,"V8.CompileFullCode",24435
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,~
-timer-event-end,"V8.CompileLazy",24448
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-timer-event-start,"V8.ParseLazyMicroSeconds",24478
-timer-event-end,"V8.ParseLazyMicroSeconds",24519
-timer-event-start,"V8.CompileLazy",24525
-timer-event-start,"V8.CompileFullCode",24533
-timer-event-end,"V8.CompileFullCode",24546
-code-creation,LazyCompile,0,0x2b82fbe0,268,"CreateSubArray native typedarray.js:121",0x4423b178,~
-timer-event-end,"V8.CompileLazy",24559
-code-creation,CallMiss,7,0x2b82fd00,180,"args_count: 4"
-code-creation,CallIC,7,0x2b82fdc0,129,"CreateTypedArrayConstructor"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b82fe60,103,"$Object"
-code-creation,LoadIC,5,0x2b82fee0,103,"TypedArrayGetBuffer"
-code-creation,CallMiss,7,0x2b82ff60,180,"args_count: 3"
-code-creation,CallIC,7,0x2b830020,129,"InstallGetter"
-code-creation,LoadIC,5,0x2b8300c0,103,"TypedArrayGetByteOffset"
-code-creation,LoadIC,5,0x2b830140,103,"TypedArrayGetByteLength"
-code-creation,LoadIC,5,0x2b8301c0,103,"TypedArrayGetLength"
-code-creation,CallIC,7,0x2b830240,129,"CreateSubArray"
-code-creation,LoadIC,5,0x2b8302e0,103,"TypedArraySet"
-code-creation,CallIC,7,0x2b830360,133,"$Array"
-code-creation,CallIC,7,0x2b830400,129,"InstallFunctions"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
+timer-event-end,V8.ParseLazyMicroSeconds,23867
+timer-event-start,V8.CompileLazy,23905
+timer-event-start,V8.CompileFullCode,23916
+timer-event-end,V8.CompileFullCode,23939
+code-creation,LazyCompile,0,0x2b82e780,664,SetupTypedArray native typedarray.js:170,0x4423b238,~
+timer-event-end,V8.CompileLazy,23971
+timer-event-start,V8.ParseLazyMicroSeconds,23979
+timer-event-end,V8.ParseLazyMicroSeconds,24064
+timer-event-start,V8.CompileLazy,24071
+timer-event-start,V8.CompileFullCode,24085
+code-creation,Stub,2,0x2b82ea20,175,FastNewContextStub
+code-creation,Stub,2,0x2b82eae0,304,FastNewClosureStub
+code-creation,Stub,2,0x2b82ec20,1448,RecordWriteStub
+timer-event-end,V8.CompileFullCode,24149
+code-creation,LazyCompile,0,0x2b82f1e0,460,CreateTypedArrayConstructor native typedarray.js:38,0x4423af98,~
+timer-event-end,V8.CompileLazy,24163
+timer-event-start,V8.ParseLazyMicroSeconds,24170
+timer-event-end,V8.ParseLazyMicroSeconds,24198
+timer-event-start,V8.CompileLazy,24203
+timer-event-start,V8.CompileFullCode,24211
+code-creation,Stub,2,0x2b82f3c0,331,CallFunctionStub_Args4_Recording
+code-creation,Stub,2,0x2b82f520,631,FastCloneShallowArrayStub
+code-creation,Stub,2,0x2b82f7a0,245,StoreArrayLiteralElementStub
+timer-event-end,V8.CompileFullCode,24435
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,~
+timer-event-end,V8.CompileLazy,24448
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+timer-event-start,V8.ParseLazyMicroSeconds,24478
+timer-event-end,V8.ParseLazyMicroSeconds,24519
+timer-event-start,V8.CompileLazy,24525
+timer-event-start,V8.CompileFullCode,24533
+timer-event-end,V8.CompileFullCode,24546
+code-creation,LazyCompile,0,0x2b82fbe0,268,CreateSubArray native typedarray.js:121,0x4423b178,~
+timer-event-end,V8.CompileLazy,24559
+code-creation,CallMiss,7,0x2b82fd00,180,args_count: 4
+code-creation,CallIC,7,0x2b82fdc0,129,CreateTypedArrayConstructor
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b82fe60,103,$Object
+code-creation,LoadIC,5,0x2b82fee0,103,TypedArrayGetBuffer
+code-creation,CallMiss,7,0x2b82ff60,180,args_count: 3
+code-creation,CallIC,7,0x2b830020,129,InstallGetter
+code-creation,LoadIC,5,0x2b8300c0,103,TypedArrayGetByteOffset
+code-creation,LoadIC,5,0x2b830140,103,TypedArrayGetByteLength
+code-creation,LoadIC,5,0x2b8301c0,103,TypedArrayGetLength
+code-creation,CallIC,7,0x2b830240,129,CreateSubArray
+code-creation,LoadIC,5,0x2b8302e0,103,TypedArraySet
+code-creation,CallIC,7,0x2b830360,133,$Array
+code-creation,CallIC,7,0x2b830400,129,InstallFunctions
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
tick,0xf7492ece,24846,0,0xff81ff10,0,0x2b82e839,0x2b82e5f9
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-timer-event-start,"V8.ParseLazyMicroSeconds",25032
-timer-event-end,"V8.ParseLazyMicroSeconds",25074
-timer-event-start,"V8.CompileLazy",25081
-timer-event-start,"V8.CompileFullCode",25093
-timer-event-end,"V8.CompileFullCode",25115
-code-creation,LazyCompile,0,0x2b8304a0,888,"SetupDataView native typedarray.js:434",0x4423ba78,~
-timer-event-end,"V8.CompileLazy",25128
-timer-event-start,"V8.ParseLazyMicroSeconds",25136
-timer-event-end,"V8.ParseLazyMicroSeconds",25175
-timer-event-start,"V8.CompileLazy",25181
-timer-event-start,"V8.CompileFullCode",25188
-code-creation,Stub,12,0x2b830820,88,"BinaryOpStub_SUB_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",25228
-code-creation,LazyCompile,0,0x2b830880,908,"DataViewConstructor native typedarray.js:209",0x4423b298,~
-timer-event-end,"V8.CompileLazy",25241
-code-creation,LazyCompile,0,0x2b830880,908,"DataViewConstructor native typedarray.js:209",0x4423b298,
-code-creation,KeyedStorePolymorphicIC,10,0x2b830c20,101,""
-code-creation,KeyedStorePolymorphicIC,10,0x2b830c20,101,"args_count: 0"
-code-creation,CallIC,7,0x2b830ca0,144,"Instantiate"
-code-creation,CallIC,7,0x2b830d40,129,"InstantiateFunction"
-code-creation,LoadIC,5,0x2b830de0,103,"kApiFunctionCache"
-code-creation,KeyedLoadPolymorphicIC,6,0x2b830e60,105,""
-code-creation,CallIC,7,0x2b830ee0,129,"ConfigureTemplateInstance"
-code-creation,CallIC,7,0x2b830f80,129,"Instantiate"
-code-creation,Stub,2,0x2b831020,116,"valueOf"
-code-creation,LoadPolymorphicIC,5,0x2b8310a0,105,"valueOf"
-code-creation,Stub,2,0x2b831120,98,"toString"
-code-creation,LoadPolymorphicIC,5,0x2b8311a0,105,"toString"
-code-creation,CallIC,7,0x2b831220,128,"ToPrimitive"
-code-creation,CallIC,7,0x2b8312a0,128,"DefaultNumber"
-code-creation,CallIC,7,0x2b831320,129,"ToObject"
-code-creation,CallIC,7,0x2b8313c0,128,"IsPrimitive"
-code-creation,CallIC,7,0x2b831440,129,"FunctionSourceString"
-code-creation,CallIC,7,0x2b8314e0,128,"ToNumber"
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+timer-event-start,V8.ParseLazyMicroSeconds,25032
+timer-event-end,V8.ParseLazyMicroSeconds,25074
+timer-event-start,V8.CompileLazy,25081
+timer-event-start,V8.CompileFullCode,25093
+timer-event-end,V8.CompileFullCode,25115
+code-creation,LazyCompile,0,0x2b8304a0,888,SetupDataView native typedarray.js:434,0x4423ba78,~
+timer-event-end,V8.CompileLazy,25128
+timer-event-start,V8.ParseLazyMicroSeconds,25136
+timer-event-end,V8.ParseLazyMicroSeconds,25175
+timer-event-start,V8.CompileLazy,25181
+timer-event-start,V8.CompileFullCode,25188
+code-creation,Stub,12,0x2b830820,88,BinaryOpStub_SUB_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,25228
+code-creation,LazyCompile,0,0x2b830880,908,DataViewConstructor native typedarray.js:209,0x4423b298,~
+timer-event-end,V8.CompileLazy,25241
+code-creation,LazyCompile,0,0x2b830880,908,DataViewConstructor native typedarray.js:209,0x4423b298,
+code-creation,KeyedStorePolymorphicIC,10,0x2b830c20,101,
+code-creation,KeyedStorePolymorphicIC,10,0x2b830c20,101,args_count: 0
+code-creation,CallIC,7,0x2b830ca0,144,Instantiate
+code-creation,CallIC,7,0x2b830d40,129,InstantiateFunction
+code-creation,LoadIC,5,0x2b830de0,103,kApiFunctionCache
+code-creation,KeyedLoadPolymorphicIC,6,0x2b830e60,105,
+code-creation,CallIC,7,0x2b830ee0,129,ConfigureTemplateInstance
+code-creation,CallIC,7,0x2b830f80,129,Instantiate
+code-creation,Stub,2,0x2b831020,116,valueOf
+code-creation,LoadPolymorphicIC,5,0x2b8310a0,105,valueOf
+code-creation,Stub,2,0x2b831120,98,toString
+code-creation,LoadPolymorphicIC,5,0x2b8311a0,105,toString
+code-creation,CallIC,7,0x2b831220,128,ToPrimitive
+code-creation,CallIC,7,0x2b8312a0,128,DefaultNumber
+code-creation,CallIC,7,0x2b831320,129,ToObject
+code-creation,CallIC,7,0x2b8313c0,128,IsPrimitive
+code-creation,CallIC,7,0x2b831440,129,FunctionSourceString
+code-creation,CallIC,7,0x2b8314e0,128,ToNumber
tick,0xf776d430,25914,0,0x90ec418,0,0x2b82cda7,0x2b825c84,0x2b822ca7,0x2b82af24,0x2b81b4f0,0x2b82beff,0x2b81b59f,0x2b82beff,0x2b81b589,0x2b82b0b8,0x2b81b4f0
-timer-event-start,"V8.ParseLazyMicroSeconds",25965
-timer-event-end,"V8.ParseLazyMicroSeconds",25985
-timer-event-start,"V8.CompileLazy",25991
-timer-event-start,"V8.RecompileSynchronous",25996
-code-creation,LazyCompile,0,0x2b831560,184,"valueOf native v8natives.js:245",0x4421ee84,~
-timer-event-end,"V8.RecompileSynchronous",26121
-code-creation,LazyCompile,1,0x2b831620,180,"valueOf native v8natives.js:245",0x4421ee84,*
-timer-event-end,"V8.CompileLazy",26138
-timer-event-start,"V8.ParseLazyMicroSeconds",26144
-timer-event-end,"V8.ParseLazyMicroSeconds",26156
-timer-event-start,"V8.CompileLazy",26161
-timer-event-start,"V8.RecompileSynchronous",26166
-code-creation,LazyCompile,0,0x2b8316e0,220,"IsPrimitive native runtime.js:636",0x44226330,~
-timer-event-end,"V8.RecompileSynchronous",26250
-code-creation,LazyCompile,1,0x2b8317c0,170,"IsPrimitive native runtime.js:636",0x44226330,*
-timer-event-end,"V8.CompileLazy",26266
-timer-event-start,"V8.ParseLazyMicroSeconds",26271
-timer-event-end,"V8.ParseLazyMicroSeconds",26282
-timer-event-start,"V8.CompileLazy",26286
-timer-event-start,"V8.RecompileSynchronous",26291
-code-creation,LazyCompile,0,0x2b831880,184,"toString native v8natives.js:1721",0x44220834,~
-timer-event-end,"V8.RecompileSynchronous",26361
-code-creation,LazyCompile,1,0x2b831940,180,"toString native v8natives.js:1721",0x44220834,*
-timer-event-end,"V8.CompileLazy",26376
-code-creation,LoadIC,5,0x2b831a00,103,"global"
-code-creation,LoadIC,5,0x2b831a80,114,"ArrayBuffer"
-code-creation,CallMiss,7,0x2b831b00,180,"args_count: 0"
-code-creation,CallIC,7,0x2b831bc0,129,"SetUpArrayBuffer"
-code-creation,LoadIC,5,0x2b831c60,103,"$ArrayBuffer"
-code-creation,LoadIC,5,0x2b831ce0,103,"ArrayBufferConstructor"
-code-creation,LazyCompile,0,0x2b82e180,324,"ArrayBufferConstructor native arraybuffer.js:34",0x4423a9a0,
-code-creation,LoadIC,5,0x2b831d60,103,"$Object"
-code-creation,LoadIC,5,0x2b831de0,103,"ArrayBufferGetByteLength"
-code-creation,CallIC,7,0x2b831e60,129,"InstallGetter"
-code-creation,LoadIC,5,0x2b831f00,103,"ArrayBufferSlice"
-code-creation,CallIC,7,0x2b831f80,133,"$Array"
-code-creation,CallIC,7,0x2b832020,129,"InstallFunctions"
-code-creation,LoadPolymorphicIC,5,0x2b8320c0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b832140,105,"length"
-code-creation,KeyedLoadPolymorphicIC,6,0x2b8321c0,105,""
-code-creation,LoadIC,5,0x2b832240,114,"Uint8Array"
-code-creation,CallIC,7,0x2b8322c0,129,"SetupTypedArray"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832360,103,"$Object"
-code-creation,LoadIC,5,0x2b8323e0,114,"Int8Array"
-code-creation,CallIC,7,0x2b832460,129,"CreateTypedArrayConstructor"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832500,103,"TypedArrayGetBuffer"
-code-creation,LoadIC,5,0x2b832580,103,"TypedArrayGetByteOffset"
-code-creation,LoadIC,5,0x2b832600,103,"TypedArrayGetByteLength"
-code-creation,LoadIC,5,0x2b832680,103,"TypedArrayGetLength"
-code-creation,CallIC,7,0x2b832700,129,"CreateSubArray"
-code-creation,LoadIC,5,0x2b8327a0,103,"TypedArraySet"
-code-creation,CallIC,7,0x2b832820,133,"$Array"
-code-creation,LoadIC,5,0x2b8328c0,114,"Uint16Array"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
+timer-event-start,V8.ParseLazyMicroSeconds,25965
+timer-event-end,V8.ParseLazyMicroSeconds,25985
+timer-event-start,V8.CompileLazy,25991
+timer-event-start,V8.RecompileSynchronous,25996
+code-creation,LazyCompile,0,0x2b831560,184,valueOf native v8natives.js:245,0x4421ee84,~
+timer-event-end,V8.RecompileSynchronous,26121
+code-creation,LazyCompile,1,0x2b831620,180,valueOf native v8natives.js:245,0x4421ee84,*
+timer-event-end,V8.CompileLazy,26138
+timer-event-start,V8.ParseLazyMicroSeconds,26144
+timer-event-end,V8.ParseLazyMicroSeconds,26156
+timer-event-start,V8.CompileLazy,26161
+timer-event-start,V8.RecompileSynchronous,26166
+code-creation,LazyCompile,0,0x2b8316e0,220,IsPrimitive native runtime.js:636,0x44226330,~
+timer-event-end,V8.RecompileSynchronous,26250
+code-creation,LazyCompile,1,0x2b8317c0,170,IsPrimitive native runtime.js:636,0x44226330,*
+timer-event-end,V8.CompileLazy,26266
+timer-event-start,V8.ParseLazyMicroSeconds,26271
+timer-event-end,V8.ParseLazyMicroSeconds,26282
+timer-event-start,V8.CompileLazy,26286
+timer-event-start,V8.RecompileSynchronous,26291
+code-creation,LazyCompile,0,0x2b831880,184,toString native v8natives.js:1721,0x44220834,~
+timer-event-end,V8.RecompileSynchronous,26361
+code-creation,LazyCompile,1,0x2b831940,180,toString native v8natives.js:1721,0x44220834,*
+timer-event-end,V8.CompileLazy,26376
+code-creation,LoadIC,5,0x2b831a00,103,global
+code-creation,LoadIC,5,0x2b831a80,114,ArrayBuffer
+code-creation,CallMiss,7,0x2b831b00,180,args_count: 0
+code-creation,CallIC,7,0x2b831bc0,129,SetUpArrayBuffer
+code-creation,LoadIC,5,0x2b831c60,103,$ArrayBuffer
+code-creation,LoadIC,5,0x2b831ce0,103,ArrayBufferConstructor
+code-creation,LazyCompile,0,0x2b82e180,324,ArrayBufferConstructor native arraybuffer.js:34,0x4423a9a0,
+code-creation,LoadIC,5,0x2b831d60,103,$Object
+code-creation,LoadIC,5,0x2b831de0,103,ArrayBufferGetByteLength
+code-creation,CallIC,7,0x2b831e60,129,InstallGetter
+code-creation,LoadIC,5,0x2b831f00,103,ArrayBufferSlice
+code-creation,CallIC,7,0x2b831f80,133,$Array
+code-creation,CallIC,7,0x2b832020,129,InstallFunctions
+code-creation,LoadPolymorphicIC,5,0x2b8320c0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b832140,105,length
+code-creation,KeyedLoadPolymorphicIC,6,0x2b8321c0,105,
+code-creation,LoadIC,5,0x2b832240,114,Uint8Array
+code-creation,CallIC,7,0x2b8322c0,129,SetupTypedArray
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832360,103,$Object
+code-creation,LoadIC,5,0x2b8323e0,114,Int8Array
+code-creation,CallIC,7,0x2b832460,129,CreateTypedArrayConstructor
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832500,103,TypedArrayGetBuffer
+code-creation,LoadIC,5,0x2b832580,103,TypedArrayGetByteOffset
+code-creation,LoadIC,5,0x2b832600,103,TypedArrayGetByteLength
+code-creation,LoadIC,5,0x2b832680,103,TypedArrayGetLength
+code-creation,CallIC,7,0x2b832700,129,CreateSubArray
+code-creation,LoadIC,5,0x2b8327a0,103,TypedArraySet
+code-creation,CallIC,7,0x2b832820,133,$Array
+code-creation,LoadIC,5,0x2b8328c0,114,Uint16Array
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
tick,0xf776d430,26979,0,0x90ec418,0,0x2b82e9b7,0x2b82e593
-code-creation,LoadIC,5,0x2b832940,114,"Int16Array"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b8329c0,114,"Uint32Array"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832a40,114,"Int32Array"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832ac0,114,"Float32Array"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832b40,114,"Float64Array"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832bc0,114,"Uint8ClampedArray"
-code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-code-creation,LoadIC,5,0x2b832c40,114,"DataView"
-code-creation,CallIC,7,0x2b832cc0,129,"SetupDataView"
-code-creation,LoadIC,5,0x2b832d60,103,"$DataView"
-code-creation,LoadIC,5,0x2b832de0,103,"DataViewConstructor"
-code-creation,LazyCompile,0,0x2b830880,908,"DataViewConstructor native typedarray.js:209",0x4423b298,
-code-creation,LoadIC,5,0x2b832e60,103,"DataViewGetBuffer"
-code-creation,LoadIC,5,0x2b832ee0,103,"DataViewGetByteOffset"
-code-creation,LoadIC,5,0x2b832f60,103,"DataViewGetByteLength"
-code-creation,LoadIC,5,0x2b832fe0,103,"DataViewGetInt8"
-code-creation,LoadIC,5,0x2b833060,103,"DataViewSetInt8"
-code-creation,LoadIC,5,0x2b8330e0,103,"DataViewGetUint8"
-code-creation,LoadIC,5,0x2b833160,103,"DataViewSetUint8"
-code-creation,LoadIC,5,0x2b8331e0,103,"DataViewGetInt16"
-code-creation,LoadIC,5,0x2b833260,103,"DataViewSetInt16"
-code-creation,LoadIC,5,0x2b8332e0,103,"DataViewGetUint16"
-code-creation,LoadIC,5,0x2b833360,103,"DataViewSetUint16"
-code-creation,LoadIC,5,0x2b8333e0,103,"DataViewGetInt32"
-code-creation,LoadIC,5,0x2b833460,103,"DataViewSetInt32"
-code-creation,LoadIC,5,0x2b8334e0,103,"DataViewGetUint32"
-code-creation,LoadIC,5,0x2b833560,103,"DataViewSetUint32"
-code-creation,LoadIC,5,0x2b8335e0,103,"DataViewGetFloat32"
-code-creation,LoadIC,5,0x2b833660,103,"DataViewSetFloat32"
-code-creation,LoadIC,5,0x2b8336e0,103,"DataViewGetFloat64"
-code-creation,LoadIC,5,0x2b833760,103,"DataViewSetFloat64"
-code-creation,CallMiss,7,0x2b8337e0,189,"args_count: 32"
-code-creation,CallIC,7,0x2b8338a0,136,"$Array"
-code-creation,LoadIC,5,0x2b833940,93,"length"
-timer-event-start,"V8.ParseMicroSeconds",28734
-timer-event-start,"V8.PreParseMicroSeconds",28760
-timer-event-end,"V8.PreParseMicroSeconds",28785
-timer-event-start,"V8.PreParseMicroSeconds",28796
-timer-event-end,"V8.PreParseMicroSeconds",28803
-timer-event-start,"V8.PreParseMicroSeconds",28810
-timer-event-end,"V8.PreParseMicroSeconds",28817
-timer-event-start,"V8.PreParseMicroSeconds",28828
-timer-event-end,"V8.PreParseMicroSeconds",28862
-timer-event-start,"V8.PreParseMicroSeconds",28872
-timer-event-end,"V8.PreParseMicroSeconds",28878
-timer-event-start,"V8.PreParseMicroSeconds",28884
-timer-event-end,"V8.PreParseMicroSeconds",28890
-timer-event-start,"V8.PreParseMicroSeconds",28905
-timer-event-end,"V8.PreParseMicroSeconds",28931
-timer-event-start,"V8.PreParseMicroSeconds",28938
-timer-event-end,"V8.PreParseMicroSeconds",28970
-timer-event-start,"V8.PreParseMicroSeconds",28980
-timer-event-end,"V8.PreParseMicroSeconds",28989
-timer-event-start,"V8.PreParseMicroSeconds",28995
-timer-event-end,"V8.PreParseMicroSeconds",29005
-timer-event-start,"V8.PreParseMicroSeconds",29012
-timer-event-end,"V8.PreParseMicroSeconds",29019
-timer-event-start,"V8.PreParseMicroSeconds",29026
-timer-event-end,"V8.PreParseMicroSeconds",29045
-timer-event-start,"V8.PreParseMicroSeconds",29052
-timer-event-end,"V8.PreParseMicroSeconds",29059
-timer-event-start,"V8.PreParseMicroSeconds",29066
-timer-event-end,"V8.PreParseMicroSeconds",29072
-timer-event-start,"V8.PreParseMicroSeconds",29078
-timer-event-end,"V8.PreParseMicroSeconds",29087
+code-creation,LoadIC,5,0x2b832940,114,Int16Array
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b8329c0,114,Uint32Array
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832a40,114,Int32Array
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832ac0,114,Float32Array
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832b40,114,Float64Array
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832bc0,114,Uint8ClampedArray
+code-creation,LazyCompile,0,0x2b82f8a0,824, native typedarray.js:88,0x4423c580,
+code-creation,LoadIC,5,0x2b832c40,114,DataView
+code-creation,CallIC,7,0x2b832cc0,129,SetupDataView
+code-creation,LoadIC,5,0x2b832d60,103,$DataView
+code-creation,LoadIC,5,0x2b832de0,103,DataViewConstructor
+code-creation,LazyCompile,0,0x2b830880,908,DataViewConstructor native typedarray.js:209,0x4423b298,
+code-creation,LoadIC,5,0x2b832e60,103,DataViewGetBuffer
+code-creation,LoadIC,5,0x2b832ee0,103,DataViewGetByteOffset
+code-creation,LoadIC,5,0x2b832f60,103,DataViewGetByteLength
+code-creation,LoadIC,5,0x2b832fe0,103,DataViewGetInt8
+code-creation,LoadIC,5,0x2b833060,103,DataViewSetInt8
+code-creation,LoadIC,5,0x2b8330e0,103,DataViewGetUint8
+code-creation,LoadIC,5,0x2b833160,103,DataViewSetUint8
+code-creation,LoadIC,5,0x2b8331e0,103,DataViewGetInt16
+code-creation,LoadIC,5,0x2b833260,103,DataViewSetInt16
+code-creation,LoadIC,5,0x2b8332e0,103,DataViewGetUint16
+code-creation,LoadIC,5,0x2b833360,103,DataViewSetUint16
+code-creation,LoadIC,5,0x2b8333e0,103,DataViewGetInt32
+code-creation,LoadIC,5,0x2b833460,103,DataViewSetInt32
+code-creation,LoadIC,5,0x2b8334e0,103,DataViewGetUint32
+code-creation,LoadIC,5,0x2b833560,103,DataViewSetUint32
+code-creation,LoadIC,5,0x2b8335e0,103,DataViewGetFloat32
+code-creation,LoadIC,5,0x2b833660,103,DataViewSetFloat32
+code-creation,LoadIC,5,0x2b8336e0,103,DataViewGetFloat64
+code-creation,LoadIC,5,0x2b833760,103,DataViewSetFloat64
+code-creation,CallMiss,7,0x2b8337e0,189,args_count: 32
+code-creation,CallIC,7,0x2b8338a0,136,$Array
+code-creation,LoadIC,5,0x2b833940,93,length
+timer-event-start,V8.ParseMicroSeconds,28734
+timer-event-start,V8.PreParseMicroSeconds,28760
+timer-event-end,V8.PreParseMicroSeconds,28785
+timer-event-start,V8.PreParseMicroSeconds,28796
+timer-event-end,V8.PreParseMicroSeconds,28803
+timer-event-start,V8.PreParseMicroSeconds,28810
+timer-event-end,V8.PreParseMicroSeconds,28817
+timer-event-start,V8.PreParseMicroSeconds,28828
+timer-event-end,V8.PreParseMicroSeconds,28862
+timer-event-start,V8.PreParseMicroSeconds,28872
+timer-event-end,V8.PreParseMicroSeconds,28878
+timer-event-start,V8.PreParseMicroSeconds,28884
+timer-event-end,V8.PreParseMicroSeconds,28890
+timer-event-start,V8.PreParseMicroSeconds,28905
+timer-event-end,V8.PreParseMicroSeconds,28931
+timer-event-start,V8.PreParseMicroSeconds,28938
+timer-event-end,V8.PreParseMicroSeconds,28970
+timer-event-start,V8.PreParseMicroSeconds,28980
+timer-event-end,V8.PreParseMicroSeconds,28989
+timer-event-start,V8.PreParseMicroSeconds,28995
+timer-event-end,V8.PreParseMicroSeconds,29005
+timer-event-start,V8.PreParseMicroSeconds,29012
+timer-event-end,V8.PreParseMicroSeconds,29019
+timer-event-start,V8.PreParseMicroSeconds,29026
+timer-event-end,V8.PreParseMicroSeconds,29045
+timer-event-start,V8.PreParseMicroSeconds,29052
+timer-event-end,V8.PreParseMicroSeconds,29059
+timer-event-start,V8.PreParseMicroSeconds,29066
+timer-event-end,V8.PreParseMicroSeconds,29072
+timer-event-start,V8.PreParseMicroSeconds,29078
+timer-event-end,V8.PreParseMicroSeconds,29087
tick,0xf776d430,29099,0,0x0,2
-timer-event-start,"V8.PreParseMicroSeconds",29187
-timer-event-end,"V8.PreParseMicroSeconds",29241
-timer-event-start,"V8.PreParseMicroSeconds",29253
-timer-event-end,"V8.PreParseMicroSeconds",29261
-timer-event-start,"V8.PreParseMicroSeconds",29274
-timer-event-end,"V8.PreParseMicroSeconds",29286
-timer-event-start,"V8.PreParseMicroSeconds",29293
-timer-event-end,"V8.PreParseMicroSeconds",29305
-timer-event-start,"V8.PreParseMicroSeconds",29314
-timer-event-end,"V8.PreParseMicroSeconds",29324
-timer-event-start,"V8.PreParseMicroSeconds",29331
-timer-event-end,"V8.PreParseMicroSeconds",29344
-timer-event-start,"V8.PreParseMicroSeconds",29355
-timer-event-end,"V8.PreParseMicroSeconds",29369
-timer-event-start,"V8.PreParseMicroSeconds",29375
-timer-event-end,"V8.PreParseMicroSeconds",29391
-timer-event-start,"V8.PreParseMicroSeconds",29400
-timer-event-end,"V8.PreParseMicroSeconds",29408
-timer-event-start,"V8.PreParseMicroSeconds",29416
-timer-event-end,"V8.PreParseMicroSeconds",29422
-timer-event-start,"V8.PreParseMicroSeconds",29435
-timer-event-end,"V8.PreParseMicroSeconds",29442
-timer-event-start,"V8.PreParseMicroSeconds",29448
-timer-event-end,"V8.PreParseMicroSeconds",29461
-timer-event-start,"V8.PreParseMicroSeconds",29467
-timer-event-end,"V8.PreParseMicroSeconds",29480
-timer-event-start,"V8.PreParseMicroSeconds",29489
-timer-event-end,"V8.PreParseMicroSeconds",29508
-timer-event-start,"V8.PreParseMicroSeconds",29516
-timer-event-end,"V8.PreParseMicroSeconds",29547
-timer-event-start,"V8.PreParseMicroSeconds",29561
-timer-event-end,"V8.PreParseMicroSeconds",29579
-timer-event-start,"V8.PreParseMicroSeconds",29587
-timer-event-end,"V8.PreParseMicroSeconds",29605
-timer-event-start,"V8.PreParseMicroSeconds",29613
-timer-event-end,"V8.PreParseMicroSeconds",29639
-timer-event-start,"V8.PreParseMicroSeconds",29646
-timer-event-end,"V8.PreParseMicroSeconds",29667
-timer-event-start,"V8.PreParseMicroSeconds",29677
-timer-event-end,"V8.PreParseMicroSeconds",29702
-timer-event-start,"V8.PreParseMicroSeconds",29709
-timer-event-end,"V8.PreParseMicroSeconds",29735
-timer-event-start,"V8.PreParseMicroSeconds",29741
-timer-event-end,"V8.PreParseMicroSeconds",29758
-timer-event-start,"V8.PreParseMicroSeconds",29764
-timer-event-end,"V8.PreParseMicroSeconds",29773
-timer-event-start,"V8.PreParseMicroSeconds",29781
-timer-event-end,"V8.PreParseMicroSeconds",29796
-timer-event-start,"V8.PreParseMicroSeconds",29805
-timer-event-end,"V8.PreParseMicroSeconds",29813
-timer-event-start,"V8.PreParseMicroSeconds",29821
-timer-event-end,"V8.PreParseMicroSeconds",29839
-timer-event-start,"V8.PreParseMicroSeconds",29847
-timer-event-end,"V8.PreParseMicroSeconds",29861
-timer-event-start,"V8.PreParseMicroSeconds",29868
-timer-event-end,"V8.PreParseMicroSeconds",29873
-timer-event-start,"V8.PreParseMicroSeconds",29880
-timer-event-end,"V8.PreParseMicroSeconds",29908
-timer-event-start,"V8.PreParseMicroSeconds",29914
-timer-event-end,"V8.PreParseMicroSeconds",29923
-timer-event-start,"V8.PreParseMicroSeconds",29930
-timer-event-end,"V8.PreParseMicroSeconds",29937
-timer-event-start,"V8.PreParseMicroSeconds",29944
-timer-event-end,"V8.PreParseMicroSeconds",29955
-timer-event-start,"V8.PreParseMicroSeconds",29960
-timer-event-end,"V8.PreParseMicroSeconds",29970
-timer-event-start,"V8.PreParseMicroSeconds",29977
-timer-event-end,"V8.PreParseMicroSeconds",29982
-timer-event-start,"V8.PreParseMicroSeconds",29989
-timer-event-end,"V8.PreParseMicroSeconds",29999
-timer-event-start,"V8.PreParseMicroSeconds",30031
-timer-event-end,"V8.PreParseMicroSeconds",30041
-timer-event-start,"V8.PreParseMicroSeconds",30047
-timer-event-end,"V8.PreParseMicroSeconds",30054
-timer-event-start,"V8.PreParseMicroSeconds",30060
-timer-event-end,"V8.PreParseMicroSeconds",30069
-timer-event-start,"V8.PreParseMicroSeconds",30080
-timer-event-end,"V8.PreParseMicroSeconds",30106
-timer-event-start,"V8.PreParseMicroSeconds",30113
-timer-event-end,"V8.PreParseMicroSeconds",30121
-timer-event-start,"V8.PreParseMicroSeconds",30127
-timer-event-end,"V8.PreParseMicroSeconds",30133
-timer-event-start,"V8.PreParseMicroSeconds",30139
-timer-event-end,"V8.PreParseMicroSeconds",30148
+timer-event-start,V8.PreParseMicroSeconds,29187
+timer-event-end,V8.PreParseMicroSeconds,29241
+timer-event-start,V8.PreParseMicroSeconds,29253
+timer-event-end,V8.PreParseMicroSeconds,29261
+timer-event-start,V8.PreParseMicroSeconds,29274
+timer-event-end,V8.PreParseMicroSeconds,29286
+timer-event-start,V8.PreParseMicroSeconds,29293
+timer-event-end,V8.PreParseMicroSeconds,29305
+timer-event-start,V8.PreParseMicroSeconds,29314
+timer-event-end,V8.PreParseMicroSeconds,29324
+timer-event-start,V8.PreParseMicroSeconds,29331
+timer-event-end,V8.PreParseMicroSeconds,29344
+timer-event-start,V8.PreParseMicroSeconds,29355
+timer-event-end,V8.PreParseMicroSeconds,29369
+timer-event-start,V8.PreParseMicroSeconds,29375
+timer-event-end,V8.PreParseMicroSeconds,29391
+timer-event-start,V8.PreParseMicroSeconds,29400
+timer-event-end,V8.PreParseMicroSeconds,29408
+timer-event-start,V8.PreParseMicroSeconds,29416
+timer-event-end,V8.PreParseMicroSeconds,29422
+timer-event-start,V8.PreParseMicroSeconds,29435
+timer-event-end,V8.PreParseMicroSeconds,29442
+timer-event-start,V8.PreParseMicroSeconds,29448
+timer-event-end,V8.PreParseMicroSeconds,29461
+timer-event-start,V8.PreParseMicroSeconds,29467
+timer-event-end,V8.PreParseMicroSeconds,29480
+timer-event-start,V8.PreParseMicroSeconds,29489
+timer-event-end,V8.PreParseMicroSeconds,29508
+timer-event-start,V8.PreParseMicroSeconds,29516
+timer-event-end,V8.PreParseMicroSeconds,29547
+timer-event-start,V8.PreParseMicroSeconds,29561
+timer-event-end,V8.PreParseMicroSeconds,29579
+timer-event-start,V8.PreParseMicroSeconds,29587
+timer-event-end,V8.PreParseMicroSeconds,29605
+timer-event-start,V8.PreParseMicroSeconds,29613
+timer-event-end,V8.PreParseMicroSeconds,29639
+timer-event-start,V8.PreParseMicroSeconds,29646
+timer-event-end,V8.PreParseMicroSeconds,29667
+timer-event-start,V8.PreParseMicroSeconds,29677
+timer-event-end,V8.PreParseMicroSeconds,29702
+timer-event-start,V8.PreParseMicroSeconds,29709
+timer-event-end,V8.PreParseMicroSeconds,29735
+timer-event-start,V8.PreParseMicroSeconds,29741
+timer-event-end,V8.PreParseMicroSeconds,29758
+timer-event-start,V8.PreParseMicroSeconds,29764
+timer-event-end,V8.PreParseMicroSeconds,29773
+timer-event-start,V8.PreParseMicroSeconds,29781
+timer-event-end,V8.PreParseMicroSeconds,29796
+timer-event-start,V8.PreParseMicroSeconds,29805
+timer-event-end,V8.PreParseMicroSeconds,29813
+timer-event-start,V8.PreParseMicroSeconds,29821
+timer-event-end,V8.PreParseMicroSeconds,29839
+timer-event-start,V8.PreParseMicroSeconds,29847
+timer-event-end,V8.PreParseMicroSeconds,29861
+timer-event-start,V8.PreParseMicroSeconds,29868
+timer-event-end,V8.PreParseMicroSeconds,29873
+timer-event-start,V8.PreParseMicroSeconds,29880
+timer-event-end,V8.PreParseMicroSeconds,29908
+timer-event-start,V8.PreParseMicroSeconds,29914
+timer-event-end,V8.PreParseMicroSeconds,29923
+timer-event-start,V8.PreParseMicroSeconds,29930
+timer-event-end,V8.PreParseMicroSeconds,29937
+timer-event-start,V8.PreParseMicroSeconds,29944
+timer-event-end,V8.PreParseMicroSeconds,29955
+timer-event-start,V8.PreParseMicroSeconds,29960
+timer-event-end,V8.PreParseMicroSeconds,29970
+timer-event-start,V8.PreParseMicroSeconds,29977
+timer-event-end,V8.PreParseMicroSeconds,29982
+timer-event-start,V8.PreParseMicroSeconds,29989
+timer-event-end,V8.PreParseMicroSeconds,29999
+timer-event-start,V8.PreParseMicroSeconds,30031
+timer-event-end,V8.PreParseMicroSeconds,30041
+timer-event-start,V8.PreParseMicroSeconds,30047
+timer-event-end,V8.PreParseMicroSeconds,30054
+timer-event-start,V8.PreParseMicroSeconds,30060
+timer-event-end,V8.PreParseMicroSeconds,30069
+timer-event-start,V8.PreParseMicroSeconds,30080
+timer-event-end,V8.PreParseMicroSeconds,30106
+timer-event-start,V8.PreParseMicroSeconds,30113
+timer-event-end,V8.PreParseMicroSeconds,30121
+timer-event-start,V8.PreParseMicroSeconds,30127
+timer-event-end,V8.PreParseMicroSeconds,30133
+timer-event-start,V8.PreParseMicroSeconds,30139
+timer-event-end,V8.PreParseMicroSeconds,30148
tick,0x825e06c,30162,0,0x0,2
-timer-event-start,"V8.PreParseMicroSeconds",30217
-timer-event-end,"V8.PreParseMicroSeconds",30285
-timer-event-start,"V8.PreParseMicroSeconds",30293
-timer-event-end,"V8.PreParseMicroSeconds",30319
-timer-event-start,"V8.PreParseMicroSeconds",30326
-timer-event-end,"V8.PreParseMicroSeconds",30344
-timer-event-start,"V8.PreParseMicroSeconds",30350
-timer-event-end,"V8.PreParseMicroSeconds",30367
-timer-event-start,"V8.PreParseMicroSeconds",30374
-timer-event-end,"V8.PreParseMicroSeconds",30385
-timer-event-start,"V8.PreParseMicroSeconds",30392
-timer-event-end,"V8.PreParseMicroSeconds",30400
-timer-event-start,"V8.PreParseMicroSeconds",30407
-timer-event-end,"V8.PreParseMicroSeconds",30415
-timer-event-start,"V8.PreParseMicroSeconds",30429
-timer-event-end,"V8.PreParseMicroSeconds",30446
-timer-event-start,"V8.PreParseMicroSeconds",30456
-timer-event-end,"V8.PreParseMicroSeconds",30461
-timer-event-start,"V8.PreParseMicroSeconds",30469
-timer-event-end,"V8.PreParseMicroSeconds",30480
-timer-event-start,"V8.PreParseMicroSeconds",30488
-timer-event-end,"V8.PreParseMicroSeconds",30497
-timer-event-start,"V8.PreParseMicroSeconds",30503
-timer-event-end,"V8.PreParseMicroSeconds",30511
-timer-event-start,"V8.PreParseMicroSeconds",30517
-timer-event-end,"V8.PreParseMicroSeconds",30528
-timer-event-start,"V8.PreParseMicroSeconds",30535
-timer-event-end,"V8.PreParseMicroSeconds",30539
-timer-event-start,"V8.PreParseMicroSeconds",30546
-timer-event-end,"V8.PreParseMicroSeconds",30550
-timer-event-start,"V8.PreParseMicroSeconds",30568
-timer-event-end,"V8.PreParseMicroSeconds",30577
-timer-event-start,"V8.PreParseMicroSeconds",30586
-timer-event-end,"V8.PreParseMicroSeconds",30591
-timer-event-start,"V8.PreParseMicroSeconds",30600
-timer-event-end,"V8.PreParseMicroSeconds",30610
-timer-event-start,"V8.PreParseMicroSeconds",30616
-timer-event-end,"V8.PreParseMicroSeconds",30621
-timer-event-start,"V8.PreParseMicroSeconds",30630
-timer-event-end,"V8.PreParseMicroSeconds",30638
-timer-event-start,"V8.PreParseMicroSeconds",30649
-timer-event-end,"V8.PreParseMicroSeconds",30665
-timer-event-start,"V8.PreParseMicroSeconds",30672
-timer-event-end,"V8.PreParseMicroSeconds",30682
-timer-event-start,"V8.PreParseMicroSeconds",30692
-timer-event-end,"V8.PreParseMicroSeconds",30706
-timer-event-start,"V8.PreParseMicroSeconds",30719
-timer-event-end,"V8.PreParseMicroSeconds",30730
-timer-event-start,"V8.PreParseMicroSeconds",30737
-timer-event-end,"V8.PreParseMicroSeconds",30749
+timer-event-start,V8.PreParseMicroSeconds,30217
+timer-event-end,V8.PreParseMicroSeconds,30285
+timer-event-start,V8.PreParseMicroSeconds,30293
+timer-event-end,V8.PreParseMicroSeconds,30319
+timer-event-start,V8.PreParseMicroSeconds,30326
+timer-event-end,V8.PreParseMicroSeconds,30344
+timer-event-start,V8.PreParseMicroSeconds,30350
+timer-event-end,V8.PreParseMicroSeconds,30367
+timer-event-start,V8.PreParseMicroSeconds,30374
+timer-event-end,V8.PreParseMicroSeconds,30385
+timer-event-start,V8.PreParseMicroSeconds,30392
+timer-event-end,V8.PreParseMicroSeconds,30400
+timer-event-start,V8.PreParseMicroSeconds,30407
+timer-event-end,V8.PreParseMicroSeconds,30415
+timer-event-start,V8.PreParseMicroSeconds,30429
+timer-event-end,V8.PreParseMicroSeconds,30446
+timer-event-start,V8.PreParseMicroSeconds,30456
+timer-event-end,V8.PreParseMicroSeconds,30461
+timer-event-start,V8.PreParseMicroSeconds,30469
+timer-event-end,V8.PreParseMicroSeconds,30480
+timer-event-start,V8.PreParseMicroSeconds,30488
+timer-event-end,V8.PreParseMicroSeconds,30497
+timer-event-start,V8.PreParseMicroSeconds,30503
+timer-event-end,V8.PreParseMicroSeconds,30511
+timer-event-start,V8.PreParseMicroSeconds,30517
+timer-event-end,V8.PreParseMicroSeconds,30528
+timer-event-start,V8.PreParseMicroSeconds,30535
+timer-event-end,V8.PreParseMicroSeconds,30539
+timer-event-start,V8.PreParseMicroSeconds,30546
+timer-event-end,V8.PreParseMicroSeconds,30550
+timer-event-start,V8.PreParseMicroSeconds,30568
+timer-event-end,V8.PreParseMicroSeconds,30577
+timer-event-start,V8.PreParseMicroSeconds,30586
+timer-event-end,V8.PreParseMicroSeconds,30591
+timer-event-start,V8.PreParseMicroSeconds,30600
+timer-event-end,V8.PreParseMicroSeconds,30610
+timer-event-start,V8.PreParseMicroSeconds,30616
+timer-event-end,V8.PreParseMicroSeconds,30621
+timer-event-start,V8.PreParseMicroSeconds,30630
+timer-event-end,V8.PreParseMicroSeconds,30638
+timer-event-start,V8.PreParseMicroSeconds,30649
+timer-event-end,V8.PreParseMicroSeconds,30665
+timer-event-start,V8.PreParseMicroSeconds,30672
+timer-event-end,V8.PreParseMicroSeconds,30682
+timer-event-start,V8.PreParseMicroSeconds,30692
+timer-event-end,V8.PreParseMicroSeconds,30706
+timer-event-start,V8.PreParseMicroSeconds,30719
+timer-event-end,V8.PreParseMicroSeconds,30730
+timer-event-start,V8.PreParseMicroSeconds,30737
+timer-event-end,V8.PreParseMicroSeconds,30749
tick,0x82b07f6,31208,0,0x0,2
tick,0x824d3ad,32274,0,0x0,2
tick,0x82b07c6,33327,0,0x0,2
@@ -1309,1305 +1309,1305 @@ tick,0x81fc625,39722,0,0x0,2
tick,0x81fc61e,40783,0,0x0,2
tick,0x821c1a1,41846,0,0x0,2
tick,0x81fc62c,42913,0,0x0,2
-timer-event-start,"V8.PreParseMicroSeconds",43415
-timer-event-end,"V8.PreParseMicroSeconds",43428
-timer-event-start,"V8.PreParseMicroSeconds",43446
-timer-event-end,"V8.PreParseMicroSeconds",43481
-timer-event-end,"V8.ParseMicroSeconds",43493
-timer-event-start,"V8.Compile",43498
-timer-event-start,"V8.CompileFullCode",43528
-timer-event-end,"V8.CompileFullCode",43671
-code-creation,Script,0,0x2b8339a0,6060,"bsuite/kraken-once/stanford-crypto-ccm.js",0x2f33b684,~
-timer-event-end,"V8.Compile",43688
-timer-event-start,"V8.Execute",43739
-timer-event-start,"V8.ParseLazyMicroSeconds",43914
-timer-event-end,"V8.ParseLazyMicroSeconds",43936
-timer-event-start,"V8.CompileLazy",43942
-timer-event-start,"V8.CompileFullCode",43947
-timer-event-end,"V8.CompileFullCode",43966
+timer-event-start,V8.PreParseMicroSeconds,43415
+timer-event-end,V8.PreParseMicroSeconds,43428
+timer-event-start,V8.PreParseMicroSeconds,43446
+timer-event-end,V8.PreParseMicroSeconds,43481
+timer-event-end,V8.ParseMicroSeconds,43493
+timer-event-start,V8.Compile,43498
+timer-event-start,V8.CompileFullCode,43528
+timer-event-end,V8.CompileFullCode,43671
+code-creation,Script,0,0x2b8339a0,6060,bsuite/kraken-once/stanford-crypto-ccm.js,0x2f33b684,~
+timer-event-end,V8.Compile,43688
+timer-event-start,V8.Execute,43739
+timer-event-start,V8.ParseLazyMicroSeconds,43914
+timer-event-end,V8.ParseLazyMicroSeconds,43936
+timer-event-start,V8.CompileLazy,43942
+timer-event-start,V8.CompileFullCode,43947
+timer-event-end,V8.CompileFullCode,43966
tick,0x820b498,43978,0,0x0,0,0x2b83464f
-code-creation,LazyCompile,0,0x2b835160,372,"sjcl.hash.sha256 bsuite/kraken-once/stanford-crypto-ccm.js:15",0x2f339fb0,~
-timer-event-end,"V8.CompileLazy",44194
-code-creation,Stub,2,0x2b8352e0,188,"KeyedLoadElementStub"
-code-creation,KeyedLoadIC,6,0x2b8353a0,91,""
-code-creation,CallPreMonomorphic,7,0x2b835400,178,"args_count: 0"
-timer-event-start,"V8.ParseLazyMicroSeconds",44292
-timer-event-end,"V8.ParseLazyMicroSeconds",44326
-timer-event-start,"V8.CompileLazy",44333
-timer-event-start,"V8.CompileFullCode",44340
-code-creation,Stub,12,0x2b8354c0,88,"BinaryOpStub_MOD_Alloc_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b835520,88,"BinaryOpStub_MUL_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",44389
-code-creation,LazyCompile,0,0x2b835580,906,"sjcl.hash.sha256.w bsuite/kraken-once/stanford-crypto-ccm.js:17",0x2f33a190,~
-timer-event-end,"V8.CompileLazy",44407
-code-creation,Stub,12,0x2b835920,167,"BinaryOpStub_MUL_Alloc_Smi+Smi"
-code-creation,Stub,13,0x2b8359e0,122,"CompareICStub"
-timer-event-start,"V8.ParseLazyMicroSeconds",44439
-timer-event-end,"V8.ParseLazyMicroSeconds",44460
-timer-event-start,"V8.CompileLazy",44465
-timer-event-start,"V8.CompileFullCode",44471
-code-creation,Stub,2,0x2b835a60,501,"MathPowStub"
-timer-event-end,"V8.CompileFullCode",44505
-code-creation,LazyCompile,0,0x2b835c60,304,"pow native math.js:181",0x4422259c,~
-timer-event-end,"V8.CompileLazy",44517
-timer-event-start,"V8.ParseLazyMicroSeconds",44522
-timer-event-end,"V8.ParseLazyMicroSeconds",44534
-timer-event-start,"V8.CompileLazy",44539
-timer-event-start,"V8.CompileFullCode",44545
-code-creation,Stub,12,0x2b835da0,88,"BinaryOpStub_MUL_OverwriteLeft_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b835e00,88,"BinaryOpStub_BIT_OR_OverwriteLeft_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",44570
-code-creation,LazyCompile,0,0x2b835e60,228,"a bsuite/kraken-once/stanford-crypto-ccm.js:17",0x2f33d150,~
-timer-event-end,"V8.CompileLazy",44582
-timer-event-start,"V8.ParseLazyMicroSeconds",44590
-timer-event-end,"V8.ParseLazyMicroSeconds",44609
-timer-event-start,"V8.CompileLazy",44614
-timer-event-start,"V8.CompileFullCode",44619
-code-creation,Stub,12,0x2b835f60,88,"BinaryOpStub_SHR_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",44646
-code-creation,LazyCompile,0,0x2b835fc0,344,"floor native math.js:99",0x4422241c,~
-timer-event-end,"V8.CompileLazy",44657
-code-creation,Stub,13,0x2b836120,404,"CompareICStub"
-code-creation,Stub,13,0x2b8362c0,232,"CompareICStub"
-code-creation,Stub,13,0x2b8363c0,404,"CompareICStub"
-code-creation,Stub,13,0x2b836560,240,"CompareICStub"
-code-creation,Stub,12,0x2b836660,349,"BinaryOpStub_SHR_Alloc_Number+Smi"
-code-creation,Stub,12,0x2b8367c0,246,"BinaryOpStub_SUB_Alloc_Number+Smi"
-code-creation,Stub,12,0x2b8368c0,245,"BinaryOpStub_MUL_OverwriteLeft_Number+Number"
-code-creation,Stub,12,0x2b8369c0,407,"BinaryOpStub_BIT_OR_OverwriteLeft_Number+Smi"
-code-creation,Stub,2,0x2b836b60,1808,"RecordWriteStub"
-code-creation,Stub,2,0x2b837280,606,"KeyedStoreElementStub"
-code-creation,KeyedStoreIC,10,0x2b8374e0,91,""
+code-creation,LazyCompile,0,0x2b835160,372,sjcl.hash.sha256 bsuite/kraken-once/stanford-crypto-ccm.js:15,0x2f339fb0,~
+timer-event-end,V8.CompileLazy,44194
+code-creation,Stub,2,0x2b8352e0,188,KeyedLoadElementStub
+code-creation,KeyedLoadIC,6,0x2b8353a0,91,
+code-creation,CallPreMonomorphic,7,0x2b835400,178,args_count: 0
+timer-event-start,V8.ParseLazyMicroSeconds,44292
+timer-event-end,V8.ParseLazyMicroSeconds,44326
+timer-event-start,V8.CompileLazy,44333
+timer-event-start,V8.CompileFullCode,44340
+code-creation,Stub,12,0x2b8354c0,88,BinaryOpStub_MOD_Alloc_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b835520,88,BinaryOpStub_MUL_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,44389
+code-creation,LazyCompile,0,0x2b835580,906,sjcl.hash.sha256.w bsuite/kraken-once/stanford-crypto-ccm.js:17,0x2f33a190,~
+timer-event-end,V8.CompileLazy,44407
+code-creation,Stub,12,0x2b835920,167,BinaryOpStub_MUL_Alloc_Smi+Smi
+code-creation,Stub,13,0x2b8359e0,122,CompareICStub
+timer-event-start,V8.ParseLazyMicroSeconds,44439
+timer-event-end,V8.ParseLazyMicroSeconds,44460
+timer-event-start,V8.CompileLazy,44465
+timer-event-start,V8.CompileFullCode,44471
+code-creation,Stub,2,0x2b835a60,501,MathPowStub
+timer-event-end,V8.CompileFullCode,44505
+code-creation,LazyCompile,0,0x2b835c60,304,pow native math.js:181,0x4422259c,~
+timer-event-end,V8.CompileLazy,44517
+timer-event-start,V8.ParseLazyMicroSeconds,44522
+timer-event-end,V8.ParseLazyMicroSeconds,44534
+timer-event-start,V8.CompileLazy,44539
+timer-event-start,V8.CompileFullCode,44545
+code-creation,Stub,12,0x2b835da0,88,BinaryOpStub_MUL_OverwriteLeft_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b835e00,88,BinaryOpStub_BIT_OR_OverwriteLeft_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,44570
+code-creation,LazyCompile,0,0x2b835e60,228,a bsuite/kraken-once/stanford-crypto-ccm.js:17,0x2f33d150,~
+timer-event-end,V8.CompileLazy,44582
+timer-event-start,V8.ParseLazyMicroSeconds,44590
+timer-event-end,V8.ParseLazyMicroSeconds,44609
+timer-event-start,V8.CompileLazy,44614
+timer-event-start,V8.CompileFullCode,44619
+code-creation,Stub,12,0x2b835f60,88,BinaryOpStub_SHR_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,44646
+code-creation,LazyCompile,0,0x2b835fc0,344,floor native math.js:99,0x4422241c,~
+timer-event-end,V8.CompileLazy,44657
+code-creation,Stub,13,0x2b836120,404,CompareICStub
+code-creation,Stub,13,0x2b8362c0,232,CompareICStub
+code-creation,Stub,13,0x2b8363c0,404,CompareICStub
+code-creation,Stub,13,0x2b836560,240,CompareICStub
+code-creation,Stub,12,0x2b836660,349,BinaryOpStub_SHR_Alloc_Number+Smi
+code-creation,Stub,12,0x2b8367c0,246,BinaryOpStub_SUB_Alloc_Number+Smi
+code-creation,Stub,12,0x2b8368c0,245,BinaryOpStub_MUL_OverwriteLeft_Number+Number
+code-creation,Stub,12,0x2b8369c0,407,BinaryOpStub_BIT_OR_OverwriteLeft_Number+Smi
+code-creation,Stub,2,0x2b836b60,1808,RecordWriteStub
+code-creation,Stub,2,0x2b837280,606,KeyedStoreElementStub
+code-creation,KeyedStoreIC,10,0x2b8374e0,91,
tick,0x31e6020f,45036,0,0x2b836b61,0,0x2b8357c1,0x2b835208,0x2b83464f
-code-creation,LoadIC,5,0x2b837540,114,"Math"
-code-creation,CallIC,7,0x2b8375c0,289,"floor"
-code-creation,Stub,2,0x2b837700,80,"LoadFieldStub"
-code-creation,Stub,2,0x2b837760,95,"N"
-code-creation,LoadIC,5,0x2b8377c0,93,"N"
-code-creation,CallIC,7,0x2b837820,113,"pow"
-code-creation,Stub,2,0x2b8378a0,80,"LoadFieldStub"
-code-creation,Stub,2,0x2b837900,95,"a"
-code-creation,LoadIC,5,0x2b837960,93,"a"
-code-creation,Stub,12,0x2b8379c0,190,"BinaryOpStub_MOD_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b837a80,181,"BinaryOpStub_MOD_Alloc_Smi+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",45383
-timer-event-end,"V8.ParseLazyMicroSeconds",45402
-timer-event-start,"V8.CompileLazy",45408
-timer-event-start,"V8.CompileFullCode",45413
-timer-event-end,"V8.CompileFullCode",45428
-code-creation,LazyCompile,0,0x2b837b40,264,"sjcl.hash.sha256.reset bsuite/kraken-once/stanford-crypto-ccm.js:16",0x2f33a070,~
-timer-event-end,"V8.CompileLazy",45442
-code-creation,StoreIC,9,0x2b837c60,138,"codec"
-code-creation,StoreIC,9,0x2b837d00,141,"hex"
+code-creation,LoadIC,5,0x2b837540,114,Math
+code-creation,CallIC,7,0x2b8375c0,289,floor
+code-creation,Stub,2,0x2b837700,80,LoadFieldStub
+code-creation,Stub,2,0x2b837760,95,N
+code-creation,LoadIC,5,0x2b8377c0,93,N
+code-creation,CallIC,7,0x2b837820,113,pow
+code-creation,Stub,2,0x2b8378a0,80,LoadFieldStub
+code-creation,Stub,2,0x2b837900,95,a
+code-creation,LoadIC,5,0x2b837960,93,a
+code-creation,Stub,12,0x2b8379c0,190,BinaryOpStub_MOD_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b837a80,181,BinaryOpStub_MOD_Alloc_Smi+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,45383
+timer-event-end,V8.ParseLazyMicroSeconds,45402
+timer-event-start,V8.CompileLazy,45408
+timer-event-start,V8.CompileFullCode,45413
+timer-event-end,V8.CompileFullCode,45428
+code-creation,LazyCompile,0,0x2b837b40,264,sjcl.hash.sha256.reset bsuite/kraken-once/stanford-crypto-ccm.js:16,0x2f33a070,~
+timer-event-end,V8.CompileLazy,45442
+code-creation,StoreIC,9,0x2b837c60,138,codec
+code-creation,StoreIC,9,0x2b837d00,141,hex
tick,0x8294f6f,46096,0,0xff820124,0,0x2b834ff0
-code-creation,StoreIC,9,0x2b837da0,171,"ccm"
-timer-event-start,"V8.ParseLazyMicroSeconds",46605
-timer-event-end,"V8.ParseLazyMicroSeconds",46625
-timer-event-start,"V8.CompileLazy",46630
-timer-event-start,"V8.CompileFullCode",46635
-timer-event-end,"V8.CompileFullCode",46649
-code-creation,LazyCompile,0,0x2b837e60,300,"sjcl.test.TestCase bsuite/kraken-once/stanford-crypto-ccm.js:99",0x2f33b210,~
-timer-event-end,"V8.CompileLazy",46663
-timer-event-start,"V8.ParseLazyMicroSeconds",46681
-timer-event-end,"V8.ParseLazyMicroSeconds",46712
-timer-event-start,"V8.CompileLazy",46718
-timer-event-start,"V8.CompileFullCode",46725
-code-creation,CallInitialize,7,0x2b837fa0,178,"args_count: 4"
-timer-event-end,"V8.CompileFullCode",46771
-code-creation,LazyCompile,0,0x2b838060,953,"sjcl.test.run bsuite/kraken-once/stanford-crypto-ccm.js:180",0x2f33b4b0,~
-timer-event-end,"V8.CompileLazy",46788
-code-creation,Stub,13,0x2b838420,485,"CompareICStub"
-code-creation,CallIC,7,0x2b838620,128,"ToString"
-code-creation,CallPreMonomorphic,7,0x2b8386a0,178,"args_count: 4"
-timer-event-start,"V8.ParseLazyMicroSeconds",46859
-timer-event-end,"V8.ParseLazyMicroSeconds",46876
-timer-event-start,"V8.CompileLazy",46881
-timer-event-start,"V8.CompileFullCode",46888
-code-creation,CallInitialize,7,0x2b838760,178,"args_count: 5"
-timer-event-end,"V8.CompileFullCode",46910
-code-creation,LazyCompile,0,0x2b838820,320,"browserUtil.cpsMap bsuite/kraken-once/stanford-crypto-ccm.js:63",0x2f33b030,~
-timer-event-end,"V8.CompileLazy",46922
-code-creation,CallPreMonomorphic,7,0x2b838960,178,"args_count: 5"
-timer-event-start,"V8.ParseLazyMicroSeconds",46937
-timer-event-end,"V8.ParseLazyMicroSeconds",46959
-timer-event-start,"V8.CompileLazy",46965
-timer-event-start,"V8.CompileFullCode",46972
-code-creation,Stub,2,0x2b838a20,172,"FastNewContextStub"
-timer-event-end,"V8.CompileFullCode",46995
-code-creation,LazyCompile,0,0x2b838ae0,420,"browserUtil.cpsIterate bsuite/kraken-once/stanford-crypto-ccm.js:49",0x2f33afd0,~
-timer-event-end,"V8.CompileLazy",47008
-timer-event-start,"V8.ParseLazyMicroSeconds",47013
-timer-event-end,"V8.ParseLazyMicroSeconds",47029
-timer-event-start,"V8.CompileLazy",47034
-timer-event-start,"V8.CompileFullCode",47041
-code-creation,Stub,2,0x2b838ca0,328,"CallFunctionStub_Args0_Recording"
-timer-event-end,"V8.CompileFullCode",47070
-code-creation,LazyCompile,0,0x2b838e00,372,"go bsuite/kraken-once/stanford-crypto-ccm.js:50",0x2f33da7c,~
-timer-event-end,"V8.CompileLazy",47082
-timer-event-start,"V8.ParseLazyMicroSeconds",47088
-timer-event-end,"V8.ParseLazyMicroSeconds",47110
-timer-event-start,"V8.CompileLazy",47115
-timer-event-start,"V8.CompileFullCode",47121
-timer-event-end,"V8.CompileFullCode",47134
-code-creation,LazyCompile,0,0x2b838f80,236," bsuite/kraken-once/stanford-crypto-ccm.js:64",0x2f33d9d4,~
-timer-event-end,"V8.CompileLazy",47146
+code-creation,StoreIC,9,0x2b837da0,171,ccm
+timer-event-start,V8.ParseLazyMicroSeconds,46605
+timer-event-end,V8.ParseLazyMicroSeconds,46625
+timer-event-start,V8.CompileLazy,46630
+timer-event-start,V8.CompileFullCode,46635
+timer-event-end,V8.CompileFullCode,46649
+code-creation,LazyCompile,0,0x2b837e60,300,sjcl.test.TestCase bsuite/kraken-once/stanford-crypto-ccm.js:99,0x2f33b210,~
+timer-event-end,V8.CompileLazy,46663
+timer-event-start,V8.ParseLazyMicroSeconds,46681
+timer-event-end,V8.ParseLazyMicroSeconds,46712
+timer-event-start,V8.CompileLazy,46718
+timer-event-start,V8.CompileFullCode,46725
+code-creation,CallInitialize,7,0x2b837fa0,178,args_count: 4
+timer-event-end,V8.CompileFullCode,46771
+code-creation,LazyCompile,0,0x2b838060,953,sjcl.test.run bsuite/kraken-once/stanford-crypto-ccm.js:180,0x2f33b4b0,~
+timer-event-end,V8.CompileLazy,46788
+code-creation,Stub,13,0x2b838420,485,CompareICStub
+code-creation,CallIC,7,0x2b838620,128,ToString
+code-creation,CallPreMonomorphic,7,0x2b8386a0,178,args_count: 4
+timer-event-start,V8.ParseLazyMicroSeconds,46859
+timer-event-end,V8.ParseLazyMicroSeconds,46876
+timer-event-start,V8.CompileLazy,46881
+timer-event-start,V8.CompileFullCode,46888
+code-creation,CallInitialize,7,0x2b838760,178,args_count: 5
+timer-event-end,V8.CompileFullCode,46910
+code-creation,LazyCompile,0,0x2b838820,320,browserUtil.cpsMap bsuite/kraken-once/stanford-crypto-ccm.js:63,0x2f33b030,~
+timer-event-end,V8.CompileLazy,46922
+code-creation,CallPreMonomorphic,7,0x2b838960,178,args_count: 5
+timer-event-start,V8.ParseLazyMicroSeconds,46937
+timer-event-end,V8.ParseLazyMicroSeconds,46959
+timer-event-start,V8.CompileLazy,46965
+timer-event-start,V8.CompileFullCode,46972
+code-creation,Stub,2,0x2b838a20,172,FastNewContextStub
+timer-event-end,V8.CompileFullCode,46995
+code-creation,LazyCompile,0,0x2b838ae0,420,browserUtil.cpsIterate bsuite/kraken-once/stanford-crypto-ccm.js:49,0x2f33afd0,~
+timer-event-end,V8.CompileLazy,47008
+timer-event-start,V8.ParseLazyMicroSeconds,47013
+timer-event-end,V8.ParseLazyMicroSeconds,47029
+timer-event-start,V8.CompileLazy,47034
+timer-event-start,V8.CompileFullCode,47041
+code-creation,Stub,2,0x2b838ca0,328,CallFunctionStub_Args0_Recording
+timer-event-end,V8.CompileFullCode,47070
+code-creation,LazyCompile,0,0x2b838e00,372,go bsuite/kraken-once/stanford-crypto-ccm.js:50,0x2f33da7c,~
+timer-event-end,V8.CompileLazy,47082
+timer-event-start,V8.ParseLazyMicroSeconds,47088
+timer-event-end,V8.ParseLazyMicroSeconds,47110
+timer-event-start,V8.CompileLazy,47115
+timer-event-start,V8.CompileFullCode,47121
+timer-event-end,V8.CompileFullCode,47134
+code-creation,LazyCompile,0,0x2b838f80,236, bsuite/kraken-once/stanford-crypto-ccm.js:64,0x2f33d9d4,~
+timer-event-end,V8.CompileLazy,47146
tick,0xf776d430,47160,0,0x90ec418,2,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,KeyedLoadIC,6,0x2b839080,91,""
-timer-event-start,"V8.ParseLazyMicroSeconds",47296
-timer-event-end,"V8.ParseLazyMicroSeconds",47317
-timer-event-start,"V8.CompileLazy",47323
-timer-event-start,"V8.CompileFullCode",47329
-code-creation,CallInitialize,7,0x2b8390e0,178,"args_count: 3"
-timer-event-end,"V8.CompileFullCode",47355
-code-creation,LazyCompile,0,0x2b8391a0,260," bsuite/kraken-once/stanford-crypto-ccm.js:192",0x2f33d920,~
-timer-event-end,"V8.CompileLazy",47368
-code-creation,CallPreMonomorphic,7,0x2b8392c0,178,"args_count: 3"
-timer-event-start,"V8.ParseLazyMicroSeconds",47390
-timer-event-end,"V8.ParseLazyMicroSeconds",47409
-timer-event-start,"V8.CompileLazy",47415
-timer-event-start,"V8.CompileFullCode",47421
-timer-event-end,"V8.CompileFullCode",47438
-code-creation,LazyCompile,0,0x2b839380,344,"sjcl.test.TestCase.run bsuite/kraken-once/stanford-crypto-ccm.js:168",0x2f33b450,~
-timer-event-end,"V8.CompileLazy",47452
-timer-event-start,"V8.ParseLazyMicroSeconds",47462
-timer-event-end,"V8.ParseLazyMicroSeconds",47476
-timer-event-start,"V8.CompileLazy",47481
-timer-event-start,"V8.CompileFullCode",47485
-timer-event-end,"V8.CompileFullCode",47496
-code-creation,LazyCompile,0,0x2b8394e0,208,"valueOf native date.js:361",0x44218984,~
-timer-event-end,"V8.CompileLazy",47507
-timer-event-start,"V8.ParseLazyMicroSeconds",47517
-timer-event-end,"V8.ParseLazyMicroSeconds",47526
-timer-event-start,"V8.CompileLazy",47531
-timer-event-start,"V8.CompileFullCode",47536
-timer-event-end,"V8.CompileFullCode",47545
-code-creation,LazyCompile,0,0x2b8395c0,192,"browserUtil.pauseAndThen bsuite/kraken-once/stanford-crypto-ccm.js:47",0x2f33af70,~
-timer-event-end,"V8.CompileLazy",47557
-timer-event-start,"V8.ParseLazyMicroSeconds",47561
-timer-event-end,"V8.ParseLazyMicroSeconds",47571
-timer-event-start,"V8.CompileLazy",47576
-timer-event-start,"V8.CompileFullCode",47581
-timer-event-end,"V8.CompileFullCode",47591
-code-creation,LazyCompile,0,0x2b839680,192," bsuite/kraken-once/stanford-crypto-ccm.js:171",0x2f33dc70,~
-timer-event-end,"V8.CompileLazy",47602
-timer-event-start,"V8.ParseLazyMicroSeconds",47608
-timer-event-end,"V8.ParseLazyMicroSeconds",47674
-timer-event-start,"V8.CompileLazy",47681
-timer-event-start,"V8.CompileFullCode",47693
-code-creation,Stub,2,0x2b839740,196,"FastNewContextStub"
-code-creation,Stub,12,0x2b839820,88,"BinaryOpStub_DIV_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",47755
-code-creation,LazyCompile,0,0x2b839880,716," bsuite/kraken-once/stanford-crypto-ccm.js:7235",0x2f33b5d0,~
-timer-event-end,"V8.CompileLazy",47768
-code-creation,Stub,12,0x2b839b60,196,"BinaryOpStub_DIV_Alloc_Smi+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",47798
-timer-event-end,"V8.ParseLazyMicroSeconds",47845
-timer-event-start,"V8.CompileLazy",47851
-timer-event-start,"V8.CompileFullCode",47863
-code-creation,Stub,12,0x2b839c40,88,"BinaryOpStub_MUL_OverwriteRight_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",47917
-code-creation,LazyCompile,0,0x2b839ca0,2065," bsuite/kraken-once/stanford-crypto-ccm.js:7243",0x2f33de10,
-timer-event-end,"V8.CompileLazy",47930
-code-creation,Stub,12,0x2b83a4c0,167,"BinaryOpStub_MUL_OverwriteRight_Smi+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",47958
-timer-event-end,"V8.ParseLazyMicroSeconds",47986
-timer-event-start,"V8.CompileLazy",47992
-timer-event-start,"V8.CompileFullCode",47998
-code-creation,Stub,12,0x2b83a580,88,"BinaryOpStub_BIT_XOR_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",48031
-code-creation,LazyCompile,0,0x2b83a5e0,717,"sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13",0x2f339e90,~
-timer-event-end,"V8.CompileLazy",48044
-timer-event-start,"V8.ParseLazyMicroSeconds",48061
-timer-event-end,"V8.ParseLazyMicroSeconds",48119
-timer-event-start,"V8.CompileLazy",48126
-timer-event-start,"V8.CompileFullCode",48135
-timer-event-end,"V8.CompileFullCode",48188
-code-creation,LazyCompile,0,0x2b83a8c0,1601,"DoConstructRegExp native regexp.js:39",0x44222a28,~
-timer-event-end,"V8.CompileLazy",48203
-timer-event-start,"V8.ParseLazyMicroSeconds",48213
+code-creation,KeyedLoadIC,6,0x2b839080,91,
+timer-event-start,V8.ParseLazyMicroSeconds,47296
+timer-event-end,V8.ParseLazyMicroSeconds,47317
+timer-event-start,V8.CompileLazy,47323
+timer-event-start,V8.CompileFullCode,47329
+code-creation,CallInitialize,7,0x2b8390e0,178,args_count: 3
+timer-event-end,V8.CompileFullCode,47355
+code-creation,LazyCompile,0,0x2b8391a0,260, bsuite/kraken-once/stanford-crypto-ccm.js:192,0x2f33d920,~
+timer-event-end,V8.CompileLazy,47368
+code-creation,CallPreMonomorphic,7,0x2b8392c0,178,args_count: 3
+timer-event-start,V8.ParseLazyMicroSeconds,47390
+timer-event-end,V8.ParseLazyMicroSeconds,47409
+timer-event-start,V8.CompileLazy,47415
+timer-event-start,V8.CompileFullCode,47421
+timer-event-end,V8.CompileFullCode,47438
+code-creation,LazyCompile,0,0x2b839380,344,sjcl.test.TestCase.run bsuite/kraken-once/stanford-crypto-ccm.js:168,0x2f33b450,~
+timer-event-end,V8.CompileLazy,47452
+timer-event-start,V8.ParseLazyMicroSeconds,47462
+timer-event-end,V8.ParseLazyMicroSeconds,47476
+timer-event-start,V8.CompileLazy,47481
+timer-event-start,V8.CompileFullCode,47485
+timer-event-end,V8.CompileFullCode,47496
+code-creation,LazyCompile,0,0x2b8394e0,208,valueOf native date.js:361,0x44218984,~
+timer-event-end,V8.CompileLazy,47507
+timer-event-start,V8.ParseLazyMicroSeconds,47517
+timer-event-end,V8.ParseLazyMicroSeconds,47526
+timer-event-start,V8.CompileLazy,47531
+timer-event-start,V8.CompileFullCode,47536
+timer-event-end,V8.CompileFullCode,47545
+code-creation,LazyCompile,0,0x2b8395c0,192,browserUtil.pauseAndThen bsuite/kraken-once/stanford-crypto-ccm.js:47,0x2f33af70,~
+timer-event-end,V8.CompileLazy,47557
+timer-event-start,V8.ParseLazyMicroSeconds,47561
+timer-event-end,V8.ParseLazyMicroSeconds,47571
+timer-event-start,V8.CompileLazy,47576
+timer-event-start,V8.CompileFullCode,47581
+timer-event-end,V8.CompileFullCode,47591
+code-creation,LazyCompile,0,0x2b839680,192, bsuite/kraken-once/stanford-crypto-ccm.js:171,0x2f33dc70,~
+timer-event-end,V8.CompileLazy,47602
+timer-event-start,V8.ParseLazyMicroSeconds,47608
+timer-event-end,V8.ParseLazyMicroSeconds,47674
+timer-event-start,V8.CompileLazy,47681
+timer-event-start,V8.CompileFullCode,47693
+code-creation,Stub,2,0x2b839740,196,FastNewContextStub
+code-creation,Stub,12,0x2b839820,88,BinaryOpStub_DIV_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,47755
+code-creation,LazyCompile,0,0x2b839880,716, bsuite/kraken-once/stanford-crypto-ccm.js:7235,0x2f33b5d0,~
+timer-event-end,V8.CompileLazy,47768
+code-creation,Stub,12,0x2b839b60,196,BinaryOpStub_DIV_Alloc_Smi+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,47798
+timer-event-end,V8.ParseLazyMicroSeconds,47845
+timer-event-start,V8.CompileLazy,47851
+timer-event-start,V8.CompileFullCode,47863
+code-creation,Stub,12,0x2b839c40,88,BinaryOpStub_MUL_OverwriteRight_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,47917
+code-creation,LazyCompile,0,0x2b839ca0,2065, bsuite/kraken-once/stanford-crypto-ccm.js:7243,0x2f33de10,
+timer-event-end,V8.CompileLazy,47930
+code-creation,Stub,12,0x2b83a4c0,167,BinaryOpStub_MUL_OverwriteRight_Smi+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,47958
+timer-event-end,V8.ParseLazyMicroSeconds,47986
+timer-event-start,V8.CompileLazy,47992
+timer-event-start,V8.CompileFullCode,47998
+code-creation,Stub,12,0x2b83a580,88,BinaryOpStub_BIT_XOR_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,48031
+code-creation,LazyCompile,0,0x2b83a5e0,717,sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13,0x2f339e90,~
+timer-event-end,V8.CompileLazy,48044
+timer-event-start,V8.ParseLazyMicroSeconds,48061
+timer-event-end,V8.ParseLazyMicroSeconds,48119
+timer-event-start,V8.CompileLazy,48126
+timer-event-start,V8.CompileFullCode,48135
+timer-event-end,V8.CompileFullCode,48188
+code-creation,LazyCompile,0,0x2b83a8c0,1601,DoConstructRegExp native regexp.js:39,0x44222a28,~
+timer-event-end,V8.CompileLazy,48203
+timer-event-start,V8.ParseLazyMicroSeconds,48213
tick,0x80eabd3,48226,0,0xff81fb44,2,0x2b821ae3,0x2b83a6a4,0x2b839e4e,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.ParseLazyMicroSeconds",48288
-timer-event-start,"V8.CompileLazy",48309
-timer-event-start,"V8.CompileFullCode",48323
-timer-event-end,"V8.CompileFullCode",48372
-code-creation,LazyCompile,0,0x2b83af20,1284,"charAt native string.js:64",0x44215fa8,~
-timer-event-end,"V8.CompileLazy",48386
-code-creation,Stub,14,0x2b83b440,144,"CompareNilICStub(NullValue)(MonomorphicMap)"
-code-creation,Stub,5,0x2b83b4e0,97,"StringLengthStub"
-timer-event-start,"V8.ParseLazyMicroSeconds",48435
-timer-event-end,"V8.ParseLazyMicroSeconds",48536
-timer-event-start,"V8.CompileLazy",48543
-timer-event-start,"V8.CompileFullCode",48555
-code-creation,Stub,2,0x2b83b560,828,"SubStringStub"
-timer-event-end,"V8.CompileFullCode",48640
-code-creation,LazyCompile,0,0x2b83b8a0,2428,"replace native string.js:213",0x44216248,~
-timer-event-end,"V8.CompileLazy",48654
-code-creation,StoreIC,9,0x2b83c220,135,"lastIndex"
-code-creation,Stub,14,0x2b83c2c0,124,"CompareNilICStub(NullValue)(Null)"
-code-creation,RegExp,4,0x2b83c340,758,"\\s|0x"
-timer-event-start,"V8.ParseLazyMicroSeconds",48827
-timer-event-end,"V8.ParseLazyMicroSeconds",48873
-timer-event-start,"V8.CompileLazy",48879
-timer-event-start,"V8.CompileFullCode",48886
-timer-event-end,"V8.CompileFullCode",48916
-code-creation,LazyCompile,0,0x2b83c640,960,"substr native string.js:749",0x44216608,~
-timer-event-end,"V8.CompileLazy",48930
-code-creation,Stub,14,0x2b83ca00,144,"CompareNilICStub(NullValue)(MonomorphicMap)"
-code-creation,Stub,13,0x2b83caa0,122,"CompareICStub"
-timer-event-start,"V8.ParseLazyMicroSeconds",48959
-timer-event-end,"V8.ParseLazyMicroSeconds",49000
-timer-event-start,"V8.CompileLazy",49006
-timer-event-start,"V8.CompileFullCode",49012
-code-creation,Stub,12,0x2b83cb20,88,"BinaryOpStub_BIT_OR_Alloc_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b83cb80,88,"BinaryOpStub_SAR_Alloc_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",49062
-code-creation,LazyCompile,0,0x2b83cbe0,1096,"parseInt native v8natives.js:130",0x4421ec1c,~
-timer-event-end,"V8.CompileLazy",49075
-code-creation,Stub,12,0x2b83d040,399,"BinaryOpStub_BIT_XOR_Alloc_Number+Smi"
-code-creation,CallIC,7,0x2b83d1e0,147,"substr"
-code-creation,CallIC,7,0x2b83d280,129,"parseInt"
-code-creation,Stub,2,0x2b83d320,1433,"RecordWriteStub"
-code-creation,Stub,2,0x2b83d8c0,611,"RecordWriteStub"
-code-creation,CallIC,7,0x2b83db40,656,"push"
-timer-event-start,"V8.ParseLazyMicroSeconds",49192
-timer-event-end,"V8.ParseLazyMicroSeconds",49229
-timer-event-start,"V8.CompileLazy",49235
-timer-event-start,"V8.CompileFullCode",49242
-code-creation,Stub,12,0x2b83dde0,88,"BinaryOpStub_SAR_OverwriteRight_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b83de40,88,"BinaryOpStub_BIT_AND_OverwriteRight_Uninitialized+Uninitialized"
+timer-event-end,V8.ParseLazyMicroSeconds,48288
+timer-event-start,V8.CompileLazy,48309
+timer-event-start,V8.CompileFullCode,48323
+timer-event-end,V8.CompileFullCode,48372
+code-creation,LazyCompile,0,0x2b83af20,1284,charAt native string.js:64,0x44215fa8,~
+timer-event-end,V8.CompileLazy,48386
+code-creation,Stub,14,0x2b83b440,144,CompareNilICStub(NullValue)(MonomorphicMap)
+code-creation,Stub,5,0x2b83b4e0,97,StringLengthStub
+timer-event-start,V8.ParseLazyMicroSeconds,48435
+timer-event-end,V8.ParseLazyMicroSeconds,48536
+timer-event-start,V8.CompileLazy,48543
+timer-event-start,V8.CompileFullCode,48555
+code-creation,Stub,2,0x2b83b560,828,SubStringStub
+timer-event-end,V8.CompileFullCode,48640
+code-creation,LazyCompile,0,0x2b83b8a0,2428,replace native string.js:213,0x44216248,~
+timer-event-end,V8.CompileLazy,48654
+code-creation,StoreIC,9,0x2b83c220,135,lastIndex
+code-creation,Stub,14,0x2b83c2c0,124,CompareNilICStub(NullValue)(Null)
+code-creation,RegExp,4,0x2b83c340,758,\\s|0x
+timer-event-start,V8.ParseLazyMicroSeconds,48827
+timer-event-end,V8.ParseLazyMicroSeconds,48873
+timer-event-start,V8.CompileLazy,48879
+timer-event-start,V8.CompileFullCode,48886
+timer-event-end,V8.CompileFullCode,48916
+code-creation,LazyCompile,0,0x2b83c640,960,substr native string.js:749,0x44216608,~
+timer-event-end,V8.CompileLazy,48930
+code-creation,Stub,14,0x2b83ca00,144,CompareNilICStub(NullValue)(MonomorphicMap)
+code-creation,Stub,13,0x2b83caa0,122,CompareICStub
+timer-event-start,V8.ParseLazyMicroSeconds,48959
+timer-event-end,V8.ParseLazyMicroSeconds,49000
+timer-event-start,V8.CompileLazy,49006
+timer-event-start,V8.CompileFullCode,49012
+code-creation,Stub,12,0x2b83cb20,88,BinaryOpStub_BIT_OR_Alloc_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b83cb80,88,BinaryOpStub_SAR_Alloc_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,49062
+code-creation,LazyCompile,0,0x2b83cbe0,1096,parseInt native v8natives.js:130,0x4421ec1c,~
+timer-event-end,V8.CompileLazy,49075
+code-creation,Stub,12,0x2b83d040,399,BinaryOpStub_BIT_XOR_Alloc_Number+Smi
+code-creation,CallIC,7,0x2b83d1e0,147,substr
+code-creation,CallIC,7,0x2b83d280,129,parseInt
+code-creation,Stub,2,0x2b83d320,1433,RecordWriteStub
+code-creation,Stub,2,0x2b83d8c0,611,RecordWriteStub
+code-creation,CallIC,7,0x2b83db40,656,push
+timer-event-start,V8.ParseLazyMicroSeconds,49192
+timer-event-end,V8.ParseLazyMicroSeconds,49229
+timer-event-start,V8.CompileLazy,49235
+timer-event-start,V8.CompileFullCode,49242
+code-creation,Stub,12,0x2b83dde0,88,BinaryOpStub_SAR_OverwriteRight_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b83de40,88,BinaryOpStub_BIT_AND_OverwriteRight_Uninitialized+Uninitialized
tick,0x8250358,49284,0,0xff81fe84,2,0x2b83a871,0x2b839e4e,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.CompileFullCode",49346
-code-creation,LazyCompile,0,0x2b83dea0,536,"sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339b30,~
-timer-event-end,"V8.CompileLazy",49390
-timer-event-start,"V8.ParseLazyMicroSeconds",49402
-timer-event-end,"V8.ParseLazyMicroSeconds",49416
-timer-event-start,"V8.CompileLazy",49421
-timer-event-start,"V8.CompileFullCode",49426
-timer-event-end,"V8.CompileFullCode",49438
-code-creation,LazyCompile,0,0x2b83e0c0,248,"ceil native math.js:81",0x442222fc,~
-timer-event-end,"V8.CompileLazy",49450
-timer-event-start,"V8.ParseLazyMicroSeconds",49466
-timer-event-end,"V8.ParseLazyMicroSeconds",49529
-timer-event-start,"V8.CompileLazy",49535
-timer-event-start,"V8.CompileFullCode",49544
-code-creation,Stub,2,0x2b83e1c0,647,"FastCloneShallowArrayStub"
-code-creation,Stub,12,0x2b83e460,88,"BinaryOpStub_SHL_Alloc_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b83e4c0,88,"BinaryOpStub_BIT_AND_OverwriteLeft_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b83e520,88,"BinaryOpStub_BIT_XOR_OverwriteLeft_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",49801
-code-creation,LazyCompile,0,0x2b83e580,3002,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,~
-timer-event-end,"V8.CompileLazy",49816
-timer-event-start,"V8.ParseLazyMicroSeconds",49829
-timer-event-end,"V8.ParseLazyMicroSeconds",49886
-timer-event-start,"V8.CompileLazy",49893
-timer-event-start,"V8.CompileFullCode",49902
-code-creation,Stub,12,0x2b83f140,88,"BinaryOpStub_BIT_XOR_OverwriteRight_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",49958
-code-creation,LazyCompile,0,0x2b83f1a0,2528,"sjcl.cipher.aes.w bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339950,~
-timer-event-end,"V8.CompileLazy",49972
-code-creation,Stub,12,0x2b83fb80,167,"BinaryOpStub_SHL_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b83fc40,155,"BinaryOpStub_SAR_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b83fce0,167,"BinaryOpStub_MUL_OverwriteLeft_Smi+Smi"
-code-creation,Stub,12,0x2b83fda0,146,"BinaryOpStub_BIT_XOR_OverwriteLeft_Smi+Smi"
-code-creation,Stub,2,0x2b83fe40,1808,"RecordWriteStub"
-code-creation,Stub,2,0x2b840560,554,"KeyedStoreElementStub"
-code-creation,KeyedStoreIC,10,0x2b8407a0,91,""
-code-creation,Stub,12,0x2b840800,146,"BinaryOpStub_BIT_XOR_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b8408a0,146,"BinaryOpStub_BIT_XOR_OverwriteRight_Smi+Smi"
+timer-event-end,V8.CompileFullCode,49346
+code-creation,LazyCompile,0,0x2b83dea0,536,sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339b30,~
+timer-event-end,V8.CompileLazy,49390
+timer-event-start,V8.ParseLazyMicroSeconds,49402
+timer-event-end,V8.ParseLazyMicroSeconds,49416
+timer-event-start,V8.CompileLazy,49421
+timer-event-start,V8.CompileFullCode,49426
+timer-event-end,V8.CompileFullCode,49438
+code-creation,LazyCompile,0,0x2b83e0c0,248,ceil native math.js:81,0x442222fc,~
+timer-event-end,V8.CompileLazy,49450
+timer-event-start,V8.ParseLazyMicroSeconds,49466
+timer-event-end,V8.ParseLazyMicroSeconds,49529
+timer-event-start,V8.CompileLazy,49535
+timer-event-start,V8.CompileFullCode,49544
+code-creation,Stub,2,0x2b83e1c0,647,FastCloneShallowArrayStub
+code-creation,Stub,12,0x2b83e460,88,BinaryOpStub_SHL_Alloc_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b83e4c0,88,BinaryOpStub_BIT_AND_OverwriteLeft_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b83e520,88,BinaryOpStub_BIT_XOR_OverwriteLeft_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,49801
+code-creation,LazyCompile,0,0x2b83e580,3002,sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4,0x2f339830,~
+timer-event-end,V8.CompileLazy,49816
+timer-event-start,V8.ParseLazyMicroSeconds,49829
+timer-event-end,V8.ParseLazyMicroSeconds,49886
+timer-event-start,V8.CompileLazy,49893
+timer-event-start,V8.CompileFullCode,49902
+code-creation,Stub,12,0x2b83f140,88,BinaryOpStub_BIT_XOR_OverwriteRight_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,49958
+code-creation,LazyCompile,0,0x2b83f1a0,2528,sjcl.cipher.aes.w bsuite/kraken-once/stanford-crypto-ccm.js:6,0x2f339950,~
+timer-event-end,V8.CompileLazy,49972
+code-creation,Stub,12,0x2b83fb80,167,BinaryOpStub_SHL_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b83fc40,155,BinaryOpStub_SAR_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b83fce0,167,BinaryOpStub_MUL_OverwriteLeft_Smi+Smi
+code-creation,Stub,12,0x2b83fda0,146,BinaryOpStub_BIT_XOR_OverwriteLeft_Smi+Smi
+code-creation,Stub,2,0x2b83fe40,1808,RecordWriteStub
+code-creation,Stub,2,0x2b840560,554,KeyedStoreElementStub
+code-creation,KeyedStoreIC,10,0x2b8407a0,91,
+code-creation,Stub,12,0x2b840800,146,BinaryOpStub_BIT_XOR_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b8408a0,146,BinaryOpStub_BIT_XOR_OverwriteRight_Smi+Smi
tick,0x82d1790,50347,0,0xff81fdb8,0,0x2b83f388,0x2b83e64a,0x2b839e65,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,12,0x2b840940,383,"BinaryOpStub_BIT_XOR_OverwriteLeft_Smi+Int32"
-code-creation,Stub,12,0x2b840ac0,375,"BinaryOpStub_SHL_Alloc_Int32+Smi"
-code-creation,Stub,12,0x2b840c40,325,"BinaryOpStub_SHR_Alloc_Int32+Smi"
-code-creation,Stub,12,0x2b840da0,167,"BinaryOpStub_SHR_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b840e60,383,"BinaryOpStub_BIT_XOR_OverwriteLeft_Int32+Smi"
-code-creation,Stub,2,0x2b840fe0,794,"ElementsTransitionAndStoreStub"
-code-creation,KeyedStorePolymorphicIC,10,0x2b841300,107,""
-code-creation,KeyedStorePolymorphicIC,10,0x2b841300,107,"args_count: 0"
-code-creation,Stub,2,0x2b841380,204,"KeyedLoadElementStub"
-code-creation,KeyedLoadIC,6,0x2b841460,91,""
-code-creation,Stub,2,0x2b8414c0,405,"ElementsTransitionAndStoreStub"
-code-creation,Stub,2,0x2b841660,554,"KeyedStoreElementStub"
-code-creation,KeyedStorePolymorphicIC,10,0x2b8418a0,107,""
-code-creation,KeyedStorePolymorphicIC,10,0x2b8418a0,107,"args_count: 0"
-code-creation,Stub,12,0x2b841920,233,"BinaryOpStub_MUL_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b841a20,407,"BinaryOpStub_BIT_XOR_OverwriteLeft_Number+Smi"
-code-creation,Stub,12,0x2b841bc0,407,"BinaryOpStub_BIT_XOR_OverwriteLeft_Smi+Number"
-code-creation,Stub,12,0x2b841d60,355,"BinaryOpStub_BIT_XOR_OverwriteLeft_Int32+Int32"
-code-creation,Stub,12,0x2b841ee0,379,"BinaryOpStub_BIT_XOR_OverwriteLeft_Int32+Number"
-code-creation,Stub,15,0x2b842060,144,"ToBooleanStub(Undefined,Smi)"
-code-creation,Stub,2,0x2b842100,236,"KeyedStoreElementStub"
-code-creation,KeyedStoreIC,10,0x2b842200,91,""
-code-creation,CallIC,7,0x2b842260,136,"slice"
-code-creation,CallMegamorphic,7,0x2b842300,685,"args_count: 1"
-code-creation,Stub,12,0x2b8425c0,146,"BinaryOpStub_ADD_OverwriteLeft_Smi+Smi"
-code-creation,Stub,12,0x2b842660,148,"BinaryOpStub_SUB_Alloc_Smi+Smi"
-code-creation,Stub,2,0x2b842700,301,"KeyedLoadElementStub"
-code-creation,KeyedLoadIC,6,0x2b842840,91,""
+code-creation,Stub,12,0x2b840940,383,BinaryOpStub_BIT_XOR_OverwriteLeft_Smi+Int32
+code-creation,Stub,12,0x2b840ac0,375,BinaryOpStub_SHL_Alloc_Int32+Smi
+code-creation,Stub,12,0x2b840c40,325,BinaryOpStub_SHR_Alloc_Int32+Smi
+code-creation,Stub,12,0x2b840da0,167,BinaryOpStub_SHR_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b840e60,383,BinaryOpStub_BIT_XOR_OverwriteLeft_Int32+Smi
+code-creation,Stub,2,0x2b840fe0,794,ElementsTransitionAndStoreStub
+code-creation,KeyedStorePolymorphicIC,10,0x2b841300,107,
+code-creation,KeyedStorePolymorphicIC,10,0x2b841300,107,args_count: 0
+code-creation,Stub,2,0x2b841380,204,KeyedLoadElementStub
+code-creation,KeyedLoadIC,6,0x2b841460,91,
+code-creation,Stub,2,0x2b8414c0,405,ElementsTransitionAndStoreStub
+code-creation,Stub,2,0x2b841660,554,KeyedStoreElementStub
+code-creation,KeyedStorePolymorphicIC,10,0x2b8418a0,107,
+code-creation,KeyedStorePolymorphicIC,10,0x2b8418a0,107,args_count: 0
+code-creation,Stub,12,0x2b841920,233,BinaryOpStub_MUL_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b841a20,407,BinaryOpStub_BIT_XOR_OverwriteLeft_Number+Smi
+code-creation,Stub,12,0x2b841bc0,407,BinaryOpStub_BIT_XOR_OverwriteLeft_Smi+Number
+code-creation,Stub,12,0x2b841d60,355,BinaryOpStub_BIT_XOR_OverwriteLeft_Int32+Int32
+code-creation,Stub,12,0x2b841ee0,379,BinaryOpStub_BIT_XOR_OverwriteLeft_Int32+Number
+code-creation,Stub,15,0x2b842060,144,ToBooleanStub(Undefined,Smi)
+code-creation,Stub,2,0x2b842100,236,KeyedStoreElementStub
+code-creation,KeyedStoreIC,10,0x2b842200,91,
+code-creation,CallIC,7,0x2b842260,136,slice
+code-creation,CallMegamorphic,7,0x2b842300,685,args_count: 1
+code-creation,Stub,12,0x2b8425c0,146,BinaryOpStub_ADD_OverwriteLeft_Smi+Smi
+code-creation,Stub,12,0x2b842660,148,BinaryOpStub_SUB_Alloc_Smi+Smi
+code-creation,Stub,2,0x2b842700,301,KeyedLoadElementStub
+code-creation,KeyedLoadIC,6,0x2b842840,91,
tick,0x817d391,51438,0,0xff81f9a4,0,0x2b83e81e,0x2b839e65,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,12,0x2b8428a0,190,"BinaryOpStub_MOD_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b842960,146,"BinaryOpStub_BIT_AND_OverwriteLeft_Smi+Smi"
-code-creation,Stub,12,0x2b842a00,347,"BinaryOpStub_BIT_XOR_Alloc_Int32+Int32"
-code-creation,Stub,12,0x2b842b60,214,"BinaryOpStub_SHL_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b842c40,375,"BinaryOpStub_SAR_Alloc_Int32+Smi"
-code-creation,Stub,12,0x2b842dc0,375,"BinaryOpStub_BIT_AND_Alloc_Int32+Smi"
-code-creation,CallIC,7,0x2b842f40,147,"replace"
-code-creation,Stub,2,0x2b842fe0,76,"LoadFieldStub"
-code-creation,LoadIC,5,0x2b843040,93,"lastIndex"
-code-creation,Stub,2,0x2b8430a0,76,"LoadFieldStub"
-code-creation,LoadIC,5,0x2b843100,93,"global"
-code-creation,LoadIC,5,0x2b843160,103,"lastMatchInfoOverride"
-code-creation,LoadIC,5,0x2b8431e0,103,"lastMatchInfo"
-code-creation,LoadIC,5,0x2b843260,103,"sjcl"
-code-creation,LoadIC,5,0x2b8432e0,93,"bitArray"
-code-creation,CallIC,7,0x2b843340,113,"clamp"
-code-creation,LoadIC,5,0x2b8433c0,93,"length"
-code-creation,CallIC,7,0x2b843420,113,"ceil"
-code-creation,CallIC,7,0x2b8434a0,136,"slice"
-code-creation,Stub,12,0x2b843540,264,"BinaryOpStub_DIV_Alloc_Smi+Smi"
-code-creation,Stub,12,0x2b843660,407,"BinaryOpStub_SAR_OverwriteRight_Number+Smi"
-code-creation,Stub,12,0x2b843800,383,"BinaryOpStub_BIT_AND_OverwriteRight_Int32+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",51907
-timer-event-end,"V8.ParseLazyMicroSeconds",51924
-timer-event-start,"V8.CompileLazy",51930
-timer-event-start,"V8.CompileFullCode",51935
-code-creation,Stub,12,0x2b843980,88,"BinaryOpStub_SHL_OverwriteRight_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b8439e0,88,"BinaryOpStub_ADD_OverwriteRight_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",51966
-code-creation,LazyCompile,0,0x2b843a40,288,"sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339b90,~
-timer-event-end,"V8.CompileLazy",51979
-code-creation,Stub,12,0x2b843b60,375,"BinaryOpStub_BIT_OR_Alloc_Int32+Smi"
-code-creation,Stub,12,0x2b843ce0,247,"BinaryOpStub_MUL_Alloc_Smi+Number"
-code-creation,Stub,12,0x2b843de0,268,"BinaryOpStub_ADD_OverwriteRight_Int32+Number"
-code-creation,Stub,2,0x2b843f00,240,"KeyedStoreElementStub"
-code-creation,KeyedStoreIC,10,0x2b844000,91,""
-code-creation,CallMiss,7,0x2b844060,178,"args_count: 3"
-code-creation,CallIC,7,0x2b844120,113,"partial"
-timer-event-start,"V8.ParseLazyMicroSeconds",52165
-timer-event-end,"V8.ParseLazyMicroSeconds",52210
-timer-event-start,"V8.CompileLazy",52217
-timer-event-start,"V8.CompileFullCode",52225
-code-creation,Stub,12,0x2b8441a0,88,"BinaryOpStub_SHR_OverwriteRight_Uninitialized+Uninitialized"
-code-creation,CallInitialize,7,0x2b844200,178,"args_count: 6"
-timer-event-end,"V8.CompileFullCode",52278
-code-creation,LazyCompile,0,0x2b8442c0,1057,"sjcl.mode.ccm.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:19",0x2f33a250,~
-timer-event-end,"V8.CompileLazy",52296
-timer-event-start,"V8.ParseLazyMicroSeconds",52306
-timer-event-end,"V8.ParseLazyMicroSeconds",52324
-timer-event-start,"V8.CompileLazy",52329
-timer-event-start,"V8.CompileFullCode",52334
-timer-event-end,"V8.CompileFullCode",52349
-code-creation,LazyCompile,0,0x2b844700,336,"sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339ad0,~
-timer-event-end,"V8.CompileLazy",52362
-timer-event-start,"V8.ParseLazyMicroSeconds",52375
-timer-event-end,"V8.ParseLazyMicroSeconds",52388
-timer-event-start,"V8.CompileLazy",52392
-timer-event-start,"V8.CompileFullCode",52398
-timer-event-end,"V8.CompileFullCode",52420
-code-creation,LazyCompile,0,0x2b844860,236,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,~
-timer-event-end,"V8.CompileLazy",52433
-code-creation,Stub,12,0x2b844960,264,"BinaryOpStub_DIV_Alloc_Int32+Number"
-timer-event-start,"V8.ParseLazyMicroSeconds",52455
+code-creation,Stub,12,0x2b8428a0,190,BinaryOpStub_MOD_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b842960,146,BinaryOpStub_BIT_AND_OverwriteLeft_Smi+Smi
+code-creation,Stub,12,0x2b842a00,347,BinaryOpStub_BIT_XOR_Alloc_Int32+Int32
+code-creation,Stub,12,0x2b842b60,214,BinaryOpStub_SHL_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b842c40,375,BinaryOpStub_SAR_Alloc_Int32+Smi
+code-creation,Stub,12,0x2b842dc0,375,BinaryOpStub_BIT_AND_Alloc_Int32+Smi
+code-creation,CallIC,7,0x2b842f40,147,replace
+code-creation,Stub,2,0x2b842fe0,76,LoadFieldStub
+code-creation,LoadIC,5,0x2b843040,93,lastIndex
+code-creation,Stub,2,0x2b8430a0,76,LoadFieldStub
+code-creation,LoadIC,5,0x2b843100,93,global
+code-creation,LoadIC,5,0x2b843160,103,lastMatchInfoOverride
+code-creation,LoadIC,5,0x2b8431e0,103,lastMatchInfo
+code-creation,LoadIC,5,0x2b843260,103,sjcl
+code-creation,LoadIC,5,0x2b8432e0,93,bitArray
+code-creation,CallIC,7,0x2b843340,113,clamp
+code-creation,LoadIC,5,0x2b8433c0,93,length
+code-creation,CallIC,7,0x2b843420,113,ceil
+code-creation,CallIC,7,0x2b8434a0,136,slice
+code-creation,Stub,12,0x2b843540,264,BinaryOpStub_DIV_Alloc_Smi+Smi
+code-creation,Stub,12,0x2b843660,407,BinaryOpStub_SAR_OverwriteRight_Number+Smi
+code-creation,Stub,12,0x2b843800,383,BinaryOpStub_BIT_AND_OverwriteRight_Int32+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,51907
+timer-event-end,V8.ParseLazyMicroSeconds,51924
+timer-event-start,V8.CompileLazy,51930
+timer-event-start,V8.CompileFullCode,51935
+code-creation,Stub,12,0x2b843980,88,BinaryOpStub_SHL_OverwriteRight_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b8439e0,88,BinaryOpStub_ADD_OverwriteRight_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,51966
+code-creation,LazyCompile,0,0x2b843a40,288,sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339b90,~
+timer-event-end,V8.CompileLazy,51979
+code-creation,Stub,12,0x2b843b60,375,BinaryOpStub_BIT_OR_Alloc_Int32+Smi
+code-creation,Stub,12,0x2b843ce0,247,BinaryOpStub_MUL_Alloc_Smi+Number
+code-creation,Stub,12,0x2b843de0,268,BinaryOpStub_ADD_OverwriteRight_Int32+Number
+code-creation,Stub,2,0x2b843f00,240,KeyedStoreElementStub
+code-creation,KeyedStoreIC,10,0x2b844000,91,
+code-creation,CallMiss,7,0x2b844060,178,args_count: 3
+code-creation,CallIC,7,0x2b844120,113,partial
+timer-event-start,V8.ParseLazyMicroSeconds,52165
+timer-event-end,V8.ParseLazyMicroSeconds,52210
+timer-event-start,V8.CompileLazy,52217
+timer-event-start,V8.CompileFullCode,52225
+code-creation,Stub,12,0x2b8441a0,88,BinaryOpStub_SHR_OverwriteRight_Uninitialized+Uninitialized
+code-creation,CallInitialize,7,0x2b844200,178,args_count: 6
+timer-event-end,V8.CompileFullCode,52278
+code-creation,LazyCompile,0,0x2b8442c0,1057,sjcl.mode.ccm.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:19,0x2f33a250,~
+timer-event-end,V8.CompileLazy,52296
+timer-event-start,V8.ParseLazyMicroSeconds,52306
+timer-event-end,V8.ParseLazyMicroSeconds,52324
+timer-event-start,V8.CompileLazy,52329
+timer-event-start,V8.CompileFullCode,52334
+timer-event-end,V8.CompileFullCode,52349
+code-creation,LazyCompile,0,0x2b844700,336,sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339ad0,~
+timer-event-end,V8.CompileLazy,52362
+timer-event-start,V8.ParseLazyMicroSeconds,52375
+timer-event-end,V8.ParseLazyMicroSeconds,52388
+timer-event-start,V8.CompileLazy,52392
+timer-event-start,V8.CompileFullCode,52398
+timer-event-end,V8.CompileFullCode,52420
+code-creation,LazyCompile,0,0x2b844860,236,sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339bf0,~
+timer-event-end,V8.CompileLazy,52433
+code-creation,Stub,12,0x2b844960,264,BinaryOpStub_DIV_Alloc_Int32+Number
+timer-event-start,V8.ParseLazyMicroSeconds,52455
tick,0x8092495,52475,0,0xff81fcd0,2,0x2b844833,0x2b84437f,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.ParseLazyMicroSeconds",52539
-timer-event-start,"V8.CompileLazy",52558
-timer-event-start,"V8.CompileFullCode",52577
-timer-event-end,"V8.CompileFullCode",52590
-code-creation,LazyCompile,0,0x2b844a80,248,"round native math.js:193",0x4422265c,~
-timer-event-end,"V8.CompileLazy",52602
-code-creation,CallIC,7,0x2b844b80,113,"getPartial"
-code-creation,Stub,12,0x2b844c00,238,"BinaryOpStub_DIV_Alloc_Number+Number"
-code-creation,CallIC,7,0x2b844d00,113,"round"
-code-creation,Stub,12,0x2b844d80,167,"BinaryOpStub_SHR_OverwriteRight_Smi+Smi"
-code-creation,CallPreMonomorphic,7,0x2b844e40,178,"args_count: 6"
-timer-event-start,"V8.ParseLazyMicroSeconds",52676
-timer-event-end,"V8.ParseLazyMicroSeconds",52738
-timer-event-start,"V8.CompileLazy",52745
-timer-event-start,"V8.CompileFullCode",52753
-code-creation,Stub,12,0x2b844f00,88,"BinaryOpStub_SHL_OverwriteLeft_Uninitialized+Uninitialized"
-code-creation,Stub,12,0x2b844f60,88,"BinaryOpStub_BIT_OR_OverwriteRight_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",52818
-code-creation,LazyCompile,0,0x2b844fc0,1838,"sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20",0x2f33a310,~
-timer-event-end,"V8.CompileLazy",52833
-code-creation,Stub,13,0x2b845700,241,"CompareICStub"
-code-creation,Stub,12,0x2b845800,167,"BinaryOpStub_SHL_OverwriteLeft_Smi+Smi"
-code-creation,Stub,12,0x2b8458c0,145,"BinaryOpStub_BIT_OR_OverwriteRight_Smi+Smi"
-code-creation,Stub,12,0x2b845960,145,"BinaryOpStub_BIT_OR_OverwriteLeft_Smi+Smi"
-code-creation,Stub,12,0x2b845a00,167,"BinaryOpStub_SHL_OverwriteRight_Smi+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",52912
-timer-event-end,"V8.ParseLazyMicroSeconds",52936
-timer-event-start,"V8.CompileLazy",52941
-timer-event-start,"V8.CompileFullCode",52947
-timer-event-end,"V8.CompileFullCode",52966
-code-creation,LazyCompile,0,0x2b845ac0,560,"sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a70,~
-timer-event-end,"V8.CompileLazy",52980
-code-creation,Stub,12,0x2b845d00,399,"BinaryOpStub_BIT_OR_Alloc_Number+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",53013
-timer-event-end,"V8.ParseLazyMicroSeconds",53049
-timer-event-start,"V8.CompileLazy",53055
-timer-event-start,"V8.CompileFullCode",53062
-timer-event-end,"V8.CompileFullCode",53095
-code-creation,LazyCompile,0,0x2b845ea0,1126,"sjcl.bitArray.P bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339cb0,~
-timer-event-end,"V8.CompileLazy",53110
-code-creation,Stub,13,0x2b846320,485,"CompareICStub"
-code-creation,Stub,12,0x2b846520,383,"BinaryOpStub_BIT_OR_OverwriteRight_Int32+Smi"
-code-creation,Stub,12,0x2b8466a0,383,"BinaryOpStub_SHL_OverwriteRight_Int32+Smi"
-code-creation,Stub,12,0x2b846820,407,"BinaryOpStub_BIT_OR_OverwriteRight_Number+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",53194
-timer-event-end,"V8.ParseLazyMicroSeconds",53206
-timer-event-start,"V8.CompileLazy",53211
-timer-event-start,"V8.CompileFullCode",53216
-timer-event-end,"V8.CompileFullCode",53226
-code-creation,LazyCompile,0,0x2b8469c0,184,"sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339890,~
-timer-event-end,"V8.CompileLazy",53243
-timer-event-start,"V8.ParseLazyMicroSeconds",53249
-timer-event-end,"V8.ParseLazyMicroSeconds",53325
-timer-event-start,"V8.CompileLazy",53332
-timer-event-start,"V8.CompileFullCode",53343
-code-creation,Stub,12,0x2b846a80,88,"BinaryOpStub_SUB_OverwriteLeft_Uninitialized+Uninitialized"
+timer-event-end,V8.ParseLazyMicroSeconds,52539
+timer-event-start,V8.CompileLazy,52558
+timer-event-start,V8.CompileFullCode,52577
+timer-event-end,V8.CompileFullCode,52590
+code-creation,LazyCompile,0,0x2b844a80,248,round native math.js:193,0x4422265c,~
+timer-event-end,V8.CompileLazy,52602
+code-creation,CallIC,7,0x2b844b80,113,getPartial
+code-creation,Stub,12,0x2b844c00,238,BinaryOpStub_DIV_Alloc_Number+Number
+code-creation,CallIC,7,0x2b844d00,113,round
+code-creation,Stub,12,0x2b844d80,167,BinaryOpStub_SHR_OverwriteRight_Smi+Smi
+code-creation,CallPreMonomorphic,7,0x2b844e40,178,args_count: 6
+timer-event-start,V8.ParseLazyMicroSeconds,52676
+timer-event-end,V8.ParseLazyMicroSeconds,52738
+timer-event-start,V8.CompileLazy,52745
+timer-event-start,V8.CompileFullCode,52753
+code-creation,Stub,12,0x2b844f00,88,BinaryOpStub_SHL_OverwriteLeft_Uninitialized+Uninitialized
+code-creation,Stub,12,0x2b844f60,88,BinaryOpStub_BIT_OR_OverwriteRight_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,52818
+code-creation,LazyCompile,0,0x2b844fc0,1838,sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20,0x2f33a310,~
+timer-event-end,V8.CompileLazy,52833
+code-creation,Stub,13,0x2b845700,241,CompareICStub
+code-creation,Stub,12,0x2b845800,167,BinaryOpStub_SHL_OverwriteLeft_Smi+Smi
+code-creation,Stub,12,0x2b8458c0,145,BinaryOpStub_BIT_OR_OverwriteRight_Smi+Smi
+code-creation,Stub,12,0x2b845960,145,BinaryOpStub_BIT_OR_OverwriteLeft_Smi+Smi
+code-creation,Stub,12,0x2b845a00,167,BinaryOpStub_SHL_OverwriteRight_Smi+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,52912
+timer-event-end,V8.ParseLazyMicroSeconds,52936
+timer-event-start,V8.CompileLazy,52941
+timer-event-start,V8.CompileFullCode,52947
+timer-event-end,V8.CompileFullCode,52966
+code-creation,LazyCompile,0,0x2b845ac0,560,sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339a70,~
+timer-event-end,V8.CompileLazy,52980
+code-creation,Stub,12,0x2b845d00,399,BinaryOpStub_BIT_OR_Alloc_Number+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,53013
+timer-event-end,V8.ParseLazyMicroSeconds,53049
+timer-event-start,V8.CompileLazy,53055
+timer-event-start,V8.CompileFullCode,53062
+timer-event-end,V8.CompileFullCode,53095
+code-creation,LazyCompile,0,0x2b845ea0,1126,sjcl.bitArray.P bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339cb0,~
+timer-event-end,V8.CompileLazy,53110
+code-creation,Stub,13,0x2b846320,485,CompareICStub
+code-creation,Stub,12,0x2b846520,383,BinaryOpStub_BIT_OR_OverwriteRight_Int32+Smi
+code-creation,Stub,12,0x2b8466a0,383,BinaryOpStub_SHL_OverwriteRight_Int32+Smi
+code-creation,Stub,12,0x2b846820,407,BinaryOpStub_BIT_OR_OverwriteRight_Number+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,53194
+timer-event-end,V8.ParseLazyMicroSeconds,53206
+timer-event-start,V8.CompileLazy,53211
+timer-event-start,V8.CompileFullCode,53216
+timer-event-end,V8.CompileFullCode,53226
+code-creation,LazyCompile,0,0x2b8469c0,184,sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6,0x2f339890,~
+timer-event-end,V8.CompileLazy,53243
+timer-event-start,V8.ParseLazyMicroSeconds,53249
+timer-event-end,V8.ParseLazyMicroSeconds,53325
+timer-event-start,V8.CompileLazy,53332
+timer-event-start,V8.CompileFullCode,53343
+code-creation,Stub,12,0x2b846a80,88,BinaryOpStub_SUB_OverwriteLeft_Uninitialized+Uninitialized
tick,0x8376055,53535,0,0x81bab7d,2,0x2b846a46,0x2b845312,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,2,0x2b846ae0,683,"FastCloneShallowArrayStub"
-code-creation,Stub,11,0x2b846da0,132,"UnaryOpStubMinus(None)"
-timer-event-end,"V8.CompileFullCode",53746
-code-creation,LazyCompile,0,0x2b846e40,3418,"sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7",0x2f3399b0,~
-timer-event-end,"V8.CompileLazy",53763
-code-creation,Stub,12,0x2b847ba0,375,"BinaryOpStub_BIT_XOR_Alloc_Smi+Int32"
-code-creation,Stub,12,0x2b847d20,375,"BinaryOpStub_BIT_XOR_Alloc_Int32+Smi"
-code-creation,Stub,12,0x2b847ea0,148,"BinaryOpStub_SUB_OverwriteLeft_Smi+Smi"
-code-creation,Stub,15,0x2b847f40,164,"ToBooleanStub(Smi,HeapNumber)"
-code-creation,CallMiss,7,0x2b848000,178,"args_count: 4"
-code-creation,CallIC,7,0x2b8480c0,113,"P"
-code-creation,LoadIC,5,0x2b848140,103,"undefined"
-timer-event-start,"V8.ParseLazyMicroSeconds",54007
-timer-event-end,"V8.ParseLazyMicroSeconds",54027
-timer-event-start,"V8.CompileLazy",54032
-timer-event-start,"V8.CompileFullCode",54038
-timer-event-end,"V8.CompileFullCode",54052
-code-creation,LazyCompile,0,0x2b8481c0,388,"sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11",0x2f339d10,~
-timer-event-end,"V8.CompileLazy",54065
-code-creation,CallIC,7,0x2b848360,132,"H"
-code-creation,LoadIC,5,0x2b848400,93,"a"
-code-creation,Stub,2,0x2b848460,95,"h"
-code-creation,LoadIC,5,0x2b8484c0,93,"h"
-code-creation,CallIC,7,0x2b848520,132,"encrypt"
-code-creation,Stub,12,0x2b8485c0,371,"BinaryOpStub_BIT_XOR_Alloc_Int32+Number"
-timer-event-start,"V8.ParseLazyMicroSeconds",54281
-timer-event-end,"V8.ParseLazyMicroSeconds",54332
-timer-event-start,"V8.CompileLazy",54339
-timer-event-start,"V8.CompileFullCode",54347
-code-creation,Stub,2,0x2b848740,663,"FastCloneShallowArrayStub"
-timer-event-end,"V8.CompileFullCode",54560
-code-creation,LazyCompile,0,0x2b8489e0,1221,"sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21",0x2f33a370,~
-timer-event-end,"V8.CompileLazy",54577
+code-creation,Stub,2,0x2b846ae0,683,FastCloneShallowArrayStub
+code-creation,Stub,11,0x2b846da0,132,UnaryOpStubMinus(None)
+timer-event-end,V8.CompileFullCode,53746
+code-creation,LazyCompile,0,0x2b846e40,3418,sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7,0x2f3399b0,~
+timer-event-end,V8.CompileLazy,53763
+code-creation,Stub,12,0x2b847ba0,375,BinaryOpStub_BIT_XOR_Alloc_Smi+Int32
+code-creation,Stub,12,0x2b847d20,375,BinaryOpStub_BIT_XOR_Alloc_Int32+Smi
+code-creation,Stub,12,0x2b847ea0,148,BinaryOpStub_SUB_OverwriteLeft_Smi+Smi
+code-creation,Stub,15,0x2b847f40,164,ToBooleanStub(Smi,HeapNumber)
+code-creation,CallMiss,7,0x2b848000,178,args_count: 4
+code-creation,CallIC,7,0x2b8480c0,113,P
+code-creation,LoadIC,5,0x2b848140,103,undefined
+timer-event-start,V8.ParseLazyMicroSeconds,54007
+timer-event-end,V8.ParseLazyMicroSeconds,54027
+timer-event-start,V8.CompileLazy,54032
+timer-event-start,V8.CompileFullCode,54038
+timer-event-end,V8.CompileFullCode,54052
+code-creation,LazyCompile,0,0x2b8481c0,388,sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11,0x2f339d10,~
+timer-event-end,V8.CompileLazy,54065
+code-creation,CallIC,7,0x2b848360,132,H
+code-creation,LoadIC,5,0x2b848400,93,a
+code-creation,Stub,2,0x2b848460,95,h
+code-creation,LoadIC,5,0x2b8484c0,93,h
+code-creation,CallIC,7,0x2b848520,132,encrypt
+code-creation,Stub,12,0x2b8485c0,371,BinaryOpStub_BIT_XOR_Alloc_Int32+Number
+timer-event-start,V8.ParseLazyMicroSeconds,54281
+timer-event-end,V8.ParseLazyMicroSeconds,54332
+timer-event-start,V8.CompileLazy,54339
+timer-event-start,V8.CompileFullCode,54347
+code-creation,Stub,2,0x2b848740,663,FastCloneShallowArrayStub
+timer-event-end,V8.CompileFullCode,54560
+code-creation,LazyCompile,0,0x2b8489e0,1221,sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21,0x2f33a370,~
+timer-event-end,V8.CompileLazy,54577
tick,0x82f2dd2,54590,0,0xff81f67c,2,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,12,0x2b848ec0,371,"BinaryOpStub_BIT_XOR_Alloc_Number+Int32"
-timer-event-start,"V8.ParseLazyMicroSeconds",54663
-timer-event-end,"V8.ParseLazyMicroSeconds",54685
-timer-event-start,"V8.CompileLazy",54691
-timer-event-start,"V8.CompileFullCode",54697
-code-creation,Stub,12,0x2b849040,88,"BinaryOpStub_SUB_OverwriteRight_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",54724
-code-creation,LazyCompile,0,0x2b8490a0,392,"sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a10,~
-timer-event-end,"V8.CompileLazy",54737
-code-creation,Stub,12,0x2b849240,148,"BinaryOpStub_SUB_OverwriteRight_Smi+Smi"
-code-creation,Stub,13,0x2b8492e0,494,"CompareICStub"
-code-creation,CallMegamorphic,7,0x2b8494e0,685,"args_count: 2"
-code-creation,Stub,12,0x2b8497a0,246,"BinaryOpStub_ADD_Alloc_Number+Smi"
-code-creation,LoadPolymorphicIC,5,0x2b8498a0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b849920,105,"length"
-timer-event-start,"V8.ParseLazyMicroSeconds",54933
-timer-event-end,"V8.ParseLazyMicroSeconds",54956
-timer-event-start,"V8.CompileLazy",54962
-timer-event-start,"V8.CompileFullCode",54968
-timer-event-end,"V8.CompileFullCode",54989
-code-creation,LazyCompile,0,0x2b8499a0,585,"sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339c50,~
-timer-event-end,"V8.CompileLazy",55003
-code-creation,Stub,12,0x2b849c00,395,"BinaryOpStub_BIT_XOR_Alloc_Number+Number"
-code-creation,Stub,12,0x2b849da0,133,"BinaryOpStub_ADD_Alloc_String+Smi"
-code-creation,Stub,12,0x2b849e40,133,"BinaryOpStub_ADD_OverwriteLeft_String+Smi"
-timer-event-start,"V8.ParseLazyMicroSeconds",55131
-timer-event-end,"V8.ParseLazyMicroSeconds",55149
-timer-event-start,"V8.CompileLazy",55155
-timer-event-start,"V8.CompileFullCode",55160
-timer-event-end,"V8.CompileFullCode",55177
-code-creation,LazyCompile,0,0x2b849ee0,292,"sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131",0x2f33b390,~
-timer-event-end,"V8.CompileLazy",55190
-timer-event-start,"V8.ParseLazyMicroSeconds",55198
-timer-event-end,"V8.ParseLazyMicroSeconds",55206
-timer-event-start,"V8.CompileLazy",55211
-timer-event-start,"V8.CompileFullCode",55216
-timer-event-end,"V8.CompileFullCode",55228
-code-creation,LazyCompile,0,0x2b84a020,208,"sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110",0x2f33b270,~
-timer-event-end,"V8.CompileLazy",55240
-code-creation,StoreIC,9,0x2b84a100,103,"passes"
-timer-event-start,"V8.ParseLazyMicroSeconds",55261
-timer-event-end,"V8.ParseLazyMicroSeconds",55307
-timer-event-start,"V8.CompileLazy",55313
-timer-event-start,"V8.CompileFullCode",55321
-code-creation,Stub,12,0x2b84a180,88,"BinaryOpStub_DIV_OverwriteLeft_Uninitialized+Uninitialized"
-timer-event-end,"V8.CompileFullCode",55365
-code-creation,LazyCompile,0,0x2b84a1e0,1229,"sjcl.mode.ccm.decrypt bsuite/kraken-once/stanford-crypto-ccm.js:19",0x2f33a2b0,~
-timer-event-end,"V8.CompileLazy",55379
-code-creation,CallIC,7,0x2b84a6c0,136,"slice"
-code-creation,CallIC,7,0x2b84a760,128,"P"
-code-creation,LoadPolymorphicIC,5,0x2b84a7e0,105,"length"
-code-creation,KeyedLoadPolymorphicIC,6,0x2b84a860,105,""
-code-creation,CallIC,7,0x2b84a8e0,656,"push"
-code-creation,Stub,12,0x2b84ab80,407,"BinaryOpStub_SHL_OverwriteRight_Number+Smi"
-code-creation,LoadPolymorphicIC,5,0x2b84ad20,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b84ada0,105,"length"
-code-creation,CallIC,7,0x2b84ae20,136,"slice"
-code-creation,Stub,12,0x2b84aec0,196,"BinaryOpStub_DIV_OverwriteLeft_Smi+Smi"
-code-creation,Stub,2,0x2b84afa0,70,"k"
-code-creation,LoadIC,5,0x2b84b000,93,"k"
-code-creation,CallIC,7,0x2b84b060,113,"bitLength"
-code-creation,CallIC,7,0x2b84b0e0,128,"partial"
-code-creation,CallIC,7,0x2b84b160,113,"concat"
-code-creation,LoadPolymorphicIC,5,0x2b84b1e0,105,"length"
-code-creation,CallIC,7,0x2b84b260,136,"concat"
-code-creation,CallIC,7,0x2b84b300,113,"bitSlice"
-code-creation,CallIC,7,0x2b84b380,136,"concat"
+code-creation,Stub,12,0x2b848ec0,371,BinaryOpStub_BIT_XOR_Alloc_Number+Int32
+timer-event-start,V8.ParseLazyMicroSeconds,54663
+timer-event-end,V8.ParseLazyMicroSeconds,54685
+timer-event-start,V8.CompileLazy,54691
+timer-event-start,V8.CompileFullCode,54697
+code-creation,Stub,12,0x2b849040,88,BinaryOpStub_SUB_OverwriteRight_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,54724
+code-creation,LazyCompile,0,0x2b8490a0,392,sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339a10,~
+timer-event-end,V8.CompileLazy,54737
+code-creation,Stub,12,0x2b849240,148,BinaryOpStub_SUB_OverwriteRight_Smi+Smi
+code-creation,Stub,13,0x2b8492e0,494,CompareICStub
+code-creation,CallMegamorphic,7,0x2b8494e0,685,args_count: 2
+code-creation,Stub,12,0x2b8497a0,246,BinaryOpStub_ADD_Alloc_Number+Smi
+code-creation,LoadPolymorphicIC,5,0x2b8498a0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b849920,105,length
+timer-event-start,V8.ParseLazyMicroSeconds,54933
+timer-event-end,V8.ParseLazyMicroSeconds,54956
+timer-event-start,V8.CompileLazy,54962
+timer-event-start,V8.CompileFullCode,54968
+timer-event-end,V8.CompileFullCode,54989
+code-creation,LazyCompile,0,0x2b8499a0,585,sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339c50,~
+timer-event-end,V8.CompileLazy,55003
+code-creation,Stub,12,0x2b849c00,395,BinaryOpStub_BIT_XOR_Alloc_Number+Number
+code-creation,Stub,12,0x2b849da0,133,BinaryOpStub_ADD_Alloc_String+Smi
+code-creation,Stub,12,0x2b849e40,133,BinaryOpStub_ADD_OverwriteLeft_String+Smi
+timer-event-start,V8.ParseLazyMicroSeconds,55131
+timer-event-end,V8.ParseLazyMicroSeconds,55149
+timer-event-start,V8.CompileLazy,55155
+timer-event-start,V8.CompileFullCode,55160
+timer-event-end,V8.CompileFullCode,55177
+code-creation,LazyCompile,0,0x2b849ee0,292,sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131,0x2f33b390,~
+timer-event-end,V8.CompileLazy,55190
+timer-event-start,V8.ParseLazyMicroSeconds,55198
+timer-event-end,V8.ParseLazyMicroSeconds,55206
+timer-event-start,V8.CompileLazy,55211
+timer-event-start,V8.CompileFullCode,55216
+timer-event-end,V8.CompileFullCode,55228
+code-creation,LazyCompile,0,0x2b84a020,208,sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110,0x2f33b270,~
+timer-event-end,V8.CompileLazy,55240
+code-creation,StoreIC,9,0x2b84a100,103,passes
+timer-event-start,V8.ParseLazyMicroSeconds,55261
+timer-event-end,V8.ParseLazyMicroSeconds,55307
+timer-event-start,V8.CompileLazy,55313
+timer-event-start,V8.CompileFullCode,55321
+code-creation,Stub,12,0x2b84a180,88,BinaryOpStub_DIV_OverwriteLeft_Uninitialized+Uninitialized
+timer-event-end,V8.CompileFullCode,55365
+code-creation,LazyCompile,0,0x2b84a1e0,1229,sjcl.mode.ccm.decrypt bsuite/kraken-once/stanford-crypto-ccm.js:19,0x2f33a2b0,~
+timer-event-end,V8.CompileLazy,55379
+code-creation,CallIC,7,0x2b84a6c0,136,slice
+code-creation,CallIC,7,0x2b84a760,128,P
+code-creation,LoadPolymorphicIC,5,0x2b84a7e0,105,length
+code-creation,KeyedLoadPolymorphicIC,6,0x2b84a860,105,
+code-creation,CallIC,7,0x2b84a8e0,656,push
+code-creation,Stub,12,0x2b84ab80,407,BinaryOpStub_SHL_OverwriteRight_Number+Smi
+code-creation,LoadPolymorphicIC,5,0x2b84ad20,105,length
+code-creation,LoadPolymorphicIC,5,0x2b84ada0,105,length
+code-creation,CallIC,7,0x2b84ae20,136,slice
+code-creation,Stub,12,0x2b84aec0,196,BinaryOpStub_DIV_OverwriteLeft_Smi+Smi
+code-creation,Stub,2,0x2b84afa0,70,k
+code-creation,LoadIC,5,0x2b84b000,93,k
+code-creation,CallIC,7,0x2b84b060,113,bitLength
+code-creation,CallIC,7,0x2b84b0e0,128,partial
+code-creation,CallIC,7,0x2b84b160,113,concat
+code-creation,LoadPolymorphicIC,5,0x2b84b1e0,105,length
+code-creation,CallIC,7,0x2b84b260,136,concat
+code-creation,CallIC,7,0x2b84b300,113,bitSlice
+code-creation,CallIC,7,0x2b84b380,136,concat
tick,0x8118ca4,55654,0,0x90ec418,0,0x2b848b2e,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,CallIC,7,0x2b84b420,136,"slice"
-code-creation,StoreIC,9,0x2b84b4c0,138,"tag"
-code-creation,StoreIC,9,0x2b84b560,138,"data"
-code-creation,Stub,12,0x2b84b600,214,"BinaryOpStub_SHL_OverwriteRight_Smi+Smi"
-code-creation,LoadPolymorphicIC,5,0x2b84b6e0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b84b760,105,"length"
-code-creation,CallMiss,7,0x2b84b7e0,178,"args_count: 0"
-code-creation,CallIC,7,0x2b84b8a0,132,"pass"
-code-creation,Stub,2,0x2b84b940,76,"LoadFieldStub"
-code-creation,LoadIC,5,0x2b84b9a0,93,"passes"
-code-creation,LoadIC,5,0x2b84ba00,93,"key"
-code-creation,LoadIC,5,0x2b84ba60,93,"cipher"
-code-creation,Stub,2,0x2b84bac0,70,"aes"
-code-creation,LoadIC,5,0x2b84bb20,93,"aes"
-code-creation,CallIC,7,0x2b84bb80,113,"toBits"
-code-creation,Stub,2,0x2b84bc00,95,"h"
-code-creation,LoadIC,5,0x2b84bc60,93,"h"
-code-creation,StoreIC,9,0x2b84bcc0,246,"a"
-code-creation,LoadIC,5,0x2b84bdc0,93,"iv"
-code-creation,Stub,2,0x2b84be20,76,"LoadFieldStub"
-code-creation,LoadIC,5,0x2b84be80,93,"adata"
-code-creation,LoadIC,5,0x2b84bee0,93,"pt"
-code-creation,Stub,2,0x2b84bf40,76,"LoadFieldStub"
-code-creation,LoadIC,5,0x2b84bfa0,93,"ct"
-code-creation,LoadIC,5,0x2b84c000,93,"tag"
-code-creation,LoadIC,5,0x2b84c060,93,"mode"
-code-creation,LoadIC,5,0x2b84c0c0,93,"ccm"
-code-creation,CallMiss,7,0x2b84c120,178,"args_count: 5"
-code-creation,CallIC,7,0x2b84c1e0,113,"encrypt"
-code-creation,CallMiss,7,0x2b84c260,178,"args_count: 6"
-code-creation,CallIC,7,0x2b84c320,113,"G"
-code-creation,CallIC,7,0x2b84c3a0,193,"pop"
-code-creation,CallIC,7,0x2b84c480,113,"I"
-code-creation,LoadIC,5,0x2b84c500,93,"data"
-code-creation,LoadIC,5,0x2b84c560,93,"tag"
-code-creation,CallIC,7,0x2b84c5c0,113,"equal"
-code-creation,CallIC,7,0x2b84c640,132,"require"
-code-creation,CallIC,7,0x2b84c6e0,113,"decrypt"
-code-creation,CallIC,7,0x2b84c760,128,"bitSlice"
-code-creation,CallMegamorphic,7,0x2b84c7e0,685,"args_count: 0"
+code-creation,CallIC,7,0x2b84b420,136,slice
+code-creation,StoreIC,9,0x2b84b4c0,138,tag
+code-creation,StoreIC,9,0x2b84b560,138,data
+code-creation,Stub,12,0x2b84b600,214,BinaryOpStub_SHL_OverwriteRight_Smi+Smi
+code-creation,LoadPolymorphicIC,5,0x2b84b6e0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b84b760,105,length
+code-creation,CallMiss,7,0x2b84b7e0,178,args_count: 0
+code-creation,CallIC,7,0x2b84b8a0,132,pass
+code-creation,Stub,2,0x2b84b940,76,LoadFieldStub
+code-creation,LoadIC,5,0x2b84b9a0,93,passes
+code-creation,LoadIC,5,0x2b84ba00,93,key
+code-creation,LoadIC,5,0x2b84ba60,93,cipher
+code-creation,Stub,2,0x2b84bac0,70,aes
+code-creation,LoadIC,5,0x2b84bb20,93,aes
+code-creation,CallIC,7,0x2b84bb80,113,toBits
+code-creation,Stub,2,0x2b84bc00,95,h
+code-creation,LoadIC,5,0x2b84bc60,93,h
+code-creation,StoreIC,9,0x2b84bcc0,246,a
+code-creation,LoadIC,5,0x2b84bdc0,93,iv
+code-creation,Stub,2,0x2b84be20,76,LoadFieldStub
+code-creation,LoadIC,5,0x2b84be80,93,adata
+code-creation,LoadIC,5,0x2b84bee0,93,pt
+code-creation,Stub,2,0x2b84bf40,76,LoadFieldStub
+code-creation,LoadIC,5,0x2b84bfa0,93,ct
+code-creation,LoadIC,5,0x2b84c000,93,tag
+code-creation,LoadIC,5,0x2b84c060,93,mode
+code-creation,LoadIC,5,0x2b84c0c0,93,ccm
+code-creation,CallMiss,7,0x2b84c120,178,args_count: 5
+code-creation,CallIC,7,0x2b84c1e0,113,encrypt
+code-creation,CallMiss,7,0x2b84c260,178,args_count: 6
+code-creation,CallIC,7,0x2b84c320,113,G
+code-creation,CallIC,7,0x2b84c3a0,193,pop
+code-creation,CallIC,7,0x2b84c480,113,I
+code-creation,LoadIC,5,0x2b84c500,93,data
+code-creation,LoadIC,5,0x2b84c560,93,tag
+code-creation,CallIC,7,0x2b84c5c0,113,equal
+code-creation,CallIC,7,0x2b84c640,132,require
+code-creation,CallIC,7,0x2b84c6e0,113,decrypt
+code-creation,CallIC,7,0x2b84c760,128,bitSlice
+code-creation,CallMegamorphic,7,0x2b84c7e0,685,args_count: 0
tick,0xf776d430,56728,0,0x90ec418,0,0x2b84a349,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,15,0x2b84caa0,172,"ToBooleanStub(Undefined,Smi,HeapNumber)"
-code-creation,CallIC,7,0x2b84cb60,193,"pop"
-code-creation,Stub,2,0x2b84cc40,725,"ElementsTransitionAndStoreStub"
-code-creation,Stub,2,0x2b84cf20,1800,"RecordWriteStub"
-code-creation,Stub,2,0x2b84d640,578,"KeyedStoreElementStub"
-code-creation,KeyedStorePolymorphicIC,10,0x2b84d8a0,107,""
-code-creation,KeyedStorePolymorphicIC,10,0x2b84d8a0,107,"args_count: 0"
-timer-event-start,"V8.RecompileSynchronous",57494
-timer-event-start,"V8.ParseLazyMicroSeconds",57505
-timer-event-end,"V8.ParseLazyMicroSeconds",57586
-code-creation,LazyCompile,0,0x2b84d920,3418,"sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7",0x2f3399b0,~
+code-creation,Stub,15,0x2b84caa0,172,ToBooleanStub(Undefined,Smi,HeapNumber)
+code-creation,CallIC,7,0x2b84cb60,193,pop
+code-creation,Stub,2,0x2b84cc40,725,ElementsTransitionAndStoreStub
+code-creation,Stub,2,0x2b84cf20,1800,RecordWriteStub
+code-creation,Stub,2,0x2b84d640,578,KeyedStoreElementStub
+code-creation,KeyedStorePolymorphicIC,10,0x2b84d8a0,107,
+code-creation,KeyedStorePolymorphicIC,10,0x2b84d8a0,107,args_count: 0
+timer-event-start,V8.RecompileSynchronous,57494
+timer-event-start,V8.ParseLazyMicroSeconds,57505
+timer-event-end,V8.ParseLazyMicroSeconds,57586
+code-creation,LazyCompile,0,0x2b84d920,3418,sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7,0x2f3399b0,~
tick,0x8092457,57778,0,0x19e,2,0x2b846a46,0x2b8455f6,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.RecompileSynchronous",57904
-timer-event-start,"V8.RecompileConcurrent",57929
-code-creation,Stub,2,0x2b84e680,559,"ElementsTransitionAndStoreStub"
-code-creation,KeyedStorePolymorphicIC,10,0x2b84e8c0,107,""
-code-creation,KeyedStorePolymorphicIC,10,0x2b84e8c0,107,"args_count: 0"
-code-creation,LoadPolymorphicIC,5,0x2b84e940,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b84e9c0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b84ea40,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b84eac0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b84eb40,105,"length"
-timer-event-start,"V8.RecompileSynchronous",58447
-timer-event-start,"V8.ParseLazyMicroSeconds",58457
-timer-event-end,"V8.ParseLazyMicroSeconds",58501
-code-creation,LazyCompile,0,0x2b84ebc0,1096,"parseInt native v8natives.js:130",0x4421ec1c,~
-timer-event-end,"V8.RecompileSynchronous",58637
-timer-event-start,"V8.GCScavenger",58779
-timer-event-start,"V8.External",58787
-timer-event-end,"V8.External",58791
+timer-event-end,V8.RecompileSynchronous,57904
+timer-event-start,V8.RecompileConcurrent,57929
+code-creation,Stub,2,0x2b84e680,559,ElementsTransitionAndStoreStub
+code-creation,KeyedStorePolymorphicIC,10,0x2b84e8c0,107,
+code-creation,KeyedStorePolymorphicIC,10,0x2b84e8c0,107,args_count: 0
+code-creation,LoadPolymorphicIC,5,0x2b84e940,105,length
+code-creation,LoadPolymorphicIC,5,0x2b84e9c0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b84ea40,105,length
+code-creation,LoadPolymorphicIC,5,0x2b84eac0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b84eb40,105,length
+timer-event-start,V8.RecompileSynchronous,58447
+timer-event-start,V8.ParseLazyMicroSeconds,58457
+timer-event-end,V8.ParseLazyMicroSeconds,58501
+code-creation,LazyCompile,0,0x2b84ebc0,1096,parseInt native v8natives.js:130,0x4421ec1c,~
+timer-event-end,V8.RecompileSynchronous,58637
+timer-event-start,V8.GCScavenger,58779
+timer-event-start,V8.External,58787
+timer-event-end,V8.External,58791
tick,0x810f40c,58868,0,0x0,1
-timer-event-start,"V8.External",59191
-timer-event-end,"V8.External",59200
-timer-event-end,"V8.GCScavenger",59205
-timer-event-end,"V8.RecompileConcurrent",59219
-timer-event-start,"V8.RecompileConcurrent",59254
-timer-event-start,"V8.RecompileSynchronous",59271
-code-creation,LazyCompile,1,0x2b84f020,4592,"sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7",0x2f3399b0,*
-timer-event-end,"V8.RecompileSynchronous",59549
-timer-event-end,"V8.RecompileConcurrent",59567
-timer-event-start,"V8.RecompileSynchronous",59590
-code-creation,LazyCompile,1,0x2b850220,1662,"parseInt native v8natives.js:130",0x4421ec1c,*
-timer-event-end,"V8.RecompileSynchronous",59672
-timer-event-start,"V8.RecompileSynchronous",59682
-timer-event-start,"V8.ParseLazyMicroSeconds",59687
-timer-event-end,"V8.ParseLazyMicroSeconds",59701
-code-creation,LazyCompile,0,0x2b8508a0,236,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,~
-timer-event-end,"V8.RecompileSynchronous",59750
-timer-event-start,"V8.RecompileConcurrent",59776
-timer-event-start,"V8.RecompileSynchronous",59811
-timer-event-start,"V8.ParseLazyMicroSeconds",59820
-timer-event-end,"V8.ParseLazyMicroSeconds",59838
-code-creation,LazyCompile,0,0x2b8509a0,388,"sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11",0x2f339d10,~
-timer-event-end,"V8.RecompileConcurrent",59909
-timer-event-start,"V8.RecompileConcurrent",59926
-timer-event-end,"V8.RecompileSynchronous",59933
-timer-event-start,"V8.RecompileSynchronous",59950
+timer-event-start,V8.External,59191
+timer-event-end,V8.External,59200
+timer-event-end,V8.GCScavenger,59205
+timer-event-end,V8.RecompileConcurrent,59219
+timer-event-start,V8.RecompileConcurrent,59254
+timer-event-start,V8.RecompileSynchronous,59271
+code-creation,LazyCompile,1,0x2b84f020,4592,sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7,0x2f3399b0,*
+timer-event-end,V8.RecompileSynchronous,59549
+timer-event-end,V8.RecompileConcurrent,59567
+timer-event-start,V8.RecompileSynchronous,59590
+code-creation,LazyCompile,1,0x2b850220,1662,parseInt native v8natives.js:130,0x4421ec1c,*
+timer-event-end,V8.RecompileSynchronous,59672
+timer-event-start,V8.RecompileSynchronous,59682
+timer-event-start,V8.ParseLazyMicroSeconds,59687
+timer-event-end,V8.ParseLazyMicroSeconds,59701
+code-creation,LazyCompile,0,0x2b8508a0,236,sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339bf0,~
+timer-event-end,V8.RecompileSynchronous,59750
+timer-event-start,V8.RecompileConcurrent,59776
+timer-event-start,V8.RecompileSynchronous,59811
+timer-event-start,V8.ParseLazyMicroSeconds,59820
+timer-event-end,V8.ParseLazyMicroSeconds,59838
+code-creation,LazyCompile,0,0x2b8509a0,388,sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11,0x2f339d10,~
+timer-event-end,V8.RecompileConcurrent,59909
+timer-event-start,V8.RecompileConcurrent,59926
+timer-event-end,V8.RecompileSynchronous,59933
+timer-event-start,V8.RecompileSynchronous,59950
tick,0xf776d430,59966,0,0x90ec418,2,0x2b8455e6,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,LazyCompile,1,0x2b850b40,536,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,*
-timer-event-end,"V8.RecompileSynchronous",60077
-timer-event-start,"V8.RecompileSynchronous",60141
-timer-event-start,"V8.ParseLazyMicroSeconds",60149
-timer-event-end,"V8.RecompileConcurrent",60177
-timer-event-end,"V8.ParseLazyMicroSeconds",60195
-code-creation,LazyCompile,0,0x2b850d60,960,"substr native string.js:749",0x44216608,~
-timer-event-end,"V8.RecompileSynchronous",60329
-timer-event-start,"V8.RecompileConcurrent",60356
+code-creation,LazyCompile,1,0x2b850b40,536,sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339bf0,*
+timer-event-end,V8.RecompileSynchronous,60077
+timer-event-start,V8.RecompileSynchronous,60141
+timer-event-start,V8.ParseLazyMicroSeconds,60149
+timer-event-end,V8.RecompileConcurrent,60177
+timer-event-end,V8.ParseLazyMicroSeconds,60195
+code-creation,LazyCompile,0,0x2b850d60,960,substr native string.js:749,0x44216608,~
+timer-event-end,V8.RecompileSynchronous,60329
+timer-event-start,V8.RecompileConcurrent,60356
code-deopt,60375,544
-timer-event-start,"V8.RecompileSynchronous",60409
-code-creation,LazyCompile,1,0x2b851120,1534,"sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11",0x2f339d10,*
-timer-event-end,"V8.RecompileSynchronous",60474
-code-creation,LoadPolymorphicIC,5,0x2b851720,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b8517a0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b851820,105,"length"
-timer-event-end,"V8.RecompileConcurrent",60691
-timer-event-start,"V8.RecompileSynchronous",60716
-code-creation,LazyCompile,1,0x2b8518a0,1792,"substr native string.js:749",0x44216608,*
-timer-event-end,"V8.RecompileSynchronous",60803
+timer-event-start,V8.RecompileSynchronous,60409
+code-creation,LazyCompile,1,0x2b851120,1534,sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11,0x2f339d10,*
+timer-event-end,V8.RecompileSynchronous,60474
+code-creation,LoadPolymorphicIC,5,0x2b851720,105,length
+code-creation,LoadPolymorphicIC,5,0x2b8517a0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b851820,105,length
+timer-event-end,V8.RecompileConcurrent,60691
+timer-event-start,V8.RecompileSynchronous,60716
+code-creation,LazyCompile,1,0x2b8518a0,1792,substr native string.js:749,0x44216608,*
+timer-event-end,V8.RecompileSynchronous,60803
tick,0x2b849c4b,60997,0,0x2b849afa,0,0x2b83a0df,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.RecompileSynchronous",61042
-timer-event-start,"V8.ParseLazyMicroSeconds",61054
-timer-event-end,"V8.ParseLazyMicroSeconds",61066
-code-creation,LazyCompile,0,0x2b851fa0,184,"sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339890,~
-timer-event-end,"V8.RecompileSynchronous",61112
-timer-event-start,"V8.RecompileConcurrent",61139
-timer-event-start,"V8.RecompileSynchronous",61159
-timer-event-start,"V8.ParseLazyMicroSeconds",61168
-timer-event-end,"V8.ParseLazyMicroSeconds",61186
-timer-event-end,"V8.RecompileConcurrent",61201
-code-creation,LazyCompile,0,0x2b852060,336,"sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339ad0,~
-timer-event-start,"V8.ParseLazyMicroSeconds",61249
-timer-event-end,"V8.ParseLazyMicroSeconds",61264
-timer-event-end,"V8.RecompileSynchronous",61290
-timer-event-start,"V8.RecompileSynchronous",61309
-timer-event-start,"V8.RecompileConcurrent",61317
-code-creation,LazyCompile,1,0x2b8521c0,196,"sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339890,*
-timer-event-end,"V8.RecompileSynchronous",61348
-timer-event-start,"V8.RecompileSynchronous",61374
-timer-event-start,"V8.ParseLazyMicroSeconds",61381
-timer-event-end,"V8.ParseLazyMicroSeconds",61394
-timer-event-end,"V8.RecompileSynchronous",61418
-timer-event-start,"V8.RecompileSynchronous",61424
-timer-event-start,"V8.ParseLazyMicroSeconds",61429
-timer-event-end,"V8.ParseLazyMicroSeconds",61442
-code-creation,LazyCompile,0,0x2b8522a0,248,"round native math.js:193",0x4422265c,~
-timer-event-end,"V8.RecompileConcurrent",61471
-timer-event-start,"V8.RecompileConcurrent",61480
-timer-event-end,"V8.RecompileSynchronous",61487
-timer-event-start,"V8.RecompileSynchronous",61512
-timer-event-end,"V8.RecompileConcurrent",61536
-timer-event-start,"V8.RecompileConcurrent",61543
-code-creation,LazyCompile,1,0x2b8523a0,888,"sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339ad0,*
-timer-event-end,"V8.RecompileSynchronous",61565
-timer-event-start,"V8.RecompileSynchronous",61570
-code-creation,LazyCompile,1,0x2b852720,536,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,*
-timer-event-end,"V8.RecompileSynchronous",61599
-timer-event-start,"V8.RecompileSynchronous",61606
-timer-event-end,"V8.RecompileConcurrent",61610
-code-creation,LazyCompile,1,0x2b852940,242,"round native math.js:193",0x4422265c,*
-timer-event-end,"V8.RecompileSynchronous",61629
-code-creation,LoadPolymorphicIC,5,0x2b852a40,105,"length"
-timer-event-start,"V8.RecompileSynchronous",61726
-timer-event-start,"V8.ParseLazyMicroSeconds",61731
-timer-event-end,"V8.ParseLazyMicroSeconds",61757
-code-creation,LazyCompile,0,0x2b852ac0,536,"sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339b30,~
-timer-event-start,"V8.ParseLazyMicroSeconds",61847
-timer-event-end,"V8.ParseLazyMicroSeconds",61865
-code-creation,Function,0,0x2b852ce0,288,"sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339b90,~
-timer-event-end,"V8.RecompileSynchronous",61926
-timer-event-start,"V8.RecompileSynchronous",61933
-timer-event-start,"V8.ParseLazyMicroSeconds",61939
-timer-event-end,"V8.ParseLazyMicroSeconds",61953
-timer-event-start,"V8.RecompileConcurrent",61961
-code-creation,LazyCompile,0,0x2b852e00,248,"ceil native math.js:81",0x442222fc,~
-timer-event-end,"V8.RecompileSynchronous",62019
+timer-event-start,V8.RecompileSynchronous,61042
+timer-event-start,V8.ParseLazyMicroSeconds,61054
+timer-event-end,V8.ParseLazyMicroSeconds,61066
+code-creation,LazyCompile,0,0x2b851fa0,184,sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6,0x2f339890,~
+timer-event-end,V8.RecompileSynchronous,61112
+timer-event-start,V8.RecompileConcurrent,61139
+timer-event-start,V8.RecompileSynchronous,61159
+timer-event-start,V8.ParseLazyMicroSeconds,61168
+timer-event-end,V8.ParseLazyMicroSeconds,61186
+timer-event-end,V8.RecompileConcurrent,61201
+code-creation,LazyCompile,0,0x2b852060,336,sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339ad0,~
+timer-event-start,V8.ParseLazyMicroSeconds,61249
+timer-event-end,V8.ParseLazyMicroSeconds,61264
+timer-event-end,V8.RecompileSynchronous,61290
+timer-event-start,V8.RecompileSynchronous,61309
+timer-event-start,V8.RecompileConcurrent,61317
+code-creation,LazyCompile,1,0x2b8521c0,196,sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6,0x2f339890,*
+timer-event-end,V8.RecompileSynchronous,61348
+timer-event-start,V8.RecompileSynchronous,61374
+timer-event-start,V8.ParseLazyMicroSeconds,61381
+timer-event-end,V8.ParseLazyMicroSeconds,61394
+timer-event-end,V8.RecompileSynchronous,61418
+timer-event-start,V8.RecompileSynchronous,61424
+timer-event-start,V8.ParseLazyMicroSeconds,61429
+timer-event-end,V8.ParseLazyMicroSeconds,61442
+code-creation,LazyCompile,0,0x2b8522a0,248,round native math.js:193,0x4422265c,~
+timer-event-end,V8.RecompileConcurrent,61471
+timer-event-start,V8.RecompileConcurrent,61480
+timer-event-end,V8.RecompileSynchronous,61487
+timer-event-start,V8.RecompileSynchronous,61512
+timer-event-end,V8.RecompileConcurrent,61536
+timer-event-start,V8.RecompileConcurrent,61543
+code-creation,LazyCompile,1,0x2b8523a0,888,sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339ad0,*
+timer-event-end,V8.RecompileSynchronous,61565
+timer-event-start,V8.RecompileSynchronous,61570
+code-creation,LazyCompile,1,0x2b852720,536,sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339bf0,*
+timer-event-end,V8.RecompileSynchronous,61599
+timer-event-start,V8.RecompileSynchronous,61606
+timer-event-end,V8.RecompileConcurrent,61610
+code-creation,LazyCompile,1,0x2b852940,242,round native math.js:193,0x4422265c,*
+timer-event-end,V8.RecompileSynchronous,61629
+code-creation,LoadPolymorphicIC,5,0x2b852a40,105,length
+timer-event-start,V8.RecompileSynchronous,61726
+timer-event-start,V8.ParseLazyMicroSeconds,61731
+timer-event-end,V8.ParseLazyMicroSeconds,61757
+code-creation,LazyCompile,0,0x2b852ac0,536,sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339b30,~
+timer-event-start,V8.ParseLazyMicroSeconds,61847
+timer-event-end,V8.ParseLazyMicroSeconds,61865
+code-creation,Function,0,0x2b852ce0,288,sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339b90,~
+timer-event-end,V8.RecompileSynchronous,61926
+timer-event-start,V8.RecompileSynchronous,61933
+timer-event-start,V8.ParseLazyMicroSeconds,61939
+timer-event-end,V8.ParseLazyMicroSeconds,61953
+timer-event-start,V8.RecompileConcurrent,61961
+code-creation,LazyCompile,0,0x2b852e00,248,ceil native math.js:81,0x442222fc,~
+timer-event-end,V8.RecompileSynchronous,62019
tick,0x811e913,62060,0,0xf773bff4,2,0x2b83dfae,0x2b8445e0,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-deopt,62122,544
-timer-event-end,"V8.RecompileConcurrent",62287
-timer-event-start,"V8.RecompileConcurrent",62297
-timer-event-start,"V8.RecompileSynchronous",62309
-timer-event-end,"V8.RecompileConcurrent",62353
-code-creation,Stub,2,0x2b852f00,1785,"RecordWriteStub"
-code-creation,LazyCompile,1,0x2b853600,1514,"sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339b30,*
-timer-event-end,"V8.RecompileSynchronous",62415
-timer-event-start,"V8.RecompileSynchronous",62421
-code-creation,LazyCompile,1,0x2b853c00,242,"ceil native math.js:81",0x442222fc,*
-timer-event-end,"V8.RecompileSynchronous",62445
-code-creation,LoadPolymorphicIC,5,0x2b853d00,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b853d80,105,"length"
-timer-event-start,"V8.RecompileSynchronous",63048
-timer-event-start,"V8.ParseLazyMicroSeconds",63067
-timer-event-end,"V8.ParseLazyMicroSeconds",63085
-timer-event-end,"V8.RecompileSynchronous",63117
+timer-event-end,V8.RecompileConcurrent,62287
+timer-event-start,V8.RecompileConcurrent,62297
+timer-event-start,V8.RecompileSynchronous,62309
+timer-event-end,V8.RecompileConcurrent,62353
+code-creation,Stub,2,0x2b852f00,1785,RecordWriteStub
+code-creation,LazyCompile,1,0x2b853600,1514,sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339b30,*
+timer-event-end,V8.RecompileSynchronous,62415
+timer-event-start,V8.RecompileSynchronous,62421
+code-creation,LazyCompile,1,0x2b853c00,242,ceil native math.js:81,0x442222fc,*
+timer-event-end,V8.RecompileSynchronous,62445
+code-creation,LoadPolymorphicIC,5,0x2b853d00,105,length
+code-creation,LoadPolymorphicIC,5,0x2b853d80,105,length
+timer-event-start,V8.RecompileSynchronous,63048
+timer-event-start,V8.ParseLazyMicroSeconds,63067
+timer-event-end,V8.ParseLazyMicroSeconds,63085
+timer-event-end,V8.RecompileSynchronous,63117
tick,0xf776d430,63132,0,0x90ec418,0,0x2b8462cc,0x2b845cd7,0x2b848b0a,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.RecompileConcurrent",63203
-timer-event-end,"V8.RecompileConcurrent",63315
-timer-event-start,"V8.RecompileSynchronous",63329
-code-creation,LazyCompile,1,0x2b853e00,644,"sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339b90,*
-timer-event-end,"V8.RecompileSynchronous",63379
-timer-event-start,"V8.RecompileSynchronous",63494
-timer-event-start,"V8.ParseLazyMicroSeconds",63503
-timer-event-end,"V8.ParseLazyMicroSeconds",63517
-timer-event-end,"V8.RecompileSynchronous",63544
-timer-event-start,"V8.RecompileConcurrent",63572
-timer-event-start,"V8.RecompileSynchronous",63641
-timer-event-start,"V8.ParseLazyMicroSeconds",63651
-timer-event-end,"V8.RecompileConcurrent",63664
-timer-event-end,"V8.ParseLazyMicroSeconds",63678
-code-creation,LazyCompile,0,0x2b8540a0,560,"sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a70,~
-timer-event-start,"V8.ParseLazyMicroSeconds",63757
-timer-event-end,"V8.ParseLazyMicroSeconds",63772
-timer-event-start,"V8.ParseLazyMicroSeconds",63808
-timer-event-end,"V8.ParseLazyMicroSeconds",63848
-code-creation,Function,0,0x2b8542e0,1126,"sjcl.bitArray.P bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339cb0,~
-timer-event-start,"V8.ParseLazyMicroSeconds",63977
-timer-event-end,"V8.ParseLazyMicroSeconds",63994
-timer-event-start,"V8.ParseLazyMicroSeconds",64023
-timer-event-end,"V8.ParseLazyMicroSeconds",64039
-timer-event-end,"V8.RecompileSynchronous",64072
-timer-event-start,"V8.RecompileSynchronous",64079
-timer-event-start,"V8.RecompileConcurrent",64099
-code-creation,LazyCompile,1,0x2b854760,536,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,*
-timer-event-end,"V8.RecompileSynchronous",64194
+timer-event-start,V8.RecompileConcurrent,63203
+timer-event-end,V8.RecompileConcurrent,63315
+timer-event-start,V8.RecompileSynchronous,63329
+code-creation,LazyCompile,1,0x2b853e00,644,sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339b90,*
+timer-event-end,V8.RecompileSynchronous,63379
+timer-event-start,V8.RecompileSynchronous,63494
+timer-event-start,V8.ParseLazyMicroSeconds,63503
+timer-event-end,V8.ParseLazyMicroSeconds,63517
+timer-event-end,V8.RecompileSynchronous,63544
+timer-event-start,V8.RecompileConcurrent,63572
+timer-event-start,V8.RecompileSynchronous,63641
+timer-event-start,V8.ParseLazyMicroSeconds,63651
+timer-event-end,V8.RecompileConcurrent,63664
+timer-event-end,V8.ParseLazyMicroSeconds,63678
+code-creation,LazyCompile,0,0x2b8540a0,560,sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339a70,~
+timer-event-start,V8.ParseLazyMicroSeconds,63757
+timer-event-end,V8.ParseLazyMicroSeconds,63772
+timer-event-start,V8.ParseLazyMicroSeconds,63808
+timer-event-end,V8.ParseLazyMicroSeconds,63848
+code-creation,Function,0,0x2b8542e0,1126,sjcl.bitArray.P bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339cb0,~
+timer-event-start,V8.ParseLazyMicroSeconds,63977
+timer-event-end,V8.ParseLazyMicroSeconds,63994
+timer-event-start,V8.ParseLazyMicroSeconds,64023
+timer-event-end,V8.ParseLazyMicroSeconds,64039
+timer-event-end,V8.RecompileSynchronous,64072
+timer-event-start,V8.RecompileSynchronous,64079
+timer-event-start,V8.RecompileConcurrent,64099
+code-creation,LazyCompile,1,0x2b854760,536,sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339bf0,*
+timer-event-end,V8.RecompileSynchronous,64194
tick,0xf776d430,64209,0,0x4059,2,0x2b845c29,0x2b848b0a,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-deopt,64271,544
-timer-event-start,"V8.RecompileSynchronous",64467
-timer-event-start,"V8.ParseLazyMicroSeconds",64476
-timer-event-end,"V8.ParseLazyMicroSeconds",64542
-code-creation,LazyCompile,0,0x2b854980,3002,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,~
-timer-event-end,"V8.RecompileSynchronous",64818
-timer-event-end,"V8.RecompileConcurrent",64871
-timer-event-start,"V8.RecompileConcurrent",64883
-timer-event-start,"V8.RecompileSynchronous",64890
-code-creation,LazyCompile,1,0x2b855540,3364,"sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a70,*
-timer-event-end,"V8.RecompileSynchronous",65047
+timer-event-start,V8.RecompileSynchronous,64467
+timer-event-start,V8.ParseLazyMicroSeconds,64476
+timer-event-end,V8.ParseLazyMicroSeconds,64542
+code-creation,LazyCompile,0,0x2b854980,3002,sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4,0x2f339830,~
+timer-event-end,V8.RecompileSynchronous,64818
+timer-event-end,V8.RecompileConcurrent,64871
+timer-event-start,V8.RecompileConcurrent,64883
+timer-event-start,V8.RecompileSynchronous,64890
+code-creation,LazyCompile,1,0x2b855540,3364,sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339a70,*
+timer-event-end,V8.RecompileSynchronous,65047
code-deopt,65079,4608
-code-creation,LoadPolymorphicIC,5,0x2b856280,105,"length"
+code-creation,LoadPolymorphicIC,5,0x2b856280,105,length
tick,0x2b8472a7,65264,0,0x52f0b0e1,0,0x2b852252,0x2b848c4b,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,LoadPolymorphicIC,5,0x2b856300,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b856380,105,"length"
-timer-event-start,"V8.GCScavenger",65757
-timer-event-start,"V8.External",65766
-timer-event-end,"V8.External",65770
-timer-event-start,"V8.External",66154
-timer-event-end,"V8.External",66162
-timer-event-end,"V8.GCScavenger",66166
-timer-event-end,"V8.RecompileConcurrent",66181
-timer-event-start,"V8.RecompileSynchronous",66254
+code-creation,LoadPolymorphicIC,5,0x2b856300,105,length
+code-creation,LoadPolymorphicIC,5,0x2b856380,105,length
+timer-event-start,V8.GCScavenger,65757
+timer-event-start,V8.External,65766
+timer-event-end,V8.External,65770
+timer-event-start,V8.External,66154
+timer-event-end,V8.External,66162
+timer-event-end,V8.GCScavenger,66166
+timer-event-end,V8.RecompileConcurrent,66181
+timer-event-start,V8.RecompileSynchronous,66254
tick,0x81c09b0,66332,0,0x91632e8,2,0x2b839e65,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,2,0x2b856400,1785,"RecordWriteStub"
-code-creation,Stub,2,0x2b856b00,1785,"RecordWriteStub"
-code-creation,Stub,2,0x2b857200,783,"RecordWriteStub"
-code-creation,Stub,2,0x2b857520,1772,"RecordWriteStub"
-code-creation,Stub,2,0x2b857c20,1785,"RecordWriteStub"
-code-creation,LazyCompile,1,0x2b858320,4397,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,*
-timer-event-end,"V8.RecompileSynchronous",66661
-timer-event-start,"V8.RecompileSynchronous",66788
-timer-event-start,"V8.ParseLazyMicroSeconds",66797
-timer-event-end,"V8.ParseLazyMicroSeconds",66878
-timer-event-end,"V8.RecompileSynchronous",67067
-timer-event-start,"V8.RecompileConcurrent",67094
+code-creation,Stub,2,0x2b856400,1785,RecordWriteStub
+code-creation,Stub,2,0x2b856b00,1785,RecordWriteStub
+code-creation,Stub,2,0x2b857200,783,RecordWriteStub
+code-creation,Stub,2,0x2b857520,1772,RecordWriteStub
+code-creation,Stub,2,0x2b857c20,1785,RecordWriteStub
+code-creation,LazyCompile,1,0x2b858320,4397,sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4,0x2f339830,*
+timer-event-end,V8.RecompileSynchronous,66661
+timer-event-start,V8.RecompileSynchronous,66788
+timer-event-start,V8.ParseLazyMicroSeconds,66797
+timer-event-end,V8.ParseLazyMicroSeconds,66878
+timer-event-end,V8.RecompileSynchronous,67067
+timer-event-start,V8.RecompileConcurrent,67094
tick,0x2b8473da,67403,0,0x2f392d35,0,0x2b852252,0x2b8455f6,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.RecompileConcurrent",68064
-timer-event-start,"V8.RecompileSynchronous",68081
-code-creation,LazyCompile,1,0x2b859460,4752,"sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7",0x2f3399b0,*
-timer-event-end,"V8.RecompileSynchronous",68294
-code-creation,LoadPolymorphicIC,5,0x2b85a700,105,"length"
+timer-event-end,V8.RecompileConcurrent,68064
+timer-event-start,V8.RecompileSynchronous,68081
+code-creation,LazyCompile,1,0x2b859460,4752,sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7,0x2f3399b0,*
+timer-event-end,V8.RecompileSynchronous,68294
+code-creation,LoadPolymorphicIC,5,0x2b85a700,105,length
tick,0x2b85055a,68462,0,0x527b30d9,0,0x2b83a782,0x2b839f55,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",69250
-timer-event-start,"V8.External",69260
-timer-event-end,"V8.External",69264
-timer-event-start,"V8.External",69314
-timer-event-end,"V8.External",69320
-timer-event-end,"V8.GCScavenger",69324
+timer-event-start,V8.GCScavenger,69250
+timer-event-start,V8.External,69260
+timer-event-end,V8.External,69264
+timer-event-start,V8.External,69314
+timer-event-end,V8.External,69320
+timer-event-end,V8.GCScavenger,69324
tick,0x82ec00c,69525,0,0xff81fcf4,0,0x2b85056f,0x2b83a782,0x2b839fd2,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,LoadPolymorphicIC,5,0x2b85a780,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b85a800,117,"length"
-code-creation,LoadPolymorphicIC,5,0x2b85a880,117,"length"
-code-creation,LoadPolymorphicIC,5,0x2b85a900,117,"length"
-code-creation,LoadPolymorphicIC,5,0x2b85a980,117,"length"
-code-creation,LoadPolymorphicIC,5,0x2b85aa00,117,"length"
+code-creation,LoadPolymorphicIC,5,0x2b85a780,105,length
+code-creation,LoadPolymorphicIC,5,0x2b85a800,117,length
+code-creation,LoadPolymorphicIC,5,0x2b85a880,117,length
+code-creation,LoadPolymorphicIC,5,0x2b85a900,117,length
+code-creation,LoadPolymorphicIC,5,0x2b85a980,117,length
+code-creation,LoadPolymorphicIC,5,0x2b85aa00,117,length
tick,0x81168ba,70588,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839f04,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.RecompileSynchronous",71064
-timer-event-start,"V8.ParseLazyMicroSeconds",71076
-timer-event-end,"V8.ParseLazyMicroSeconds",71094
-code-creation,LazyCompile,0,0x2b85aa80,292,"sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131",0x2f33b390,~
-timer-event-start,"V8.ParseLazyMicroSeconds",71142
-timer-event-end,"V8.ParseLazyMicroSeconds",71152
-code-creation,Function,0,0x2b85abc0,208,"sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110",0x2f33b270,~
-timer-event-end,"V8.RecompileSynchronous",71195
-timer-event-start,"V8.RecompileSynchronous",71204
-timer-event-start,"V8.ParseLazyMicroSeconds",71210
-timer-event-start,"V8.RecompileConcurrent",71216
-timer-event-end,"V8.ParseLazyMicroSeconds",71228
-timer-event-end,"V8.RecompileSynchronous",71254
-timer-event-end,"V8.RecompileConcurrent",71304
-timer-event-start,"V8.RecompileConcurrent",71312
-timer-event-start,"V8.RecompileSynchronous",71316
-code-creation,LazyCompile,1,0x2b85aca0,322,"sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131",0x2f33b390,*
-timer-event-end,"V8.RecompileSynchronous",71361
-timer-event-start,"V8.RecompileSynchronous",71367
-timer-event-end,"V8.RecompileConcurrent",71373
-code-creation,LazyCompile,1,0x2b85ae00,198,"sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110",0x2f33b270,*
-timer-event-end,"V8.RecompileSynchronous",71390
+timer-event-start,V8.RecompileSynchronous,71064
+timer-event-start,V8.ParseLazyMicroSeconds,71076
+timer-event-end,V8.ParseLazyMicroSeconds,71094
+code-creation,LazyCompile,0,0x2b85aa80,292,sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131,0x2f33b390,~
+timer-event-start,V8.ParseLazyMicroSeconds,71142
+timer-event-end,V8.ParseLazyMicroSeconds,71152
+code-creation,Function,0,0x2b85abc0,208,sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110,0x2f33b270,~
+timer-event-end,V8.RecompileSynchronous,71195
+timer-event-start,V8.RecompileSynchronous,71204
+timer-event-start,V8.ParseLazyMicroSeconds,71210
+timer-event-start,V8.RecompileConcurrent,71216
+timer-event-end,V8.ParseLazyMicroSeconds,71228
+timer-event-end,V8.RecompileSynchronous,71254
+timer-event-end,V8.RecompileConcurrent,71304
+timer-event-start,V8.RecompileConcurrent,71312
+timer-event-start,V8.RecompileSynchronous,71316
+code-creation,LazyCompile,1,0x2b85aca0,322,sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131,0x2f33b390,*
+timer-event-end,V8.RecompileSynchronous,71361
+timer-event-start,V8.RecompileSynchronous,71367
+timer-event-end,V8.RecompileConcurrent,71373
+code-creation,LazyCompile,1,0x2b85ae00,198,sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110,0x2f33b270,*
+timer-event-end,V8.RecompileSynchronous,71390
tick,0x2b83c3b1,71653,0,0xffffff6b,0,0x2b83bd35,0x2b83a725,0x2b839f55,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",72513
-timer-event-start,"V8.External",72524
-timer-event-end,"V8.External",72530
-timer-event-start,"V8.External",72583
-timer-event-end,"V8.External",72591
-timer-event-end,"V8.GCScavenger",72596
+timer-event-start,V8.GCScavenger,72513
+timer-event-start,V8.External,72524
+timer-event-end,V8.External,72530
+timer-event-start,V8.External,72583
+timer-event-end,V8.External,72591
+timer-event-end,V8.GCScavenger,72596
tick,0x8116878,72711,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839f04,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,CallIC,7,0x2b85aee0,136,"concat"
-timer-event-start,"V8.RecompileSynchronous",72947
-timer-event-start,"V8.ParseLazyMicroSeconds",72956
-timer-event-end,"V8.ParseLazyMicroSeconds",72977
-code-creation,LazyCompile,0,0x2b85af80,392,"sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a10,~
-timer-event-start,"V8.ParseLazyMicroSeconds",73044
-timer-event-end,"V8.ParseLazyMicroSeconds",73083
-timer-event-start,"V8.ParseLazyMicroSeconds",73169
-timer-event-end,"V8.ParseLazyMicroSeconds",73185
-timer-event-start,"V8.ParseLazyMicroSeconds",73217
-timer-event-end,"V8.ParseLazyMicroSeconds",73232
-timer-event-start,"V8.ParseLazyMicroSeconds",73263
-timer-event-end,"V8.ParseLazyMicroSeconds",73289
-timer-event-start,"V8.ParseLazyMicroSeconds",73339
-timer-event-end,"V8.ParseLazyMicroSeconds",73356
-timer-event-end,"V8.RecompileSynchronous",73393
-timer-event-start,"V8.RecompileConcurrent",73422
+code-creation,CallIC,7,0x2b85aee0,136,concat
+timer-event-start,V8.RecompileSynchronous,72947
+timer-event-start,V8.ParseLazyMicroSeconds,72956
+timer-event-end,V8.ParseLazyMicroSeconds,72977
+code-creation,LazyCompile,0,0x2b85af80,392,sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339a10,~
+timer-event-start,V8.ParseLazyMicroSeconds,73044
+timer-event-end,V8.ParseLazyMicroSeconds,73083
+timer-event-start,V8.ParseLazyMicroSeconds,73169
+timer-event-end,V8.ParseLazyMicroSeconds,73185
+timer-event-start,V8.ParseLazyMicroSeconds,73217
+timer-event-end,V8.ParseLazyMicroSeconds,73232
+timer-event-start,V8.ParseLazyMicroSeconds,73263
+timer-event-end,V8.ParseLazyMicroSeconds,73289
+timer-event-start,V8.ParseLazyMicroSeconds,73339
+timer-event-end,V8.ParseLazyMicroSeconds,73356
+timer-event-end,V8.RecompileSynchronous,73393
+timer-event-start,V8.RecompileConcurrent,73422
tick,0x82eea09,73786,0,0x90de9b0,0,0x2b85056f,0x2b83a782,0x2b839e4e,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.ParseLazyMicroSeconds",74228
-timer-event-end,"V8.RecompileConcurrent",74243
-timer-event-end,"V8.ParseLazyMicroSeconds",74259
-timer-event-start,"V8.CompileLazy",74267
-timer-event-start,"V8.CompileFullCode",74273
-timer-event-end,"V8.CompileFullCode",74291
-code-creation,LazyCompile,0,0x2b85b120,332," bsuite/kraken-once/stanford-crypto-ccm.js:55",0x2f33db50,~
-timer-event-end,"V8.CompileLazy",74304
-timer-event-start,"V8.RecompileSynchronous",74351
-code-creation,LazyCompile,1,0x2b85b280,4132,"sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a10,*
-timer-event-end,"V8.RecompileSynchronous",74533
+timer-event-start,V8.ParseLazyMicroSeconds,74228
+timer-event-end,V8.RecompileConcurrent,74243
+timer-event-end,V8.ParseLazyMicroSeconds,74259
+timer-event-start,V8.CompileLazy,74267
+timer-event-start,V8.CompileFullCode,74273
+timer-event-end,V8.CompileFullCode,74291
+code-creation,LazyCompile,0,0x2b85b120,332, bsuite/kraken-once/stanford-crypto-ccm.js:55,0x2f33db50,~
+timer-event-end,V8.CompileLazy,74304
+timer-event-start,V8.RecompileSynchronous,74351
+code-creation,LazyCompile,1,0x2b85b280,4132,sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9,0x2f339a10,*
+timer-event-end,V8.RecompileSynchronous,74533
tick,0x2b85a2b8,74843,0,0xf5,0,0x2b852252,0x2b8454f6,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b84a934,75905,0,0x2b855c42,0,0x2b8446a3,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",76182
-timer-event-start,"V8.External",76193
-timer-event-end,"V8.External",76197
-timer-event-start,"V8.External",76251
-timer-event-end,"V8.External",76258
-timer-event-end,"V8.GCScavenger",76262
+timer-event-start,V8.GCScavenger,76182
+timer-event-start,V8.External,76193
+timer-event-end,V8.External,76197
+timer-event-start,V8.External,76251
+timer-event-end,V8.External,76258
+timer-event-end,V8.GCScavenger,76262
tick,0x81168ba,76974,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x81168ba,78047,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839f55,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.RecompileSynchronous",78403
-timer-event-start,"V8.ParseLazyMicroSeconds",78415
-timer-event-end,"V8.ParseLazyMicroSeconds",78444
-code-creation,LazyCompile,0,0x2b85c2c0,717,"sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13",0x2f339e90,~
-timer-event-start,"V8.ParseLazyMicroSeconds",78530
-timer-event-end,"V8.ParseLazyMicroSeconds",78559
-timer-event-start,"V8.ParseLazyMicroSeconds",78614
-timer-event-end,"V8.ParseLazyMicroSeconds",78632
-timer-event-end,"V8.RecompileSynchronous",78666
-timer-event-start,"V8.RecompileConcurrent",78695
-timer-event-end,"V8.RecompileConcurrent",79073
-timer-event-start,"V8.RecompileSynchronous",79089
+timer-event-start,V8.RecompileSynchronous,78403
+timer-event-start,V8.ParseLazyMicroSeconds,78415
+timer-event-end,V8.ParseLazyMicroSeconds,78444
+code-creation,LazyCompile,0,0x2b85c2c0,717,sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13,0x2f339e90,~
+timer-event-start,V8.ParseLazyMicroSeconds,78530
+timer-event-end,V8.ParseLazyMicroSeconds,78559
+timer-event-start,V8.ParseLazyMicroSeconds,78614
+timer-event-end,V8.ParseLazyMicroSeconds,78632
+timer-event-end,V8.RecompileSynchronous,78666
+timer-event-start,V8.RecompileConcurrent,78695
+timer-event-end,V8.RecompileConcurrent,79073
+timer-event-start,V8.RecompileSynchronous,79089
tick,0x2b859d1c,79108,0,0x6,0,0x2b852252,0x2b8455f6,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,2,0x2b85c5a0,1421,"StringAddStub"
-code-creation,LazyCompile,1,0x2b85cb40,2261,"sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13",0x2f339e90,*
-timer-event-end,"V8.RecompileSynchronous",79251
-timer-event-start,"V8.GCScavenger",79473
-timer-event-start,"V8.External",79482
-timer-event-end,"V8.External",79486
-timer-event-start,"V8.External",79534
-timer-event-end,"V8.External",79540
-timer-event-end,"V8.GCScavenger",79544
+code-creation,Stub,2,0x2b85c5a0,1421,StringAddStub
+code-creation,LazyCompile,1,0x2b85cb40,2261,sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13,0x2f339e90,*
+timer-event-end,V8.RecompileSynchronous,79251
+timer-event-start,V8.GCScavenger,79473
+timer-event-start,V8.External,79482
+timer-event-end,V8.External,79486
+timer-event-start,V8.External,79534
+timer-event-end,V8.External,79540
+timer-event-end,V8.GCScavenger,79544
tick,0x2b85b446,80176,0,0x256b20d1,0,0x2b848b8c,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x80c4222,81235,0,0x90d5060,0,0x2b85b578,0x2b84a349,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",82255
-timer-event-start,"V8.External",82266
-timer-event-end,"V8.External",82270
+timer-event-start,V8.GCScavenger,82255
+timer-event-start,V8.External,82266
+timer-event-end,V8.External,82270
tick,0x2b858631,82290,0,0x2b80a276,0,0x2b839e65,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.External",82339
-timer-event-end,"V8.External",82349
-timer-event-end,"V8.GCScavenger",82353
+timer-event-start,V8.External,82339
+timer-event-end,V8.External,82349
+timer-event-end,V8.GCScavenger,82353
tick,0x2b82f581,83363,0,0x2b84539a,0,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x808cf8f,84440,0,0x90dabb0,0,0x2b85599a,0x2b845482,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",85035
-timer-event-start,"V8.External",85046
-timer-event-end,"V8.External",85050
-timer-event-start,"V8.External",85093
-timer-event-end,"V8.External",85099
-timer-event-end,"V8.GCScavenger",85103
+timer-event-start,V8.GCScavenger,85035
+timer-event-start,V8.External,85046
+timer-event-end,V8.External,85050
+timer-event-start,V8.External,85093
+timer-event-end,V8.External,85099
+timer-event-end,V8.GCScavenger,85103
tick,0x2b829c56,85495,0,0x3e60ce29,0,0x2b852252,0x2b8454f6,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x808b74f,86567,0,0x2f308081,0,0x2b8537a0,0x2b8456a8,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b85d04c,87632,0,0x2b839f55,0,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",87755
-timer-event-start,"V8.External",87766
-timer-event-end,"V8.External",87770
-timer-event-start,"V8.External",87822
-timer-event-end,"V8.External",87829
-timer-event-end,"V8.GCScavenger",87833
-timer-event-start,"V8.RecompileSynchronous",88294
-timer-event-start,"V8.ParseLazyMicroSeconds",88303
-timer-event-end,"V8.ParseLazyMicroSeconds",88361
-code-creation,LazyCompile,0,0x2b85d420,1221,"sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21",0x2f33a370,~
-timer-event-start,"V8.ParseLazyMicroSeconds",88473
-timer-event-end,"V8.ParseLazyMicroSeconds",88492
-timer-event-start,"V8.ParseLazyMicroSeconds",88532
-timer-event-end,"V8.ParseLazyMicroSeconds",88545
-timer-event-start,"V8.ParseLazyMicroSeconds",88572
-timer-event-end,"V8.ParseLazyMicroSeconds",88588
-timer-event-start,"V8.ParseLazyMicroSeconds",88612
-timer-event-end,"V8.ParseLazyMicroSeconds",88645
-timer-event-start,"V8.ParseLazyMicroSeconds",88688
-timer-event-end,"V8.ParseLazyMicroSeconds",88714
+timer-event-start,V8.GCScavenger,87755
+timer-event-start,V8.External,87766
+timer-event-end,V8.External,87770
+timer-event-start,V8.External,87822
+timer-event-end,V8.External,87829
+timer-event-end,V8.GCScavenger,87833
+timer-event-start,V8.RecompileSynchronous,88294
+timer-event-start,V8.ParseLazyMicroSeconds,88303
+timer-event-end,V8.ParseLazyMicroSeconds,88361
+code-creation,LazyCompile,0,0x2b85d420,1221,sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21,0x2f33a370,~
+timer-event-start,V8.ParseLazyMicroSeconds,88473
+timer-event-end,V8.ParseLazyMicroSeconds,88492
+timer-event-start,V8.ParseLazyMicroSeconds,88532
+timer-event-end,V8.ParseLazyMicroSeconds,88545
+timer-event-start,V8.ParseLazyMicroSeconds,88572
+timer-event-end,V8.ParseLazyMicroSeconds,88588
+timer-event-start,V8.ParseLazyMicroSeconds,88612
+timer-event-end,V8.ParseLazyMicroSeconds,88645
+timer-event-start,V8.ParseLazyMicroSeconds,88688
+timer-event-end,V8.ParseLazyMicroSeconds,88714
tick,0x81fc61b,88727,0,0xff81ebbc,2,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.ParseLazyMicroSeconds",88792
-timer-event-end,"V8.ParseLazyMicroSeconds",88867
-timer-event-start,"V8.ParseLazyMicroSeconds",88951
-timer-event-end,"V8.ParseLazyMicroSeconds",88967
-timer-event-start,"V8.ParseLazyMicroSeconds",88996
-timer-event-end,"V8.ParseLazyMicroSeconds",89012
-timer-event-end,"V8.RecompileSynchronous",89134
-timer-event-start,"V8.RecompileConcurrent",89160
-timer-event-start,"V8.RecompileSynchronous",89215
-timer-event-start,"V8.ParseLazyMicroSeconds",89224
-timer-event-end,"V8.ParseLazyMicroSeconds",89245
-code-creation,LazyCompile,0,0x2b85d900,585,"sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339c50,~
-timer-event-start,"V8.ParseLazyMicroSeconds",89309
-timer-event-end,"V8.ParseLazyMicroSeconds",89326
-timer-event-start,"V8.ParseLazyMicroSeconds",89356
-timer-event-end,"V8.ParseLazyMicroSeconds",89369
-timer-event-start,"V8.ParseLazyMicroSeconds",89391
-timer-event-end,"V8.ParseLazyMicroSeconds",89406
-timer-event-start,"V8.ParseLazyMicroSeconds",89433
-timer-event-end,"V8.ParseLazyMicroSeconds",89445
-timer-event-end,"V8.RecompileSynchronous",89485
-timer-event-start,"V8.RecompileSynchronous",89730
-timer-event-start,"V8.ParseLazyMicroSeconds",89740
+timer-event-start,V8.ParseLazyMicroSeconds,88792
+timer-event-end,V8.ParseLazyMicroSeconds,88867
+timer-event-start,V8.ParseLazyMicroSeconds,88951
+timer-event-end,V8.ParseLazyMicroSeconds,88967
+timer-event-start,V8.ParseLazyMicroSeconds,88996
+timer-event-end,V8.ParseLazyMicroSeconds,89012
+timer-event-end,V8.RecompileSynchronous,89134
+timer-event-start,V8.RecompileConcurrent,89160
+timer-event-start,V8.RecompileSynchronous,89215
+timer-event-start,V8.ParseLazyMicroSeconds,89224
+timer-event-end,V8.ParseLazyMicroSeconds,89245
+code-creation,LazyCompile,0,0x2b85d900,585,sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339c50,~
+timer-event-start,V8.ParseLazyMicroSeconds,89309
+timer-event-end,V8.ParseLazyMicroSeconds,89326
+timer-event-start,V8.ParseLazyMicroSeconds,89356
+timer-event-end,V8.ParseLazyMicroSeconds,89369
+timer-event-start,V8.ParseLazyMicroSeconds,89391
+timer-event-end,V8.ParseLazyMicroSeconds,89406
+timer-event-start,V8.ParseLazyMicroSeconds,89433
+timer-event-end,V8.ParseLazyMicroSeconds,89445
+timer-event-end,V8.RecompileSynchronous,89485
+timer-event-start,V8.RecompileSynchronous,89730
+timer-event-start,V8.ParseLazyMicroSeconds,89740
tick,0x81168ba,89761,0,0x90d5060,0,0x2b85056f,0x2b85cd2d,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.ParseLazyMicroSeconds",89805
-code-creation,LazyCompile,0,0x2b85db60,1838,"sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20",0x2f33a310,~
-timer-event-start,"V8.ParseLazyMicroSeconds",89969
-timer-event-end,"V8.ParseLazyMicroSeconds",89990
-timer-event-start,"V8.ParseLazyMicroSeconds",90016
-timer-event-end,"V8.ParseLazyMicroSeconds",90042
-timer-event-start,"V8.ParseLazyMicroSeconds",90084
-timer-event-end,"V8.ParseLazyMicroSeconds",90098
-timer-event-start,"V8.ParseLazyMicroSeconds",90129
-timer-event-end,"V8.ParseLazyMicroSeconds",90170
-timer-event-start,"V8.ParseLazyMicroSeconds",90271
-timer-event-end,"V8.ParseLazyMicroSeconds",90286
-timer-event-start,"V8.ParseLazyMicroSeconds",90326
-timer-event-end,"V8.ParseLazyMicroSeconds",90344
-timer-event-end,"V8.RecompileSynchronous",90480
+timer-event-end,V8.ParseLazyMicroSeconds,89805
+code-creation,LazyCompile,0,0x2b85db60,1838,sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20,0x2f33a310,~
+timer-event-start,V8.ParseLazyMicroSeconds,89969
+timer-event-end,V8.ParseLazyMicroSeconds,89990
+timer-event-start,V8.ParseLazyMicroSeconds,90016
+timer-event-end,V8.ParseLazyMicroSeconds,90042
+timer-event-start,V8.ParseLazyMicroSeconds,90084
+timer-event-end,V8.ParseLazyMicroSeconds,90098
+timer-event-start,V8.ParseLazyMicroSeconds,90129
+timer-event-end,V8.ParseLazyMicroSeconds,90170
+timer-event-start,V8.ParseLazyMicroSeconds,90271
+timer-event-end,V8.ParseLazyMicroSeconds,90286
+timer-event-start,V8.ParseLazyMicroSeconds,90326
+timer-event-end,V8.ParseLazyMicroSeconds,90344
+timer-event-end,V8.RecompileSynchronous,90480
tick,0x2b8596f9,90829,0,0x8,0,0x2b852252,0x2b8454f6,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.RecompileConcurrent",91133
-timer-event-start,"V8.RecompileConcurrent",91145
-timer-event-start,"V8.RecompileSynchronous",91197
-code-creation,Stub,2,0x2b85e2a0,1800,"RecordWriteStub"
-code-creation,Stub,2,0x2b85e9c0,1805,"RecordWriteStub"
-code-creation,Stub,2,0x2b85f0e0,1785,"RecordWriteStub"
-code-creation,Stub,2,0x2b85f7e0,1797,"RecordWriteStub"
-timer-event-end,"V8.RecompileConcurrent",91529
-timer-event-start,"V8.RecompileConcurrent",91540
-code-creation,Stub,2,0x2b85ff00,1789,"RecordWriteStub"
-code-creation,Stub,2,0x2b860600,1805,"RecordWriteStub"
-code-creation,LazyCompile,1,0x2b860d20,9288,"sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21",0x2f33a370,*
-timer-event-end,"V8.RecompileSynchronous",91729
-timer-event-start,"V8.RecompileSynchronous",91735
-code-creation,LazyCompile,1,0x2b863180,2119,"sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339c50,*
-timer-event-end,"V8.RecompileSynchronous",91833
+timer-event-end,V8.RecompileConcurrent,91133
+timer-event-start,V8.RecompileConcurrent,91145
+timer-event-start,V8.RecompileSynchronous,91197
+code-creation,Stub,2,0x2b85e2a0,1800,RecordWriteStub
+code-creation,Stub,2,0x2b85e9c0,1805,RecordWriteStub
+code-creation,Stub,2,0x2b85f0e0,1785,RecordWriteStub
+code-creation,Stub,2,0x2b85f7e0,1797,RecordWriteStub
+timer-event-end,V8.RecompileConcurrent,91529
+timer-event-start,V8.RecompileConcurrent,91540
+code-creation,Stub,2,0x2b85ff00,1789,RecordWriteStub
+code-creation,Stub,2,0x2b860600,1805,RecordWriteStub
+code-creation,LazyCompile,1,0x2b860d20,9288,sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21,0x2f33a370,*
+timer-event-end,V8.RecompileSynchronous,91729
+timer-event-start,V8.RecompileSynchronous,91735
+code-creation,LazyCompile,1,0x2b863180,2119,sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10,0x2f339c50,*
+timer-event-end,V8.RecompileSynchronous,91833
tick,0xf74c34b6,91883,0,0x90ebc51,2,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-deopt,91990,9312
-code-creation,LoadPolymorphicIC,5,0x2b8639e0,105,"length"
+code-creation,LoadPolymorphicIC,5,0x2b8639e0,105,length
tick,0x2b859a99,92950,0,0x4,0,0x2b852252,0x2b8455f6,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",93234
-timer-event-start,"V8.External",93245
-timer-event-end,"V8.External",93249
-timer-event-end,"V8.RecompileConcurrent",93304
-timer-event-start,"V8.External",93319
-timer-event-end,"V8.External",93327
-timer-event-end,"V8.GCScavenger",93331
-timer-event-start,"V8.RecompileSynchronous",93353
-code-creation,Stub,2,0x2b863a60,1800,"RecordWriteStub"
-code-creation,Stub,2,0x2b864180,1780,"RecordWriteStub"
-code-creation,LazyCompile,1,0x2b864880,7990,"sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20",0x2f33a310,*
-timer-event-end,"V8.RecompileSynchronous",93732
-code-creation,LoadPolymorphicIC,5,0x2b8667c0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b866840,105,"length"
-code-creation,LoadPolymorphicIC,5,0x2b8668c0,105,"length"
+timer-event-start,V8.GCScavenger,93234
+timer-event-start,V8.External,93245
+timer-event-end,V8.External,93249
+timer-event-end,V8.RecompileConcurrent,93304
+timer-event-start,V8.External,93319
+timer-event-end,V8.External,93327
+timer-event-end,V8.GCScavenger,93331
+timer-event-start,V8.RecompileSynchronous,93353
+code-creation,Stub,2,0x2b863a60,1800,RecordWriteStub
+code-creation,Stub,2,0x2b864180,1780,RecordWriteStub
+code-creation,LazyCompile,1,0x2b864880,7990,sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20,0x2f33a310,*
+timer-event-end,V8.RecompileSynchronous,93732
+code-creation,LoadPolymorphicIC,5,0x2b8667c0,105,length
+code-creation,LoadPolymorphicIC,5,0x2b866840,105,length
+code-creation,LoadPolymorphicIC,5,0x2b8668c0,105,length
tick,0x2b848cbd,94006,0,0xa,0,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,LoadPolymorphicIC,5,0x2b866940,105,"length"
+code-creation,LoadPolymorphicIC,5,0x2b866940,105,length
tick,0x8231000,95074,0,0x90d5060,0,0x2b848e54,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x83b9d75,96149,0,0x527db159,0,0x2b82364b,0x2b83a13d,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",96446
-timer-event-start,"V8.External",96457
-timer-event-end,"V8.External",96461
-timer-event-start,"V8.External",96513
-timer-event-end,"V8.External",96520
-timer-event-end,"V8.GCScavenger",96524
+timer-event-start,V8.GCScavenger,96446
+timer-event-start,V8.External,96457
+timer-event-end,V8.External,96461
+timer-event-start,V8.External,96513
+timer-event-end,V8.External,96520
+timer-event-end,V8.GCScavenger,96524
tick,0x2b85960c,97208,0,0xfee0,0,0x2b852252,0x2b848c4b,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b83b5b3,98279,0,0x2b851c24,0,0x2b85cd19,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",99128
-timer-event-start,"V8.External",99139
-timer-event-end,"V8.External",99143
-timer-event-start,"V8.External",99189
-timer-event-end,"V8.External",99195
-timer-event-end,"V8.GCScavenger",99199
+timer-event-start,V8.GCScavenger,99128
+timer-event-start,V8.External,99139
+timer-event-end,V8.External,99143
+timer-event-start,V8.External,99189
+timer-event-end,V8.External,99195
+timer-event-end,V8.GCScavenger,99199
tick,0x821c54b,99345,0,0x5270e530,0,0x2b83bd35,0x2b85ccab,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b85a010,100416,0,0x0,0,0x2b852252,0x2b865810,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x808b5f1,101472,0,0x2f308081,0,0x2b8537a0,0x2b84a325,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",101820
-timer-event-start,"V8.External",101831
-timer-event-end,"V8.External",101835
-timer-event-start,"V8.External",101885
-timer-event-end,"V8.External",101891
-timer-event-end,"V8.GCScavenger",101895
+timer-event-start,V8.GCScavenger,101820
+timer-event-start,V8.External,101831
+timer-event-end,V8.External,101835
+timer-event-start,V8.External,101885
+timer-event-end,V8.External,101891
+timer-event-end,V8.GCScavenger,101895
tick,0x2b85a376,102533,0,0x9c155cd6,0,0x2b852252,0x2b865810,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b863678,103607,0,0x4c0,0,0x2b83a0df,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",104515
-timer-event-start,"V8.External",104529
-timer-event-end,"V8.External",104533
-timer-event-start,"V8.External",104581
-timer-event-end,"V8.External",104588
-timer-event-end,"V8.GCScavenger",104592
+timer-event-start,V8.GCScavenger,104515
+timer-event-start,V8.External,104529
+timer-event-end,V8.External,104533
+timer-event-start,V8.External,104581
+timer-event-end,V8.External,104588
+timer-event-end,V8.GCScavenger,104592
tick,0x2b85a2dc,104658,0,0x29,0,0x2b852252,0x2b8658f7,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b859e24,105742,0,0x80c5e06,0,0x2b852252,0x2b8654d2,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-deopt,106093,4416
-code-creation,Stub,12,0x2b8669c0,190,"BinaryOpStub_MOD_Alloc_Smi+Smi"
+code-creation,Stub,12,0x2b8669c0,190,BinaryOpStub_MOD_Alloc_Smi+Smi
tick,0x2b8514f9,106811,0,0xd1b6f5df,0,0x2b8657f1,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",107254
-timer-event-start,"V8.External",107264
-timer-event-end,"V8.External",107268
-timer-event-start,"V8.External",107317
-timer-event-end,"V8.External",107323
-timer-event-end,"V8.GCScavenger",107327
-timer-event-start,"V8.RecompileSynchronous",107462
-timer-event-start,"V8.ParseLazyMicroSeconds",107471
-timer-event-end,"V8.ParseLazyMicroSeconds",107537
-timer-event-end,"V8.RecompileSynchronous",107729
-timer-event-start,"V8.RecompileConcurrent",107764
+timer-event-start,V8.GCScavenger,107254
+timer-event-start,V8.External,107264
+timer-event-end,V8.External,107268
+timer-event-start,V8.External,107317
+timer-event-end,V8.External,107323
+timer-event-end,V8.GCScavenger,107327
+timer-event-start,V8.RecompileSynchronous,107462
+timer-event-start,V8.ParseLazyMicroSeconds,107471
+timer-event-end,V8.ParseLazyMicroSeconds,107537
+timer-event-end,V8.RecompileSynchronous,107729
+timer-event-start,V8.RecompileConcurrent,107764
tick,0x2b859da9,107874,0,0x2,0,0x2b852252,0x2b848b65,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.RecompileConcurrent",108795
-timer-event-start,"V8.RecompileSynchronous",108885
+timer-event-end,V8.RecompileConcurrent,108795
+timer-event-start,V8.RecompileSynchronous,108885
tick,0x2b859d60,108935,0,0x0,0,0x2b852252,0x2b8658f7,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,Stub,2,0x2b866a80,783,"RecordWriteStub"
-code-creation,Stub,2,0x2b866da0,1772,"RecordWriteStub"
-code-creation,LazyCompile,1,0x2b8674a0,4040,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,*
-timer-event-end,"V8.RecompileSynchronous",109204
+code-creation,Stub,2,0x2b866a80,783,RecordWriteStub
+code-creation,Stub,2,0x2b866da0,1772,RecordWriteStub
+code-creation,LazyCompile,1,0x2b8674a0,4040,sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4,0x2f339830,*
+timer-event-end,V8.RecompileSynchronous,109204
tick,0x2b851bd9,110005,0,0x68,0,0x2b85cd19,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",110651
-timer-event-start,"V8.External",110662
-timer-event-end,"V8.External",110666
-timer-event-start,"V8.External",110715
-timer-event-end,"V8.External",110721
-timer-event-end,"V8.GCScavenger",110725
+timer-event-start,V8.GCScavenger,110651
+timer-event-start,V8.External,110662
+timer-event-end,V8.External,110666
+timer-event-start,V8.External,110715
+timer-event-end,V8.External,110721
+timer-event-end,V8.GCScavenger,110725
tick,0x2b85a1d8,111072,0,0x0,0,0x2b852252,0x2b8658f7,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b848d38,112161,0,0x4c,0,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.RecompileSynchronous",112323
-timer-event-start,"V8.ParseLazyMicroSeconds",112335
-timer-event-end,"V8.ParseLazyMicroSeconds",112387
-timer-event-start,"V8.ParseLazyMicroSeconds",112444
-timer-event-end,"V8.ParseLazyMicroSeconds",112463
-timer-event-start,"V8.ParseLazyMicroSeconds",112496
-timer-event-end,"V8.ParseLazyMicroSeconds",112509
-timer-event-start,"V8.ParseLazyMicroSeconds",112536
-timer-event-end,"V8.ParseLazyMicroSeconds",112552
-timer-event-start,"V8.ParseLazyMicroSeconds",112576
-timer-event-end,"V8.ParseLazyMicroSeconds",112598
-timer-event-start,"V8.ParseLazyMicroSeconds",112639
-timer-event-end,"V8.ParseLazyMicroSeconds",112653
-timer-event-start,"V8.ParseLazyMicroSeconds",112685
-timer-event-end,"V8.ParseLazyMicroSeconds",112722
-timer-event-start,"V8.ParseLazyMicroSeconds",112803
-timer-event-end,"V8.ParseLazyMicroSeconds",112819
-timer-event-start,"V8.ParseLazyMicroSeconds",112848
-timer-event-end,"V8.ParseLazyMicroSeconds",112863
-timer-event-end,"V8.RecompileSynchronous",112986
-timer-event-start,"V8.RecompileConcurrent",113012
+timer-event-start,V8.RecompileSynchronous,112323
+timer-event-start,V8.ParseLazyMicroSeconds,112335
+timer-event-end,V8.ParseLazyMicroSeconds,112387
+timer-event-start,V8.ParseLazyMicroSeconds,112444
+timer-event-end,V8.ParseLazyMicroSeconds,112463
+timer-event-start,V8.ParseLazyMicroSeconds,112496
+timer-event-end,V8.ParseLazyMicroSeconds,112509
+timer-event-start,V8.ParseLazyMicroSeconds,112536
+timer-event-end,V8.ParseLazyMicroSeconds,112552
+timer-event-start,V8.ParseLazyMicroSeconds,112576
+timer-event-end,V8.ParseLazyMicroSeconds,112598
+timer-event-start,V8.ParseLazyMicroSeconds,112639
+timer-event-end,V8.ParseLazyMicroSeconds,112653
+timer-event-start,V8.ParseLazyMicroSeconds,112685
+timer-event-end,V8.ParseLazyMicroSeconds,112722
+timer-event-start,V8.ParseLazyMicroSeconds,112803
+timer-event-end,V8.ParseLazyMicroSeconds,112819
+timer-event-start,V8.ParseLazyMicroSeconds,112848
+timer-event-end,V8.ParseLazyMicroSeconds,112863
+timer-event-end,V8.RecompileSynchronous,112986
+timer-event-start,V8.RecompileConcurrent,113012
tick,0x2b867dc3,113148,0,0x100,0,0x2b839e65,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",114171
-timer-event-start,"V8.External",114182
-timer-event-end,"V8.External",114186
+timer-event-start,V8.GCScavenger,114171
+timer-event-start,V8.External,114182
+timer-event-end,V8.External,114186
tick,0x82c920e,114254,0,0x0,1
-timer-event-start,"V8.External",114309
-timer-event-end,"V8.External",114330
-timer-event-end,"V8.GCScavenger",114350
-timer-event-end,"V8.RecompileConcurrent",115013
-timer-event-start,"V8.RecompileSynchronous",115032
+timer-event-start,V8.External,114309
+timer-event-end,V8.External,114330
+timer-event-end,V8.GCScavenger,114350
+timer-event-end,V8.RecompileConcurrent,115013
+timer-event-start,V8.RecompileSynchronous,115032
tick,0x8369515,115325,0,0x9135ff0,2,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,LazyCompile,1,0x5120a000,9284,"sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21",0x2f33a370,*
-timer-event-end,"V8.RecompileSynchronous",115434
+code-creation,LazyCompile,1,0x5120a000,9284,sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21,0x2f33a370,*
+timer-event-end,V8.RecompileSynchronous,115434
code-deopt,115666,9312
tick,0x2b85056f,116392,0,0x52f8f619,0,0x2b85cd2d,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b812420,117465,0,0x2b85592e,0,0x2b86573b,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",117571
-timer-event-start,"V8.External",117583
-timer-event-end,"V8.External",117587
-timer-event-start,"V8.External",117636
-timer-event-end,"V8.External",117642
-timer-event-end,"V8.GCScavenger",117646
+timer-event-start,V8.GCScavenger,117571
+timer-event-start,V8.External,117583
+timer-event-end,V8.External,117587
+timer-event-start,V8.External,117636
+timer-event-end,V8.External,117642
+timer-event-end,V8.GCScavenger,117646
tick,0x811db13,118481,0,0x90d5060,0,0x2b85cd53,0x2b839eb3,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x80c4222,119548,0,0x90d5060,0,0x2b85599a,0x2b86573b,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",120368
-timer-event-start,"V8.External",120379
-timer-event-end,"V8.External",120383
-timer-event-start,"V8.External",120428
-timer-event-end,"V8.External",120434
-timer-event-end,"V8.GCScavenger",120438
+timer-event-start,V8.GCScavenger,120368
+timer-event-start,V8.External,120379
+timer-event-end,V8.External,120383
+timer-event-start,V8.External,120428
+timer-event-end,V8.External,120434
+timer-event-end,V8.GCScavenger,120438
tick,0x2b867eb3,120610,0,0x100,0,0x2b839e65,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b859eae,121680,0,0x80c5e06,0,0x2b852252,0x2b8658f7,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b85981d,122808,0,0x21,0,0x2b852252,0x2b848c4b,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",123188
-timer-event-start,"V8.External",123199
-timer-event-end,"V8.External",123203
-timer-event-start,"V8.External",123248
-timer-event-end,"V8.External",123254
-timer-event-end,"V8.GCScavenger",123258
+timer-event-start,V8.GCScavenger,123188
+timer-event-start,V8.External,123199
+timer-event-end,V8.External,123203
+timer-event-start,V8.External,123248
+timer-event-end,V8.External,123254
+timer-event-end,V8.GCScavenger,123258
tick,0x2b859ca8,123878,0,0x0,0,0x2b852252,0x2b865810,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x80fa2d1,124943,0,0x5279ab29,0,0x2b83bd35,0x2b85ccab,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",126000
-timer-event-start,"V8.External",126017
-timer-event-end,"V8.External",126022
+timer-event-start,V8.GCScavenger,126000
+timer-event-start,V8.External,126017
+timer-event-end,V8.External,126022
tick,0x808b6b0,126038,0,0x2f308081,0,0x2b855838,0x2b848b0a,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.External",126109
-timer-event-end,"V8.External",126129
-timer-event-end,"V8.GCScavenger",126139
+timer-event-start,V8.External,126109
+timer-event-end,V8.External,126129
+timer-event-end,V8.GCScavenger,126139
tick,0x808b656,127081,0,0x2f308081,0,0x2b8658be,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b85a1f0,128141,0,0x1f,0,0x2b852252,0x2b848c4b,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",128896
-timer-event-start,"V8.External",128907
-timer-event-end,"V8.External",128911
-timer-event-start,"V8.External",128958
-timer-event-end,"V8.External",128964
-timer-event-end,"V8.GCScavenger",128968
+timer-event-start,V8.GCScavenger,128896
+timer-event-start,V8.External,128907
+timer-event-end,V8.External,128911
+timer-event-start,V8.External,128958
+timer-event-end,V8.External,128964
+timer-event-end,V8.GCScavenger,128968
tick,0x2b867d2b,129212,0,0x100,0,0x2b839e65,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b859a7b,130285,0,0x80c5e06,0,0x2b852252,0x2b865810,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b859c3b,131350,0,0x0,0,0x2b852252,0x2b8658f7,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",131713
-timer-event-start,"V8.External",131723
-timer-event-end,"V8.External",131727
-timer-event-start,"V8.External",131772
-timer-event-end,"V8.External",131778
-timer-event-end,"V8.GCScavenger",131782
+timer-event-start,V8.GCScavenger,131713
+timer-event-start,V8.External,131723
+timer-event-end,V8.External,131727
+timer-event-start,V8.External,131772
+timer-event-end,V8.External,131778
+timer-event-end,V8.GCScavenger,131782
tick,0x80c413f,132412,0,0x90d5060,0,0x2b855ca8,0x2b8446a3,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x80a1baf,133466,0,0x811e5c0,0,0x2b848b2e,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",134532
+timer-event-start,V8.GCScavenger,134532
tick,0x2b85d126,134550,0,0x2,0,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.External",134619
-timer-event-end,"V8.External",134636
-timer-event-start,"V8.External",134702
-timer-event-end,"V8.External",134708
-timer-event-end,"V8.GCScavenger",134712
+timer-event-start,V8.External,134619
+timer-event-end,V8.External,134636
+timer-event-start,V8.External,134702
+timer-event-end,V8.External,134708
+timer-event-end,V8.GCScavenger,134712
tick,0x2b8594d3,135617,0,0x80c5e06,0,0x2b852252,0x2b8654d2,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.RecompileSynchronous",135776
-timer-event-start,"V8.ParseLazyMicroSeconds",135787
-timer-event-end,"V8.ParseLazyMicroSeconds",135838
-timer-event-start,"V8.ParseLazyMicroSeconds",135894
-timer-event-end,"V8.ParseLazyMicroSeconds",135913
-timer-event-start,"V8.ParseLazyMicroSeconds",135946
-timer-event-end,"V8.ParseLazyMicroSeconds",135960
-timer-event-start,"V8.ParseLazyMicroSeconds",135987
-timer-event-end,"V8.ParseLazyMicroSeconds",136002
-timer-event-start,"V8.ParseLazyMicroSeconds",136026
-timer-event-end,"V8.ParseLazyMicroSeconds",136048
-timer-event-start,"V8.ParseLazyMicroSeconds",136089
-timer-event-end,"V8.ParseLazyMicroSeconds",136103
-timer-event-start,"V8.ParseLazyMicroSeconds",136135
-timer-event-end,"V8.ParseLazyMicroSeconds",136172
-timer-event-start,"V8.ParseLazyMicroSeconds",136253
-timer-event-end,"V8.ParseLazyMicroSeconds",136270
-timer-event-start,"V8.ParseLazyMicroSeconds",136301
-timer-event-end,"V8.ParseLazyMicroSeconds",136317
-timer-event-end,"V8.RecompileSynchronous",136440
-timer-event-start,"V8.RecompileConcurrent",136466
+timer-event-start,V8.RecompileSynchronous,135776
+timer-event-start,V8.ParseLazyMicroSeconds,135787
+timer-event-end,V8.ParseLazyMicroSeconds,135838
+timer-event-start,V8.ParseLazyMicroSeconds,135894
+timer-event-end,V8.ParseLazyMicroSeconds,135913
+timer-event-start,V8.ParseLazyMicroSeconds,135946
+timer-event-end,V8.ParseLazyMicroSeconds,135960
+timer-event-start,V8.ParseLazyMicroSeconds,135987
+timer-event-end,V8.ParseLazyMicroSeconds,136002
+timer-event-start,V8.ParseLazyMicroSeconds,136026
+timer-event-end,V8.ParseLazyMicroSeconds,136048
+timer-event-start,V8.ParseLazyMicroSeconds,136089
+timer-event-end,V8.ParseLazyMicroSeconds,136103
+timer-event-start,V8.ParseLazyMicroSeconds,136135
+timer-event-end,V8.ParseLazyMicroSeconds,136172
+timer-event-start,V8.ParseLazyMicroSeconds,136253
+timer-event-end,V8.ParseLazyMicroSeconds,136270
+timer-event-start,V8.ParseLazyMicroSeconds,136301
+timer-event-end,V8.ParseLazyMicroSeconds,136317
+timer-event-end,V8.RecompileSynchronous,136440
+timer-event-start,V8.RecompileConcurrent,136466
tick,0x2b859c6e,136680,0,0x0,0,0x2b852252,0x2b8658f7,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-code-creation,LoadPolymorphicIC,5,0x5120c460,105,"length"
-code-creation,LoadPolymorphicIC,5,0x5120c4e0,117,"length"
-code-creation,LoadPolymorphicIC,5,0x5120c560,117,"length"
-code-creation,LoadPolymorphicIC,5,0x5120c5e0,105,"length"
-code-creation,LoadPolymorphicIC,5,0x5120c660,105,"length"
+code-creation,LoadPolymorphicIC,5,0x5120c460,105,length
+code-creation,LoadPolymorphicIC,5,0x5120c4e0,117,length
+code-creation,LoadPolymorphicIC,5,0x5120c560,117,length
+code-creation,LoadPolymorphicIC,5,0x5120c5e0,105,length
+code-creation,LoadPolymorphicIC,5,0x5120c660,105,length
tick,0x2b855ece,137742,0,0x527d0961,0,0x2b8446a3,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",138203
-timer-event-start,"V8.External",138213
-timer-event-end,"V8.External",138217
-timer-event-start,"V8.External",138271
-timer-event-end,"V8.External",138277
-timer-event-end,"V8.GCScavenger",138281
-timer-event-end,"V8.RecompileConcurrent",138393
-timer-event-start,"V8.RecompileSynchronous",138412
-code-creation,LazyCompile,1,0x5120c6e0,9284,"sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21",0x2f33a370,*
-timer-event-end,"V8.RecompileSynchronous",138781
+timer-event-start,V8.GCScavenger,138203
+timer-event-start,V8.External,138213
+timer-event-end,V8.External,138217
+timer-event-start,V8.External,138271
+timer-event-end,V8.External,138277
+timer-event-end,V8.GCScavenger,138281
+timer-event-end,V8.RecompileConcurrent,138393
+timer-event-start,V8.RecompileSynchronous,138412
+code-creation,LazyCompile,1,0x5120c6e0,9284,sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21,0x2f33a370,*
+timer-event-end,V8.RecompileSynchronous,138781
tick,0x83647f0,138812,0,0xf633ddf4,2,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-deopt,138895,9312
tick,0x2b851212,139867,0,0xff81fd00,0,0x2b8657f1,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x82ebff7,140937,0,0x2f33ca81,0,0x2b85056f,0x2b85cd2d,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",141540
-timer-event-start,"V8.External",141551
-timer-event-end,"V8.External",141555
-timer-event-start,"V8.External",141605
-timer-event-end,"V8.External",141611
-timer-event-end,"V8.GCScavenger",141615
+timer-event-start,V8.GCScavenger,141540
+timer-event-start,V8.External,141551
+timer-event-end,V8.External,141555
+timer-event-start,V8.External,141605
+timer-event-end,V8.External,141611
+timer-event-end,V8.GCScavenger,141615
tick,0x2b85a0a6,142005,0,0x0,0,0x2b852252,0x2b865810,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x8116886,143088,0,0x90d5060,0,0x2b85b862,0x2b848b8c,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b83dbaf,144137,0,0x2b85cd53,0,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.GCScavenger",144365
-timer-event-start,"V8.External",144376
-timer-event-end,"V8.External",144380
-timer-event-start,"V8.External",144428
-timer-event-end,"V8.External",144434
-timer-event-end,"V8.GCScavenger",144438
+timer-event-start,V8.GCScavenger,144365
+timer-event-start,V8.External,144376
+timer-event-end,V8.External,144380
+timer-event-start,V8.External,144428
+timer-event-end,V8.External,144434
+timer-event-end,V8.GCScavenger,144438
tick,0x81168ba,145212,0,0x90d5060,0,0x2b85056f,0x2b85cd2d,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b851430,146268,0,0xff81fd00,0,0x2b8657f1,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.ParseLazyMicroSeconds",146339
-timer-event-end,"V8.ParseLazyMicroSeconds",146358
-timer-event-start,"V8.CompileLazy",146364
-timer-event-start,"V8.CompileFullCode",146369
-timer-event-end,"V8.CompileFullCode",146386
-code-creation,LazyCompile,0,0x5120eb40,212," bsuite/kraken-once/stanford-crypto-ccm.js:172",0x2f33dd88,~
-timer-event-end,"V8.CompileLazy",146400
-code-creation,Stub,12,0x5120ec20,311,"BinaryOpStub_SUB_Alloc_Generic+Generic"
-timer-event-start,"V8.ParseLazyMicroSeconds",146431
-timer-event-end,"V8.ParseLazyMicroSeconds",146461
-timer-event-start,"V8.CompileLazy",146467
-timer-event-start,"V8.CompileFullCode",146475
-timer-event-end,"V8.CompileFullCode",146495
-code-creation,LazyCompile,0,0x5120ed60,580,"NonNumberToNumber native runtime.js:548",0x44225f78,~
-timer-event-end,"V8.CompileLazy",146508
-code-creation,Stub,2,0x5120efc0,98,"valueOf"
-code-creation,LoadPolymorphicIC,5,0x5120f040,117,"valueOf"
-code-creation,CallIC,7,0x5120f0c0,129,"ToNumber"
-timer-event-start,"V8.ParseLazyMicroSeconds",146556
-timer-event-end,"V8.ParseLazyMicroSeconds",146569
-timer-event-start,"V8.CompileLazy",146574
-timer-event-start,"V8.CompileFullCode",146580
-timer-event-end,"V8.CompileFullCode",146591
-code-creation,LazyCompile,0,0x5120f160,208,"record bsuite/kraken-once/stanford-crypto-ccm.js:7229",0x2f339680,~
-timer-event-end,"V8.CompileLazy",146603
-timer-event-start,"V8.External",146613
-timer-event-end,"V8.External",146656
-timer-event-end,"V8.Execute",146662
-timer-event-start,"V8.RecompileConcurrent",146700
-timer-event-end,"V8.RecompileConcurrent",146738
-profiler,"end"
+timer-event-start,V8.ParseLazyMicroSeconds,146339
+timer-event-end,V8.ParseLazyMicroSeconds,146358
+timer-event-start,V8.CompileLazy,146364
+timer-event-start,V8.CompileFullCode,146369
+timer-event-end,V8.CompileFullCode,146386
+code-creation,LazyCompile,0,0x5120eb40,212, bsuite/kraken-once/stanford-crypto-ccm.js:172,0x2f33dd88,~
+timer-event-end,V8.CompileLazy,146400
+code-creation,Stub,12,0x5120ec20,311,BinaryOpStub_SUB_Alloc_Generic+Generic
+timer-event-start,V8.ParseLazyMicroSeconds,146431
+timer-event-end,V8.ParseLazyMicroSeconds,146461
+timer-event-start,V8.CompileLazy,146467
+timer-event-start,V8.CompileFullCode,146475
+timer-event-end,V8.CompileFullCode,146495
+code-creation,LazyCompile,0,0x5120ed60,580,NonNumberToNumber native runtime.js:548,0x44225f78,~
+timer-event-end,V8.CompileLazy,146508
+code-creation,Stub,2,0x5120efc0,98,valueOf
+code-creation,LoadPolymorphicIC,5,0x5120f040,117,valueOf
+code-creation,CallIC,7,0x5120f0c0,129,ToNumber
+timer-event-start,V8.ParseLazyMicroSeconds,146556
+timer-event-end,V8.ParseLazyMicroSeconds,146569
+timer-event-start,V8.CompileLazy,146574
+timer-event-start,V8.CompileFullCode,146580
+timer-event-end,V8.CompileFullCode,146591
+code-creation,LazyCompile,0,0x5120f160,208,record bsuite/kraken-once/stanford-crypto-ccm.js:7229,0x2f339680,~
+timer-event-end,V8.CompileLazy,146603
+timer-event-start,V8.External,146613
+timer-event-end,V8.External,146656
+timer-event-end,V8.Execute,146662
+timer-event-start,V8.RecompileConcurrent,146700
+timer-event-end,V8.RecompileConcurrent,146738
+profiler,end
diff --git a/deps/v8/test/mjsunit/tools/profviz.js b/deps/v8/test/mjsunit/tools/profviz.js
index 6ff0e2cb9e..fc0da5d4b0 100644
--- a/deps/v8/test/mjsunit/tools/profviz.js
+++ b/deps/v8/test/mjsunit/tools/profviz.js
@@ -28,7 +28,7 @@
// Load implementations from <project root>/tools.
// Files: tools/csvparser.js tools/splaytree.js tools/codemap.js
// Files: tools/consarray.js tools/profile.js tools/profile_view.js
-// Files: tools/logreader.js tools/tickprocessor.js
+// Files: tools/logreader.js tools/arguments.js tools/tickprocessor.js
// Files: tools/profviz/composer.js
// Env: TEST_FILE_NAME
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log b/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
index 0d7f39d29e..67511217e1 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
@@ -1,11 +1,11 @@
-shared-library,"shell",0x08048000,0x081ee000,0
-shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000,0
-shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000,0
-profiler,"begin",1
-code-creation,Stub,0,100,0x424260,348,"CompareStub_GE"
-code-creation,LazyCompile,0,101,0x2a8100,18535,"DrawQube 3d-cube.js:188",0xf43abcac,
-code-creation,LazyCompile,0,102,0x480100,3908,"DrawLine 3d-cube.js:17",0xf43abc50,
+shared-library,shell,0x08048000,0x081ee000,0
+shared-library,/lib32/libm-2.7.so,0xf7db6000,0xf7dd9000,0
+shared-library,ffffe000-fffff000,0xffffe000,0xfffff000,0
+profiler,begin,1
+code-creation,Stub,0,100,0x424260,348,CompareStub_GE
+code-creation,LazyCompile,0,101,0x2a8100,18535,DrawQube 3d-cube.js:188,0xf43abcac,
+code-creation,LazyCompile,0,102,0x480100,3908,DrawLine 3d-cube.js:17,0xf43abc50,
tick,0x424284,0,0,0x480600,0,0x2aaaa5
tick,0x42429f,0,0,0x480600,0,0x2aacb4
tick,0x48063d,0,0,0x2d0f7c,0,0x2aaec6
-profiler,"end"
+profiler,end
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.log b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
index 713790015d..049611e23b 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
@@ -1,14 +1,14 @@
-shared-library,"shell",0x08048000,0x081ee000,0
-shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000,0
-shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000,0
-profiler,"begin",1
-code-creation,Stub,0,100,0xf540a100,474,"CEntryStub"
-code-creation,Script,0,101,0xf541cd80,736,"exp.js"
-code-creation,Stub,0,102,0xf541d0e0,47,"RuntimeStub_Math_exp"
-code-creation,LazyCompile,0,103,0xf541d120,145,"exp native math.js:41"
+shared-library,shell,0x08048000,0x081ee000,0
+shared-library,/lib32/libm-2.7.so,0xf7db6000,0xf7dd9000,0
+shared-library,ffffe000-fffff000,0xffffe000,0xfffff000,0
+profiler,begin,1
+code-creation,Stub,0,100,0xf540a100,474,CEntryStub
+code-creation,Script,0,101,0xf541cd80,736,exp.js
+code-creation,Stub,0,102,0xf541d0e0,47,RuntimeStub_Math_exp
+code-creation,LazyCompile,0,103,0xf541d120,145,exp native math.js:41
function-creation,0xf441d280,0xf541d120
-code-creation,LoadIC,0,104,0xf541d280,117,"j"
-code-creation,LoadIC,0,105,0xf541d360,63,"i"
+code-creation,LoadIC,0,104,0xf541d280,117,j
+code-creation,LoadIC,0,105,0xf541d360,63,i
tick,0x80f82d1,0,0,0,0,0xf541ce5c
tick,0x80f89a1,0,0,0,0,0xf541ce5c
tick,0x8123b5c,0,0,0,0,0xf541d1a1,0xf541ceea
@@ -22,4 +22,4 @@ tick,0xf7dbc508,0,0,0,0,0xf541d1a1,0xf541ceea
tick,0xf7dbff21,0,0,0,0,0xf541d1a1,0xf541ceea
tick,0xf7edec90,0,0,0,0,0xf541d1a1,0xf541ceea
tick,0xffffe402,0,0,0,0
-profiler,"end"
+profiler,end
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index 4d113c009a..cf38985e78 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -28,14 +28,14 @@
// Load implementations from <project root>/tools.
// Files: tools/splaytree.js tools/codemap.js tools/csvparser.js
// Files: tools/consarray.js tools/profile.js tools/profile_view.js
-// Files: tools/logreader.js tools/tickprocessor.js
+// Files: tools/logreader.js tools/arguments.js tools/tickprocessor.js
// Env: TEST_FILE_NAME
(function testArgumentsProcessor() {
var p_default = new ArgumentsProcessor([]);
assertTrue(p_default.parse());
- assertEquals(ArgumentsProcessor.DEFAULTS, p_default.result());
+ assertEquals(p_default.getDefaultResults(), p_default.result());
var p_logFile = new ArgumentsProcessor(['logfile.log']);
assertTrue(p_logFile.parse());
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index 0a642f2ab7..71c6b10490 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -7,7 +7,7 @@
var stdlib = this;
function assertValidAsm(func) {
- assertTrue(%IsAsmWasmCode(func));
+ assertTrue(%IsAsmWasmCode(func), "must be valid asm code");
}
function assertWasm(expected, func, ffi) {
@@ -426,6 +426,23 @@ function TestContinueInDoWhileFalse() {
assertWasm(47, TestContinueInDoWhileFalse);
+function TestContinueInForLoop() {
+ "use asm";
+
+ function caller() {
+ var i = 0;
+ for (; (i|0) < 10; i = (i+1)|0) {
+ continue;
+ }
+ return 4711;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(4711, TestContinueInForLoop);
+
+
function TestNot() {
"use asm";
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index a8940af1d1..63d8eb0ca8 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -104,9 +104,13 @@ function VerifyBoundsCheck(func, memtype_size) {
assertTraps(kTrapMemOutOfBounds, () => func((maxSize + 1) * kPageSize, 5, 1));
}
+// Test many elements in the small range, make bigger steps later. This is still
+// O(2^n), but takes 213 steps to reach 2^32.
+const inc = i => i + Math.floor(i/10) + 1;
+
function Test32Op(operation, func) {
let i32 = new Uint32Array(memory.buffer);
- for (let i = 0; i < i32.length; i++) {
+ for (let i = 0; i < i32.length; i = inc(i)) {
let expected = 0x9cedf00d;
let value = 0x11111111;
i32[i] = expected;
@@ -118,7 +122,7 @@ function Test32Op(operation, func) {
function Test16Op(operation, func) {
let i16 = new Uint16Array(memory.buffer);
- for (let i = 0; i < i16.length; i++) {
+ for (let i = 0; i < i16.length; i = inc(i)) {
let expected = 0xd00d;
let value = 0x1111;
i16[i] = expected;
@@ -130,7 +134,7 @@ function Test16Op(operation, func) {
function Test8Op(operation, func) {
let i8 = new Uint8Array(memory.buffer);
- for (let i = 0; i < i8.length; i++) {
+ for (let i = 0; i < i8.length; i = inc(i)) {
let expected = 0xbe;
let value = 0x12;
i8[i] = expected;
@@ -249,7 +253,7 @@ function Test8Op(operation, func) {
})();
function TestCmpExchange(func, buffer, params, size) {
- for (let i = 0; i < buffer.length; i++) {
+ for (let i = 0; i < buffer.length; i = inc(i)) {
for (let j = 0; j < params.length; j++) {
for (let k = 0; k < params.length; k++) {
buffer[i] = params[j];
@@ -291,7 +295,7 @@ function TestCmpExchange(func, buffer, params, size) {
})();
function TestLoad(func, buffer, value, size) {
- for (let i = 0; i < buffer.length; i++) {
+ for (let i = 0; i < buffer.length; i = inc(i)) {
buffer[i] = value;
assertEquals(value, func(i * size) >>> 0);
}
@@ -323,7 +327,7 @@ function TestLoad(func, buffer, value, size) {
})();
function TestStore(func, buffer, value, size) {
- for (let i = 0; i < buffer.length; i++) {
+ for (let i = 0; i < buffer.length; i = inc(i)) {
func(i * size, value)
assertEquals(value, buffer[i]);
}
diff --git a/deps/v8/test/mjsunit/wasm/default-liftoff-setting.js b/deps/v8/test/mjsunit/wasm/default-liftoff-setting.js
new file mode 100644
index 0000000000..9ae6e928af
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/default-liftoff-setting.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This test makes sure that by default, we do not compile with liftoff.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('i32_add', kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .exportFunc();
+
+const instance = builder.instantiate();
+
+assertFalse(
+ %IsLiftoffFunction(instance.exports.i32_add),
+ 'liftoff compilation should be off by default');
diff --git a/deps/v8/test/mjsunit/wasm/disallow-codegen.js b/deps/v8/test/mjsunit/wasm/disallow-codegen.js
index 3374a9efd7..9ac2bcd103 100644
--- a/deps/v8/test/mjsunit/wasm/disallow-codegen.js
+++ b/deps/v8/test/mjsunit/wasm/disallow-codegen.js
@@ -20,10 +20,12 @@ let buffer = (function CreateBuffer() {
})();
%DisallowCodegenFromStrings(true);
+%DisallowWasmCodegen(true);
async function SyncTestOk() {
print('sync module compile (ok)...');
%DisallowCodegenFromStrings(false);
+ %DisallowWasmCodegen(false);
let module = new WebAssembly.Module(buffer);
assertInstanceof(module, WebAssembly.Module);
}
@@ -31,6 +33,20 @@ async function SyncTestOk() {
async function SyncTestFail() {
print('sync module compile (fail)...');
%DisallowCodegenFromStrings(true);
+ %DisallowWasmCodegen(false);
+ try {
+ let module = new WebAssembly.Module(buffer);
+ assertUnreachable();
+ } catch (e) {
+ print(" " + e);
+ assertInstanceof(e, WebAssembly.CompileError);
+ }
+}
+
+async function SyncTestWasmFail(disallow_codegen) {
+ print('sync wasm module compile (fail)...');
+ %DisallowCodegenFromStrings(disallow_codegen);
+ %DisallowWasmCodegen(true);
try {
let module = new WebAssembly.Module(buffer);
assertUnreachable();
@@ -43,6 +59,7 @@ async function SyncTestFail() {
async function AsyncTestOk() {
print('async module compile (ok)...');
%DisallowCodegenFromStrings(false);
+ %DisallowWasmCodegen(false);
let promise = WebAssembly.compile(buffer);
assertPromiseResult(
promise, module => assertInstanceof(module, WebAssembly.Module));
@@ -51,6 +68,20 @@ async function AsyncTestOk() {
async function AsyncTestFail() {
print('async module compile (fail)...');
%DisallowCodegenFromStrings(true);
+ %DisallowWasmCodegen(false);
+ try {
+ let m = await WebAssembly.compile(buffer);
+ assertUnreachable();
+ } catch (e) {
+ print(" " + e);
+ assertInstanceof(e, WebAssembly.CompileError);
+ }
+}
+
+async function AsyncTestWasmFail(disallow_codegen) {
+ print('async wasm module compile (fail)...');
+ %DisallowCodegenFromStrings(disallow_codegen);
+ %DisallowWasmCodegen(true);
try {
let m = await WebAssembly.compile(buffer);
assertUnreachable();
@@ -65,6 +96,7 @@ async function StreamingTestOk() {
// TODO(titzer): compileStreaming must be supplied by embedder.
// (and it takes a response, not a buffer)
%DisallowCodegenFromStrings(false);
+ %DisallowWasmCodegen(false);
if ("Function" != typeof WebAssembly.compileStreaming) {
print(" no embedder for streaming compilation");
return;
@@ -77,6 +109,27 @@ async function StreamingTestOk() {
async function StreamingTestFail() {
print('streaming module compile (fail)...');
%DisallowCodegenFromStrings(true);
+ %DisallowWasmCodegen(false);
+ // TODO(titzer): compileStreaming must be supplied by embedder.
+ // (and it takes a response, not a buffer)
+ if ("Function" != typeof WebAssembly.compileStreaming) {
+ print(" no embedder for streaming compilation");
+ return;
+ }
+ try {
+ let m = await WebAssembly.compileStreaming(buffer);
+ assertUnreachable();
+ } catch (e) {
+ print(" " + e);
+ assertInstanceof(e, WebAssembly.CompileError);
+ }
+}
+
+
+async function StreamingTestWasmFail(disallow_codegen) {
+ print('streaming wasm module compile (fail)...');
+ %DisallowCodegenFromStrings(disallow_codegen);
+ %DisallowWasmCodegen(true);
// TODO(titzer): compileStreaming must be supplied by embedder.
// (and it takes a response, not a buffer)
if ("Function" != typeof WebAssembly.compileStreaming) {
@@ -99,6 +152,14 @@ async function RunAll() {
await AsyncTestFail();
await StreamingTestOk();
await StreamingTestFail();
+
+ disallow_codegen = false;
+ for (count = 0; count < 2; ++count) {
+ SyncTestWasmFail(disallow_codegen);
+ AsyncTestWasmFail(disallow_codegen);
+ StreamingTestWasmFail(disallow_codegen)
+ disallow_codegen = true;
+ }
}
assertPromiseResult(RunAll());
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index ef7d5ab27b..e7811d2b78 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -7,11 +7,11 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function testCallFFI(ffi) {
+function instantiateWithFFI(ffi) {
var builder = new WasmModuleBuilder();
var sig_index = kSig_i_dd;
- builder.addImport("", "fun", sig_index);
+ builder.addImport("mod", "fun", sig_index);
builder.addFunction("main", sig_index)
.addBody([
kExprGetLocal, 0, // --
@@ -20,45 +20,75 @@ function testCallFFI(ffi) {
]) // --
.exportFunc();
- var module = builder.instantiate(ffi);
+ return builder.instantiate(ffi);
}
// everything is good.
(function() {
- var ffi = {"": {fun: function(a, b) { print(a, b); }}}
- testCallFFI(ffi);
+ var ffi = {"mod": {fun: function(a, b) { print(a, b); }}}
+ instantiateWithFFI(ffi);
})();
// FFI object should be an object.
assertThrows(function() {
var ffi = 0;
- testCallFFI(ffi);
+ instantiateWithFFI(ffi);
+});
+
+
+// FFI object should have a "mod" property.
+assertThrows(function() {
+ instantiateWithFFI({});
});
// FFI object should have a "fun" property.
assertThrows(function() {
- var ffi = new Object();
- testCallFFI(ffi);
+ instantiateWithFFI({mod: {}});
});
// "fun" should be a JS function.
assertThrows(function() {
- var ffi = new Object();
- ffi.fun = new Object();
- testCallFFI(ffi);
+ instantiateWithFFI({mod: {fun: new Object()}});
});
// "fun" should be a JS function.
assertThrows(function() {
- var ffi = new Object();
- ffi.fun = 0;
- testCallFFI(ffi);
+ instantiateWithFFI({mod: {fun: 0}});
});
+// "fun" should have signature "i_dd"
+assertThrows(function () {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = kSig_i_dd;
+ builder.addFunction("exp", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ ]) // --
+ .exportFunc();
+
+ var exported = builder.instantiate().exports.exp;
+ instantiateWithFFI({mod: {fun: exported}});
+});
+
+// "fun" matches signature "i_dd"
+(function () {
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("exp", kSig_i_dd)
+ .addBody([
+ kExprI32Const, 33,
+ ]) // --
+ .exportFunc();
+
+ var exported = builder.instantiate().exports.exp;
+ var instance = instantiateWithFFI({mod: {fun: exported}});
+ assertEquals(33, instance.exports.main());
+})();
(function I64InSignatureThrows() {
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index 9451c875d6..67b1ccc8f5 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -385,3 +385,18 @@ testCallBinopVoid(kWasmF64);
main();
assertEquals(0, num_valueOf);
})();
+
+(function ImportWithCustomGetter() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addImport("import", "func", kSig_v_v);
+
+ const ffi = {};
+ Object.defineProperty(ffi, 'import', {
+ get: _ => {
+ return {func: () => null };
+ }
+ });
+
+ builder.instantiate(ffi);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/globals.js b/deps/v8/test/mjsunit/wasm/globals.js
index afd2dcc5ce..21f73ebe7e 100644
--- a/deps/v8/test/mjsunit/wasm/globals.js
+++ b/deps/v8/test/mjsunit/wasm/globals.js
@@ -7,6 +7,48 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
+(function TestMultipleInstances() {
+ print("TestMultipleInstances");
+
+ var builder = new WasmModuleBuilder();
+
+ let g = builder.addGlobal(kWasmI32, true);
+ let sig_index = builder.addType(kSig_i_v);
+ builder.addFunction("get", sig_index)
+ .addBody([
+ kExprGetGlobal, g.index])
+ .exportAs("get");
+ builder.addFunction("set", kSig_v_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g.index])
+ .exportAs("set");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ let a = new WebAssembly.Instance(module);
+ let b = new WebAssembly.Instance(module);
+
+ assertEquals(0, a.exports.get());
+ assertEquals(0, b.exports.get());
+
+ a.exports.set(1);
+
+ assertEquals(1, a.exports.get());
+ assertEquals(0, b.exports.get());
+
+ b.exports.set(6);
+
+ assertEquals(1, a.exports.get());
+ assertEquals(6, b.exports.get());
+
+ a.exports.set(7);
+
+ assertEquals(7, a.exports.get());
+ assertEquals(6, b.exports.get());
+
+})();
+
function TestImported(type, val, expected) {
print("TestImported " + type + "(" + val +")" + " = " + expected);
var builder = new WasmModuleBuilder();
@@ -26,6 +68,29 @@ TestImported(kWasmF32, 87234.87238, Math.fround(87234.87238));
TestImported(kWasmF64, 77777.88888, 77777.88888);
+(function TestImportedMultipleInstances() {
+ print("TestImportedMultipleInstances");
+
+ var builder = new WasmModuleBuilder();
+
+ let g = builder.addImportedGlobal("mod", "g", kWasmI32);
+ let sig_index = builder.addType(kSig_i_v);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprGetGlobal, g])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ print(" i 100...");
+ let i100 = new WebAssembly.Instance(module, {mod: {g: 100}});
+ assertEquals(100, i100.exports.main());
+
+ print(" i 300...");
+ let i300 = new WebAssembly.Instance(module, {mod: {g: 300}});
+ assertEquals(300, i300.exports.main());
+})();
+
function TestExported(type, val, expected) {
print("TestExported " + type + "(" + val +")" + " = " + expected);
var builder = new WasmModuleBuilder();
@@ -96,3 +161,54 @@ function TestGlobalIndexSpace(type, val) {
TestGlobalIndexSpace(kWasmI32, 123);
TestGlobalIndexSpace(kWasmF32, 54321.125);
TestGlobalIndexSpace(kWasmF64, 12345.678);
+
+(function TestAccessesInBranch() {
+ print("TestAccessesInBranches");
+
+ var builder = new WasmModuleBuilder();
+
+ let g = builder.addGlobal(kWasmI32, true);
+ let h = builder.addGlobal(kWasmI32, true);
+ let sig_index = builder.addType(kSig_i_i);
+ builder.addFunction("get", sig_index)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprIf, kWasmI32,
+ kExprGetGlobal, g.index,
+ kExprElse,
+ kExprGetGlobal, h.index,
+ kExprEnd])
+ .exportAs("get");
+ builder.addFunction("set", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprIf, kWasmStmt,
+ kExprGetLocal, 1,
+ kExprSetGlobal, g.index,
+ kExprElse,
+ kExprGetLocal, 1,
+ kExprSetGlobal, h.index,
+ kExprEnd])
+ .exportAs("set");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ let a = new WebAssembly.Instance(module);
+ let get = a.exports.get;
+ let set = a.exports.set;
+
+ assertEquals(0, get(0));
+ assertEquals(0, get(1));
+ set(0, 1);
+ assertEquals(1, get(0));
+ assertEquals(0, get(1));
+
+ set(0, 7);
+ assertEquals(7, get(0));
+ assertEquals(0, get(1));
+
+ set(1, 9);
+ assertEquals(7, get(0));
+ assertEquals(9, get(1));
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index f158718f8d..4c6d9c9f3b 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --expose-wasm --expose-gc
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
@@ -601,3 +601,106 @@ function js_div(a, b) { return (a / b) | 0; }
() => instance0.exports.main(0), WebAssembly.RuntimeError,
/signature mismatch/);
})();
+
+(function ImportedFreestandingTable() {
+ print("ImportedFreestandingTable...");
+
+ function forceGc() {
+ gc();
+ gc();
+ gc();
+ }
+
+ function setup() {
+ let builder = new WasmModuleBuilder();
+ let sig = builder.addType(kSig_i_v);
+ builder.addFunction('main', kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprCallIndirect, sig, kTableZero])
+ .exportAs('main');
+
+ builder.addImportedTable('', 'table');
+
+ let module1 = new WebAssembly.Module(builder.toBuffer());
+ let table = new WebAssembly.Table({initial:2, element:'anyfunc'});
+ let instance1 = new WebAssembly.Instance(module1, {'':{table: table}});
+
+ builder = new WasmModuleBuilder();
+ builder.addExport('theImport', builder.addImport('', 'callout', kSig_i_v));
+ builder.addImportedMemory('', 'memory', 1);
+ builder.addFunction('main', kSig_i_v)
+ .addBody([
+ kExprCallFunction, 0,
+ kExprI32Const, 0, kExprI32LoadMem, 0, 0,
+ kExprI32Add
+ ]).exportAs('main');
+
+ let mem = new WebAssembly.Memory({initial:1});
+ let view = new Int32Array(mem.buffer);
+ view[0] = 4;
+
+ let module2 = new WebAssembly.Module(builder.toBuffer());
+ let instance2 = new WebAssembly.Instance(module2, {
+ '': {
+ callout: () => {
+ forceGc();
+ return 3;
+ },
+ 'memory': mem
+ }
+ });
+ table.set(0, instance2.exports.main);
+ table.set(1, instance2.exports.theImport);
+ return instance1;
+ }
+
+ function test(variant, expectation) {
+ var instance = setup();
+ forceGc();
+ assertEquals(expectation, instance.exports.main(variant));
+ }
+
+ // 0 indirectly calls the wasm function that calls the import,
+ // 1 does the same but for the exported import.
+ test(0, 7);
+ test(1, 3);
+})();
+
+(function IndirectCallIntoOtherInstance() {
+ print("IndirectCallIntoOtherInstance...");
+ var mem_1 = new WebAssembly.Memory({initial: 1});
+ var mem_2 = new WebAssembly.Memory({initial: 1});
+ var view_1 = new Int32Array(mem_1.buffer);
+ var view_2 = new Int32Array(mem_2.buffer);
+ view_1[0] = 1;
+ view_2[0] = 1000;
+
+ let builder = new WasmModuleBuilder();
+ let sig = builder.addType(kSig_i_v);
+ builder.addFunction('main', kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprCallIndirect, sig, kTableZero])
+ .exportAs('main');
+ builder.addImportedMemory('', 'memory', 1);
+
+ builder.setFunctionTableBounds(1, 1);
+ builder.addExportOfKind('table', kExternalTable);
+
+ let module1 = new WebAssembly.Module(builder.toBuffer());
+ let instance1 = new WebAssembly.Instance(module1, {'':{memory:mem_1}});
+
+ builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_i_v).addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0]);
+ builder.addImportedTable('', 'table');
+ builder.addFunctionTableInit(0, false, [0], true);
+ builder.addImportedMemory('', 'memory', 1);
+
+
+ let module2 = new WebAssembly.Module(builder.toBuffer());
+ let instance2 = new WebAssembly.Instance(module2, {
+ '': {
+ table: instance1.exports.table,
+ memory: mem_2
+ }
+ });
+
+ assertEquals(instance1.exports.main(0), 1000);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index 0f54dc97d3..f5697eb00f 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -22,6 +22,7 @@ function checkStack(stack, expected_lines) {
}
(function testCallImported() {
+ print(arguments.callee.name);
var stack;
let func = () => stack = new Error('test imported stack').stack;
@@ -47,6 +48,7 @@ function checkStack(stack, expected_lines) {
})();
(function testCallImportedWithParameters() {
+ print(arguments.callee.name);
var stack;
var passed_args = [];
let func1 = (i, j) => (passed_args.push(i, j), 2 * i + j);
@@ -80,6 +82,7 @@ function checkStack(stack, expected_lines) {
})();
(function testTrap() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var foo_idx = builder.addFunction('foo', kSig_v_v)
.addBody([kExprNop, kExprNop, kExprUnreachable])
@@ -110,6 +113,7 @@ function checkStack(stack, expected_lines) {
})();
(function testThrowFromImport() {
+ print(arguments.callee.name);
function func() {
throw new Error('thrown from imported function');
}
@@ -141,6 +145,7 @@ function checkStack(stack, expected_lines) {
})();
(function testGlobals() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
builder.addGlobal(kWasmI32, true); // 0
builder.addGlobal(kWasmI64, true); // 1
@@ -190,6 +195,7 @@ function checkStack(stack, expected_lines) {
})();
(function testReentrantInterpreter() {
+ print(arguments.callee.name);
var stacks;
var instance;
function func(i) {
@@ -227,6 +233,7 @@ function checkStack(stack, expected_lines) {
})();
(function testIndirectImports() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var sig_i_ii = builder.addType(kSig_i_ii);
@@ -260,6 +267,7 @@ function checkStack(stack, expected_lines) {
})();
(function testIllegalImports() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var sig_l_v = builder.addType(kSig_l_v);
@@ -311,6 +319,7 @@ function checkStack(stack, expected_lines) {
})();
(function testInfiniteRecursion() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var direct = builder.addFunction('main', kSig_v_v)
@@ -331,6 +340,7 @@ function checkStack(stack, expected_lines) {
})();
(function testUnwindSingleActivation() {
+ print(arguments.callee.name);
// Create two activations and unwind just the top one.
var builder = new WasmModuleBuilder();
@@ -367,6 +377,7 @@ function checkStack(stack, expected_lines) {
})();
(function testInterpreterGC() {
+ print(arguments.callee.name);
function run(f) {
// wrap the creation in a closure so that the only thing returned is
// the module (i.e. the underlying array buffer of wasm wire bytes dies).
@@ -398,3 +409,87 @@ function checkStack(stack, expected_lines) {
run(x => (x - 18));
}
})();
+
+(function testImportThrowsOnToNumber() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const imp_idx = builder.addImport('mod', 'func', kSig_i_v);
+ builder.addFunction('main', kSig_i_v)
+ .addBody([kExprCallFunction, imp_idx])
+ .exportFunc();
+ var num_callback_calls = 0;
+ const callback = () => {
+ ++num_callback_calls;
+ return Symbol()
+ };
+ var instance = builder.instantiate({mod: {func: callback}});
+ // Test that this does not mess up internal state by executing it three times.
+ for (var i = 0; i < 3; ++i) {
+ var interpreted_before = %WasmNumInterpretedCalls(instance);
+ assertThrows(
+ () => instance.exports.main(), TypeError,
+ 'Cannot convert a Symbol value to a number');
+ assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
+ assertEquals(i + 1, num_callback_calls);
+ }
+})();
+
+(function testCallWithMoreReturnsThenParams() {
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addFunction('exp', kSig_l_v)
+ .addBody([kExprI64Const, 23])
+ .exportFunc();
+ const exp = builder1.instantiate().exports.exp;
+ const builder2 = new WasmModuleBuilder();
+ const imp_idx = builder2.addImport('imp', 'func', kSig_l_v);
+ builder2.addFunction('main', kSig_i_v)
+ .addBody([kExprCallFunction, imp_idx, kExprI32ConvertI64])
+ .exportFunc();
+ const instance = builder2.instantiate({imp: {func: exp}});
+ assertEquals(23, instance.exports.main());
+})();
+
+(function testTableCall() {
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addFunction('func', kSig_v_v).addBody([]).exportFunc();
+ const instance1 = builder1.instantiate();
+ const table = new WebAssembly.Table({element: 'anyfunc', initial: 2});
+
+ const builder2 = new WasmModuleBuilder()
+ builder2.addImportedTable('m', 'table');
+ const sig = builder2.addType(kSig_v_v);
+ builder2.addFunction('call_func', kSig_v_v)
+ .addBody([kExprI32Const, 0, kExprCallIndirect, sig, kTableZero])
+ .exportFunc();
+ const instance2 = builder2.instantiate({m: {table: table}});
+ table.set(0, instance1.exports.func);
+ instance2.exports.call_func();
+})();
+
+(function testTableCall2() {
+ // See crbug.com/787910.
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addFunction('exp', kSig_i_i)
+ .addBody([kExprI32Const, 0])
+ .exportFunc();
+ const instance1 = builder1.instantiate();
+ const builder2 = new WasmModuleBuilder();
+ const sig1 = builder2.addType(kSig_i_v);
+ const sig2 = builder2.addType(kSig_i_i);
+ builder2.addFunction('call2', kSig_i_v)
+ .addBody([
+ kExprI32Const, 0, kExprI32Const, 0, kExprCallIndirect, sig2, kTableZero
+ ])
+ .exportAs('call2');
+ builder2.addImportedTable('imp', 'table');
+ const tab = new WebAssembly.Table({
+ element: 'anyfunc',
+ initial: 3,
+ });
+ const instance2 = builder2.instantiate({imp: {table: tab}});
+ tab.set(0, instance1.exports.exp);
+ instance2.exports.call2();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 952c6296cd..f27fa7a361 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -907,3 +907,21 @@ assertInstantiateSuccess(
class Y extends WebAssembly.Memory { }
assertThrows(() => new Y());
})();
+
+(function TestCallWithoutNew() {
+ var bytes = Uint8Array.of(0x0, 0x61, 0x73, 0x6d, 0x1, 0x00, 0x00, 0x00);
+ assertThrows(() => WebAssembly.Module(bytes), TypeError);
+ assertThrows(() => WebAssembly.Instance(new WebAssembly.Module(bytes)),
+ TypeError);
+ assertThrows(() => WebAssembly.Table({size: 10, element: 'anyfunc'}),
+ TypeError);
+ assertThrows(() => WebAssembly.Memory({size: 10}), TypeError);
+})();
+
+(function TestTinyModule() {
+ var bytes = Uint8Array.of(0x0, 0x61, 0x73, 0x6d, 0x1, 0x00, 0x00, 0x00);
+ var module = new WebAssembly.Module(bytes);
+ assertTrue(module instanceof Module);
+ var instance = new WebAssembly.Instance(module);
+ assertTrue(instance instanceof Instance);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/lazy-compilation.js b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
new file mode 100644
index 0000000000..3d840398a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function importFromOtherInstance() {
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addFunction('func', kSig_v_v).addBody([]).exportFunc();
+ const instance1 = builder1.instantiate();
+
+ const builder2 = new WasmModuleBuilder();
+ builder2.addImport('mod', 'fn', kSig_v_v);
+ builder2.instantiate({mod: {fn: instance1.exports.func}});
+})();
+
+(function testWasmToWasmWithDifferentMemory() {
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addMemory(1, 1, true);
+ builder1.addFunction('store', kSig_v_i)
+ .addBody([
+      kExprI32Const, 0,        // i32.const 0
+ kExprGetLocal, 0, // get_local 0
+ kExprI32StoreMem, 0, 0, // i32.store offset=0 align=0
+ ])
+ .exportFunc();
+ const instance1 = builder1.instantiate();
+ const mem1 = new Int32Array(instance1.exports.memory.buffer);
+
+ const builder2 = new WasmModuleBuilder();
+ builder2.addMemory(1, 1, true);
+ builder2.addImport('mod', 'store', kSig_v_i);
+ builder2.addFunction('call_store', kSig_v_i)
+ .addBody([kExprGetLocal, 0, kExprCallFunction, 0])
+ .exportFunc();
+ const instance2 = builder2.instantiate({mod: {store: instance1.exports.store}});
+ const mem2 = new Int32Array(instance2.exports.memory.buffer);
+
+ assertEquals(0, mem1[0]);
+ assertEquals(0, mem2[0]);
+ instance2.exports.call_store(3);
+ assertEquals(3, mem1[0]);
+ assertEquals(0, mem2[0]);
+})();
+
+(function exportImportedFunction() {
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addFunction('foo', kSig_v_v).addBody([]).exportAs('foo');
+ const instance1 = builder1.instantiate();
+
+ const builder2 = new WasmModuleBuilder();
+ const imp_idx = builder2.addImport('A', 'foo', kSig_v_v);
+ builder2.addExport('foo', imp_idx);
+ const instance2 = builder2.instantiate({A: instance1.exports});
+
+ instance2.exports.foo();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/liftoff.js b/deps/v8/test/mjsunit/wasm/liftoff.js
new file mode 100644
index 0000000000..b65f83f9ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/liftoff.js
@@ -0,0 +1,36 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --liftoff --wasm-async-compilation
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function testLiftoffSync() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('i32_add', kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+
+ assertTrue(%IsLiftoffFunction(instance.exports.i32_add));
+})();
+
+async function testLiftoffAsync() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('i32_add', kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .exportFunc();
+
+ print('Compiling...');
+ const module = await WebAssembly.compile(builder.toBuffer());
+ print('Instantiating...');
+ const instance = new WebAssembly.Instance(module);
+ assertTrue(%IsLiftoffFunction(instance.exports.i32_add));
+}
+
+assertPromiseResult(testLiftoffAsync());
diff --git a/deps/v8/test/mjsunit/wasm/memory-external-call.js b/deps/v8/test/mjsunit/wasm/memory-external-call.js
index 0095ba1e7d..2af5888daa 100644
--- a/deps/v8/test/mjsunit/wasm/memory-external-call.js
+++ b/deps/v8/test/mjsunit/wasm/memory-external-call.js
@@ -38,6 +38,111 @@ function generateBuilder(add_memory, import_sig) {
return builder;
}
+function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
+
+ assertEquals(0, load_a(0));
+ assertEquals(0, load_b(0));
+ assertEquals(0, load_a(4));
+ assertEquals(0, load_b(4));
+
+ store_a(0, 101);
+ assertEquals(101, load_a(0));
+ assertEquals(0, load_b(0));
+ assertEquals(0, load_a(4));
+ assertEquals(0, load_b(4));
+
+ store_a(4, 102);
+ assertEquals(101, load_a(0));
+ assertEquals(0, load_b(0));
+ assertEquals(102, load_a(4));
+ assertEquals(0, load_b(4));
+
+ store_b(0, 103);
+ assertEquals(101, load_a(0));
+ assertEquals(103, load_b(0));
+ assertEquals(102, load_a(4));
+ assertEquals(0, load_b(4));
+
+ store_b(4, 107);
+ assertEquals(101, load_a(0));
+ assertEquals(103, load_b(0));
+ assertEquals(102, load_a(4));
+ assertEquals(107, load_b(4));
+
+ store_a(0, 0);
+ store_a(4, 0);
+ store_b(0, 0);
+ store_b(4, 0);
+}
+
+// A simple test for memory-independence between modules.
+(function SimpleMemoryIndependenceTest() {
+ print("SimpleMemoryIndependenceTest");
+ let kPages = 1;
+ let builder = new WasmModuleBuilder();
+
+ builder.addMemory(kPages, kPages, true);
+ builder.addFunction("store", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32StoreMem, 0, 0, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("load", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprI32LoadMem, 0, 0, // --
+ ]) // --
+ .exportFunc();
+
+ var a = builder.instantiate();
+
+ // The {b} instance forwards all {store} calls to the imported function.
+ builder = new WasmModuleBuilder();
+ builder.addImport("mod", "store", kSig_v_ii);
+ builder.addMemory(kPages, kPages, true);
+ builder.addFunction("store", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallFunction, 0, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("load", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprI32LoadMem, 0, 0, // --
+ ]) // --
+ .exportFunc();
+
+ var b = builder.instantiate({mod: {store: a.exports.store}});
+
+ assertEquals(0, a.exports.load(0));
+ assertEquals(0, b.exports.load(0));
+ assertEquals(0, a.exports.load(4));
+ assertEquals(0, b.exports.load(4));
+
+ a.exports.store(0, 101);
+ assertEquals(101, a.exports.load(0));
+ assertEquals(0, b.exports.load(0));
+ assertEquals(0, a.exports.load(4));
+ assertEquals(0, b.exports.load(4));
+
+ a.exports.store(4, 102);
+ assertEquals(101, a.exports.load(0));
+ assertEquals(0, b.exports.load(0));
+ assertEquals(102, a.exports.load(4));
+ assertEquals(0, b.exports.load(4));
+
+ b.exports.store(4, 107); // should forward to {a}.
+ assertEquals(101, a.exports.load(0));
+ assertEquals(0, b.exports.load(0));
+ assertEquals(107, a.exports.load(4));
+ assertEquals(0, b.exports.load(4));
+
+})();
+
// This test verifies that when a Wasm module without memory invokes a function
// imported from another module that has memory, the second module reads its own
// memory and returns the expected value.
@@ -147,3 +252,87 @@ function generateBuilder(add_memory, import_sig) {
assertEquals(first_value, first_instance.exports.load(index));
assertEquals(second_value, second_instance.exports.load(index));
})();
+
+// A test for memory-independence between modules when calling through
+// imported tables.
+(function CallThroughTableMemoryIndependenceTest() {
+ print("CallThroughTableIndependenceTest");
+ let kTableSize = 2;
+ let kPages = 1;
+ let builder = new WasmModuleBuilder();
+
+ builder.addMemory(kPages, kPages, true);
+ builder.addFunction("store", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32StoreMem, 0, 0, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("load", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprI32LoadMem, 0, 0, // --
+ ]) // --
+ .exportFunc();
+
+ {
+ // Create two instances.
+ let module = builder.toModule();
+ var a = new WebAssembly.Instance(module);
+ var b = new WebAssembly.Instance(module);
+ // Check that the memories are initially independent.
+ assertMemoryIndependence(a.exports.load, a.exports.store,
+ b.exports.load, b.exports.store);
+ }
+
+ let table = new WebAssembly.Table({element: "anyfunc",
+ initial: kTableSize,
+ maximum: kTableSize});
+
+ table.set(0, a.exports.store);
+ table.set(1, b.exports.store);
+ // Check that calling (from JS) through the table maintains independence.
+ assertMemoryIndependence(a.exports.load, table.get(0),
+ b.exports.load, table.get(1));
+
+ table.set(1, a.exports.store);
+ table.set(0, b.exports.store);
+ // Check that calling (from JS) through the table maintains independence,
+ // even after reorganizing the table.
+ assertMemoryIndependence(a.exports.load, table.get(1),
+ b.exports.load, table.get(0));
+
+ // Check that calling (from WASM) through the table maintains independence.
+ builder = new WasmModuleBuilder();
+ builder.addImportedTable("m", "table", kTableSize, kTableSize);
+ var sig_index = builder.addType(kSig_v_ii);
+ builder.addFunction("store", kSig_v_iii)
+ .addBody([
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, kTableZero,
+ ]).exportFunc();
+
+ let c = builder.instantiate({m: {table: table}});
+
+ let a_index = 1;
+ let b_index = 0;
+ let store_a = (index, val) => c.exports.store(a_index, index, val)
+ let store_b = (index, val) => c.exports.store(b_index, index, val);
+
+ assertMemoryIndependence(a.exports.load, store_a,
+ b.exports.load, store_b);
+
+ // Flip the order in the table and do it again.
+ table.set(0, a.exports.store);
+ table.set(1, b.exports.store);
+
+ a_index = 0;
+ b_index = 1;
+
+ assertMemoryIndependence(a.exports.load, store_a,
+ b.exports.load, store_b);
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/multi-value.js b/deps/v8/test/mjsunit/wasm/multi-value.js
new file mode 100644
index 0000000000..d6eff16293
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/multi-value.js
@@ -0,0 +1,322 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-mv
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function MultiBlockResultTest() {
+ print("MultiBlockResultTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_ii_v = builder.addType(kSig_ii_v);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprBlock, sig_ii_v,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprEnd,
+ kExprI32Add])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1, 4), 5);
+})();
+
+(function MultiBlockParamTest() {
+ print("MultiBlockParamTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprBlock, sig_i_ii,
+ kExprI32Add,
+ kExprEnd])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1, 4), 5);
+})();
+
+(function MultiBlockBrTest() {
+ print("MultiBlockBrTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_ii_v = builder.addType(kSig_ii_v);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprBlock, sig_ii_v,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprBr, 0,
+ kExprEnd,
+ kExprI32Add])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1, 4), 5);
+})();
+
+
+(function MultiLoopResultTest() {
+ print("MultiLoopResultTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_ii_v = builder.addType(kSig_ii_v);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprLoop, sig_ii_v,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprEnd,
+ kExprI32Add])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1, 4), 5);
+})();
+
+(function MultiLoopParamTest() {
+ print("MultiLoopParamTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprLoop, sig_i_ii,
+ kExprI32Add,
+ kExprEnd])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1, 4), 5);
+})();
+
+(function MultiLoopBrTest() {
+ print("MultiLoopBrTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_ii_i = builder.addType(kSig_ii_i);
+ let sig_ii_ii = builder.addType(kSig_ii_ii);
+
+ builder.addFunction("dup", kSig_ii_i)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 0]);
+ builder.addFunction("swap", kSig_ii_ii)
+ .addBody([kExprGetLocal, 1, kExprGetLocal, 0]);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprLoop, sig_ii_ii,
+ kExprCallFunction, 1, // swap
+ kExprCallFunction, 0, // dup
+ kExprI32Add,
+ kExprCallFunction, 0, // dup
+ kExprI32Const, 20,
+ kExprI32LeU,
+ kExprBrIf, 0,
+ kExprEnd,
+ kExprDrop])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(0, instance.exports.main(0, 1));
+ assertEquals(16, instance.exports.main(1, 1));
+ assertEquals(4, instance.exports.main(3, 1));
+ assertEquals(4, instance.exports.main(4, 1));
+ assertEquals(0, instance.exports.main(0, 2));
+ assertEquals(16, instance.exports.main(1, 2));
+ assertEquals(8, instance.exports.main(3, 2));
+ assertEquals(8, instance.exports.main(4, 2));
+ assertEquals(0, instance.exports.main(0, 3));
+ assertEquals(8, instance.exports.main(1, 3));
+ assertEquals(12, instance.exports.main(3, 3));
+ assertEquals(12, instance.exports.main(4, 3));
+ assertEquals(0, instance.exports.main(0, 4));
+ assertEquals(8, instance.exports.main(1, 4));
+ assertEquals(16, instance.exports.main(3, 4));
+ assertEquals(16, instance.exports.main(4, 4));
+ assertEquals(3, instance.exports.main(100, 3));
+ assertEquals(6, instance.exports.main(3, 100));
+})();
+
+
+(function MultiIfResultTest() {
+ print("MultiIfResultTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_ii_v = builder.addType(kSig_ii_v);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprIf, sig_ii_v,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprElse,
+ kExprGetLocal, 1,
+ kExprGetLocal, 0,
+ kExprEnd,
+ kExprI32Sub])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(8, 3), 5);
+ assertEquals(instance.exports.main(0, 3), 3);
+})();
+
+(function MultiIfParamTest() {
+ print("MultiIfParamTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 0,
+ kExprIf, sig_i_ii,
+ kExprI32Add,
+ kExprElse,
+ kExprI32Sub,
+ kExprEnd])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1, 4), 5);
+ assertEquals(instance.exports.main(0, 4), -4);
+})();
+
+(function MultiIfBrTest() {
+ print("MultiIfBrTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_ii_v = builder.addType(kSig_ii_v);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprIf, sig_ii_v,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprBr, 0,
+ kExprElse,
+ kExprGetLocal, 1,
+ kExprGetLocal, 0,
+ kExprBr, 0,
+ kExprEnd,
+ kExprI32Sub])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(8, 3), 5);
+ assertEquals(instance.exports.main(0, 3), 3);
+})();
+
+(function MultiResultTest() {
+ print("MultiResultTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let sig_iii_ii = builder.addType(kSig_iii_ii);
+
+ builder.addFunction("callee", kSig_iii_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI32Sub]);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallFunction, 0,
+ kExprI32Mul,
+ kExprI32Add])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(0, 0), 0);
+ assertEquals(instance.exports.main(1, 0), 1);
+ assertEquals(instance.exports.main(2, 0), 2);
+ assertEquals(instance.exports.main(0, 1), -1);
+ assertEquals(instance.exports.main(0, 2), -4);
+ assertEquals(instance.exports.main(3, 4), -1);
+ assertEquals(instance.exports.main(4, 3), 7);
+})();
+
+(function MultiReturnTest() {
+ print("MultiReturnTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_i = builder.addType(kSig_i_i);
+ let sig_ii_i = builder.addType(kSig_ii_i);
+
+ builder.addFunction("callee", kSig_ii_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprI32Add,
+ kExprReturn]);
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, 0,
+ kExprI32Mul])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(0), 0);
+ assertEquals(instance.exports.main(1), 2);
+ assertEquals(instance.exports.main(2), 8);
+ assertEquals(instance.exports.main(10), 200);
+})();
+
+(function MultiBrReturnTest() {
+ print("MultiBrReturnTest");
+ let builder = new WasmModuleBuilder();
+ let sig_i_i = builder.addType(kSig_i_i);
+ let sig_ii_i = builder.addType(kSig_ii_i);
+
+ builder.addFunction("callee", kSig_ii_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprI32Add,
+ kExprBr, 0]);
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, 0,
+ kExprI32Mul])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(0), 0);
+ assertEquals(instance.exports.main(1), 2);
+ assertEquals(instance.exports.main(2), 8);
+ assertEquals(instance.exports.main(10), 200);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index ab998e91b0..a9a4ba298c 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -64,7 +64,7 @@ let id = (() => { // identity exported function
print("TableGrowBoundsCheck");
let builder = new WasmModuleBuilder();
addMain(builder);
- let module = WebAssembly.Module(builder.toBuffer());
+ let module = new WebAssembly.Module(builder.toBuffer());
let table = new WebAssembly.Table({element: "anyfunc",
initial: 1, maximum:kMaxTableSize});
function fillTable() {
@@ -95,7 +95,7 @@ let id = (() => { // identity exported function
print("TableGrowBoundsZeroInitial");
let builder = new WasmModuleBuilder();
addMain(builder);
- let module = WebAssembly.Module(builder.toBuffer());
+ let module = new WebAssembly.Module(builder.toBuffer());
var table = new WebAssembly.Table({element: "anyfunc",
initial: 0, maximum:kMaxTableSize});
function growTableByOne() {
diff --git a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
index e16a7af26a..e298468350 100644
--- a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
+++ b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
@@ -299,3 +299,28 @@ assertThrows(function TestWasmWrapperNoElisionTypeMismatch() {
assertEquals(the_export(2, -2), 0);
assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
});
+
+
+(function TestSimpleI64Ret() {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("exp", kSig_l_v)
+ .addBody([
+ kExprI64Const, 23
+ ])
+ .exportFunc();
+ var exported = builder.instantiate().exports.exp;
+
+ var builder = new WasmModuleBuilder();
+ builder.addImport("imp", "func", kSig_l_v);
+ builder.addFunction("main", kSig_i_v)
+ .addBody([
+ kExprCallFunction, 0,
+ kExprI32ConvertI64
+ ])
+ .exportFunc();
+
+ var instance = builder.instantiate({imp: {func: exported}});
+
+ assertEquals(23, instance.exports.main());
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index abe003e3e7..02f28ff515 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -10,7 +10,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
var debug = true;
function instantiate(buffer, ffi) {
- return new WebAssembly.Instance(WebAssembly.Module(buffer), ffi);
+ return new WebAssembly.Instance(new WebAssembly.Module(buffer), ffi);
}
(function BasicTest() {
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-common.js b/deps/v8/test/mjsunit/wasm/user-properties-common.js
new file mode 100644
index 0000000000..ab6b2bc979
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/user-properties-common.js
@@ -0,0 +1,52 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --verify-heap
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const verifyHeap = gc;
+let globalCounter = 10000000;
+
+function testProperties(obj) {
+ for (let i = 0; i < 3; i++) {
+ obj.x = 1001;
+ assertEquals(1001, obj.x);
+
+ obj.y = "old";
+ assertEquals("old", obj.y);
+
+ delete obj.y;
+ assertEquals("undefined", typeof obj.y);
+
+ let uid = globalCounter++;
+ let fresh = "f_" + uid;
+
+ obj.z = fresh;
+ assertEquals(fresh, obj.z);
+
+ obj[fresh] = uid;
+ assertEquals(uid, obj[fresh]);
+
+ verifyHeap();
+
+ assertEquals(1001, obj.x);
+ assertEquals(fresh, obj.z);
+ assertEquals(uid, obj[fresh]);
+ }
+
+ // These properties are special for JSFunctions.
+ Object.defineProperty(obj, 'name', {value: "crazy"});
+ Object.defineProperty(obj, 'length', {value: 999});
+}
+
+function minus18(x) { return x - 18; }
+function id(x) { return x; }
+
+function printName(when, f) {
+ print(" " + when + ": name=" + f.name + ", length=" + f.length);
+}
+
+// Note that this test is a helper with common code for user-properties-*.js.
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-constructed.js b/deps/v8/test/mjsunit/wasm/user-properties-constructed.js
new file mode 100644
index 0000000000..77e7865b1c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/user-properties-constructed.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --verify-heap
+
+load("test/mjsunit/wasm/user-properties-common.js");
+
+(function ConstructedTest() {
+ print("ConstructedTest");
+
+ var memory = undefined, table = undefined;
+ for (let i = 0; i < 4; i++) {
+ print(" iteration " + i);
+
+ let m = new WebAssembly.Memory({initial: 1});
+ let t = new WebAssembly.Table({element: "anyfunc", initial: 1});
+ m.old = memory;
+ t.old = table;
+
+ memory = m;
+ table = t;
+ testProperties(memory);
+ testProperties(table);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-exported.js b/deps/v8/test/mjsunit/wasm/user-properties-exported.js
new file mode 100644
index 0000000000..80f2077f3c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/user-properties-exported.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --verify-heap
+
+load("test/mjsunit/wasm/user-properties-common.js");
+
+(function ExportedFunctionTest() {
+ print("ExportedFunctionTest");
+
+ print(" instance 1, exporting");
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("exp", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, 0])
+ .exportAs("exp");
+ let module1 = builder.toModule();
+ let instance1 = new WebAssembly.Instance(module1);
+ let g = instance1.exports.exp;
+
+ testProperties(g);
+
+ // The WASM-internal fields of {g} are only inspected when {g} is
+ // used as an import into another instance.
+ print(" instance 2, importing");
+ var builder = new WasmModuleBuilder();
+ builder.addImport("imp", "func", kSig_i_i);
+ let module2 = builder.toModule();
+ let instance2 = new WebAssembly.Instance(module2, {imp: {func: g}});
+
+ testProperties(g);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-module.js b/deps/v8/test/mjsunit/wasm/user-properties-module.js
new file mode 100644
index 0000000000..69a1f898d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/user-properties-module.js
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --verify-heap
+
+load("test/mjsunit/wasm/user-properties-common.js");
+
+(function ModuleTest() {
+ for (f of [x => (x + 19 + globalCounter), minus18]) {
+ print("ModuleTest");
+
+ let builder = new WasmModuleBuilder();
+ builder.addImport("m", "f", kSig_i_i);
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, 0])
+ .exportAs("main");
+ builder.addMemory(1, 1, false)
+ .exportMemoryAs("memory")
+
+ let module = builder.toModule();
+ testProperties(module);
+
+ for (let i = 0; i < 3; i++) {
+ print(" instance " + i);
+ let instance = new WebAssembly.Instance(module, {m: {f: f}});
+ testProperties(instance);
+
+ print(" memory " + i);
+ let m = instance.exports.memory;
+ assertInstanceof(m, WebAssembly.Memory);
+ testProperties(m);
+
+ print(" function " + i);
+ let g = instance.exports.main;
+ assertInstanceof(g, Function);
+ printName("before", g);
+ testProperties(g);
+ printName(" after", g);
+ assertInstanceof(g, Function);
+ testProperties(g);
+ for (let j = 10; j < 15; j++) {
+ assertEquals(f(j), g(j));
+ }
+ verifyHeap();
+ // The WASM-internal fields of {g} are only inspected when {g} is
+ // used as an import into another instance. Use {g} as the import
+ // the next time through the loop.
+ f = g;
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-reexport.js b/deps/v8/test/mjsunit/wasm/user-properties-reexport.js
new file mode 100644
index 0000000000..e4f155df5a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/user-properties-reexport.js
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --verify-heap
+
+load("test/mjsunit/wasm/user-properties-common.js");
+
+(function ImportReexportChain() {
+ print("ImportReexportChain");
+
+ var f = id;
+
+ for (let i = 0; i < 5; i++) {
+ let builder = new WasmModuleBuilder();
+ builder.addImport("imp", "func", kSig_i_i);
+ builder.addExport("exp", 0);
+ let module = builder.toModule();
+ let instance = new WebAssembly.Instance(module, {imp: {func: f}});
+ let g = instance.exports.exp;
+ assertInstanceof(g, Function);
+ printName("before", g);
+ testProperties(g);
+ printName(" after", g);
+
+ // The WASM-internal fields of {g} are only inspected when {g} is
+ // used as an import into another instance. Use {g} as the import
+ // the next time through the loop.
+ f = g;
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties.js b/deps/v8/test/mjsunit/wasm/user-properties.js
deleted file mode 100644
index facc58cad7..0000000000
--- a/deps/v8/test/mjsunit/wasm/user-properties.js
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --expose-gc --verify-heap
-
-load("test/mjsunit/wasm/wasm-constants.js");
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-const verifyHeap = gc;
-let globalCounter = 10000000;
-
-function testProperties(obj) {
- for (let i = 0; i < 3; i++) {
- obj.x = 1001;
- assertEquals(1001, obj.x);
-
- obj.y = "old";
- assertEquals("old", obj.y);
-
- delete obj.y;
- assertEquals("undefined", typeof obj.y);
-
- let uid = globalCounter++;
- let fresh = "f_" + uid;
-
- obj.z = fresh;
- assertEquals(fresh, obj.z);
-
- obj[fresh] = uid;
- assertEquals(uid, obj[fresh]);
-
- verifyHeap();
-
- assertEquals(1001, obj.x);
- assertEquals(fresh, obj.z);
- assertEquals(uid, obj[fresh]);
- }
-
- // These properties are special for JSFunctions.
- Object.defineProperty(obj, 'name', {value: "crazy"});
- Object.defineProperty(obj, 'length', {value: 999});
-}
-
-function minus18(x) { return x - 18; }
-function id(x) { return x; }
-
-function printName(when, f) {
- print(" " + when + ": name=" + f.name + ", length=" + f.length);
-}
-
-(function ExportedFunctionTest() {
- print("ExportedFunctionTest");
-
- print(" instance 1, exporting");
- var builder = new WasmModuleBuilder();
- builder.addFunction("exp", kSig_i_i)
- .addBody([
- kExprGetLocal, 0,
- kExprCallFunction, 0])
- .exportAs("exp");
- let module1 = builder.toModule();
- let instance1 = new WebAssembly.Instance(module1);
- let g = instance1.exports.exp;
-
- testProperties(g);
-
- // The WASM-internal fields of {g} are only inspected when {g} is
- // used as an import into another instance.
- print(" instance 2, importing");
- var builder = new WasmModuleBuilder();
- builder.addImport("imp", "func", kSig_i_i);
- let module2 = builder.toModule();
- let instance2 = new WebAssembly.Instance(module2, {imp: {func: g}});
-
- testProperties(g);
-})();
-
-(function ImportReexportChain() {
- print("ImportReexportChain");
-
- var f = id;
-
- for (let i = 0; i < 5; i++) {
- let builder = new WasmModuleBuilder();
- builder.addImport("imp", "func", kSig_i_i);
- builder.addExport("exp", 0);
- let module = builder.toModule();
- let instance = new WebAssembly.Instance(module, {imp: {func: f}});
- let g = instance.exports.exp;
- assertInstanceof(g, Function);
- printName("before", g);
- testProperties(g);
- printName(" after", g);
-
- // The WASM-internal fields of {g} are only inspected when {g} is
- // used as an import into another instance. Use {g} as the import
- // the next time through the loop.
- f = g;
- }
-})();
-
-
-(function ModuleTest() {
- for (f of [x => (x + 19 + globalCounter), minus18]) {
- print("ModuleTest");
-
- let builder = new WasmModuleBuilder();
- builder.addImport("m", "f", kSig_i_i);
- builder.addFunction("main", kSig_i_i)
- .addBody([
- kExprGetLocal, 0,
- kExprCallFunction, 0])
- .exportAs("main");
- builder.addMemory(1, 1, false)
- .exportMemoryAs("memory")
-
- let module = builder.toModule();
- testProperties(module);
-
- for (let i = 0; i < 3; i++) {
- print(" instance " + i);
- let instance = new WebAssembly.Instance(module, {m: {f: f}});
- testProperties(instance);
-
- print(" memory " + i);
- let m = instance.exports.memory;
- assertInstanceof(m, WebAssembly.Memory);
- testProperties(m);
-
- print(" function " + i);
- let g = instance.exports.main;
- assertInstanceof(g, Function);
- printName("before", g);
- testProperties(g);
- printName(" after", g);
- assertInstanceof(g, Function);
- testProperties(g);
- for (let j = 10; j < 15; j++) {
- assertEquals(f(j), g(j));
- }
- verifyHeap();
- // The WASM-internal fields of {g} are only inspected when {g} is
- // used as an import into another instance. Use {g} as the import
- // the next time through the loop.
- f = g;
- }
- }
-
-})();
-
-(function ConstructedTest() {
- print("ConstructedTest");
-
- var memory = undefined, table = undefined;
- for (let i = 0; i < 4; i++) {
- print(" iteration " + i);
-
- let m = new WebAssembly.Memory({initial: 1});
- let t = new WebAssembly.Table({element: "anyfunc", initial: 1});
- m.old = memory;
- t.old = table;
-
- memory = m;
- table = t;
- testProperties(memory);
- testProperties(table);
- }
-})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index cc5f1e9c4a..4c86065b89 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -78,7 +78,7 @@ let kLocalNamesCode = 2;
let kWasmFunctionTypeForm = 0x60;
let kWasmAnyFunctionTypeForm = 0x70;
-let kResizableMaximumFlag = 1;
+let kHasMaximumFlag = 1;
// Function declaration flags
let kDeclFunctionName = 0x01;
@@ -123,6 +123,12 @@ let kSig_v_l = makeSig([kWasmI64], []);
let kSig_v_d = makeSig([kWasmF64], []);
let kSig_v_dd = makeSig([kWasmF64, kWasmF64], []);
let kSig_v_ddi = makeSig([kWasmF64, kWasmF64, kWasmI32], []);
+let kSig_ii_v = makeSig([], [kWasmI32, kWasmI32]);
+let kSig_iii_v = makeSig([], [kWasmI32, kWasmI32, kWasmI32]);
+let kSig_ii_i = makeSig([kWasmI32], [kWasmI32, kWasmI32]);
+let kSig_iii_i = makeSig([kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
+let kSig_ii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32]);
+let kSig_iii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
let kSig_v_f = makeSig([kWasmF32], []);
let kSig_f_f = makeSig([kWasmF32], [kWasmF32]);
diff --git a/deps/v8/test/mjsunit/wasm/worker-memory.js b/deps/v8/test/mjsunit/wasm/worker-memory.js
new file mode 100644
index 0000000000..6d96e7ba36
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/worker-memory.js
@@ -0,0 +1,69 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+(function TestPostMessageUnsharedMemory() {
+ let worker = new Worker('');
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 2});
+
+ assertThrows(() => worker.postMessage(memory), Error);
+})();
+
+// Can't use assert in a worker.
+let workerHelpers =
+ `function assertTrue(value, msg) {
+ if (!value) {
+ postMessage("Error: " + msg);
+ throw new Error("Exit"); // To stop testing.
+ }
+ }
+
+ function assertIsWasmMemory(memory, expectedSize) {
+ assertTrue(memory instanceof WebAssembly.Memory,
+ "object is not a WebAssembly.Memory");
+
+ assertTrue(memory.buffer instanceof SharedArrayBuffer,
+ "object.buffer is not a SharedArrayBuffer");
+
+ assertTrue(memory.buffer.byteLength == expectedSize,
+ "object.buffer.byteLength is not " + expectedSize + " bytes");
+ }
+`;
+
+(function TestPostMessageSharedMemory() {
+ let workerScript = workerHelpers +
+ `onmessage = function(memory) {
+ assertIsWasmMemory(memory, 65536);
+ postMessage("OK");
+ };`;
+
+ let worker = new Worker(workerScript);
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
+ worker.postMessage(memory);
+ assertEquals("OK", worker.getMessage());
+ worker.terminate();
+})();
+
+(function TestPostMessageComplexObjectWithSharedMemory() {
+ let workerScript = workerHelpers +
+ `onmessage = function(obj) {
+ assertIsWasmMemory(obj.memories[0], 65536);
+ assertIsWasmMemory(obj.memories[1], 65536);
+ assertTrue(obj.buffer instanceof SharedArrayBuffer,
+ "buffer is not a SharedArrayBuffer");
+ assertTrue(obj.memories[0] === obj.memories[1], "memories aren't equal");
+ assertTrue(obj.memories[0].buffer === obj.buffer,
+ "buffers aren't equal");
+ assertTrue(obj.foo === 1, "foo is not 1");
+ postMessage("OK");
+ };`;
+
+ let worker = new Worker(workerScript);
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
+ let obj = {memories: [memory, memory], buffer: memory.buffer, foo: 1};
+ worker.postMessage(obj);
+ assertEquals("OK", worker.getMessage());
+ worker.terminate();
+})();
diff --git a/deps/v8/test/mjsunit/whitespaces.js b/deps/v8/test/mjsunit/whitespaces.js
index 5b3c6c03f7..cd8595fcc7 100644
--- a/deps/v8/test/mjsunit/whitespaces.js
+++ b/deps/v8/test/mjsunit/whitespaces.js
@@ -99,16 +99,32 @@ function test_stringtonumber(c, postfix) {
}
}
-for (var i = 0; i < 0x10000; i++) {
- c = String.fromCharCode(i);
- test_regexp(c + onebyte);
- test_regexp(c + twobyte);
- test_trim(c, onebyte + "trim");
- test_trim(c, twobyte + "trim");
- test_parseInt(c, onebyte);
- test_parseInt(c, twobyte);
- test_eval(c, onebyte);
- test_eval(c, twobyte);
- test_stringtonumber(c, onebytespace);
- test_stringtonumber(c, twobytespace);
+// Test is split into parts to increase parallelism.
+const number_of_tests = 10;
+const max_codepoint = 0x10000;
+
+function firstCodePointOfRange(i) {
+ return Math.floor(i * (max_codepoint / number_of_tests));
+}
+
+function testCodePointRange(i) {
+ assertTrue(i >= 0 && i < number_of_tests);
+
+ const from = firstCodePointOfRange(i);
+ const to = (i == number_of_tests - 1)
+ ? max_codepoint : firstCodePointOfRange(i + 1);
+
+ for (let i = from; i < to; i++) {
+ c = String.fromCharCode(i);
+ test_regexp(c + onebyte);
+ test_regexp(c + twobyte);
+ test_trim(c, onebyte + "trim");
+ test_trim(c, twobyte + "trim");
+ test_parseInt(c, onebyte);
+ test_parseInt(c, twobyte);
+ test_eval(c, onebyte);
+ test_eval(c, twobyte);
+ test_stringtonumber(c, onebytespace);
+ test_stringtonumber(c, twobytespace);
+ }
}
diff --git a/deps/v8/test/mjsunit/whitespaces0.js b/deps/v8/test/mjsunit/whitespaces0.js
new file mode 100644
index 0000000000..9bcd4a7024
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces0.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(0);
diff --git a/deps/v8/test/mjsunit/whitespaces1.js b/deps/v8/test/mjsunit/whitespaces1.js
new file mode 100644
index 0000000000..8d09f4c29e
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces1.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(1);
diff --git a/deps/v8/test/mjsunit/whitespaces2.js b/deps/v8/test/mjsunit/whitespaces2.js
new file mode 100644
index 0000000000..2919d0367b
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces2.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(2);
diff --git a/deps/v8/test/mjsunit/whitespaces3.js b/deps/v8/test/mjsunit/whitespaces3.js
new file mode 100644
index 0000000000..db28a92fc2
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces3.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(3);
diff --git a/deps/v8/test/mjsunit/whitespaces4.js b/deps/v8/test/mjsunit/whitespaces4.js
new file mode 100644
index 0000000000..e1033418fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces4.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(4);
diff --git a/deps/v8/test/mjsunit/whitespaces5.js b/deps/v8/test/mjsunit/whitespaces5.js
new file mode 100644
index 0000000000..5b748ad9b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces5.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(5);
diff --git a/deps/v8/test/mjsunit/whitespaces6.js b/deps/v8/test/mjsunit/whitespaces6.js
new file mode 100644
index 0000000000..6944700054
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces6.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(6);
diff --git a/deps/v8/test/mjsunit/whitespaces7.js b/deps/v8/test/mjsunit/whitespaces7.js
new file mode 100644
index 0000000000..ad01584351
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces7.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(7);
diff --git a/deps/v8/test/mjsunit/whitespaces8.js b/deps/v8/test/mjsunit/whitespaces8.js
new file mode 100644
index 0000000000..aff47d5c6c
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces8.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(8);
diff --git a/deps/v8/test/mjsunit/whitespaces9.js b/deps/v8/test/mjsunit/whitespaces9.js
new file mode 100644
index 0000000000..8d5e97aceb
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces9.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Files: test/mjsunit/whitespaces.js
+
+testCodePointRange(9);
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index ab6247fa6b..264779601b 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -46,8 +46,8 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
static int DumpHeapConstants(const char* argv0) {
// Start up V8.
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv0);
Isolate::CreateParams create_params;
@@ -128,7 +128,6 @@ static int DumpHeapConstants(const char* argv0) {
// Teardown.
isolate->Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
return 0;
}
diff --git a/deps/v8/test/mkgrokdump/testcfg.py b/deps/v8/test/mkgrokdump/testcfg.py
index c47b59de4a..3dcf80a6a1 100644
--- a/deps/v8/test/mkgrokdump/testcfg.py
+++ b/deps/v8/test/mkgrokdump/testcfg.py
@@ -10,16 +10,20 @@ from testrunner.objects import testcase
class MkGrokdump(testsuite.TestSuite):
+ SHELL = 'mkgrokdump'
def __init__(self, name, root):
super(MkGrokdump, self).__init__(name, root)
def ListTests(self, context):
- test = testcase.TestCase(self, self.shell())
+ test = testcase.TestCase(self, self.SHELL)
return [test]
- def GetFlagsForTestCase(self, testcase, context):
- return []
+ def GetShellForTestCase(self, testcase):
+ return self.SHELL
+
+ def GetParametersForTestCase(self, testcase, context):
+ return [], [], {}
def IsFailureOutput(self, testcase):
output = testcase.output
@@ -42,8 +46,5 @@ class MkGrokdump(testsuite.TestSuite):
return True
return False
- def shell(self):
- return "mkgrokdump"
-
def GetSuite(name, root):
return MkGrokdump(name, root)
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 686018c007..d64b74f354 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -152,6 +152,10 @@
##################### SLOW TESTS #####################
+ # Compiles a long chain of && or || operations, can time out under slower
+ # variants.
+ 'js1_5/Expressions/regress-394673': [PASS, FAST_VARIANTS],
+
# This takes a long time to run (~100 seconds). It should only be run
# by the really patient.
'js1_5/GC/regress-324278': [SKIP],
@@ -301,6 +305,9 @@
'js1_5/Regress/regress-317476': [FAIL],
'js1_5/Regress/regress-314401': [FAIL],
+ # d8 implements setTimeout, but not clearTimeout.
+ 'js1_5/GC/regress-319980-01': [FAIL],
+
# Any local 'arguments' variable should not be allowed to shadow the value
# returned via the indirect 'arguments' property accessor.
'js1_4/Functions/function-001': [FAIL_OK],
@@ -674,10 +681,6 @@
# is given null or undefined as this argument (and so does firefox nightly).
'js1_5/Regress/regress-295052': [FAIL],
- # Bug 1202597: New js1_5/Expressions/regress-394673 is failing.
- # Marked as: Will not fix. V8 throws an acceptable RangeError.
- 'js1_5/Expressions/regress-394673': [FAIL],
-
# Bug 762: http://code.google.com/p/v8/issues/detail?id=762
# We do not correctly handle assignments within "with"
'ecma_3/Statements/12.10-01': [FAIL],
@@ -808,8 +811,8 @@
# error message in debug mode.
'js1_5/extensions/regress-336410-1': [FAIL_OK, ['mode == debug and arch == x64', NO_VARIANTS]],
- # These tests fail when --harmony-strict-legacy-accessor-builtins
- # is enabled.
+ # These tests fail due to __defineGetter__ & friends throwing
+ # for undefined receivers.
'js1_5/extensions/regress-313500': [SKIP],
'js1_5/extensions/regress-325269': [SKIP],
@@ -935,7 +938,7 @@
}], # 'arch == mipsel and simulator_run'
['arch == mips64el and simulator_run', {
- 'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
+ 'js1_5/extensions/regress-355497': [FAIL_OK, '--sim-stack-size=512'],
}], # 'arch == mips64el and simulator_run'
['arch == mips', {
@@ -984,7 +987,7 @@
'js1_5/extensions/regress-336410-1': [SKIP],
#BUG(3152): Avoid C stack overflow.
- 'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
+ 'js1_5/extensions/regress-355497': [FAIL_OK, '--sim-stack-size=512'],
}], # 'arch == arm64 and simulator_run'
['variant == wasm_traps', {
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index d721a8c95f..46623d0848 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -81,11 +81,8 @@ class MozillaTestSuite(testsuite.TestSuite):
tests.append(case)
return tests
- def GetFlagsForTestCase(self, testcase, context):
- result = []
- result += context.mode_flags
- result += ["--expose-gc"]
- result += [os.path.join(self.root, "mozilla-shell-emulation.js")]
+ def GetParametersForTestCase(self, testcase, context):
+ files = [os.path.join(self.root, "mozilla-shell-emulation.js")]
testfilename = testcase.path + ".js"
testfilepath = testfilename.split("/")
for i in xrange(len(testfilepath)):
@@ -93,9 +90,10 @@ class MozillaTestSuite(testsuite.TestSuite):
reduce(os.path.join, testfilepath[:i], ""),
"shell.js")
if os.path.exists(script):
- result.append(script)
- result.append(os.path.join(self.testroot, testfilename))
- return testcase.flags + result
+ files.append(script)
+ files.append(os.path.join(self.testroot, testfilename))
+ flags = testcase.flags + context.mode_flags + ["--expose-gc"]
+ return files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index c55e4a85a8..f90d34f4ac 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -36,9 +36,6 @@ class PreparserTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(PreparserTestSuite, self).__init__(name, root)
- def shell(self):
- return "d8"
-
def _ParsePythonTestTemplates(self, result, filename):
pathname = os.path.join(self.root, filename + ".pyt")
def Test(name, source, expectation, extra_flags=[]):
@@ -71,8 +68,8 @@ class PreparserTestSuite(testsuite.TestSuite):
self._ParsePythonTestTemplates(result, f)
return result
- def GetFlagsForTestCase(self, testcase, context):
- return testcase.flags
+ def GetParametersForTestCase(self, testcase, context):
+ return [], testcase.flags, {}
def GetSourceForTest(self, testcase):
assert testcase.flags[0] == "-e"
diff --git a/deps/v8/test/promises-aplus/testcfg.py b/deps/v8/test/promises-aplus/testcfg.py
index bd80f97a13..4db598a78a 100644
--- a/deps/v8/test/promises-aplus/testcfg.py
+++ b/deps/v8/test/promises-aplus/testcfg.py
@@ -37,20 +37,15 @@ from testrunner.local import utils
from testrunner.objects import testcase
-SINON_TAG = '1.7.3'
-SINON_NAME = 'sinon'
-SINON_FILENAME = 'sinon.js'
-SINON_URL = 'http://sinonjs.org/releases/sinon-' + SINON_TAG + '.js'
-SINON_HASH = 'b7ab4dd9a1a2cf0460784af3728ad15caf4bbea923f680c5abde5c8332f35984'
-
-TEST_TAG = '2.0.3'
-TEST_ARCHIVE_TOP = 'promises-tests-' + TEST_TAG
+"""
+Requirements for using this test suite:
+Download http://sinonjs.org/releases/sinon-1.7.3.js into
+test/promises-aplus/sinon.
+Download https://github.com/promises-aplus/promises-tests/tree/2.0.3 into
+test/promises-aplus/promises-tests.
+"""
+
TEST_NAME = 'promises-tests'
-TEST_ARCHIVE = TEST_NAME + '.tar.gz'
-TEST_URL = 'https://github.com/promises-aplus/promises-tests/archive/' + \
- TEST_TAG + '.tar.gz'
-TEST_ARCHIVE_HASH = \
- 'e446ca557ac5836dd439fecd19689c243a28b1d5a6644dd7fed4274d0fa67270'
class PromiseAplusTestSuite(testsuite.TestSuite):
@@ -76,11 +71,14 @@ class PromiseAplusTestSuite(testsuite.TestSuite):
os.listdir(os.path.join(self.root, TEST_NAME, 'lib', 'tests'))
if fname.endswith('.js')]
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + context.mode_flags + ['--allow-natives-syntax'] +
- self.helper_files_pre +
- [os.path.join(self.test_files_root, testcase.path + '.js')] +
- self.helper_files_post)
+ def GetParametersForTestCase(self, testcase, context):
+ files = (
+ self.helper_files_pre +
+ [os.path.join(self.test_files_root, testcase.path + '.js')] +
+ self.helper_files_post
+ )
+ flags = testcase.flags + context.mode_flags + ['--allow-natives-syntax']
+ return files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, TEST_NAME,
@@ -97,51 +95,5 @@ class PromiseAplusTestSuite(testsuite.TestSuite):
return not 'All tests have run.' in testcase.output.stdout or \
'FAIL:' in testcase.output.stdout
- def DownloadTestData(self):
- archive = os.path.join(self.root, TEST_ARCHIVE)
- directory = os.path.join(self.root, TEST_NAME)
- if not os.path.exists(archive):
- print('Downloading {0} from {1} ...'.format(TEST_NAME, TEST_URL))
- utils.URLRetrieve(TEST_URL, archive)
- if os.path.exists(directory):
- shutil.rmtree(directory)
-
- if not os.path.exists(directory):
- print('Extracting {0} ...'.format(TEST_ARCHIVE))
- hash = hashlib.sha256()
- with open(archive, 'rb') as f:
- for chunk in iter(lambda: f.read(8192), ''):
- hash.update(chunk)
- if hash.hexdigest() != TEST_ARCHIVE_HASH:
- os.remove(archive)
- raise Exception('Hash mismatch of test data file')
- archive = tarfile.open(archive, 'r:gz')
- if sys.platform in ('win32', 'cygwin'):
- # Magic incantation to allow longer path names on Windows.
- archive.extractall(u'\\\\?\\%s' % self.root)
- else:
- archive.extractall(self.root)
- shutil.move(os.path.join(self.root, TEST_ARCHIVE_TOP), directory)
-
- def DownloadSinon(self):
- directory = os.path.join(self.root, SINON_NAME)
- if not os.path.exists(directory):
- os.mkdir(directory)
- path = os.path.join(directory, SINON_FILENAME)
- if not os.path.exists(path):
- utils.URLRetrieve(SINON_URL, path)
- hash = hashlib.sha256()
- with open(path, 'rb') as f:
- for chunk in iter(lambda: f.read(8192), ''):
- hash.update(chunk)
- if hash.hexdigest() != SINON_HASH:
- os.remove(path)
- raise Exception('Hash mismatch of test data file')
-
- def DownloadData(self):
- self.DownloadTestData()
- self.DownloadSinon()
-
-
def GetSuite(name, root):
return PromiseAplusTestSuite(name, root)
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 8e716309ee..51ab51447a 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -84,11 +84,6 @@
'language/expressions/prefix-increment/S11.4.4_A5_*': [FAIL],
'language/statements/variable/binding-resolution': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6216
- 'built-ins/TypedArrays/buffer-arg-byteoffset-to-number-detachbuffer': [FAIL],
- 'built-ins/TypedArrays/buffer-arg-length-to-number-detachbuffer': [FAIL],
- 'built-ins/TypedArrays/buffer-arg-detachedbuffer': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4895
'built-ins/TypedArrays/internals/DefineOwnProperty/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/DefineOwnProperty/detached-buffer-realm': [FAIL],
@@ -156,14 +151,9 @@
'language/eval-code/direct/var-env-lower-lex-catch-non-strict': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4951
- 'language/expressions/assignment/dstr-array-elem-iter-rtrn-close': [FAIL],
'language/expressions/assignment/dstr-array-elem-iter-rtrn-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-iter-rtrn-close-null': [FAIL],
'language/expressions/assignment/dstr-array-elem-iter-thrw-close': [FAIL],
'language/expressions/assignment/dstr-array-elem-iter-thrw-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-rtrn-close': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-rtrn-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-rtrn-close-null': [FAIL],
'language/expressions/assignment/dstr-array-elem-trlg-iter-list-thrw-close': [FAIL],
'language/expressions/assignment/dstr-array-elem-trlg-iter-list-thrw-close-err': [FAIL],
'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close': [FAIL],
@@ -177,14 +167,9 @@
'language/expressions/assignment/dstr-array-rest-iter-thrw-close': [FAIL],
'language/expressions/assignment/dstr-array-rest-iter-thrw-close-err': [FAIL],
'language/expressions/assignment/dstr-array-rest-lref-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-iter-rtrn-close': [FAIL],
'language/statements/for-of/dstr-array-elem-iter-rtrn-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-iter-rtrn-close-null': [FAIL],
'language/statements/for-of/dstr-array-elem-iter-thrw-close': [FAIL],
'language/statements/for-of/dstr-array-elem-iter-thrw-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-list-rtrn-close': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-list-rtrn-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-list-rtrn-close-null': [FAIL],
'language/statements/for-of/dstr-array-elem-trlg-iter-list-thrw-close': [FAIL],
'language/statements/for-of/dstr-array-elem-trlg-iter-list-thrw-close-err': [FAIL],
'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close': [FAIL],
@@ -198,7 +183,8 @@
'language/statements/for-of/dstr-array-rest-iter-thrw-close': [FAIL],
'language/statements/for-of/dstr-array-rest-iter-thrw-close-err': [FAIL],
'language/statements/for-of/dstr-array-rest-lref-err': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-iter-rtrn-close-null': [FAIL],
+ 'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
+ 'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=896
'built-ins/RegExp/property-escapes/binary-properties-with-value': [FAIL],
@@ -248,16 +234,6 @@
'built-ins/TypedArrays/internals/DefineOwnProperty/conversion-operation-consistent-nan': [PASS, FAIL],
'built-ins/TypedArrays/internals/Set/conversion-operation-consistent-nan': [PASS, FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5070
- 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-configurable': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-extensible': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__defineGetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-configurable': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-extensible': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__defineSetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__lookupGetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
- 'annexB/built-ins/Object/prototype/__lookupSetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4451
# https://github.com/tc39/ecma262/issues/753
'annexB/language/eval-code/direct/global-block-decl-eval-global-existing-global-init': [FAIL],
@@ -346,9 +322,6 @@
'annexB/language/eval-code/direct/func-switch-case-eval-func-no-skip-try': [FAIL],
'annexB/language/eval-code/direct/func-switch-dflt-eval-func-no-skip-try': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5546
- 'language/expressions/tagged-template/invalid-escape-sequences': ['--harmony-template-escapes'],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5537
'built-ins/global/*': [SKIP],
@@ -446,27 +419,34 @@
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-arguments': [FAIL_SLOPPY],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4743#c28
- 'built-ins/RegExp/property-escapes/generated/Emoji_Component': [FAIL],
-
- # ICU 59 uses Unicode 9 data; property escape tests were generated for Unicode 10
- 'built-ins/RegExp/property-escapes/generated/*': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6776
'built-ins/Proxy/ownKeys/return-duplicate-entries-throws': [FAIL],
'built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=6861
+ 'language/statements/for-of/iterator-next-reference': [FAIL],
+ 'language/expressions/async-generator/named-yield-star-async-next': [FAIL],
+ 'language/expressions/async-generator/yield-star-async-next': [FAIL],
+ 'language/expressions/class/async-gen-method-yield-star-async-next': [FAIL],
+ 'language/expressions/class/async-gen-method-static-yield-star-async-next': [FAIL],
+ 'language/expressions/object/method-definition/async-gen-yield-star-async-next': [FAIL],
+ 'language/statements/async-generator/yield-star-async-next': [FAIL],
+ 'language/statements/class/async-gen-method-yield-star-async-next': [FAIL],
+ 'language/statements/class/async-gen-method-static-yield-star-async-next': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
# to be either marked as bugs with issues filed for them or as deliberate
# incompatibilities if the test cases turn out to be broken or ambiguous.
# Some of these are related to v8:4361 in being visible side effects from Intl.
- 'intl402/6.2.3': [FAIL],
+ 'intl402/6.2.3_a': [FAIL],
'intl402/Collator/10.1.2_a': [PASS, FAIL],
'intl402/Collator/10.2.3_b': [PASS, FAIL],
+ 'intl402/DateTimeFormat/prototype/resolvedOptions/hourCycle': [FAIL],
'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
'intl402/DateTimeFormat/12.2.3_b': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/success_cases': [FAIL],
'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
'intl402/NumberFormat/11.1.1_20_c': [FAIL],
'intl402/NumberFormat/11.1.2': [PASS, FAIL],
@@ -512,10 +492,6 @@
'language/statements/labeled/value-await-non-module-escaped': [FAIL],
'language/statements/labeled/value-yield-non-strict-escaped': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6912
- 'built-ins/RegExp/named-groups/string-replace-missing': [FAIL],
- 'built-ins/RegExp/named-groups/string-replace-unclosed': [FAIL],
-
############################ INVALID TESTS #############################
# Test makes unjustified assumptions about the number of calls to SortCompare.
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 849a3036cd..358d0db459 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -41,12 +41,9 @@ from testrunner.objects import testcase
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
- 'object-rest': '--harmony-object-rest-spread',
- 'object-spread': '--harmony-object-rest-spread',
'async-iteration': '--harmony-async-iteration',
'regexp-named-groups': '--harmony-regexp-named-captures',
'regexp-unicode-property-escapes': '--harmony-regexp-property',
- 'regexp-lookbehind': '--harmony-regexp-lookbehind',
'Promise.prototype.finally': '--harmony-promise-finally',
}
@@ -105,7 +102,8 @@ FAST_VARIANTS = {
class Test262VariantGenerator(testsuite.VariantGenerator):
def GetFlagSets(self, testcase, variant):
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
+ if outcomes and statusfile.OnlyFastVariants(outcomes):
variant_flags = FAST_VARIANTS
else:
variant_flags = ALL_VARIANTS
@@ -155,22 +153,27 @@ class Test262TestSuite(testsuite.TestSuite):
SKIPPED_FEATURES.intersection(
self.GetTestRecord(case).get("features", []))) == 0]
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + context.mode_flags + self.harness +
- ([os.path.join(self.root, "harness-agent.js")]
- if testcase.path.startswith('built-ins/Atomics') else []) +
- self.GetIncludesForTest(testcase) +
- (["--module"] if "module" in self.GetTestRecord(testcase) else []) +
- [self.GetPathForTest(testcase)] +
- (["--throws"] if "negative" in self.GetTestRecord(testcase)
- else []) +
- (["--allow-natives-syntax"]
- if "detachArrayBuffer.js" in
- self.GetTestRecord(testcase).get("includes", [])
- else []) +
- ([flag for flag in testcase.outcomes if flag.startswith("--")]) +
- ([flag for (feature, flag) in FEATURE_FLAGS.items()
- if feature in self.GetTestRecord(testcase).get("features", [])]))
+ def GetParametersForTestCase(self, testcase, context):
+ files = (
+ list(self.harness) +
+ ([os.path.join(self.root, "harness-agent.js")]
+ if testcase.path.startswith('built-ins/Atomics') else []) +
+ self.GetIncludesForTest(testcase) +
+ (["--module"] if "module" in self.GetTestRecord(testcase) else []) +
+ [self.GetPathForTest(testcase)]
+ )
+ flags = (
+ testcase.flags + context.mode_flags +
+ (["--throws"] if "negative" in self.GetTestRecord(testcase)
+ else []) +
+ (["--allow-natives-syntax"]
+ if "detachArrayBuffer.js" in
+ self.GetTestRecord(testcase).get("includes", [])
+ else []) +
+ ([flag for (feature, flag) in FEATURE_FLAGS.items()
+ if feature in self.GetTestRecord(testcase).get("features", [])])
+ )
+ return files, flags, {}
def _VariantGeneratorFactory(self):
return Test262VariantGenerator
@@ -203,12 +206,8 @@ class Test262TestSuite(testsuite.TestSuite):
def GetIncludesForTest(self, testcase):
test_record = self.GetTestRecord(testcase)
- if "includes" in test_record:
- return [os.path.join(self.BasePath(filename), filename)
- for filename in test_record.get("includes", [])]
- else:
- includes = []
- return includes
+ return [os.path.join(self.BasePath(filename), filename)
+ for filename in test_record.get("includes", [])]
def GetPathForTest(self, testcase):
filename = os.path.join(self.localtestroot, testcase.path + ".js")
@@ -243,15 +242,12 @@ class Test262TestSuite(testsuite.TestSuite):
return True
return "FAILED!" in output.stdout
- def HasUnexpectedOutput(self, testcase):
- outcome = self.GetOutcome(testcase)
- if (statusfile.FAIL_SLOPPY in testcase.outcomes and
- "--use-strict" not in testcase.flags):
- return outcome != statusfile.FAIL
- return not outcome in ([outcome for outcome in testcase.outcomes
- if not outcome.startswith('--')
- and outcome != statusfile.FAIL_SLOPPY]
- or [statusfile.PASS])
+ def GetExpectedOutcomes(self, testcase):
+ outcomes = self.GetStatusFileOutcomes(testcase)
+ if (statusfile.FAIL_SLOPPY in outcomes and
+ '--use-strict' not in testcase.flags):
+ return [statusfile.FAIL]
+ return super(Test262TestSuite, self).GetExpectedOutcomes(testcase)
def PrepareSources(self):
# The archive is created only on swarming. Local checkouts have the
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 0f8d8c6cfc..d6d0a1067f 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -63,6 +63,7 @@ v8_source_set("unittests_sources") {
"base/sys-info-unittest.cc",
"base/template-utils-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
+ "bigint-unittest.cc",
"cancelable-tasks-unittest.cc",
"char-predicates-unittest.cc",
"code-stub-assembler-unittest.cc",
@@ -165,6 +166,7 @@ v8_source_set("unittests_sources") {
"libplatform/worker-thread-unittest.cc",
"locked-queue-unittest.cc",
"object-unittest.cc",
+ "parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
"register-configuration-unittest.cc",
"run-all-unittests.cc",
@@ -174,6 +176,7 @@ v8_source_set("unittests_sources") {
"test-utils.cc",
"test-utils.h",
"unicode-unittest.cc",
+ "utils-unittest.cc",
"value-serializer-unittest.cc",
"wasm/control-transfer-unittest.cc",
"wasm/decoder-unittest.cc",
@@ -182,6 +185,7 @@ v8_source_set("unittests_sources") {
"wasm/loop-assignment-analysis-unittest.cc",
"wasm/module-decoder-unittest.cc",
"wasm/streaming-decoder-unittest.cc",
+ "wasm/trap-handler-unittest.cc",
"wasm/wasm-heap-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index bbb8230e6b..d11dba69cf 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -37,10 +37,7 @@ TEST_F(ObjectTest, SetAccessorWhenUnconfigurablePropAlreadyDefined) {
using LapContextTest = TestWithIsolate;
-// TODO(yukishiino): Enable this unittest once
-// PropertyAccessInfo::accessor_holder() gets supported. Currently we're using
-// PropertyAccessInfo::holder(), which doesn't return the accessor holder.
-TEST_F(LapContextTest, DISABLED_CurrentContextInLazyAccessorOnPrototype) {
+TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
// The receiver object is created in |receiver_context|, but its prototype
// object is created in |prototype_context|, and the property is accessed
// from |caller_context|.
diff --git a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
index fc94d2d55a..ebfcc665f8 100644
--- a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
@@ -35,10 +35,10 @@ class AsmJsScannerTest : public ::testing::Test {
scanner->Next();
}
- void CheckForEnd() { CHECK(scanner->Token() == AsmJsScanner::kEndOfInput); }
+ void CheckForEnd() { CHECK_EQ(scanner->Token(), AsmJsScanner::kEndOfInput); }
void CheckForParseError() {
- CHECK(scanner->Token() == AsmJsScanner::kParseError);
+ CHECK_EQ(scanner->Token(), AsmJsScanner::kParseError);
}
std::unique_ptr<Utf16CharacterStream> stream;
diff --git a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
index 0c443476c0..7430ce6b35 100644
--- a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
@@ -85,7 +85,7 @@ class AsmTypeTest : public TestWithZone {
template <typename Arg, typename... Others>
static void AddAllArguments(AsmFunctionType* function_type, Arg* arg,
Others... others) {
- CHECK(function_type != nullptr);
+ CHECK_NOT_NULL(function_type);
function_type->AddArgument((*arg)());
AddAllArguments(function_type, others...);
}
@@ -120,7 +120,7 @@ class AsmTypeTest : public TestWithZone {
template <typename Overload, typename... Others>
static void AddAllOverloads(AsmOverloadedFunctionType* function,
Overload* overload, Others... others) {
- CHECK(function != nullptr);
+ CHECK_NOT_NULL(function);
function->AddOverload(overload);
AddAllOverloads(function, others...);
}
diff --git a/deps/v8/test/unittests/base/bits-unittest.cc b/deps/v8/test/unittests/base/bits-unittest.cc
index 45fb921797..485dddf529 100644
--- a/deps/v8/test/unittests/base/bits-unittest.cc
+++ b/deps/v8/test/unittests/base/bits-unittest.cc
@@ -18,70 +18,103 @@ namespace v8 {
namespace base {
namespace bits {
-TEST(Bits, CountPopulation32) {
- EXPECT_EQ(0u, CountPopulation32(0));
- EXPECT_EQ(1u, CountPopulation32(1));
- EXPECT_EQ(8u, CountPopulation32(0x11111111));
- EXPECT_EQ(16u, CountPopulation32(0xf0f0f0f0));
- EXPECT_EQ(24u, CountPopulation32(0xfff0f0ff));
- EXPECT_EQ(32u, CountPopulation32(0xffffffff));
+TEST(Bits, CountPopulation16) {
+ EXPECT_EQ(0u, CountPopulation(uint16_t{0}));
+ EXPECT_EQ(1u, CountPopulation(uint16_t{1}));
+ EXPECT_EQ(4u, CountPopulation(uint16_t{0x1111}));
+ EXPECT_EQ(8u, CountPopulation(uint16_t{0xf0f0}));
+ EXPECT_EQ(12u, CountPopulation(uint16_t{0xf0ff}));
+ EXPECT_EQ(16u, CountPopulation(uint16_t{0xffff}));
}
+TEST(Bits, CountPopulation32) {
+ EXPECT_EQ(0u, CountPopulation(uint32_t{0}));
+ EXPECT_EQ(1u, CountPopulation(uint32_t{1}));
+ EXPECT_EQ(8u, CountPopulation(uint32_t{0x11111111}));
+ EXPECT_EQ(16u, CountPopulation(uint32_t{0xf0f0f0f0}));
+ EXPECT_EQ(24u, CountPopulation(uint32_t{0xfff0f0ff}));
+ EXPECT_EQ(32u, CountPopulation(uint32_t{0xffffffff}));
+}
TEST(Bits, CountPopulation64) {
- EXPECT_EQ(0u, CountPopulation64(0));
- EXPECT_EQ(1u, CountPopulation64(1));
- EXPECT_EQ(2u, CountPopulation64(0x8000000000000001));
- EXPECT_EQ(8u, CountPopulation64(0x11111111));
- EXPECT_EQ(16u, CountPopulation64(0xf0f0f0f0));
- EXPECT_EQ(24u, CountPopulation64(0xfff0f0ff));
- EXPECT_EQ(32u, CountPopulation64(0xffffffff));
- EXPECT_EQ(16u, CountPopulation64(0x1111111111111111));
- EXPECT_EQ(32u, CountPopulation64(0xf0f0f0f0f0f0f0f0));
- EXPECT_EQ(48u, CountPopulation64(0xfff0f0fffff0f0ff));
- EXPECT_EQ(64u, CountPopulation64(0xffffffffffffffff));
+ EXPECT_EQ(0u, CountPopulation(uint64_t{0}));
+ EXPECT_EQ(1u, CountPopulation(uint64_t{1}));
+ EXPECT_EQ(2u, CountPopulation(uint64_t{0x8000000000000001}));
+ EXPECT_EQ(8u, CountPopulation(uint64_t{0x11111111}));
+ EXPECT_EQ(16u, CountPopulation(uint64_t{0xf0f0f0f0}));
+ EXPECT_EQ(24u, CountPopulation(uint64_t{0xfff0f0ff}));
+ EXPECT_EQ(32u, CountPopulation(uint64_t{0xffffffff}));
+ EXPECT_EQ(16u, CountPopulation(uint64_t{0x1111111111111111}));
+ EXPECT_EQ(32u, CountPopulation(uint64_t{0xf0f0f0f0f0f0f0f0}));
+ EXPECT_EQ(48u, CountPopulation(uint64_t{0xfff0f0fffff0f0ff}));
+ EXPECT_EQ(64u, CountPopulation(uint64_t{0xffffffffffffffff}));
}
+TEST(Bits, CountLeadingZeros16) {
+ EXPECT_EQ(16u, CountLeadingZeros(uint16_t{0}));
+ EXPECT_EQ(15u, CountLeadingZeros(uint16_t{1}));
+ TRACED_FORRANGE(uint16_t, shift, 0, 15) {
+ EXPECT_EQ(15u - shift,
+ CountLeadingZeros(static_cast<uint16_t>(1 << shift)));
+ }
+ EXPECT_EQ(4u, CountLeadingZeros(uint16_t{0x0f0f}));
+}
TEST(Bits, CountLeadingZeros32) {
- EXPECT_EQ(32u, CountLeadingZeros32(0));
- EXPECT_EQ(31u, CountLeadingZeros32(1));
+ EXPECT_EQ(32u, CountLeadingZeros(uint32_t{0}));
+ EXPECT_EQ(31u, CountLeadingZeros(uint32_t{1}));
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
- EXPECT_EQ(31u - shift, CountLeadingZeros32(1u << shift));
+ EXPECT_EQ(31u - shift, CountLeadingZeros(uint32_t{1} << shift));
}
- EXPECT_EQ(4u, CountLeadingZeros32(0x0f0f0f0f));
+ EXPECT_EQ(4u, CountLeadingZeros(uint32_t{0x0f0f0f0f}));
}
-
TEST(Bits, CountLeadingZeros64) {
- EXPECT_EQ(64u, CountLeadingZeros64(0));
- EXPECT_EQ(63u, CountLeadingZeros64(1));
+ EXPECT_EQ(64u, CountLeadingZeros(uint64_t{0}));
+ EXPECT_EQ(63u, CountLeadingZeros(uint64_t{1}));
TRACED_FORRANGE(uint32_t, shift, 0, 63) {
- EXPECT_EQ(63u - shift, CountLeadingZeros64(V8_UINT64_C(1) << shift));
+ EXPECT_EQ(63u - shift, CountLeadingZeros(uint64_t{1} << shift));
}
- EXPECT_EQ(36u, CountLeadingZeros64(0x0f0f0f0f));
- EXPECT_EQ(4u, CountLeadingZeros64(0x0f0f0f0f00000000));
+ EXPECT_EQ(36u, CountLeadingZeros(uint64_t{0x0f0f0f0f}));
+ EXPECT_EQ(4u, CountLeadingZeros(uint64_t{0x0f0f0f0f00000000}));
}
+TEST(Bits, CountTrailingZeros16) {
+ EXPECT_EQ(16u, CountTrailingZeros(uint16_t{0}));
+ EXPECT_EQ(15u, CountTrailingZeros(uint16_t{0x8000}));
+ TRACED_FORRANGE(uint16_t, shift, 0, 15) {
+ EXPECT_EQ(shift, CountTrailingZeros(static_cast<uint16_t>(1 << shift)));
+ }
+ EXPECT_EQ(4u, CountTrailingZeros(uint16_t{0xf0f0u}));
+}
-TEST(Bits, CountTrailingZeros32) {
- EXPECT_EQ(32u, CountTrailingZeros32(0));
- EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
+TEST(Bits, CountTrailingZerosu32) {
+ EXPECT_EQ(32u, CountTrailingZeros(uint32_t{0}));
+ EXPECT_EQ(31u, CountTrailingZeros(uint32_t{0x80000000}));
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
- EXPECT_EQ(shift, CountTrailingZeros32(1u << shift));
+ EXPECT_EQ(shift, CountTrailingZeros(uint32_t{1} << shift));
}
- EXPECT_EQ(4u, CountTrailingZeros32(0xf0f0f0f0));
+ EXPECT_EQ(4u, CountTrailingZeros(uint32_t{0xf0f0f0f0u}));
}
+TEST(Bits, CountTrailingZerosi32) {
+ EXPECT_EQ(32u, CountTrailingZeros(int32_t{0}));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(shift, CountTrailingZeros(int32_t{1} << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeros(int32_t{0x70f0f0f0u}));
+ EXPECT_EQ(2u, CountTrailingZeros(int32_t{-4}));
+ EXPECT_EQ(0u, CountTrailingZeros(int32_t{-1}));
+}
TEST(Bits, CountTrailingZeros64) {
- EXPECT_EQ(64u, CountTrailingZeros64(0));
- EXPECT_EQ(63u, CountTrailingZeros64(0x8000000000000000));
+ EXPECT_EQ(64u, CountTrailingZeros(uint64_t{0}));
+ EXPECT_EQ(63u, CountTrailingZeros(uint64_t{0x8000000000000000}));
TRACED_FORRANGE(uint32_t, shift, 0, 63) {
- EXPECT_EQ(shift, CountTrailingZeros64(V8_UINT64_C(1) << shift));
+ EXPECT_EQ(shift, CountTrailingZeros(uint64_t{1} << shift));
}
- EXPECT_EQ(4u, CountTrailingZeros64(0xf0f0f0f0));
- EXPECT_EQ(36u, CountTrailingZeros64(0xf0f0f0f000000000));
+ EXPECT_EQ(4u, CountTrailingZeros(uint64_t{0xf0f0f0f0}));
+ EXPECT_EQ(36u, CountTrailingZeros(uint64_t{0xf0f0f0f000000000}));
}
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index 97a27a438e..10f9f32c7d 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -31,9 +31,28 @@ TEST(FunctionalTest, HashDoubleZero) {
EXPECT_EQ(h(0.0), h(-0.0));
}
+namespace {
+
+inline int64_t GetRandomSeedFromFlag(int random_seed) {
+ return random_seed ? random_seed : TimeTicks::Now().ToInternalValue();
+}
+
+} // namespace
template <typename T>
-class FunctionalTest : public TestWithRandomNumberGenerator {};
+class FunctionalTest : public ::testing::Test {
+ public:
+ FunctionalTest()
+ : rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
+ virtual ~FunctionalTest() {}
+
+ RandomNumberGenerator* rng() { return &rng_; }
+
+ private:
+ RandomNumberGenerator rng_;
+
+ DISALLOW_COPY_AND_ASSIGN(FunctionalTest);
+};
typedef ::testing::Types<signed char, unsigned char,
short, // NOLINT(runtime/int)
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index 69efc483ac..cb07ad1ca4 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -179,13 +179,12 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TestPermissions(OS::MemoryPermission permission, bool can_read,
bool can_write) {
- const size_t allocation_size = OS::CommitPageSize();
- size_t actual = 0;
- int* buffer =
- static_cast<int*>(OS::Allocate(allocation_size, &actual, permission));
+ const size_t page_size = OS::AllocatePageSize();
+ int* buffer = static_cast<int*>(
+ OS::Allocate(nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
- OS::Free(buffer, actual);
+ CHECK(OS::Free(buffer, page_size));
}
};
diff --git a/deps/v8/test/unittests/base/template-utils-unittest.cc b/deps/v8/test/unittests/base/template-utils-unittest.cc
index ea8796a123..2c1da8ec86 100644
--- a/deps/v8/test/unittests/base/template-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/template-utils-unittest.cc
@@ -101,6 +101,63 @@ static_assert(has_output_operator<TestClass2>::value,
static_assert(!has_output_operator<const TestClass2>::value,
"const TestClass2 can not be output");
+//////////////////////////////
+// Test fold.
+//////////////////////////////
+
+struct FoldAllSameType {
+ constexpr uint32_t operator()(uint32_t a, uint32_t b) const { return a | b; }
+};
+static_assert(base::fold(FoldAllSameType{}, 3, 6) == 7, "check fold");
+// Test that it works if implicit conversion is needed for one of the
+// parameters.
+static_assert(base::fold(FoldAllSameType{}, uint8_t{1}, 256) == 257,
+ "check correct type inference");
+// Test a single parameter.
+static_assert(base::fold(FoldAllSameType{}, 25) == 25,
+ "check folding a single argument");
+
+TEST(TemplateUtilsTest, FoldDifferentType) {
+ auto fn = [](std::string str, char c) {
+ str.push_back(c);
+ return str;
+ };
+ CHECK_EQ(base::fold(fn, std::string("foo"), 'b', 'a', 'r'), "foobar");
+}
+
+TEST(TemplateUtilsTest, FoldMoveOnlyType) {
+ auto fn = [](std::unique_ptr<std::string> str, char c) {
+ str->push_back(c);
+ return str;
+ };
+ std::unique_ptr<std::string> str = base::make_unique<std::string>("foo");
+ std::unique_ptr<std::string> folded =
+ base::fold(fn, std::move(str), 'b', 'a', 'r');
+ CHECK_NULL(str);
+ CHECK_NOT_NULL(folded);
+ CHECK_EQ(*folded, "foobar");
+}
+
+struct TemplatizedFoldFunctor {
+ template <typename T, typename... Tup>
+ std::tuple<Tup..., typename std::decay<T>::type> operator()(
+ std::tuple<Tup...> tup, T&& val) {
+ return std::tuple_cat(std::move(tup),
+ std::make_tuple(std::forward<T>(val)));
+ }
+};
+TEST(TemplateUtilsTest, FoldToTuple) {
+ auto input = std::make_tuple(char{'x'}, int{4}, double{3.2},
+ std::unique_ptr<uint8_t>{}, std::string{"foo"});
+ auto result =
+ base::fold(TemplatizedFoldFunctor{}, std::make_tuple(),
+ std::get<0>(input), std::get<1>(input), std::get<2>(input),
+ std::unique_ptr<uint8_t>{}, std::get<4>(input));
+ static_assert(std::is_same<decltype(result), decltype(input)>::value,
+ "the resulting tuple should have the same type as the input");
+ DCHECK_EQ(input, result);
+}
+
} // namespace template_utils_unittest
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
index 7c533db4f0..38c14cd96c 100644
--- a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
+++ b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
@@ -12,9 +12,38 @@ namespace base {
class RandomNumberGeneratorTest : public ::testing::TestWithParam<int> {};
-
static const int kMaxRuns = 12345;
+static void CheckSample(std::vector<uint64_t> sample, uint64_t max,
+ size_t size) {
+ EXPECT_EQ(sample.size(), size);
+
+ // Check if values are unique.
+ std::sort(sample.begin(), sample.end());
+ EXPECT_EQ(std::adjacent_find(sample.begin(), sample.end()), sample.end());
+
+ for (uint64_t x : sample) {
+ EXPECT_LT(x, max);
+ }
+}
+
+static void CheckSlowSample(const std::vector<uint64_t>& sample, uint64_t max,
+ size_t size,
+ const std::unordered_set<uint64_t>& excluded) {
+ CheckSample(sample, max, size);
+
+ for (uint64_t i : sample) {
+ EXPECT_FALSE(excluded.count(i));
+ }
+}
+
+static void TestNextSample(RandomNumberGenerator& rng, uint64_t max,
+ size_t size, bool slow = false) {
+ std::vector<uint64_t> sample =
+ slow ? rng.NextSampleSlow(max, size) : rng.NextSample(max, size);
+
+ CheckSample(sample, max, size);
+}
TEST_P(RandomNumberGeneratorTest, NextIntWithMaxValue) {
RandomNumberGenerator rng(GetParam());
@@ -44,6 +73,176 @@ TEST_P(RandomNumberGeneratorTest, NextDoubleReturnsValueBetween0And1) {
}
}
+#if GTEST_HAS_DEATH_TEST
+TEST(RandomNumberGenerator, NextSampleInvalidParam) {
+ RandomNumberGenerator rng(123);
+ std::vector<uint64_t> sample;
+ EXPECT_DEATH(sample = rng.NextSample(10, 11), ".*Check failed: n <= max.*");
+}
+
+TEST(RandomNumberGenerator, NextSampleSlowInvalidParam1) {
+ RandomNumberGenerator rng(123);
+ std::vector<uint64_t> sample;
+ EXPECT_DEATH(sample = rng.NextSampleSlow(10, 11),
+ ".*Check failed: max - excluded.size*");
+}
+
+TEST(RandomNumberGenerator, NextSampleSlowInvalidParam2) {
+ RandomNumberGenerator rng(123);
+ std::vector<uint64_t> sample;
+ EXPECT_DEATH(sample = rng.NextSampleSlow(5, 3, {0, 2, 3}),
+ ".*Check failed: max - excluded.size*");
+}
+#endif
+
+TEST_P(RandomNumberGeneratorTest, NextSample0) {
+ size_t m = 1;
+ RandomNumberGenerator rng(GetParam());
+
+ TestNextSample(rng, m, 0);
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlow0) {
+ size_t m = 1;
+ RandomNumberGenerator rng(GetParam());
+
+ TestNextSample(rng, m, 0, true);
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSample1) {
+ size_t m = 10;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, 1);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlow1) {
+ size_t m = 10;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, 1, true);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleMax) {
+ size_t m = 10;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, m);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowMax) {
+ size_t m = 10;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, m, true);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleHalf) {
+ size_t n = 5;
+ uint64_t m = 10;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, n);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowHalf) {
+ size_t n = 5;
+ uint64_t m = 10;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, n, true);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleMoreThanHalf) {
+ size_t n = 90;
+ uint64_t m = 100;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, n);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowMoreThanHalf) {
+ size_t n = 90;
+ uint64_t m = 100;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, n, true);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleLessThanHalf) {
+ size_t n = 10;
+ uint64_t m = 100;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, n);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowLessThanHalf) {
+ size_t n = 10;
+ uint64_t m = 100;
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ TestNextSample(rng, m, n, true);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowExcluded) {
+ size_t n = 2;
+ uint64_t m = 10;
+ std::unordered_set<uint64_t> excluded = {2, 4, 5, 9};
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ std::vector<uint64_t> sample = rng.NextSampleSlow(m, n, excluded);
+
+ CheckSlowSample(sample, m, n, excluded);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowExcludedMax1) {
+ size_t n = 1;
+ uint64_t m = 5;
+ std::unordered_set<uint64_t> excluded = {0, 2, 3, 4};
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ std::vector<uint64_t> sample = rng.NextSampleSlow(m, n, excluded);
+
+ CheckSlowSample(sample, m, n, excluded);
+ }
+}
+
+TEST_P(RandomNumberGeneratorTest, NextSampleSlowExcludedMax2) {
+ size_t n = 7;
+ uint64_t m = 10;
+ std::unordered_set<uint64_t> excluded = {0, 4, 8};
+ RandomNumberGenerator rng(GetParam());
+
+ for (int k = 0; k < kMaxRuns; ++k) {
+ std::vector<uint64_t> sample = rng.NextSampleSlow(m, n, excluded);
+
+ CheckSlowSample(sample, m, n, excluded);
+ }
+}
INSTANTIATE_TEST_CASE_P(RandomSeeds, RandomNumberGeneratorTest,
::testing::Values(INT_MIN, -1, 0, 1, 42, 100,
diff --git a/deps/v8/test/unittests/bigint-unittest.cc b/deps/v8/test/unittests/bigint-unittest.cc
new file mode 100644
index 0000000000..252cad6a8f
--- /dev/null
+++ b/deps/v8/test/unittests/bigint-unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+
+#include "src/conversions.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/bigint.h"
+#include "test/unittests/test-utils.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+typedef TestWithIsolate BigIntWithIsolate;
+
+void Compare(Handle<BigInt> x, double value, ComparisonResult expected) {
+ CHECK_EQ(expected, BigInt::CompareToDouble(x, value));
+}
+
+Handle<BigInt> NewFromInt(Isolate* isolate, int value) {
+ Handle<Smi> smi_value = handle(Smi::FromInt(value), isolate);
+ return BigInt::FromNumber(isolate, smi_value).ToHandleChecked();
+}
+
+TEST_F(BigIntWithIsolate, CompareToDouble) {
+ Handle<BigInt> zero = NewFromInt(isolate(), 0);
+ Handle<BigInt> one = NewFromInt(isolate(), 1);
+ Handle<BigInt> minus_one = NewFromInt(isolate(), -1);
+
+ // Non-finite doubles.
+ Compare(zero, std::nan(""), ComparisonResult::kUndefined);
+ Compare(one, INFINITY, ComparisonResult::kLessThan);
+ Compare(one, -INFINITY, ComparisonResult::kGreaterThan);
+
+ // Unequal sign.
+ Compare(one, -1, ComparisonResult::kGreaterThan);
+ Compare(minus_one, 1, ComparisonResult::kLessThan);
+
+ // Cases involving zero.
+ Compare(zero, 0, ComparisonResult::kEqual);
+ Compare(zero, -0, ComparisonResult::kEqual);
+ Compare(one, 0, ComparisonResult::kGreaterThan);
+ Compare(minus_one, 0, ComparisonResult::kLessThan);
+ Compare(zero, 1, ComparisonResult::kLessThan);
+ Compare(zero, -1, ComparisonResult::kGreaterThan);
+
+ // Small doubles.
+ Compare(zero, 0.25, ComparisonResult::kLessThan);
+ Compare(one, 0.5, ComparisonResult::kGreaterThan);
+ Compare(one, -0.5, ComparisonResult::kGreaterThan);
+ Compare(zero, -0.25, ComparisonResult::kGreaterThan);
+ Compare(minus_one, -0.5, ComparisonResult::kLessThan);
+
+ // Different bit lengths.
+ Handle<BigInt> four = NewFromInt(isolate(), 4);
+ Handle<BigInt> minus_five = NewFromInt(isolate(), -5);
+ Compare(four, 3.9, ComparisonResult::kGreaterThan);
+ Compare(four, 1.5, ComparisonResult::kGreaterThan);
+ Compare(four, 8, ComparisonResult::kLessThan);
+ Compare(four, 16, ComparisonResult::kLessThan);
+ Compare(minus_five, -4.9, ComparisonResult::kLessThan);
+ Compare(minus_five, -4, ComparisonResult::kLessThan);
+ Compare(minus_five, -25, ComparisonResult::kGreaterThan);
+
+ // Same bit length, difference in first digit.
+ double big_double = 4428155326412785451008.0;
+ Handle<BigInt> big =
+ BigIntLiteral(isolate(), "0xF10D00000000000000").ToHandleChecked();
+ Compare(big, big_double, ComparisonResult::kGreaterThan);
+ big = BigIntLiteral(isolate(), "0xE00D00000000000000").ToHandleChecked();
+ Compare(big, big_double, ComparisonResult::kLessThan);
+
+ double other_double = -13758438578910658560.0;
+ Handle<BigInt> other =
+ BigIntLiteral(isolate(), "-0xBEEFC1FE00000000").ToHandleChecked();
+ Compare(other, other_double, ComparisonResult::kGreaterThan);
+ other = BigIntLiteral(isolate(), "-0xBEEFCBFE00000000").ToHandleChecked();
+ Compare(other, other_double, ComparisonResult::kLessThan);
+
+ // Same bit length, difference in non-first digit.
+ big = BigIntLiteral(isolate(), "0xF00D00000000000001").ToHandleChecked();
+ Compare(big, big_double, ComparisonResult::kGreaterThan);
+ big = BigIntLiteral(isolate(), "0xF00A00000000000000").ToHandleChecked();
+ Compare(big, big_double, ComparisonResult::kLessThan);
+
+ other = BigIntLiteral(isolate(), "-0xBEEFCAFE00000001").ToHandleChecked();
+ Compare(other, other_double, ComparisonResult::kLessThan);
+
+ // Same bit length, difference in fractional part.
+ Compare(one, 1.5, ComparisonResult::kLessThan);
+ Compare(minus_one, -1.25, ComparisonResult::kGreaterThan);
+ big = NewFromInt(isolate(), 0xF00D00);
+ Compare(big, 15731968.125, ComparisonResult::kLessThan);
+ Compare(big, 15731967.875, ComparisonResult::kGreaterThan);
+ big = BigIntLiteral(isolate(), "0x123456789ab").ToHandleChecked();
+ Compare(big, 1250999896491.125, ComparisonResult::kLessThan);
+
+ // Equality!
+ Compare(one, 1, ComparisonResult::kEqual);
+ Compare(minus_one, -1, ComparisonResult::kEqual);
+ big = BigIntLiteral(isolate(), "0xF00D00000000000000").ToHandleChecked();
+ Compare(big, big_double, ComparisonResult::kEqual);
+
+ Handle<BigInt> two_52 =
+ BigIntLiteral(isolate(), "0x10000000000000").ToHandleChecked();
+ Compare(two_52, 4503599627370496.0, ComparisonResult::kEqual);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
index 4ace9ef19f..6ae5c7bd6c 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
@@ -11,40 +11,36 @@ namespace internal {
TEST(CompilerDispatcherTracerTest, EstimateWithoutSamples) {
CompilerDispatcherTracer tracer(nullptr);
- EXPECT_EQ(0.0, tracer.EstimatePrepareToParseInMs());
- EXPECT_EQ(1.0, tracer.EstimateParseInMs(0));
- EXPECT_EQ(1.0, tracer.EstimateParseInMs(42));
- EXPECT_EQ(0.0, tracer.EstimateFinalizeParsingInMs());
- EXPECT_EQ(0.0, tracer.EstimatePrepareToCompileInMs());
- EXPECT_EQ(0.0, tracer.EstimateCompileInMs());
- EXPECT_EQ(0.0, tracer.EstimateCompileInMs());
- EXPECT_EQ(0.0, tracer.EstimateFinalizeCompilingInMs());
+ EXPECT_EQ(0.0, tracer.EstimatePrepareInMs());
+ EXPECT_EQ(1.0, tracer.EstimateCompileInMs(1));
+ EXPECT_EQ(1.0, tracer.EstimateCompileInMs(42));
+ EXPECT_EQ(0.0, tracer.EstimateFinalizeInMs());
}
TEST(CompilerDispatcherTracerTest, Average) {
CompilerDispatcherTracer tracer(nullptr);
- EXPECT_EQ(0.0, tracer.EstimatePrepareToParseInMs());
+ EXPECT_EQ(0.0, tracer.EstimatePrepareInMs());
- tracer.RecordPrepareToParse(1.0);
- tracer.RecordPrepareToParse(2.0);
- tracer.RecordPrepareToParse(3.0);
+ tracer.RecordPrepare(1.0);
+ tracer.RecordPrepare(2.0);
+ tracer.RecordPrepare(3.0);
- EXPECT_EQ((1.0 + 2.0 + 3.0) / 3, tracer.EstimatePrepareToParseInMs());
+ EXPECT_EQ((1.0 + 2.0 + 3.0) / 3, tracer.EstimatePrepareInMs());
}
TEST(CompilerDispatcherTracerTest, SizeBasedAverage) {
CompilerDispatcherTracer tracer(nullptr);
- EXPECT_EQ(1.0, tracer.EstimateParseInMs(100));
+ EXPECT_EQ(1.0, tracer.EstimateCompileInMs(100));
// All three samples parse 100 units/ms.
- tracer.RecordParse(1.0, 100);
- tracer.RecordParse(2.0, 200);
- tracer.RecordParse(3.0, 300);
+ tracer.RecordCompile(1.0, 100);
+ tracer.RecordCompile(2.0, 200);
+ tracer.RecordCompile(3.0, 300);
- EXPECT_EQ(1.0, tracer.EstimateParseInMs(100));
- EXPECT_EQ(5.0, tracer.EstimateParseInMs(500));
+ EXPECT_EQ(1.0, tracer.EstimateCompileInMs(100));
+ EXPECT_EQ(5.0, tracer.EstimateCompileInMs(500));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 5d776d318b..a0ddd1e5e4 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -31,9 +31,8 @@
#define STR(x) _STR(x)
#define _SCRIPT(fn, a, b, c) a fn b fn c
#define SCRIPT(a, b, c) _SCRIPT("f" STR(__LINE__), a, b, c)
-#define TEST_SCRIPT() \
- SCRIPT("function g() { var y = 1; function ", \
- "(x) { return x * y }; return ", "; } g();")
+#define TEST_SCRIPT() \
+ "function f" STR(__LINE__) "(x, y) { return x * y }; f" STR(__LINE__) ";"
namespace v8 {
namespace internal {
@@ -62,55 +61,25 @@ class CompilerDispatcherTestFlags {
SaveFlags* CompilerDispatcherTestFlags::save_flags_ = nullptr;
-class CompilerDispatcherTest : public TestWithContext {
+class CompilerDispatcherTest : public TestWithNativeContext {
public:
CompilerDispatcherTest() = default;
~CompilerDispatcherTest() override = default;
static void SetUpTestCase() {
CompilerDispatcherTestFlags::SetFlagsForTest();
- TestWithContext::SetUpTestCase();
+ TestWithNativeContext ::SetUpTestCase();
}
static void TearDownTestCase() {
- TestWithContext::TearDownTestCase();
+ TestWithNativeContext ::TearDownTestCase();
CompilerDispatcherTestFlags::RestoreFlags();
}
- static UnoptimizedCompileJob::Status GetUnoptimizedJobStatus(
- const CompilerDispatcherJob* job) {
- CHECK_EQ(CompilerDispatcherJob::kUnoptimizedCompile, job->type());
- return job->AsUnoptimizedCompileJob()->status();
- }
-
- static UnoptimizedCompileJob::Status GetUnoptimizedJobStatus(
- const std::unique_ptr<CompilerDispatcherJob>& job) {
- return GetUnoptimizedJobStatus(job.get());
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTest);
};
-class CompilerDispatcherTestWithoutContext : public v8::TestWithIsolate {
- public:
- CompilerDispatcherTestWithoutContext() = default;
- ~CompilerDispatcherTestWithoutContext() override = default;
-
- static void SetUpTestCase() {
- CompilerDispatcherTestFlags::SetFlagsForTest();
- TestWithContext::SetUpTestCase();
- }
-
- static void TearDownTestCase() {
- TestWithContext::TearDownTestCase();
- CompilerDispatcherTestFlags::RestoreFlags();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTestWithoutContext);
-};
-
namespace {
class MockPlatform : public v8::Platform {
@@ -130,6 +99,18 @@ class MockPlatform : public v8::Platform {
size_t NumberOfAvailableBackgroundThreads() override { return 1; }
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ constexpr bool is_foreground_task_runner = true;
+ return std::make_shared<MockTaskRunner>(this, is_foreground_task_runner);
+ }
+
+ std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
+ v8::Isolate* isolate) override {
+ constexpr bool is_foreground_task_runner = false;
+ return std::make_shared<MockTaskRunner>(this, is_foreground_task_runner);
+ }
+
void CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) override {
base::LockGuard<base::Mutex> lock(&mutex_);
@@ -282,6 +263,43 @@ class MockPlatform : public v8::Platform {
DISALLOW_COPY_AND_ASSIGN(TaskWrapper);
};
+ class MockTaskRunner final : public TaskRunner {
+ public:
+ MockTaskRunner(MockPlatform* platform, bool is_foreground_task_runner)
+ : platform_(platform),
+ is_foreground_task_runner_(is_foreground_task_runner) {}
+
+ void PostTask(std::unique_ptr<v8::Task> task) override {
+ base::LockGuard<base::Mutex> lock(&platform_->mutex_);
+ if (is_foreground_task_runner_) {
+ platform_->foreground_tasks_.push_back(task.release());
+ } else {
+ platform_->background_tasks_.push_back(task.release());
+ }
+ }
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ UNREACHABLE();
+ };
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) override {
+ DCHECK(IdleTasksEnabled());
+ base::LockGuard<base::Mutex> lock(&platform_->mutex_);
+ ASSERT_TRUE(platform_->idle_task_ == nullptr);
+ platform_->idle_task_ = task.release();
+ }
+
+ bool IdleTasksEnabled() override {
+ // Idle tasks are enabled only in the foreground task runner.
+ return is_foreground_task_runner_;
+ };
+
+ private:
+ MockPlatform* platform_;
+ bool is_foreground_task_runner_;
+ };
+
double time_;
double time_step_;
@@ -311,8 +329,8 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
+
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
@@ -329,8 +347,7 @@ TEST_F(CompilerDispatcherTest, FinishNow) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(shared->is_compiled());
@@ -355,10 +372,9 @@ TEST_F(CompilerDispatcherTest, FinishAllNow) {
std::stringstream ss;
ss << 'f' << STR(__LINE__) << '_' << i;
std::string func_name = ss.str();
- std::string script("function g() { function " + func_name +
- "(x) { var a = 'x'; }; return " + func_name +
- "; } g();");
- f[i] = Handle<JSFunction>::cast(test::RunJS(isolate(), script.c_str()));
+ std::string script("function f" + func_name + "(x, y) { return x * y }; f" +
+ func_name + ";");
+ f[i] = RunJS<JSFunction>(script.c_str());
shared[i] = Handle<SharedFunctionInfo>(f[i]->shared(), i_isolate());
ASSERT_FALSE(shared[i]->is_compiled());
ASSERT_TRUE(dispatcher.Enqueue(shared[i]));
@@ -378,8 +394,7 @@ TEST_F(CompilerDispatcherTest, IdleTask) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -399,8 +414,7 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -410,7 +424,7 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
// The job should be scheduled for the main thread.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Only grant a little idle time and have time advance beyond it in one step.
platform.RunIdleTask(2.0, 1.0);
@@ -422,8 +436,8 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
// The job should be still scheduled for the main thread, but ready for
// parsing.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToParse,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
@@ -438,13 +452,13 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
std::string func_name("f" STR(__LINE__));
- std::string script("function g() { function " + func_name + "(x) { var a = ");
- for (int i = 0; i < 1000; i++) {
- script += "'x' + ";
+ std::string script("function " + func_name + "(x) { var a = ");
+ for (int i = 0; i < 500; i++) {
+ // Alternate + and - to avoid n-ary operation nodes.
+ script += "'x' + 'x' - ";
}
- script += " 'x'; }; return " + func_name + "; } g();";
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script.c_str()));
+ script += " 'x'; }; " + func_name + ";";
+ Handle<JSFunction> f = RunJS<JSFunction>(script.c_str());
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -465,8 +479,7 @@ TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -475,14 +488,14 @@ TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Make compiling super expensive, and advance job as much as possible on the
// foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0);
+ dispatcher.tracer_->RecordCompile(50000.0, 1);
platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
@@ -494,7 +507,7 @@ TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.BackgroundTasksPending());
ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
@@ -509,8 +522,7 @@ TEST_F(CompilerDispatcherTest, FinishNowWithBackgroundTask) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -519,14 +531,14 @@ TEST_F(CompilerDispatcherTest, FinishNowWithBackgroundTask) {
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Make compiling super expensive, and advance job as much as possible on the
// foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0);
+ dispatcher.tracer_->RecordCompile(50000.0, 1);
platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
@@ -549,13 +561,11 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script1));
+ Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script2));
+ Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -578,13 +588,13 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
std::string func_name("f" STR(__LINE__));
- std::string script("function g() { function " + func_name + "(x) { var a = ");
- for (int i = 0; i < 1000; i++) {
- script += "'x' + ";
+ std::string script("function " + func_name + "(x) { var a = ");
+ for (int i = 0; i < 500; i++) {
+ // Alternate + and - to avoid n-ary operation nodes.
+ script += "'x' + 'x' - ";
}
- script += " 'x'; }; return " + func_name + "; } g();";
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script.c_str()));
+ script += " 'x'; }; " + func_name + ";";
+ Handle<JSFunction> f = RunJS<JSFunction>(script.c_str());
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -606,8 +616,7 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -616,14 +625,14 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask) {
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Make compiling super expensive, and advance job as much as possible on the
// foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0);
+ dispatcher.tracer_->RecordCompile(50000.0, 1);
platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
@@ -649,13 +658,11 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script1));
+ Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script2));
+ Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -664,14 +671,14 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask) {
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Make compiling super expensive, and advance job as much as possible on the
// foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0);
+ dispatcher.tracer_->RecordCompile(50000.0, 1);
platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(dispatcher.IsEnqueued(shared1));
ASSERT_FALSE(shared1->is_compiled());
@@ -731,8 +738,7 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -741,14 +747,14 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
// Make compiling super expensive, and advance job as much as possible on the
// foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0);
+ dispatcher.tracer_->RecordCompile(50000.0, 1);
platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
@@ -810,8 +816,7 @@ TEST_F(CompilerDispatcherTest, MemoryPressure) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
// Can't enqueue tasks under memory pressure.
@@ -858,8 +863,7 @@ TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_TRUE(dispatcher.Enqueue(shared));
@@ -890,8 +894,7 @@ TEST_F(CompilerDispatcherTest, EnqueueJob) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
std::unique_ptr<CompilerDispatcherJob> job(
new UnoptimizedCompileJob(i_isolate(), dispatcher.tracer_.get(), shared,
@@ -910,16 +913,15 @@ TEST_F(CompilerDispatcherTest, EnqueueAndStep) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToParse,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(platform.IdleTaskPending());
platform.ClearIdleTask();
@@ -932,9 +934,8 @@ TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
// enqueued functions.
CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
- const char source[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), source));
+ const char script[] = "function lazy() { return 42; }; lazy;";
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(shared->is_compiled());
@@ -944,7 +945,7 @@ TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
// Now force the function to run and ensure CompileLazy finished and dequeues
// it from the dispatcher.
- test::RunJS(isolate(), "g()();");
+ RunJS("lazy();");
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(dispatcher->IsEnqueued(shared));
}
@@ -955,21 +956,19 @@ TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
const char source2[] = "function lazy2() { return 42; }; lazy2;";
- Handle<JSFunction> lazy2 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), source2));
+ Handle<JSFunction> lazy2 = RunJS<JSFunction>(source2);
Handle<SharedFunctionInfo> shared2(lazy2->shared(), i_isolate());
ASSERT_FALSE(shared2->is_compiled());
const char source1[] = "function lazy1() { return lazy2(); }; lazy1;";
- Handle<JSFunction> lazy1 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), source1));
+ Handle<JSFunction> lazy1 = RunJS<JSFunction>(source1);
Handle<SharedFunctionInfo> shared1(lazy1->shared(), i_isolate());
ASSERT_FALSE(shared1->is_compiled());
ASSERT_TRUE(dispatcher->Enqueue(shared1));
ASSERT_TRUE(dispatcher->Enqueue(shared2));
- test::RunJS(isolate(), "lazy1();");
+ RunJS("lazy1();");
ASSERT_TRUE(shared1->is_compiled());
ASSERT_TRUE(shared2->is_compiled());
ASSERT_FALSE(dispatcher->IsEnqueued(shared1));
@@ -981,20 +980,19 @@ TEST_F(CompilerDispatcherTest, EnqueueAndStepTwice) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToParse,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
// EnqueueAndStep of the same function again (shouldn't step the job.
ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToParse,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.BackgroundTasksPending());
@@ -1007,12 +1005,10 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script1));
+ Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script2));
+ Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
ASSERT_FALSE(platform.IdleTaskPending());
@@ -1022,19 +1018,19 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- GetUnoptimizedJobStatus((++dispatcher.jobs_.begin())->second));
+ (++dispatcher.jobs_.begin())->second->status());
// Make compiling super expensive, and advance job as much as possible on the
// foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0);
+ dispatcher.tracer_->RecordCompile(50000.0, 1);
platform.RunIdleTask(10.0, 0.0);
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToCompile,
- GetUnoptimizedJobStatus((++dispatcher.jobs_.begin())->second));
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ dispatcher.jobs_.begin()->second->status());
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ (++dispatcher.jobs_.begin())->second->status());
ASSERT_TRUE(dispatcher.IsEnqueued(shared1));
ASSERT_TRUE(dispatcher.IsEnqueued(shared2));
@@ -1049,9 +1045,9 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_FALSE(platform.BackgroundTasksPending());
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
- GetUnoptimizedJobStatus(dispatcher.jobs_.begin()->second));
+ dispatcher.jobs_.begin()->second->status());
ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
- GetUnoptimizedJobStatus((++dispatcher.jobs_.begin())->second));
+ (++dispatcher.jobs_.begin())->second->status());
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
@@ -1063,5 +1059,11 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_FALSE(platform.IdleTaskPending());
}
+#undef _STR
+#undef STR
+#undef _SCRIPT
+#undef SCRIPT
+#undef TEST_SCRIPT
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index 8280810293..d5c37264a5 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -19,14 +19,15 @@
namespace v8 {
namespace internal {
-typedef TestWithContext OptimizingCompileDispatcherTest;
+typedef TestWithNativeContext OptimizingCompileDispatcherTest;
namespace {
class BlockingCompilationJob : public CompilationJob {
public:
BlockingCompilationJob(Isolate* isolate, Handle<JSFunction> function)
- : CompilationJob(isolate, &parse_info_, &info_, "BlockingCompilationJob",
+ : CompilationJob(isolate->stack_guard()->real_climit(), &parse_info_,
+ &info_, "BlockingCompilationJob",
State::kReadyToExecute),
shared_(function->shared()),
parse_info_(shared_),
@@ -39,9 +40,7 @@ class BlockingCompilationJob : public CompilationJob {
void Signal() { semaphore_.Signal(); }
// CompilationJob implementation.
- Status PrepareJobImpl() override {
- UNREACHABLE();
- }
+ Status PrepareJobImpl(Isolate* isolate) override { UNREACHABLE(); }
Status ExecuteJobImpl() override {
blocking_.SetValue(true);
@@ -50,7 +49,7 @@ class BlockingCompilationJob : public CompilationJob {
return SUCCEEDED;
}
- Status FinalizeJobImpl() override { return SUCCEEDED; }
+ Status FinalizeJobImpl(Isolate* isolate) override { return SUCCEEDED; }
private:
Handle<SharedFunctionInfo> shared_;
@@ -71,8 +70,8 @@ TEST_F(OptimizingCompileDispatcherTest, Construct) {
}
TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) {
- Handle<JSFunction> fun = Handle<JSFunction>::cast(test::RunJS(
- isolate(), "function f() { function g() {}; return g;}; f();"));
+ Handle<JSFunction> fun =
+ RunJS<JSFunction>("function f() { function g() {}; return g;}; f();");
BlockingCompilationJob* job = new BlockingCompilationJob(i_isolate(), fun);
OptimizingCompileDispatcher dispatcher(i_isolate());
diff --git a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
index 36bafcf006..d6816279ed 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
@@ -23,9 +23,9 @@
namespace v8 {
namespace internal {
-class UnoptimizedCompileJobTest : public TestWithContext {
+class UnoptimizedCompileJobTest : public TestWithNativeContext {
public:
- UnoptimizedCompileJobTest() : tracer_(i_isolate()) {}
+ UnoptimizedCompileJobTest() : tracer_(isolate()) {}
~UnoptimizedCompileJobTest() override {}
CompilerDispatcherTracer* tracer() { return &tracer_; }
@@ -33,25 +33,16 @@ class UnoptimizedCompileJobTest : public TestWithContext {
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
- TestWithContext::SetUpTestCase();
+ TestWithNativeContext ::SetUpTestCase();
}
static void TearDownTestCase() {
- TestWithContext::TearDownTestCase();
+ TestWithNativeContext ::TearDownTestCase();
CHECK_NOT_NULL(save_flags_);
delete save_flags_;
save_flags_ = nullptr;
}
- static UnoptimizedCompileJob::Status GetStatus(UnoptimizedCompileJob* job) {
- return job->status();
- }
-
- static UnoptimizedCompileJob::Status GetStatus(
- const std::unique_ptr<UnoptimizedCompileJob>& job) {
- return GetStatus(job.get());
- }
-
static Variable* LookupVariableByName(UnoptimizedCompileJob* job,
const char* name) {
const AstRawString* name_raw_string =
@@ -68,98 +59,52 @@ class UnoptimizedCompileJobTest : public TestWithContext {
SaveFlags* UnoptimizedCompileJobTest::save_flags_ = nullptr;
-#define ASSERT_JOB_STATUS(STATUS, JOB) ASSERT_EQ(STATUS, GetStatus(JOB))
+#define ASSERT_JOB_STATUS(STATUS, JOB) ASSERT_EQ(STATUS, JOB->status())
TEST_F(UnoptimizedCompileJobTest, Construct) {
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(),
- test::CreateSharedFunctionInfo(i_isolate(), nullptr), FLAG_stack_size));
+ isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), nullptr),
+ FLAG_stack_size));
}
TEST_F(UnoptimizedCompileJobTest, StateTransitions) {
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(),
- test::CreateSharedFunctionInfo(i_isolate(), nullptr), FLAG_stack_size));
+ isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), nullptr),
+ FLAG_stack_size));
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kReadyToParse, job);
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kParsed, job);
- job->StepNextOnMainThread(i_isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kReadyToAnalyze, job);
- job->StepNextOnMainThread(i_isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kPrepared, job);
+ job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kAnalyzed, job);
- job->StepNextOnMainThread(i_isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kCompiled, job);
+ job->FinalizeOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kReadyToCompile, job);
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kCompiled, job);
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kDone, job);
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
TEST_F(UnoptimizedCompileJobTest, SyntaxError) {
test::ScriptResource script("^^^", strlen("^^^"));
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(),
- test::CreateSharedFunctionInfo(i_isolate(), &script), FLAG_stack_size));
+ isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), &script),
+ FLAG_stack_size));
- job->StepNextOnMainThread(i_isolate());
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->ReportErrorsOnMainThread(isolate());
ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kFailed, job);
- ASSERT_TRUE(i_isolate()->has_pending_exception());
-
- i_isolate()->clear_pending_exception();
-
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
-}
-
-TEST_F(UnoptimizedCompileJobTest, ScopeChain) {
- const char script[] =
- "function g() { var y = 1; function f(x) { return x * y }; return f; } "
- "g();";
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
-
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(), handle(f->shared()), FLAG_stack_size));
-
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kReadyToCompile, job);
-
- Variable* var = LookupVariableByName(job.get(), "x");
- ASSERT_TRUE(var);
- ASSERT_TRUE(var->IsParameter());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
+ ASSERT_TRUE(isolate()->has_pending_exception());
- var = LookupVariableByName(job.get(), "y");
- ASSERT_TRUE(var);
- ASSERT_TRUE(var->IsContextSlot());
+ isolate()->clear_pending_exception();
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
@@ -172,92 +117,78 @@ TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
" return f;\n"
"}\n"
"g();";
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(), handle(f->shared()), FLAG_stack_size));
+ isolate(), tracer(), handle(f->shared()), FLAG_stack_size));
- job->StepNextOnMainThread(i_isolate());
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->FinalizeOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kDone, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
- Smi* value = Smi::cast(*test::RunJS(isolate(), "f(100);"));
+ Smi* value = Smi::cast(*RunJS("f(100);"));
ASSERT_TRUE(value == Smi::FromInt(160));
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
TEST_F(UnoptimizedCompileJobTest, CompileFailureToAnalyse) {
std::string raw_script("() { var a = ");
- for (int i = 0; i < 100000; i++) {
- raw_script += "'x' + ";
+ for (int i = 0; i < 500000; i++) {
+ // TODO(leszeks): Figure out a more "unit-test-y" way of forcing an analysis
+ // failure than a binop stack overflow.
+
+ // Alternate + and - to avoid n-ary operation nodes.
+ raw_script += "'x' + 'x' - ";
}
raw_script += " 'x'; }";
test::ScriptResource script(raw_script.c_str(), strlen(raw_script.c_str()));
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(),
- test::CreateSharedFunctionInfo(i_isolate(), &script), 100));
+ isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), &script),
+ 100));
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->ReportErrorsOnMainThread(isolate());
ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kFailed, job);
- ASSERT_TRUE(i_isolate()->has_pending_exception());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
+ ASSERT_TRUE(isolate()->has_pending_exception());
- i_isolate()->clear_pending_exception();
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ isolate()->clear_pending_exception();
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
TEST_F(UnoptimizedCompileJobTest, CompileFailureToFinalize) {
std::string raw_script("() { var a = ");
- for (int i = 0; i < 1000; i++) {
- raw_script += "'x' + ";
+ for (int i = 0; i < 500; i++) {
+ // Alternate + and - to avoid n-ary operation nodes.
+ raw_script += "'x' + 'x' - ";
}
raw_script += " 'x'; }";
test::ScriptResource script(raw_script.c_str(), strlen(raw_script.c_str()));
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(),
- test::CreateSharedFunctionInfo(i_isolate(), &script), 50));
+ isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), &script),
+ 50));
- job->StepNextOnMainThread(i_isolate());
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->ReportErrorsOnMainThread(isolate());
ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kFailed, job);
- ASSERT_TRUE(i_isolate()->has_pending_exception());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
+ ASSERT_TRUE(isolate()->has_pending_exception());
- i_isolate()->clear_pending_exception();
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ isolate()->clear_pending_exception();
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
class CompileTask : public Task {
@@ -267,7 +198,7 @@ class CompileTask : public Task {
~CompileTask() override {}
void Run() override {
- job_->StepNextOnBackgroundThread();
+ job_->Compile(true);
ASSERT_FALSE(job_->IsFailed());
semaphore_->Signal();
}
@@ -288,74 +219,55 @@ TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
"}";
test::ScriptResource script(raw_script, strlen(raw_script));
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(),
- test::CreateSharedFunctionInfo(i_isolate(), &script), 100));
+ isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), &script),
+ 100));
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
base::Semaphore semaphore(0);
CompileTask* background_task = new CompileTask(job.get(), &semaphore);
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kReadyToCompile, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kPrepared, job);
V8::GetCurrentPlatform()->CallOnBackgroundThread(background_task,
Platform::kShortRunningTask);
semaphore.Wait();
- job->StepNextOnMainThread(i_isolate());
+ job->FinalizeOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kDone, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
const char script[] =
- "function g() {\n"
- " f = function() {\n"
- " e = (function() { return 42; });\n"
- " return e;\n"
- " };\n"
- " return f;\n"
- "}\n"
- "g();";
- Handle<JSFunction> f =
- Handle<JSFunction>::cast(test::RunJS(isolate(), script));
+ "f = function() {\n"
+ " e = (function() { return 42; });\n"
+ " return e;\n"
+ "};\n"
+ "f;";
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- i_isolate(), tracer(), handle(f->shared()), FLAG_stack_size));
+ isolate(), tracer(), handle(f->shared()), FLAG_stack_size));
- job->StepNextOnMainThread(i_isolate());
+ job->PrepareOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
+ job->FinalizeOnMainThread(isolate());
ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- job->StepNextOnMainThread(i_isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kDone, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
- Handle<JSFunction> e =
- Handle<JSFunction>::cast(test::RunJS(isolate(), "f();"));
+ Handle<JSFunction> e = RunJS<JSFunction>("f();");
ASSERT_FALSE(e->shared()->is_compiled());
- job->ResetOnMainThread(i_isolate());
- ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
+#undef ASSERT_JOB_STATUS
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 78d299fce5..92cdb4962c 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -1021,7 +1021,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
// Skip the cases where the instruction selector would use tbz/tbnz.
- if (base::bits::CountPopulation32(imm) == 1) continue;
+ if (base::bits::CountPopulation(static_cast<uint32_t>(imm)) == 1) continue;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
RawMachineLabel a, b;
@@ -1044,7 +1044,7 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnRight) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnRight) {
TRACED_FOREACH(int64_t, imm, kLogical64Immediates) {
// Skip the cases where the instruction selector would use tbz/tbnz.
- if (base::bits::CountPopulation64(imm) == 1) continue;
+ if (base::bits::CountPopulation(static_cast<uint64_t>(imm)) == 1) continue;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
@@ -1104,7 +1104,7 @@ TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
// Skip the cases where the instruction selector would use tbz/tbnz.
- if (base::bits::CountPopulation32(imm) == 1) continue;
+ if (base::bits::CountPopulation(static_cast<uint32_t>(imm)) == 1) continue;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
RawMachineLabel a, b;
@@ -1128,7 +1128,7 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnLeft) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnLeft) {
TRACED_FOREACH(int64_t, imm, kLogical64Immediates) {
// Skip the cases where the instruction selector would use tbz/tbnz.
- if (base::bits::CountPopulation64(imm) == 1) continue;
+ if (base::bits::CountPopulation(static_cast<uint64_t>(imm)) == 1) continue;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 310f264c74..dd8b661fcf 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -90,7 +90,7 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
SaveFlags* BytecodeAnalysisTest::save_flags_ = nullptr;
TEST_F(BytecodeAnalysisTest, EmptyBlock) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -104,7 +104,7 @@ TEST_F(BytecodeAnalysisTest, EmptyBlock) {
}
TEST_F(BytecodeAnalysisTest, SimpleLoad) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -121,7 +121,7 @@ TEST_F(BytecodeAnalysisTest, SimpleLoad) {
}
TEST_F(BytecodeAnalysisTest, StoreThenLoad) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -141,7 +141,7 @@ TEST_F(BytecodeAnalysisTest, StoreThenLoad) {
}
TEST_F(BytecodeAnalysisTest, DiamondLoad) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -178,7 +178,7 @@ TEST_F(BytecodeAnalysisTest, DiamondLoad) {
}
TEST_F(BytecodeAnalysisTest, DiamondLookupsAndBinds) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -225,7 +225,7 @@ TEST_F(BytecodeAnalysisTest, DiamondLookupsAndBinds) {
}
TEST_F(BytecodeAnalysisTest, SimpleLoop) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -273,7 +273,7 @@ TEST_F(BytecodeAnalysisTest, SimpleLoop) {
}
TEST_F(BytecodeAnalysisTest, TryCatch) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -284,7 +284,8 @@ TEST_F(BytecodeAnalysisTest, TryCatch) {
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back(".LLL", "LLL.");
- interpreter::TryCatchBuilder try_builder(&builder, HandlerTable::CAUGHT);
+ interpreter::TryCatchBuilder try_builder(&builder, nullptr, nullptr,
+ HandlerTable::CAUGHT);
try_builder.BeginTry(reg_context);
{
// Gen r0.
@@ -325,7 +326,7 @@ TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
// diamond should eventually propagate up the other path when the loop is
// reprocessed.
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -395,7 +396,7 @@ TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
// still process the inner loop when processing the outer loop, to ensure that
// r1 becomes live in 3 (via 5), but r0 stays dead (because of 4).
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
index 604e1baf86..1716c8d535 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
@@ -109,6 +109,20 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrMul) {
Node* c = m.IntPtrMul(a, b);
EXPECT_THAT(c, IsIntPtrConstant(500));
}
+ // x * 2^CONST => x << CONST
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.IntPtrConstant(1 << 3);
+ Node* c = m.IntPtrMul(a, b);
+ EXPECT_THAT(c, IsWordShl(a, IsIntPtrConstant(3)));
+ }
+ // 2^CONST * x => x << CONST
+ {
+ Node* a = m.IntPtrConstant(1 << 3);
+ Node* b = m.Parameter(0);
+ Node* c = m.IntPtrMul(a, b);
+ EXPECT_THAT(c, IsWordShl(b, IsIntPtrConstant(3)));
+ }
}
TARGET_TEST_F(CodeAssemblerTest, WordShl) {
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index d1f914c68e..4444ed0ca5 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -24,7 +24,7 @@ class DeadCodeEliminationTest : public GraphTest {
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
- DeadCodeElimination reducer(editor, graph(), common());
+ DeadCodeElimination reducer(editor, graph(), common(), zone());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 78a038dd87..5c6de21900 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -81,8 +81,9 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) {
schedule.AddReturn(start, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
- source_positions());
+ EffectControlLinearizer introducer(
+ jsgraph(), &schedule, zone(), source_positions(),
+ EffectControlLinearizer::kDoNotMaskArrayIndex);
introducer.Run();
EXPECT_THAT(load,
@@ -143,8 +144,9 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) {
schedule.AddReturn(mblock, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
- source_positions());
+ EffectControlLinearizer introducer(
+ jsgraph(), &schedule, zone(), source_positions(),
+ EffectControlLinearizer::kDoNotMaskArrayIndex);
introducer.Run();
// The effect input to the return should be an effect phi with the
@@ -210,8 +212,9 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
schedule.AddReturn(rblock, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
- source_positions());
+ EffectControlLinearizer introducer(
+ jsgraph(), &schedule, zone(), source_positions(),
+ EffectControlLinearizer::kDoNotMaskArrayIndex);
introducer.Run();
ASSERT_THAT(ret, IsReturn(load, load, if_true));
@@ -273,8 +276,9 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
schedule.AddNode(mblock, merge);
schedule.AddNode(mblock, graph()->end());
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
- source_positions());
+ EffectControlLinearizer introducer(
+ jsgraph(), &schedule, zone(), source_positions(),
+ EffectControlLinearizer::kDoNotMaskArrayIndex);
introducer.Run();
Capture<Node *> branch1_capture, branch2_capture;
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 6e48eaf96d..55931d51fb 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -119,14 +119,11 @@ Node* TypedGraphTest::Parameter(Type* type, int32_t index) {
return node;
}
-
-namespace {
+namespace graph_unittest {
const Operator kDummyOperator(0, Operator::kNoProperties, "Dummy", 0, 0, 0, 1,
0, 0);
-} // namespace
-
TEST_F(GraphTest, NewNode) {
Node* n0 = graph()->NewNode(&kDummyOperator);
@@ -139,6 +136,7 @@ TEST_F(GraphTest, NewNode) {
EXPECT_EQ(&kDummyOperator, n1->op());
}
+} // namespace graph_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index a9ef25b60c..2b0ccaed24 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -88,7 +88,7 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
- effect, _),
+ _, _),
IsNumberConstant(JS_ARRAY_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
@@ -119,7 +119,7 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
- effect, _),
+ _, _),
IsNumberConstant(JS_TYPED_ARRAY_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 8a458a736f..338232b6e0 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -46,7 +46,6 @@ const SharedOperator kSharedOperators[] = {
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToObject, Operator::kFoldable, 1, 1, 1, 1, 1, 1, 2),
SHARED(Create, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
#undef SHARED
};
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 868c8b3052..485efd6288 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -65,77 +65,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
};
-// -----------------------------------------------------------------------------
-// JSToBoolean
-
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
- Node* input = Parameter(Type::Boolean(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(input, r.replacement());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
- Node* input = Parameter(Type::OrderedNumber(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
-}
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithNumber) {
- Node* input = Parameter(Type::Number(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberToBoolean(input));
-}
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithDetectableReceiverOrNull) {
- Node* input = Parameter(Type::DetectableReceiverOrNull(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(IsReferenceEqual(input, IsNullConstant())));
-}
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithReceiverOrNullOrUndefined) {
- Node* input = Parameter(Type::ReceiverOrNullOrUndefined(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsBooleanNot(IsObjectIsUndetectable(input)));
-}
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
- Node* input = Parameter(Type::String(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(IsReferenceEqual(
- input, IsHeapConstant(factory()->empty_string()))));
-}
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
- Node* input = Parameter(Type::Any(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_FALSE(r.Changed());
-}
-
// -----------------------------------------------------------------------------
// JSToName
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 400b05828a..75e8468244 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -11,6 +11,7 @@
namespace v8 {
namespace internal {
namespace compiler {
+namespace machine_operator_unittest {
#if GTEST_HAS_COMBINE
@@ -31,8 +32,6 @@ class MachineOperatorTestWithParam
};
-namespace {
-
const MachineRepresentation kMachineReps[] = {MachineRepresentation::kWord32,
MachineRepresentation::kWord64};
@@ -50,8 +49,6 @@ const MachineRepresentation kRepresentationsForStore[] = {
MachineRepresentation::kWord32, MachineRepresentation::kWord64,
MachineRepresentation::kTagged};
-} // namespace
-
// -----------------------------------------------------------------------------
// Load operator.
@@ -169,8 +166,6 @@ INSTANTIATE_TEST_CASE_P(
// -----------------------------------------------------------------------------
// Pure operators.
-namespace {
-
struct PureOperator {
const Operator* (MachineOperatorBuilder::*constructor)();
char const* const constructor_name;
@@ -269,7 +264,6 @@ const PureOperator kPureOperators[] = {
#undef PURE
};
-} // namespace
class MachinePureOperatorTest : public TestWithZone {
protected:
@@ -299,8 +293,6 @@ TEST_F(MachinePureOperatorTest, PureOperators) {
// Optional operators.
-namespace {
-
struct OptionalOperatorEntry {
const OptionalOperator (MachineOperatorBuilder::*constructor)();
MachineOperatorBuilder::Flag enabling_flag;
@@ -327,7 +319,6 @@ const OptionalOperatorEntry kOptionalOperators[] = {
OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
#undef OPTIONAL_ENTRY
};
-} // namespace
class MachineOptionalOperatorTest : public TestWithZone {
@@ -365,12 +356,8 @@ TEST_F(MachineOptionalOperatorTest, OptionalOperators) {
// Pseudo operators.
-namespace {
-
typedef TestWithZone MachineOperatorTest;
-} // namespace
-
TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
MachineOperatorBuilder machine(zone(), MachineRepresentation::kWord32);
@@ -415,6 +402,7 @@ TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
EXPECT_EQ(machine.Int64LessThanOrEqual(), machine.IntLessThanOrEqual());
}
+} // namespace machine_operator_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
index 3f8fbfc7c8..3fce7dd688 100644
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
index 3f8fbfc7c8..978563cab5 100644
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
diff --git a/deps/v8/test/unittests/compiler/node-unittest.cc b/deps/v8/test/unittests/compiler/node-unittest.cc
index 8379e2668d..b333c20cd1 100644
--- a/deps/v8/test/unittests/compiler/node-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-unittest.cc
@@ -15,12 +15,11 @@ using testing::UnorderedElementsAre;
namespace v8 {
namespace internal {
namespace compiler {
+namespace node_unittest {
typedef TestWithZone NodeTest;
-namespace {
-
const IrOpcode::Value kOpcode0 = static_cast<IrOpcode::Value>(0);
const IrOpcode::Value kOpcode1 = static_cast<IrOpcode::Value>(1);
const IrOpcode::Value kOpcode2 = static_cast<IrOpcode::Value>(2);
@@ -29,8 +28,6 @@ const Operator kOp0(kOpcode0, Operator::kNoProperties, "Op0", 0, 0, 0, 1, 0, 0);
const Operator kOp1(kOpcode1, Operator::kNoProperties, "Op1", 1, 0, 0, 1, 0, 0);
const Operator kOp2(kOpcode2, Operator::kNoProperties, "Op2", 2, 0, 0, 1, 0, 0);
-} // namespace
-
TEST_F(NodeTest, New) {
Node* const node = Node::New(zone(), 1, &kOp0, 0, nullptr, false);
@@ -261,7 +258,7 @@ TEST_F(NodeTest, BigNodes) {
}
}
-
+} // namespace node_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/schedule-unittest.cc b/deps/v8/test/unittests/compiler/schedule-unittest.cc
index bc825353c4..ab2f384db4 100644
--- a/deps/v8/test/unittests/compiler/schedule-unittest.cc
+++ b/deps/v8/test/unittests/compiler/schedule-unittest.cc
@@ -12,6 +12,7 @@ using testing::ElementsAre;
namespace v8 {
namespace internal {
namespace compiler {
+namespace schedule_unittest {
typedef TestWithIsolateAndZone BasicBlockTest;
@@ -71,8 +72,6 @@ TEST_F(BasicBlockTest, GetCommonDominator3) {
typedef TestWithZone ScheduleTest;
-namespace {
-
const Operator kCallOperator(IrOpcode::kCall, Operator::kNoProperties,
"MockCall", 0, 0, 0, 0, 0, 0);
const Operator kBranchOperator(IrOpcode::kBranch, Operator::kNoProperties,
@@ -80,8 +79,6 @@ const Operator kBranchOperator(IrOpcode::kBranch, Operator::kNoProperties,
const Operator kDummyOperator(IrOpcode::kParameter, Operator::kNoProperties,
"Dummy", 0, 0, 0, 0, 0, 0);
-} // namespace
-
TEST_F(ScheduleTest, Constructor) {
Schedule schedule(zone());
@@ -244,6 +241,7 @@ TEST_F(ScheduleTest, InsertBranch) {
EXPECT_THAT(end->predecessors(), ElementsAre(mblock));
}
+} // namespace schedule_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index d77c762ce6..239f19ff93 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -12,12 +12,11 @@
namespace v8 {
namespace internal {
namespace compiler {
+namespace simplified_operator_unittest {
// -----------------------------------------------------------------------------
-// Pure operators.
-
-namespace {
+// Pure operators.
struct PureOperator {
const Operator* (SimplifiedOperatorBuilder::*constructor)();
@@ -67,12 +66,10 @@ const PureOperator kPureOperators[] = {
PURE(TruncateTaggedToBit, Operator::kNoProperties, 1),
PURE(ObjectIsNumber, Operator::kNoProperties, 1),
PURE(ObjectIsReceiver, Operator::kNoProperties, 1),
- PURE(ObjectIsSmi, Operator::kNoProperties, 1)
+ PURE(ObjectIsSmi, Operator::kNoProperties, 1),
#undef PURE
};
-} // namespace
-
class SimplifiedPureOperatorTest
: public TestWithZone,
@@ -123,10 +120,8 @@ INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
// -----------------------------------------------------------------------------
-// Element access operators.
-
-namespace {
+// Element access operators.
const ElementAccess kElementAccesses[] = {
{kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
@@ -171,8 +166,6 @@ const ElementAccess kElementAccesses[] = {
MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
kNoWriteBarrier}};
-} // namespace
-
class SimplifiedElementAccessOperatorTest
: public TestWithZone,
@@ -225,6 +218,7 @@ INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
SimplifiedElementAccessOperatorTest,
::testing::ValuesIn(kElementAccesses));
+} // namespace simplified_operator_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index b527a36c55..4106b2b7a7 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -62,14 +62,14 @@ const double kIntegerValues[] = {-V8_INFINITY, INT_MIN, -1000.0, -42.0,
class TypedOptimizationTest : public TypedGraphTest {
public:
TypedOptimizationTest()
- : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {}
+ : TypedGraphTest(3), simplified_(zone()), deps_(isolate(), zone()) {}
~TypedOptimizationTest() override {}
protected:
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone());
- SimplifiedOperatorBuilder simplified(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
@@ -77,10 +77,10 @@ class TypedOptimizationTest : public TypedGraphTest {
return reducer.Reduce(node);
}
- JSOperatorBuilder* javascript() { return &javascript_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
private:
- JSOperatorBuilder javascript_;
+ SimplifiedOperatorBuilder simplified_;
CompilationDependencies deps_;
};
@@ -169,7 +169,10 @@ TEST_F(TypedOptimizationTest, ParameterWithUndefined) {
}
}
-TEST_F(TypedOptimizationTest, JSToBooleanWithFalsish) {
+// -----------------------------------------------------------------------------
+// ToBoolean
+
+TEST_F(TypedOptimizationTest, ToBooleanWithFalsish) {
Node* input = Parameter(
Type::Union(
Type::MinusZero(),
@@ -190,36 +193,82 @@ TEST_F(TypedOptimizationTest, JSToBooleanWithFalsish) {
zone()),
zone()),
0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
-TEST_F(TypedOptimizationTest, JSToBooleanWithTruish) {
+TEST_F(TypedOptimizationTest, ToBooleanWithTruish) {
Node* input = Parameter(
Type::Union(
Type::NewConstant(factory()->true_value(), zone()),
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
zone()),
0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
-TEST_F(TypedOptimizationTest, JSToBooleanWithNonZeroPlainNumber) {
+TEST_F(TypedOptimizationTest, ToBooleanWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
+TEST_F(TypedOptimizationTest, ToBooleanWithBoolean) {
+ Node* input = Parameter(Type::Boolean(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(input, r.replacement());
+}
+
+TEST_F(TypedOptimizationTest, ToBooleanWithOrderedNumber) {
+ Node* input = Parameter(Type::OrderedNumber(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
+}
+
+TEST_F(TypedOptimizationTest, ToBooleanWithNumber) {
+ Node* input = Parameter(Type::Number(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberToBoolean(input));
+}
+
+TEST_F(TypedOptimizationTest, ToBooleanWithDetectableReceiverOrNull) {
+ Node* input = Parameter(Type::DetectableReceiverOrNull(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsReferenceEqual(input, IsNullConstant())));
+}
+
+TEST_F(TypedOptimizationTest, ToBooleanWithReceiverOrNullOrUndefined) {
+ Node* input = Parameter(Type::ReceiverOrNullOrUndefined(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsBooleanNot(IsObjectIsUndetectable(input)));
+}
+
+TEST_F(TypedOptimizationTest, ToBooleanWithString) {
+ Node* input = Parameter(Type::String(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsReferenceEqual(
+ input, IsHeapConstant(factory()->empty_string()))));
+}
+
+TEST_F(TypedOptimizationTest, ToBooleanWithAny) {
+ Node* input = Parameter(Type::Any(), 0);
+ Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ ASSERT_FALSE(r.Changed());
+}
+
} // namespace typed_optimization_unittest
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 7c445e606e..40f3efd2cf 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -302,6 +302,7 @@ int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1f); }
int32_t bit_or(int32_t x, int32_t y) { return x | y; }
int32_t bit_and(int32_t x, int32_t y) { return x & y; }
int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
+double modulo_double_double(double x, double y) { return Modulo(x, y); }
} // namespace
@@ -329,7 +330,7 @@ TEST_F(TyperTest, TypeJSDivide) {
}
TEST_F(TyperTest, TypeJSModulus) {
- TestBinaryArithOp(javascript_.Modulus(), modulo);
+ TestBinaryArithOp(javascript_.Modulus(), modulo_double_double);
}
TEST_F(TyperTest, TypeJSBitwiseOr) {
@@ -430,16 +431,6 @@ TEST_MONOTONICITY(ToName)
TEST_MONOTONICITY(ToNumber)
TEST_MONOTONICITY(ToObject)
TEST_MONOTONICITY(ToString)
-TEST_MONOTONICITY(ClassOf)
-TEST_MONOTONICITY(TypeOf)
-#undef TEST_MONOTONICITY
-
-// JS UNOPs with ToBooleanHint
-#define TEST_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestUnaryMonotonicity(javascript_.name(ToBooleanHint())); \
- }
-TEST_MONOTONICITY(ToBoolean)
#undef TEST_MONOTONICITY
// JS BINOPs with CompareOperationHint
@@ -463,6 +454,10 @@ TEST_MONOTONICITY(GreaterThanOrEqual)
TEST_MONOTONICITY(Add)
#undef TEST_MONOTONICITY
+TEST_F(TyperTest, Monotonicity_InstanceOf) {
+ TestBinaryMonotonicity(javascript_.InstanceOf(VectorSlotPair()));
+}
+
// JS BINOPS without hint
#define TEST_MONOTONICITY(name) \
TEST_F(TyperTest, Monotonicity_##name) { \
@@ -478,7 +473,6 @@ TEST_MONOTONICITY(Subtract)
TEST_MONOTONICITY(Multiply)
TEST_MONOTONICITY(Divide)
TEST_MONOTONICITY(Modulus)
-TEST_MONOTONICITY(InstanceOf)
TEST_MONOTONICITY(OrdinaryHasInstance)
#undef TEST_MONOTONICITY
@@ -496,6 +490,9 @@ TEST_MONOTONICITY(ObjectIsSmi)
TEST_MONOTONICITY(ObjectIsString)
TEST_MONOTONICITY(ObjectIsSymbol)
TEST_MONOTONICITY(ObjectIsUndetectable)
+TEST_MONOTONICITY(TypeOf)
+TEST_MONOTONICITY(ClassOf)
+TEST_MONOTONICITY(ToBoolean)
#undef TEST_MONOTONICITY
// SIMPLIFIED BINOPs without hint, with Number input restriction
diff --git a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
index c04d6609a7..13f54ce5df 100644
--- a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -13,6 +13,7 @@
namespace v8 {
namespace internal {
namespace compiler {
+namespace value_numbering_reducer_unittest {
struct TestOperator : public Operator {
TestOperator(Operator::Opcode opcode, Operator::Properties properties,
@@ -126,6 +127,7 @@ TEST_F(ValueNumberingReducerTest, WontReplaceNodeWithItself) {
EXPECT_FALSE(Reduce(n).Changed());
}
+} // namespace value_numbering_reducer_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/counters-unittest.cc
index 1f7ac0532e..d32d01060e 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/counters-unittest.cc
@@ -4,11 +4,14 @@
#include <vector>
+#include "src/base/platform/time.h"
#include "src/counters-inl.h"
#include "src/counters.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/tracing/tracing-category-observer.h"
+
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -42,40 +45,102 @@ class AggregatedMemoryHistogramTest : public ::testing::Test {
MockHistogram mock_;
};
-class RuntimeCallStatsTest : public ::testing::Test {
+static base::TimeTicks runtime_call_stats_test_time_ = base::TimeTicks();
+// Time source used for the RuntimeCallTimer during tests. We cannot rely on
+// the native timer since it's too unpredictable on the build bots.
+static base::TimeTicks RuntimeCallStatsTestNow() {
+ return runtime_call_stats_test_time_;
+}
+
+class RuntimeCallStatsTest : public TestWithNativeContext {
public:
RuntimeCallStatsTest() {
FLAG_runtime_stats =
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE;
+ // We need to set {time_} to a non-zero value since it would otherwise
+ // cause runtime call timers to think they are uninitialized.
+ Sleep(1);
+ stats()->Reset();
+ }
+
+ ~RuntimeCallStatsTest() {
+ // Disable RuntimeCallStats before tearing down the isolate to prevent
+ // printing the tests table. Comment the following line for debugging
+ // purposes.
+ FLAG_runtime_stats = 0;
+ }
+
+ static void SetUpTestCase() {
+ TestWithIsolate::SetUpTestCase();
+ // Use a custom time source to precisly emulate system time.
+ RuntimeCallTimer::Now = &RuntimeCallStatsTestNow;
+ }
+
+ static void TearDownTestCase() {
+ TestWithIsolate::TearDownTestCase();
+ // Restore the original time source.
+ RuntimeCallTimer::Now = &base::TimeTicks::HighResolutionNow;
}
- virtual ~RuntimeCallStatsTest() {}
- RuntimeCallStats* stats() { return &stats_; }
+ RuntimeCallStats* stats() {
+ return isolate()->counters()->runtime_call_stats();
+ }
+
+ // Print current RuntimeCallStats table. For debugging purposes.
+ void PrintStats() { stats()->Print(); }
+
RuntimeCallStats::CounterId counter_id() {
return &RuntimeCallStats::TestCounter1;
}
+
RuntimeCallStats::CounterId counter_id2() {
return &RuntimeCallStats::TestCounter2;
}
+
RuntimeCallStats::CounterId counter_id3() {
return &RuntimeCallStats::TestCounter3;
}
+
+ RuntimeCallCounter* js_counter() { return &stats()->JS_Execution; }
RuntimeCallCounter* counter() { return &(stats()->*counter_id()); }
RuntimeCallCounter* counter2() { return &(stats()->*counter_id2()); }
RuntimeCallCounter* counter3() { return &(stats()->*counter_id3()); }
- void Sleep(int32_t milliseconds) {
- base::ElapsedTimer timer;
- base::TimeDelta delta = base::TimeDelta::FromMilliseconds(milliseconds);
- timer.Start();
- while (!timer.HasExpired(delta)) {
- base::OS::Sleep(base::TimeDelta::FromMicroseconds(0));
- }
+
+ void Sleep(int64_t microseconds) {
+ base::TimeDelta delta = base::TimeDelta::FromMicroseconds(microseconds);
+ time_ += delta;
+ runtime_call_stats_test_time_ =
+ base::TimeTicks::FromInternalValue(time_.InMicroseconds());
}
- const uint32_t kEpsilonMs = 20;
+ private:
+ base::TimeDelta time_;
+};
+
+// Temporarily use the native time to modify the test time.
+class ElapsedTimeScope {
+ public:
+ explicit ElapsedTimeScope(RuntimeCallStatsTest* test) : test_(test) {
+ timer_.Start();
+ }
+ ~ElapsedTimeScope() { test_->Sleep(timer_.Elapsed().InMicroseconds()); }
private:
- RuntimeCallStats stats_;
+ base::ElapsedTimer timer_;
+ RuntimeCallStatsTest* test_;
+};
+
+// Temporarily use the default time source.
+class NativeTimeScope {
+ public:
+ NativeTimeScope() {
+ CHECK_EQ(RuntimeCallTimer::Now, &RuntimeCallStatsTestNow);
+ RuntimeCallTimer::Now = &base::TimeTicks::HighResolutionNow;
+ }
+ ~NativeTimeScope() {
+ CHECK_EQ(RuntimeCallTimer::Now, &base::TimeTicks::HighResolutionNow);
+ RuntimeCallTimer::Now = &RuntimeCallStatsTestNow;
+ }
};
} // namespace
@@ -231,10 +296,6 @@ TEST_F(AggregatedMemoryHistogramTest, ManySamples2) {
}
}
-#define EXPECT_IN_RANGE(start, value, end) \
- EXPECT_LE(start, value); \
- EXPECT_GE(end, value)
-
TEST_F(RuntimeCallStatsTest, RuntimeCallTimer) {
RuntimeCallTimer timer;
@@ -251,7 +312,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimer) {
Sleep(50);
EXPECT_FALSE(timer.IsStarted());
EXPECT_EQ(1, counter()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(), 100 + kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
@@ -284,8 +345,8 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
EXPECT_FALSE(timer2.IsStarted());
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(1, counter2()->count());
- EXPECT_EQ(0, counter()->time().InMilliseconds());
- EXPECT_IN_RANGE(100, counter2()->time().InMilliseconds(), 100 + kEpsilonMs);
+ EXPECT_EQ(0, counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter2()->time().InMicroseconds());
EXPECT_EQ(&timer, stats()->current_timer());
Sleep(100);
@@ -294,8 +355,8 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
EXPECT_FALSE(timer.IsStarted());
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(1, counter2()->count());
- EXPECT_IN_RANGE(150, counter()->time().InMilliseconds(), 150 + kEpsilonMs);
- EXPECT_IN_RANGE(100, counter2()->time().InMilliseconds(), 100 + kEpsilonMs);
+ EXPECT_EQ(150, counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter2()->time().InMicroseconds());
EXPECT_EQ(nullptr, stats()->current_timer());
}
@@ -323,15 +384,14 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerRecursive) {
EXPECT_FALSE(timer2.IsStarted());
EXPECT_TRUE(timer.IsStarted());
EXPECT_EQ(1, counter()->count());
- EXPECT_IN_RANGE(50, counter()->time().InMilliseconds(), 50 + kEpsilonMs);
+ EXPECT_EQ(50, counter()->time().InMicroseconds());
Sleep(100);
RuntimeCallStats::Leave(stats(), &timer);
EXPECT_FALSE(timer.IsStarted());
EXPECT_EQ(2, counter()->count());
- EXPECT_IN_RANGE(150, counter()->time().InMilliseconds(),
- 150 + 2 * kEpsilonMs);
+ EXPECT_EQ(150, counter()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, RuntimeCallTimerScope) {
@@ -341,14 +401,13 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerScope) {
}
Sleep(100);
EXPECT_EQ(1, counter()->count());
- EXPECT_IN_RANGE(50, counter()->time().InMilliseconds(), 50 + kEpsilonMs);
+ EXPECT_EQ(50, counter()->time().InMicroseconds());
{
RuntimeCallTimerScope scope(stats(), counter_id());
Sleep(50);
}
EXPECT_EQ(2, counter()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(),
- 100 + 2 * kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, RuntimeCallTimerScopeRecursive) {
@@ -356,17 +415,16 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerScopeRecursive) {
RuntimeCallTimerScope scope(stats(), counter_id());
Sleep(50);
EXPECT_EQ(0, counter()->count());
- EXPECT_EQ(0, counter()->time().InMilliseconds());
+ EXPECT_EQ(0, counter()->time().InMicroseconds());
{
RuntimeCallTimerScope scope(stats(), counter_id());
Sleep(50);
}
EXPECT_EQ(1, counter()->count());
- EXPECT_IN_RANGE(50, counter()->time().InMilliseconds(), 50 + kEpsilonMs);
+ EXPECT_EQ(50, counter()->time().InMicroseconds());
}
EXPECT_EQ(2, counter()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(),
- 100 + 2 * kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, RenameTimer) {
@@ -375,8 +433,8 @@ TEST_F(RuntimeCallStatsTest, RenameTimer) {
Sleep(50);
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(0, counter2()->count());
- EXPECT_EQ(0, counter()->time().InMilliseconds());
- EXPECT_EQ(0, counter2()->time().InMilliseconds());
+ EXPECT_EQ(0, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, counter2()->time().InMicroseconds());
{
RuntimeCallTimerScope scope(stats(), counter_id());
Sleep(100);
@@ -384,13 +442,13 @@ TEST_F(RuntimeCallStatsTest, RenameTimer) {
CHANGE_CURRENT_RUNTIME_COUNTER(stats(), TestCounter2);
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(0, counter2()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(), 100 + kEpsilonMs);
- EXPECT_IN_RANGE(0, counter2()->time().InMilliseconds(), 0);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, counter2()->time().InMicroseconds());
}
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(1, counter2()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(), 100 + kEpsilonMs);
- EXPECT_IN_RANGE(50, counter2()->time().InMilliseconds(), 50 + kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(50, counter2()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, BasicPrintAndSnapshot) {
@@ -399,9 +457,9 @@ TEST_F(RuntimeCallStatsTest, BasicPrintAndSnapshot) {
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(0, counter2()->count());
EXPECT_EQ(0, counter3()->count());
- EXPECT_EQ(0, counter()->time().InMilliseconds());
- EXPECT_EQ(0, counter2()->time().InMilliseconds());
- EXPECT_EQ(0, counter3()->time().InMilliseconds());
+ EXPECT_EQ(0, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, counter2()->time().InMicroseconds());
+ EXPECT_EQ(0, counter3()->time().InMicroseconds());
{
RuntimeCallTimerScope scope(stats(), counter_id());
@@ -412,9 +470,9 @@ TEST_F(RuntimeCallStatsTest, BasicPrintAndSnapshot) {
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(0, counter2()->count());
EXPECT_EQ(0, counter3()->count());
- EXPECT_IN_RANGE(50, counter()->time().InMilliseconds(), 50 + kEpsilonMs);
- EXPECT_EQ(0, counter2()->time().InMilliseconds());
- EXPECT_EQ(0, counter3()->time().InMilliseconds());
+ EXPECT_EQ(50, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, counter2()->time().InMicroseconds());
+ EXPECT_EQ(0, counter3()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, PrintAndSnapshot) {
@@ -422,11 +480,11 @@ TEST_F(RuntimeCallStatsTest, PrintAndSnapshot) {
RuntimeCallTimerScope scope(stats(), counter_id());
Sleep(100);
EXPECT_EQ(0, counter()->count());
- EXPECT_EQ(0, counter()->time().InMilliseconds());
+ EXPECT_EQ(0, counter()->time().InMicroseconds());
{
RuntimeCallTimerScope scope(stats(), counter_id2());
EXPECT_EQ(0, counter2()->count());
- EXPECT_EQ(0, counter2()->time().InMilliseconds());
+ EXPECT_EQ(0, counter2()->time().InMicroseconds());
Sleep(50);
// This calls Snapshot on the current active timer and sychronizes and
@@ -435,40 +493,35 @@ TEST_F(RuntimeCallStatsTest, PrintAndSnapshot) {
stats()->Print(out);
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(0, counter2()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(),
- 100 + kEpsilonMs);
- EXPECT_IN_RANGE(50, counter2()->time().InMilliseconds(), 50 + kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(50, counter2()->time().InMicroseconds());
// Calling Print several times shouldn't have a (big) impact on the
// measured times.
stats()->Print(out);
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(0, counter2()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(),
- 100 + kEpsilonMs);
- EXPECT_IN_RANGE(50, counter2()->time().InMilliseconds(), 50 + kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(50, counter2()->time().InMicroseconds());
Sleep(50);
stats()->Print(out);
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(0, counter2()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(),
- 100 + kEpsilonMs);
- EXPECT_IN_RANGE(100, counter2()->time().InMilliseconds(),
- 100 + kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter2()->time().InMicroseconds());
Sleep(50);
}
Sleep(50);
EXPECT_EQ(0, counter()->count());
EXPECT_EQ(1, counter2()->count());
- EXPECT_IN_RANGE(100, counter()->time().InMilliseconds(), 100 + kEpsilonMs);
- EXPECT_IN_RANGE(150, counter2()->time().InMilliseconds(), 150 + kEpsilonMs);
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(150, counter2()->time().InMicroseconds());
Sleep(50);
}
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(1, counter2()->count());
- EXPECT_IN_RANGE(200, counter()->time().InMilliseconds(), 200 + kEpsilonMs);
- EXPECT_IN_RANGE(150, counter2()->time().InMilliseconds(),
- 150 + 2 * kEpsilonMs);
+ EXPECT_EQ(200, counter()->time().InMicroseconds());
+ EXPECT_EQ(150, counter2()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, NestedScopes) {
@@ -499,9 +552,124 @@ TEST_F(RuntimeCallStatsTest, NestedScopes) {
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(2, counter2()->count());
EXPECT_EQ(2, counter3()->count());
- EXPECT_IN_RANGE(250, counter()->time().InMilliseconds(), 250 + kEpsilonMs);
- EXPECT_IN_RANGE(300, counter2()->time().InMilliseconds(), 300 + kEpsilonMs);
- EXPECT_IN_RANGE(100, counter3()->time().InMilliseconds(), 100 + kEpsilonMs);
+ EXPECT_EQ(250, counter()->time().InMicroseconds());
+ EXPECT_EQ(300, counter2()->time().InMicroseconds());
+ EXPECT_EQ(100, counter3()->time().InMicroseconds());
+}
+
+TEST_F(RuntimeCallStatsTest, BasicJavaScript) {
+ RuntimeCallCounter* counter = &stats()->JS_Execution;
+ EXPECT_EQ(0, counter->count());
+ EXPECT_EQ(0, counter->time().InMicroseconds());
+
+ {
+ NativeTimeScope native_timer_scope;
+ RunJS("function f() { return 1; }");
+ }
+ EXPECT_EQ(1, counter->count());
+ int64_t time = counter->time().InMicroseconds();
+ EXPECT_LT(0, time);
+
+ {
+ NativeTimeScope native_timer_scope;
+ RunJS("f()");
+ }
+ EXPECT_EQ(2, counter->count());
+ EXPECT_LE(time, counter->time().InMicroseconds());
+}
+
+TEST_F(RuntimeCallStatsTest, FunctionLengthGetter) {
+ RuntimeCallCounter* getter_counter = &stats()->FunctionLengthGetter;
+ RuntimeCallCounter* js_counter = &stats()->JS_Execution;
+ EXPECT_EQ(0, getter_counter->count());
+ EXPECT_EQ(0, js_counter->count());
+ EXPECT_EQ(0, getter_counter->time().InMicroseconds());
+ EXPECT_EQ(0, js_counter->time().InMicroseconds());
+
+ {
+ NativeTimeScope native_timer_scope;
+ RunJS("function f(array) { return array.length; }");
+ }
+ EXPECT_EQ(0, getter_counter->count());
+ EXPECT_EQ(1, js_counter->count());
+ EXPECT_EQ(0, getter_counter->time().InMicroseconds());
+ int64_t js_time = js_counter->time().InMicroseconds();
+ EXPECT_LT(0, js_time);
+
+ {
+ NativeTimeScope native_timer_scope;
+ RunJS("f.length");
+ }
+ EXPECT_EQ(1, getter_counter->count());
+ EXPECT_EQ(2, js_counter->count());
+ EXPECT_LE(0, getter_counter->time().InMicroseconds());
+ EXPECT_LT(js_time, js_counter->time().InMicroseconds());
+
+ {
+ NativeTimeScope native_timer_scope;
+ RunJS("for (let i = 0; i < 50; i++) { f.length }");
+ }
+ EXPECT_EQ(51, getter_counter->count());
+ EXPECT_EQ(3, js_counter->count());
+}
+
+namespace {
+static RuntimeCallStatsTest* current_test;
+static const int kCustomCallbackTime = 1234;
+static void CustomCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ RuntimeCallTimerScope scope(current_test->stats(),
+ current_test->counter_id2());
+ current_test->Sleep(kCustomCallbackTime);
+}
+} // namespace
+
+TEST_F(RuntimeCallStatsTest, CustomCallback) {
+ current_test = this;
+ // Set up a function template with a custom callback.
+ v8::Isolate* isolate = v8_isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->Set(isolate, "callback",
+ v8::FunctionTemplate::New(isolate, CustomCallback));
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(v8_context()).ToLocalChecked();
+ SetGlobalProperty("custom_object", object);
+
+ // TODO(cbruni): Check api accessor timer (one above the custom callback).
+ EXPECT_EQ(0, js_counter()->count());
+ EXPECT_EQ(0, counter()->count());
+ EXPECT_EQ(0, counter2()->count());
+ {
+ RuntimeCallTimerScope scope(stats(), counter_id());
+ Sleep(100);
+ RunJS("custom_object.callback();");
+ }
+ EXPECT_EQ(1, js_counter()->count());
+ // Given that no native timers are used, only the two scopes explitly
+ // mentioned above will track the time.
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(1, counter2()->count());
+ EXPECT_EQ(kCustomCallbackTime, counter2()->time().InMicroseconds());
+
+ RunJS("for (let i = 0; i < 9; i++) { custom_object.callback() };");
+ EXPECT_EQ(2, js_counter()->count());
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(10, counter2()->count());
+ EXPECT_EQ(kCustomCallbackTime * 10, counter2()->time().InMicroseconds());
+
+ RunJS("for (let i = 0; i < 4000; i++) { custom_object.callback() };");
+ EXPECT_EQ(3, js_counter()->count());
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(4010, counter2()->count());
+ EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 1f2aab06d6..3b7b610c8c 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -99,7 +99,7 @@ TEST_F(HeapTest, ASLR) {
}
if (hints.size() == 1) {
EXPECT_TRUE((*hints.begin()) == nullptr);
- EXPECT_TRUE(v8::internal::GetRandomMmapAddr() == nullptr);
+ EXPECT_TRUE(base::OS::GetRandomMmapAddr() == nullptr);
} else {
// It is unlikely that 1000 random samples will collide to less then 500
// values.
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 5cdce7fc00..bbc9e565c9 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <limits>
+
#include "src/v8.h"
#include "src/ast/scopes.h"
@@ -26,7 +28,8 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 131);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 1, 131, &feedback_spec);
Factory* factory = isolate()->factory();
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
@@ -56,8 +59,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(10000000))
.StoreAccumulatorInRegister(reg)
- .LoadLiteral(
- ast_factory.NewString(ast_factory.GetOneByteString("A constant")))
+ .LoadLiteral(ast_factory.GetOneByteString("A constant"))
.StoreAccumulatorInRegister(reg)
.LoadUndefined()
.StoreAccumulatorInRegister(reg)
@@ -81,12 +83,36 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.MoveRegister(reg, other);
builder.MoveRegister(reg, wide);
+ FeedbackSlot load_global_slot =
+ feedback_spec.AddLoadGlobalICSlot(NOT_INSIDE_TYPEOF);
+ FeedbackSlot load_global_typeof_slot =
+ feedback_spec.AddLoadGlobalICSlot(INSIDE_TYPEOF);
+ FeedbackSlot sloppy_store_global_slot =
+ feedback_spec.AddStoreGlobalICSlot(LanguageMode::kSloppy);
+ FeedbackSlot strict_store_global_slot =
+ feedback_spec.AddStoreGlobalICSlot(LanguageMode::kStrict);
+ FeedbackSlot load_slot = feedback_spec.AddLoadICSlot();
+ FeedbackSlot keyed_load_slot = feedback_spec.AddKeyedLoadICSlot();
+ FeedbackSlot sloppy_store_slot =
+ feedback_spec.AddStoreICSlot(LanguageMode::kSloppy);
+ FeedbackSlot strict_store_slot =
+ feedback_spec.AddStoreICSlot(LanguageMode::kStrict);
+ FeedbackSlot sloppy_keyed_store_slot =
+ feedback_spec.AddKeyedStoreICSlot(LanguageMode::kSloppy);
+ FeedbackSlot strict_keyed_store_slot =
+ feedback_spec.AddKeyedStoreICSlot(LanguageMode::kStrict);
+ FeedbackSlot store_own_slot = feedback_spec.AddStoreOwnICSlot();
+
// Emit global load / store operations.
const AstRawString* name = ast_factory.GetOneByteString("var_name");
- builder.LoadGlobal(name, 1, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1, TypeofMode::INSIDE_TYPEOF)
- .StoreGlobal(name, 1, LanguageMode::SLOPPY)
- .StoreGlobal(name, 1, LanguageMode::STRICT);
+ builder
+ .LoadGlobal(name, load_global_slot.ToInt(), TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, load_global_typeof_slot.ToInt(),
+ TypeofMode::INSIDE_TYPEOF)
+ .StoreGlobal(name, sloppy_store_global_slot.ToInt(),
+ LanguageMode::kSloppy)
+ .StoreGlobal(name, strict_store_global_slot.ToInt(),
+ LanguageMode::kStrict);
// Emit context operations.
builder.PushContext(reg)
@@ -106,21 +132,26 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreContextSlot(Register::current_context(), 3, 0);
// Emit load / store property operations.
- builder.LoadNamedProperty(reg, name, 0)
- .LoadKeyedProperty(reg, 0)
- .StoreNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
- .StoreKeyedProperty(reg, reg, 0, LanguageMode::SLOPPY)
- .StoreNamedProperty(reg, name, 0, LanguageMode::STRICT)
- .StoreKeyedProperty(reg, reg, 0, LanguageMode::STRICT)
- .StoreNamedOwnProperty(reg, name, 0);
+ builder.LoadNamedProperty(reg, name, load_slot.ToInt())
+ .LoadKeyedProperty(reg, keyed_load_slot.ToInt())
+ .StoreNamedProperty(reg, name, sloppy_store_slot.ToInt(),
+ LanguageMode::kSloppy)
+ .StoreKeyedProperty(reg, reg, sloppy_keyed_store_slot.ToInt(),
+ LanguageMode::kSloppy)
+ .StoreNamedProperty(reg, name, strict_store_slot.ToInt(),
+ LanguageMode::kStrict)
+ .StoreKeyedProperty(reg, reg, strict_keyed_store_slot.ToInt(),
+ LanguageMode::kStrict)
+ .StoreNamedOwnProperty(reg, name, store_own_slot.ToInt());
// Emit load / store lookup slots.
builder.LoadLookupSlot(name, TypeofMode::NOT_INSIDE_TYPEOF)
.LoadLookupSlot(name, TypeofMode::INSIDE_TYPEOF)
- .StoreLookupSlot(name, LanguageMode::SLOPPY, LookupHoistingMode::kNormal)
- .StoreLookupSlot(name, LanguageMode::SLOPPY,
+ .StoreLookupSlot(name, LanguageMode::kSloppy, LookupHoistingMode::kNormal)
+ .StoreLookupSlot(name, LanguageMode::kSloppy,
LookupHoistingMode::kLegacySloppy)
- .StoreLookupSlot(name, LanguageMode::STRICT, LookupHoistingMode::kNormal);
+ .StoreLookupSlot(name, LanguageMode::kStrict,
+ LookupHoistingMode::kNormal);
// Emit load / store lookup slots with context fast paths.
builder.LoadLookupContextSlot(name, TypeofMode::NOT_INSIDE_TYPEOF, 1, 0)
@@ -168,7 +199,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.BinaryOperation(Token::Value::SUB, reg, 2)
.BinaryOperation(Token::Value::MUL, reg, 3)
.BinaryOperation(Token::Value::DIV, reg, 4)
- .BinaryOperation(Token::Value::MOD, reg, 5);
+ .BinaryOperation(Token::Value::MOD, reg, 5)
+ .BinaryOperation(Token::Value::EXP, reg, 6);
// Emit bitwise operator invocations
builder.BinaryOperation(Token::Value::BIT_OR, reg, 6)
@@ -186,6 +218,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.BinaryOperationSmiLiteral(Token::Value::MUL, Smi::FromInt(42), 2)
.BinaryOperationSmiLiteral(Token::Value::DIV, Smi::FromInt(42), 2)
.BinaryOperationSmiLiteral(Token::Value::MOD, Smi::FromInt(42), 2)
+ .BinaryOperationSmiLiteral(Token::Value::EXP, Smi::FromInt(42), 2)
.BinaryOperationSmiLiteral(Token::Value::BIT_OR, Smi::FromInt(42), 2)
.BinaryOperationSmiLiteral(Token::Value::BIT_XOR, Smi::FromInt(42), 2)
.BinaryOperationSmiLiteral(Token::Value::BIT_AND, Smi::FromInt(42), 2)
@@ -206,7 +239,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.TypeOf();
// Emit delete
- builder.Delete(reg, LanguageMode::SLOPPY).Delete(reg, LanguageMode::STRICT);
+ builder.Delete(reg, LanguageMode::kSloppy).Delete(reg, LanguageMode::kStrict);
// Emit construct.
builder.Construct(reg, reg_list, 1).ConstructWithSpread(reg, reg_list, 1);
@@ -220,14 +253,14 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CompareOperation(Token::Value::LTE, reg, 5)
.CompareOperation(Token::Value::GTE, reg, 6)
.CompareTypeOf(TestTypeOfFlags::LiteralFlag::kNumber)
- .CompareOperation(Token::Value::INSTANCEOF, reg)
+ .CompareOperation(Token::Value::INSTANCEOF, reg, 7)
.CompareOperation(Token::Value::IN, reg)
.CompareUndetectable()
.CompareUndefined()
.CompareNull();
// Emit conversion operator invocations.
- builder.ToNumber(1).ToObject(reg).ToName(reg);
+ builder.ToNumber(1).ToNumeric(1).ToObject(reg).ToName(reg);
// Emit GetSuperConstructor.
builder.GetSuperConstructor(reg);
@@ -310,30 +343,11 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Wide constant pool loads
for (int i = 0; i < 256; i++) {
// Emit junk in constant pool to force wide constant pool index.
- builder.LoadLiteral(ast_factory.NewNumber(2.5321 + i));
+ builder.LoadLiteral(2.5321 + i);
}
builder.LoadLiteral(Smi::FromInt(20000000));
const AstRawString* wide_name = ast_factory.GetOneByteString("var_wide_name");
- // Emit wide global load / store operations.
- builder.LoadGlobal(name, 1024, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
- .StoreGlobal(name, 1024, LanguageMode::SLOPPY)
- .StoreGlobal(wide_name, 1, LanguageMode::STRICT);
-
- // Emit extra wide global load.
- builder.LoadGlobal(name, 1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
-
- // Emit wide load / store property operations.
- builder.LoadNamedProperty(reg, wide_name, 0)
- .LoadKeyedProperty(reg, 2056)
- .StoreNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
- .StoreKeyedProperty(reg, reg, 2056, LanguageMode::SLOPPY)
- .StoreNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
- .StoreKeyedProperty(reg, reg, 2056, LanguageMode::STRICT)
- .StoreNamedOwnProperty(reg, wide_name, 0);
-
builder.StoreDataPropertyInLiteral(reg, reg,
DataPropertyInLiteralFlag::kNoFlags, 0);
@@ -344,11 +358,11 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit wide load / store lookup slots.
builder.LoadLookupSlot(wide_name, TypeofMode::NOT_INSIDE_TYPEOF)
.LoadLookupSlot(wide_name, TypeofMode::INSIDE_TYPEOF)
- .StoreLookupSlot(wide_name, LanguageMode::SLOPPY,
+ .StoreLookupSlot(wide_name, LanguageMode::kSloppy,
LookupHoistingMode::kNormal)
- .StoreLookupSlot(wide_name, LanguageMode::SLOPPY,
+ .StoreLookupSlot(wide_name, LanguageMode::kSloppy,
LookupHoistingMode::kLegacySloppy)
- .StoreLookupSlot(wide_name, LanguageMode::STRICT,
+ .StoreLookupSlot(wide_name, LanguageMode::kStrict,
LookupHoistingMode::kNormal);
// CreateClosureWide
@@ -454,7 +468,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
for (int locals = 0; locals < 5; locals++) {
for (int temps = 0; temps < 3; temps++) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, locals);
+ BytecodeArrayBuilder builder(zone(), 1, locals);
BytecodeRegisterAllocator* allocator(builder.register_allocator());
for (int i = 0; i < locals; i++) {
builder.LoadLiteral(Smi::kZero);
@@ -491,7 +505,7 @@ TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
TEST_F(BytecodeArrayBuilderTest, Parameters) {
- BytecodeArrayBuilder builder(isolate(), zone(), 10, 0);
+ BytecodeArrayBuilder builder(zone(), 10, 0);
Register receiver(builder.Receiver());
Register param8(builder.Parameter(8));
@@ -500,35 +514,37 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
TEST_F(BytecodeArrayBuilderTest, Constants) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 0);
+ BytecodeArrayBuilder builder(zone(), 1, 0);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_1 = ast_factory.NewNumber(3.14);
- const AstValue* heap_num_2 = ast_factory.NewNumber(5.2);
- const AstValue* string =
- ast_factory.NewString(ast_factory.GetOneByteString("foo"));
- const AstValue* string_copy =
- ast_factory.NewString(ast_factory.GetOneByteString("foo"));
+ double heap_num_1 = 3.14;
+ double heap_num_2 = 5.2;
+ double nan = std::numeric_limits<double>::quiet_NaN();
+ const AstRawString* string = ast_factory.GetOneByteString("foo");
+ const AstRawString* string_copy = ast_factory.GetOneByteString("foo");
builder.LoadLiteral(heap_num_1)
.LoadLiteral(heap_num_2)
.LoadLiteral(string)
.LoadLiteral(heap_num_1)
.LoadLiteral(heap_num_1)
+ .LoadLiteral(nan)
.LoadLiteral(string_copy)
+ .LoadLiteral(heap_num_2)
+ .LoadLiteral(nan)
.Return();
ast_factory.Internalize(isolate());
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
// Should only have one entry for each identical constant.
- CHECK_EQ(array->constant_pool()->length(), 3);
+ EXPECT_EQ(4, array->constant_pool()->length());
}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
static const int kFarJumpDistance = 256 + 20;
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
+ BytecodeArrayBuilder builder(zone(), 1, 1);
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
@@ -643,7 +659,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
+ BytecodeArrayBuilder builder(zone(), 1, 1);
Register reg(0);
@@ -691,7 +707,7 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
}
TEST_F(BytecodeArrayBuilderTest, SmallSwitch) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
+ BytecodeArrayBuilder builder(zone(), 1, 1);
// Small jump table that fits into the single-size constant pool
int small_jump_table_size = 5;
@@ -739,7 +755,7 @@ TEST_F(BytecodeArrayBuilderTest, SmallSwitch) {
}
TEST_F(BytecodeArrayBuilderTest, WideSwitch) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
+ BytecodeArrayBuilder builder(zone(), 1, 1);
// Large jump table that requires a wide Switch bytecode.
int large_jump_table_size = 256;
@@ -787,7 +803,7 @@ TEST_F(BytecodeArrayBuilderTest, WideSwitch) {
}
TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 0);
+ BytecodeArrayBuilder builder(zone(), 1, 0);
// Labels can only have 1 forward reference, but
// can be referred to mulitple times once bound.
@@ -821,7 +837,7 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
static const int kRepeats = 3;
- BytecodeArrayBuilder builder(isolate(), zone(), 1, 0);
+ BytecodeArrayBuilder builder(zone(), 1, 0);
for (int i = 0; i < kRepeats; i++) {
BytecodeLabel label, after_jump0, after_jump1;
builder.Jump(&label)
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index aefef108bf..ee5d8803f7 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -22,11 +22,12 @@ class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -37,7 +38,10 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
- uint32_t feedback_slot = 97;
+ uint32_t load_feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
+ uint32_t forin_feedback_slot = feedback_spec.AddForInSlot().ToInt();
+ uint32_t load_global_feedback_slot =
+ feedback_spec.AddLoadGlobalICSlot(TypeofMode::NOT_INSIDE_TYPEOF).ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -54,14 +58,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
- .LoadNamedProperty(reg_1, name, feedback_slot)
+ .LoadNamedProperty(reg_1, name, load_feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(triple, feedback_slot)
+ .ForInPrepare(triple, forin_feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, load_global_feedback_slot,
+ TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
@@ -73,8 +78,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_0->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
@@ -91,8 +95,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_1->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
@@ -204,7 +207,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
- EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
+ EXPECT_EQ(iterator.GetIndexOperand(2), load_feedback_slot);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
iterator.Advance();
@@ -246,7 +249,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
- EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
+ EXPECT_EQ(iterator.GetIndexOperand(1), forin_feedback_slot);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
iterator.Advance();
@@ -270,11 +273,10 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- EXPECT_EQ(iterator.current_bytecode_size(), 10);
- EXPECT_EQ(iterator.GetIndexOperand(1), 0x10000000u);
- offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
- kPrefixByteSize;
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode_size(), 3);
+ EXPECT_EQ(iterator.GetIndexOperand(1), load_global_feedback_slot);
+ offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kSingle);
iterator.Advance();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 7d9bcd09c0..12cd55c2a9 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -22,11 +22,12 @@ class BytecodeArrayRandomIteratorTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -36,7 +37,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
RegisterList triple(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -60,7 +61,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
ast_factory.Internalize(isolate());
@@ -76,11 +76,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -90,7 +91,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
RegisterList triple(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -114,7 +115,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
ast_factory.Internalize(isolate());
@@ -130,11 +130,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -144,7 +145,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
RegisterList triple(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -168,7 +169,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
ast_factory.Internalize(isolate());
@@ -181,19 +181,19 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), 0);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_TRUE(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_0->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
ASSERT_TRUE(iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -203,7 +203,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
RegisterList triple(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -227,7 +227,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
ast_factory.Internalize(isolate());
@@ -239,7 +238,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
int offset = bytecodeArray->length() -
Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 23);
+ EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -248,11 +247,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -263,7 +263,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -287,7 +287,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
@@ -329,8 +328,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_TRUE(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_1->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(18);
@@ -392,7 +390,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
- iterator.GoToIndex(23);
+ iterator.GoToIndex(22);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
@@ -417,11 +415,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
- kPrefixByteSize;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 23);
+ EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -436,11 +432,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -451,7 +448,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -475,7 +472,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
@@ -489,8 +485,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_TRUE(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_0->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -509,8 +504,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_TRUE(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_1->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -705,18 +699,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
- EXPECT_EQ(iterator.current_index(), 22);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- EXPECT_EQ(iterator.current_bytecode_size(), 10);
- EXPECT_EQ(iterator.GetIndexOperand(1), 0x10000000u);
- offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
- kPrefixByteSize;
- ++iterator;
-
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 23);
+ EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -727,11 +711,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
+ FeedbackVectorSpec feedback_spec(zone());
+ BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- const AstValue* heap_num_0 = ast_factory.NewNumber(2.718);
- const AstValue* heap_num_1 = ast_factory.NewNumber(2.0 * Smi::kMaxValue);
+ double heap_num_0 = 2.718;
+ double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
@@ -742,7 +727,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
- uint32_t feedback_slot = 97;
+ uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -766,7 +751,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
@@ -780,22 +764,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 23);
+ EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
- kPrefixByteSize;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
- EXPECT_EQ(iterator.current_index(), 22);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- EXPECT_EQ(iterator.current_bytecode_size(), 10);
- EXPECT_EQ(iterator.GetIndexOperand(1), 0x10000000u);
- --iterator;
-
offset -= Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
EXPECT_EQ(iterator.current_index(), 21);
@@ -991,8 +965,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_TRUE(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_1->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -1011,8 +984,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_TRUE(iterator.GetConstantForIndexOperand(0).is_identical_to(
- heap_num_0->value()));
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
ASSERT_FALSE(iterator.IsValid());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index bd8f702d5f..e4a956a0ce 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -22,7 +22,9 @@
namespace v8 {
namespace internal {
namespace interpreter {
+namespace bytecode_array_writer_unittest {
+#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
@@ -363,6 +365,10 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
CHECK(source_iterator.done());
}
+#undef B
+#undef R
+
+} // namespace bytecode_array_writer_unittest
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index 612b96e32c..018263f06b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -16,6 +16,8 @@ namespace v8 {
namespace internal {
namespace interpreter {
+#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
+
TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
struct BytecodesAndResult {
const uint8_t bytecode[32];
@@ -95,6 +97,8 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
}
}
+#undef B
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-utils.h b/deps/v8/test/unittests/interpreter/bytecode-utils.h
index fffb7190c8..9a2cee3014 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-utils.h
+++ b/deps/v8/test/unittests/interpreter/bytecode-utils.h
@@ -27,7 +27,6 @@
#endif
#define U8(i) static_cast<uint8_t>(i)
-#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
#define REG_OPERAND(i) \
(InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize - (i))
#define R8(i) static_cast<uint8_t>(REG_OPERAND(i))
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index cfcdf6c3bc..ce6ba81fc1 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -134,70 +134,72 @@ TEST(Bytecodes, PrefixMappings) {
}
TEST(Bytecodes, ScaleForSignedOperand) {
- CHECK(Bytecodes::ScaleForSignedOperand(0) == OperandScale::kSingle);
- CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt8) == OperandScale::kSingle);
- CHECK(Bytecodes::ScaleForSignedOperand(kMinInt8) == OperandScale::kSingle);
- CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt8 + 1) ==
- OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForSignedOperand(kMinInt8 - 1) ==
- OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt16) == OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForSignedOperand(kMinInt16) == OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt16 + 1) ==
- OperandScale::kQuadruple);
- CHECK(Bytecodes::ScaleForSignedOperand(kMinInt16 - 1) ==
- OperandScale::kQuadruple);
- CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt) == OperandScale::kQuadruple);
- CHECK(Bytecodes::ScaleForSignedOperand(kMinInt) == OperandScale::kQuadruple);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(0), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMaxInt8), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMinInt8), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMaxInt8 + 1),
+ OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMinInt8 - 1),
+ OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMaxInt16), OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMinInt16), OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMaxInt16 + 1),
+ OperandScale::kQuadruple);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMinInt16 - 1),
+ OperandScale::kQuadruple);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMaxInt), OperandScale::kQuadruple);
+ CHECK_EQ(Bytecodes::ScaleForSignedOperand(kMinInt), OperandScale::kQuadruple);
}
TEST(Bytecodes, ScaleForUnsignedOperands) {
// int overloads
- CHECK(Bytecodes::ScaleForUnsignedOperand(0) == OperandScale::kSingle);
- CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt8) == OperandScale::kSingle);
- CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt8 + 1) ==
- OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt16) ==
- OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt16 + 1) ==
- OperandScale::kQuadruple);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(0), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(kMaxUInt8),
+ OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(kMaxUInt8 + 1),
+ OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(kMaxUInt16),
+ OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(kMaxUInt16 + 1),
+ OperandScale::kQuadruple);
// size_t overloads
- CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(0)) ==
- OperandScale::kSingle);
- CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt8)) ==
- OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(0)),
+ OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt8)),
+ OperandScale::kSingle);
CHECK(Bytecodes::ScaleForUnsignedOperand(
static_cast<size_t>(kMaxUInt8 + 1)) == OperandScale::kDouble);
- CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt16)) ==
- OperandScale::kDouble);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt16)),
+ OperandScale::kDouble);
CHECK(Bytecodes::ScaleForUnsignedOperand(
static_cast<size_t>(kMaxUInt16 + 1)) == OperandScale::kQuadruple);
- CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt32)) ==
- OperandScale::kQuadruple);
+ CHECK_EQ(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt32)),
+ OperandScale::kQuadruple);
}
TEST(Bytecodes, SizesForUnsignedOperands) {
// int overloads
- CHECK(Bytecodes::SizeForUnsignedOperand(0) == OperandSize::kByte);
- CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt8) == OperandSize::kByte);
- CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt8 + 1) ==
- OperandSize::kShort);
- CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt16) == OperandSize::kShort);
- CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt16 + 1) ==
- OperandSize::kQuad);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(0), OperandSize::kByte);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(kMaxUInt8), OperandSize::kByte);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(kMaxUInt8 + 1),
+ OperandSize::kShort);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(kMaxUInt16), OperandSize::kShort);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(kMaxUInt16 + 1),
+ OperandSize::kQuad);
// size_t overloads
- CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(0)) ==
- OperandSize::kByte);
- CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8)) ==
- OperandSize::kByte);
- CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8 + 1)) ==
- OperandSize::kShort);
- CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt16)) ==
- OperandSize::kShort);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(0)),
+ OperandSize::kByte);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8)),
+ OperandSize::kByte);
+ CHECK_EQ(
+ Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8 + 1)),
+ OperandSize::kShort);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt16)),
+ OperandSize::kShort);
CHECK(Bytecodes::SizeForUnsignedOperand(
static_cast<size_t>(kMaxUInt16 + 1)) == OperandSize::kQuad);
- CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt32)) ==
- OperandSize::kQuad);
+ CHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt32)),
+ OperandSize::kQuad);
}
// Helper macros to generate a check for if a bytecode is in a macro list of
@@ -325,10 +327,10 @@ TEST(OperandScale, PrefixesRequired) {
CHECK(Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale::kDouble));
CHECK(
Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale::kQuadruple));
- CHECK(Bytecodes::OperandScaleToPrefixBytecode(OperandScale::kDouble) ==
- Bytecode::kWide);
- CHECK(Bytecodes::OperandScaleToPrefixBytecode(OperandScale::kQuadruple) ==
- Bytecode::kExtraWide);
+ CHECK_EQ(Bytecodes::OperandScaleToPrefixBytecode(OperandScale::kDouble),
+ Bytecode::kWide);
+ CHECK_EQ(Bytecodes::OperandScaleToPrefixBytecode(OperandScale::kQuadruple),
+ Bytecode::kExtraWide);
}
TEST(AccumulatorUse, LogicalOperators) {
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 027fa95ede..5c1bfe127c 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -36,7 +36,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
for (size_t i = 0; i < k16BitCapacity; i++) {
- builder.Insert(ast_factory.NewNumber(i + 0.5));
+ builder.Insert(i + 0.5);
}
CHECK_EQ(builder.size(), k16BitCapacity);
ast_factory.Internalize(isolate());
@@ -51,44 +51,32 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
- AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
- static const size_t kNumberOfElements = 37;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- const AstValue* value = ast_factory.NewNumber(i + 0.5);
- builder.Insert(value);
- ast_factory.Internalize(isolate());
- CHECK(
- builder.At(i, isolate()).ToHandleChecked()->SameValue(*value->value()));
+ static const int kNumberOfElements = 37;
+ for (int i = 0; i < kNumberOfElements; i++) {
+ builder.Insert(i + 0.5);
}
- ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), static_cast<int>(kNumberOfElements));
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))
- ->SameValue(*builder.At(i, isolate()).ToHandleChecked()));
+ ASSERT_EQ(kNumberOfElements, constant_array->length());
+ for (int i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> actual(constant_array->get(i), isolate());
+ Handle<Object> expected = builder.At(i, isolate()).ToHandleChecked();
+ ASSERT_EQ(expected->Number(), actual->Number()) << "Failure at index " << i;
}
}
TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
- AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
- static const size_t kNumberOfElements = 37373;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- const AstValue* value = ast_factory.NewNumber(i + 0.5);
- builder.Insert(value);
- ast_factory.Internalize(isolate());
- CHECK(
- builder.At(i, isolate()).ToHandleChecked()->SameValue(*value->value()));
+ static const int kNumberOfElements = 37373;
+ for (int i = 0; i < kNumberOfElements; i++) {
+ builder.Insert(i + 0.5);
}
- ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), static_cast<int>(kNumberOfElements));
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))
- ->SameValue(*builder.At(i, isolate()).ToHandleChecked()));
+ ASSERT_EQ(kNumberOfElements, constant_array->length());
+ for (int i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> actual(constant_array->get(i), isolate());
+ Handle<Object> expected = builder.At(i, isolate()).ToHandleChecked();
+ ASSERT_EQ(expected->Number(), actual->Number()) << "Failure at index " << i;
}
}
@@ -97,17 +85,17 @@ TEST_F(ConstantArrayBuilderTest, ToLargeFixedArrayWithReservations) {
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
- static const size_t kNumberOfElements = 37373;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- builder.CommitReservedEntry(builder.CreateReservedEntry(),
- Smi::FromInt(static_cast<int>(i)));
+ static const int kNumberOfElements = 37373;
+ for (int i = 0; i < kNumberOfElements; i++) {
+ builder.CommitReservedEntry(builder.CreateReservedEntry(), Smi::FromInt(i));
}
ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), static_cast<int>(kNumberOfElements));
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))
- ->SameValue(*builder.At(i, isolate()).ToHandleChecked()));
+ ASSERT_EQ(kNumberOfElements, constant_array->length());
+ for (int i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> actual(constant_array->get(i), isolate());
+ Handle<Object> expected = builder.At(i, isolate()).ToHandleChecked();
+ ASSERT_EQ(expected->Number(), actual->Number()) << "Failure at index " << i;
}
}
@@ -119,7 +107,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
isolate()->heap()->HashSeed());
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(operand_size == OperandSize::kByte);
+ CHECK_EQ(operand_size, OperandSize::kByte);
}
for (size_t i = 0; i < 2 * k8BitCapacity; i++) {
builder.CommitReservedEntry(builder.CreateReservedEntry(),
@@ -147,7 +135,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
// Now make reservations, and commit them with unique entries.
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(operand_size == OperandSize::kByte);
+ CHECK_EQ(operand_size, OperandSize::kByte);
}
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
Smi* value = Smi::FromInt(static_cast<int>(2 * k8BitCapacity + i));
@@ -192,7 +180,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
}
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(operand_size == OperandSize::kShort);
+ CHECK_EQ(operand_size, OperandSize::kShort);
CHECK_EQ(builder.size(), k8BitCapacity);
}
for (size_t i = 0; i < reserved; i++) {
@@ -201,14 +189,14 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
}
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(operand_size == OperandSize::kShort);
+ CHECK_EQ(operand_size, OperandSize::kShort);
builder.CommitReservedEntry(operand_size,
Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), k8BitCapacity);
}
for (size_t i = k8BitCapacity; i < k8BitCapacity + reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(operand_size == OperandSize::kShort);
+ CHECK_EQ(operand_size, OperandSize::kShort);
builder.CommitReservedEntry(operand_size,
Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + 1);
@@ -232,7 +220,7 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
isolate()->heap()->HashSeed());
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(OperandSize::kByte == operand_size);
+ CHECK_EQ(OperandSize::kByte, operand_size);
CHECK_EQ(builder.size(), 0u);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
@@ -260,28 +248,25 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
- AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
- CHECK(OperandSize::kByte == operand_size);
+ CHECK_EQ(OperandSize::kByte, operand_size);
CHECK_EQ(builder.size(), 0u);
}
- const AstValue* ast_values[k8BitCapacity];
+ double values[k8BitCapacity];
for (size_t i = 0; i < k8BitCapacity; i++) {
- ast_values[i] = ast_factory.NewNumber(i + 0.5);
+ values[i] = i + 0.5;
}
for (size_t i = 0; i < k8BitCapacity; i++) {
- builder.Insert(ast_values[i]);
+ builder.Insert(values[i]);
CHECK_EQ(builder.size(), i + k8BitCapacity + 1);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
builder.DiscardReservedEntry(OperandSize::kByte);
- builder.Insert(ast_values[i]);
+ builder.Insert(values[i]);
CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
- ast_factory.Internalize(isolate());
for (size_t i = 0; i < k8BitCapacity; i++) {
Handle<Object> reference = isolate()->factory()->NewNumber(i + 0.5);
Handle<Object> original =
@@ -304,12 +289,10 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
}
// Values are placed before the reserved entries in the same slice.
for (int i = 0; i < k8BitCapacity - kNumberOfHoles; ++i) {
- CHECK_EQ(builder.Insert(ast_factory.NewNumber(i + 0.5)),
- static_cast<size_t>(i));
+ CHECK_EQ(builder.Insert(i + 0.5), static_cast<size_t>(i));
}
// The next value is pushed into the next slice.
- CHECK_EQ(builder.Insert(ast_factory.NewNumber(k8BitCapacity + 0.5)),
- k8BitCapacity);
+ CHECK_EQ(builder.Insert(k8BitCapacity + 0.5), k8BitCapacity);
// Discard the reserved entries.
for (int i = 0; i < kNumberOfHoles; ++i) {
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 2c3f182395..88acf680f5 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -20,6 +20,7 @@ namespace c = v8::internal::compiler;
namespace v8 {
namespace internal {
namespace interpreter {
+namespace interpreter_assembler_unittest {
InterpreterAssemblerTestState::InterpreterAssemblerTestState(
InterpreterAssemblerTest* test, Bytecode bytecode)
@@ -548,6 +549,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
}
}
+} // namespace interpreter_assembler_unittest
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 210a201d07..589d0c8df5 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -14,6 +14,7 @@
namespace v8 {
namespace internal {
namespace interpreter {
+namespace interpreter_assembler_unittest {
using ::testing::Matcher;
@@ -65,6 +66,7 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
};
};
+} // namespace interpreter_assembler_unittest
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
index f9b3e0b98f..17f1a34ba4 100644
--- a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/libplatform/default-platform.h"
+#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -30,17 +31,18 @@ struct MockIdleTask : public IdleTask {
class DefaultPlatformWithMockTime : public DefaultPlatform {
public:
DefaultPlatformWithMockTime()
- : DefaultPlatform(IdleTaskSupport::kEnabled), time_(0) {}
- double MonotonicallyIncreasingTime() override { return time_; }
- double CurrentClockTimeMillis() override {
- return time_ * base::Time::kMillisecondsPerSecond;
+ : DefaultPlatform(IdleTaskSupport::kEnabled, nullptr) {
+ mock_time_ = 0.0;
+ SetTimeFunctionForTesting([]() { return mock_time_; });
}
- void IncreaseTime(double seconds) { time_ += seconds; }
+ void IncreaseTime(double seconds) { mock_time_ += seconds; }
private:
- double time_;
+ static double mock_time_;
};
+double DefaultPlatformWithMockTime::mock_time_ = 0.0;
+
} // namespace
@@ -61,6 +63,24 @@ TEST(DefaultPlatformTest, PumpMessageLoop) {
EXPECT_FALSE(platform.PumpMessageLoop(isolate));
}
+TEST(DefaultPlatformTest, PumpMessageLoopWithTaskRunner) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatform platform;
+ std::shared_ptr<TaskRunner> taskrunner =
+ platform.GetForegroundTaskRunner(isolate);
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ taskrunner->PostTask(std::unique_ptr<Task>(task));
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+}
TEST(DefaultPlatformTest, PumpMessageLoopDelayed) {
InSequence s;
@@ -91,6 +111,36 @@ TEST(DefaultPlatformTest, PumpMessageLoopDelayed) {
EXPECT_TRUE(platform.PumpMessageLoop(isolate));
}
+TEST(DefaultPlatformTest, PumpMessageLoopDelayedWithTaskRunner) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatformWithMockTime platform;
+ std::shared_ptr<TaskRunner> taskrunner =
+ platform.GetForegroundTaskRunner(isolate);
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ StrictMock<MockTask>* task1 = new StrictMock<MockTask>;
+ StrictMock<MockTask>* task2 = new StrictMock<MockTask>;
+ taskrunner->PostDelayedTask(std::unique_ptr<Task>(task2), 100);
+ taskrunner->PostDelayedTask(std::unique_ptr<Task>(task1), 10);
+
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ platform.IncreaseTime(11);
+ EXPECT_CALL(*task1, Run());
+ EXPECT_CALL(*task1, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ platform.IncreaseTime(90);
+ EXPECT_CALL(*task2, Run());
+ EXPECT_CALL(*task2, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+}
TEST(DefaultPlatformTest, PumpMessageLoopNoStarvation) {
InSequence s;
@@ -153,6 +203,24 @@ TEST(DefaultPlatformTest, RunIdleTasks) {
platform.RunIdleTasks(isolate, 42.0);
}
+TEST(DefaultPlatformTest, RunIdleTasksWithTaskRunner) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatformWithMockTime platform;
+ std::shared_ptr<TaskRunner> taskrunner =
+ platform.GetForegroundTaskRunner(isolate);
+
+ StrictMock<MockIdleTask>* task = new StrictMock<MockIdleTask>;
+ taskrunner->PostIdleTask(std::unique_ptr<IdleTask>(task));
+ EXPECT_CALL(*task, Run(42.0 + 23.0));
+ EXPECT_CALL(*task, Die());
+ platform.IncreaseTime(23.0);
+ platform.RunIdleTasks(isolate, 42.0);
+}
+
TEST(DefaultPlatformTest, PendingIdleTasksAreDestroyedOnShutdown) {
InSequence s;
@@ -167,6 +235,88 @@ TEST(DefaultPlatformTest, PendingIdleTasksAreDestroyedOnShutdown) {
}
}
+namespace {
+
+class TestBackgroundTask : public Task {
+ public:
+ explicit TestBackgroundTask(base::Semaphore* sem, bool* executed)
+ : sem_(sem), executed_(executed) {}
+
+ virtual ~TestBackgroundTask() { Die(); }
+ MOCK_METHOD0(Die, void());
+
+ void Run() {
+ *executed_ = true;
+ sem_->Signal();
+ }
+
+ private:
+ base::Semaphore* sem_;
+ bool* executed_;
+};
+
+} // namespace
+
+TEST(DefaultPlatformTest, RunBackgroundTask) {
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatform platform;
+ platform.SetThreadPoolSize(1);
+ std::shared_ptr<TaskRunner> taskrunner =
+ platform.GetBackgroundTaskRunner(isolate);
+
+ base::Semaphore sem(0);
+ bool task_executed = false;
+ StrictMock<TestBackgroundTask>* task =
+ new StrictMock<TestBackgroundTask>(&sem, &task_executed);
+ EXPECT_CALL(*task, Die());
+ taskrunner->PostTask(std::unique_ptr<Task>(task));
+ EXPECT_TRUE(sem.WaitFor(base::TimeDelta::FromSeconds(1)));
+ EXPECT_TRUE(task_executed);
+}
+
+TEST(DefaultPlatformTest, NoIdleTasksInBackground) {
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+ DefaultPlatform platform;
+ platform.SetThreadPoolSize(1);
+ std::shared_ptr<TaskRunner> taskrunner =
+ platform.GetBackgroundTaskRunner(isolate);
+ EXPECT_FALSE(taskrunner->IdleTasksEnabled());
+}
+
+TEST(DefaultPlatformTest, PostTaskAfterPlatformTermination) {
+ std::shared_ptr<TaskRunner> foreground_taskrunner;
+ std::shared_ptr<TaskRunner> background_taskrunner;
+ {
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatformWithMockTime platform;
+ platform.SetThreadPoolSize(1);
+ foreground_taskrunner = platform.GetForegroundTaskRunner(isolate);
+ background_taskrunner = platform.GetBackgroundTaskRunner(isolate);
+ }
+ // It should still be possible to post tasks, even when the platform does not
+ // exist anymore.
+ StrictMock<MockTask>* task1 = new StrictMock<MockTask>;
+ EXPECT_CALL(*task1, Die());
+ foreground_taskrunner->PostTask(std::unique_ptr<Task>(task1));
+
+ StrictMock<MockTask>* task2 = new StrictMock<MockTask>;
+ EXPECT_CALL(*task2, Die());
+ foreground_taskrunner->PostDelayedTask(std::unique_ptr<Task>(task2), 10);
+
+ StrictMock<MockIdleTask>* task3 = new StrictMock<MockIdleTask>;
+ EXPECT_CALL(*task3, Die());
+ foreground_taskrunner->PostIdleTask(std::unique_ptr<IdleTask>(task3));
+
+ StrictMock<MockTask>* task4 = new StrictMock<MockTask>;
+ EXPECT_CALL(*task4, Die());
+ background_taskrunner->PostTask(std::unique_ptr<Task>(task4));
+}
+
} // namespace default_platform_unittest
} // namespace platform
} // namespace v8
diff --git a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
index 9bb160dd31..4001048a8e 100644
--- a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
@@ -38,9 +38,10 @@ class TaskQueueThread final : public base::Thread {
TEST(TaskQueueTest, Basic) {
TaskQueue queue;
- MockTask task;
- queue.Append(&task);
- EXPECT_EQ(&task, queue.GetNext());
+ std::unique_ptr<Task> task(new MockTask());
+ Task* ptr = task.get();
+ queue.Append(std::move(task));
+ EXPECT_EQ(ptr, queue.GetNext().get());
queue.Terminate();
EXPECT_THAT(queue.GetNext(), IsNull());
}
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
index 7079d22eb6..a42b37aa7c 100644
--- a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -32,10 +32,10 @@ TEST(WorkerThreadTest, PostSingleTask) {
WorkerThread thread2(&queue);
InSequence s;
- StrictMock<MockTask>* task = new StrictMock<MockTask>;
- EXPECT_CALL(*task, Run());
- EXPECT_CALL(*task, Die());
- queue.Append(task);
+ std::unique_ptr<StrictMock<MockTask>> task(new StrictMock<MockTask>);
+ EXPECT_CALL(*task.get(), Run());
+ EXPECT_CALL(*task.get(), Die());
+ queue.Append(std::move(task));
// The next call should not time out.
queue.BlockUntilQueueEmptyForTesting();
@@ -50,10 +50,10 @@ TEST(WorkerThreadTest, Basic) {
TaskQueue queue;
for (size_t i = 0; i < kNumTasks; ++i) {
InSequence s;
- StrictMock<MockTask>* task = new StrictMock<MockTask>;
- EXPECT_CALL(*task, Run());
- EXPECT_CALL(*task, Die());
- queue.Append(task);
+ std::unique_ptr<StrictMock<MockTask>> task(new StrictMock<MockTask>);
+ EXPECT_CALL(*task.get(), Run());
+ EXPECT_CALL(*task.get(), Die());
+ queue.Append(std::move(task));
}
WorkerThread thread1(&queue);
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index 7f4a737b48..47772a0f20 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -15,6 +15,42 @@
namespace v8 {
namespace internal {
+namespace {
+
+bool IsInStringInstanceTypeList(InstanceType instance_type) {
+ switch (instance_type) {
+#define TEST_INSTANCE_TYPE(type, ...) \
+ case InstanceType::type: \
+ STATIC_ASSERT(InstanceType::type < InstanceType::FIRST_NONSTRING_TYPE);
+
+ STRING_TYPE_LIST(TEST_INSTANCE_TYPE)
+#undef TEST_INSTANCE_TYPE
+ return true;
+ default:
+ EXPECT_LE(InstanceType::FIRST_NONSTRING_TYPE, instance_type);
+ return false;
+ }
+}
+
+void CheckOneInstanceType(InstanceType instance_type) {
+ if (IsInStringInstanceTypeList(instance_type)) {
+ EXPECT_TRUE((instance_type & kIsNotStringMask) == kStringTag)
+ << "Failing IsString mask check for " << instance_type;
+ } else {
+ EXPECT_FALSE((instance_type & kIsNotStringMask) == kStringTag)
+ << "Failing !IsString mask check for " << instance_type;
+ }
+}
+
+} // namespace
+
+TEST(Object, InstanceTypeList) {
+#define TEST_INSTANCE_TYPE(type) CheckOneInstanceType(InstanceType::type);
+
+ INSTANCE_TYPE_LIST(TEST_INSTANCE_TYPE)
+#undef TEST_INSTANCE_TYPE
+}
+
TEST(Object, InstanceTypeListOrder) {
int current = 0;
int last = -1;
@@ -57,8 +93,7 @@ TEST(Object, StructListOrder) {
typedef TestWithIsolate ObjectWithIsolate;
TEST_F(ObjectWithIsolate, DictionaryGrowth) {
- Handle<SeededNumberDictionary> dict =
- SeededNumberDictionary::New(isolate(), 1);
+ Handle<NumberDictionary> dict = NumberDictionary::New(isolate(), 1);
Handle<Object> value = isolate()->factory()->null_value();
PropertyDetails details = PropertyDetails::Empty();
@@ -68,48 +103,48 @@ TEST_F(ObjectWithIsolate, DictionaryGrowth) {
uint32_t i = 1;
// 3 elements fit into the initial capacity.
for (; i <= 3; i++) {
- dict = SeededNumberDictionary::Add(dict, i, value, details);
+ dict = NumberDictionary::Add(dict, i, value, details);
CHECK_EQ(4, dict->Capacity());
}
// 4th element triggers growth.
DCHECK_EQ(4, i);
for (; i <= 5; i++) {
- dict = SeededNumberDictionary::Add(dict, i, value, details);
+ dict = NumberDictionary::Add(dict, i, value, details);
CHECK_EQ(8, dict->Capacity());
}
// 6th element triggers growth.
DCHECK_EQ(6, i);
for (; i <= 11; i++) {
- dict = SeededNumberDictionary::Add(dict, i, value, details);
+ dict = NumberDictionary::Add(dict, i, value, details);
CHECK_EQ(16, dict->Capacity());
}
// 12th element triggers growth.
DCHECK_EQ(12, i);
for (; i <= 21; i++) {
- dict = SeededNumberDictionary::Add(dict, i, value, details);
+ dict = NumberDictionary::Add(dict, i, value, details);
CHECK_EQ(32, dict->Capacity());
}
// 22nd element triggers growth.
DCHECK_EQ(22, i);
for (; i <= 43; i++) {
- dict = SeededNumberDictionary::Add(dict, i, value, details);
+ dict = NumberDictionary::Add(dict, i, value, details);
CHECK_EQ(64, dict->Capacity());
}
// 44th element triggers growth.
DCHECK_EQ(44, i);
for (; i <= 50; i++) {
- dict = SeededNumberDictionary::Add(dict, i, value, details);
+ dict = NumberDictionary::Add(dict, i, value, details);
CHECK_EQ(128, dict->Capacity());
}
// If we grow by larger chunks, the next (sufficiently big) power of 2 is
// chosen as the capacity.
- dict = SeededNumberDictionary::New(isolate(), 1);
- dict = SeededNumberDictionary::EnsureCapacity(dict, 65);
+ dict = NumberDictionary::New(isolate(), 1);
+ dict = NumberDictionary::EnsureCapacity(dict, 65);
CHECK_EQ(128, dict->Capacity());
- dict = SeededNumberDictionary::New(isolate(), 1);
- dict = SeededNumberDictionary::EnsureCapacity(dict, 30);
+ dict = NumberDictionary::New(isolate(), 1);
+ dict = NumberDictionary::EnsureCapacity(dict, 30);
CHECK_EQ(64, dict->Capacity());
}
diff --git a/deps/v8/test/unittests/parser/ast-value-unittest.cc b/deps/v8/test/unittests/parser/ast-value-unittest.cc
new file mode 100644
index 0000000000..2b7a227e0f
--- /dev/null
+++ b/deps/v8/test/unittests/parser/ast-value-unittest.cc
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/heap/heap-inl.h"
+#include "src/isolate-inl.h"
+#include "src/zone/zone.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+class AstValueTest : public TestWithIsolateAndZone {
+ protected:
+ AstValueTest()
+ : ast_value_factory_(zone(), i_isolate()->ast_string_constants(),
+ i_isolate()->heap()->HashSeed()),
+ ast_node_factory_(&ast_value_factory_, zone()) {}
+
+ Literal* NewBigInt(const char* str) {
+ return ast_node_factory_.NewBigIntLiteral(AstBigInt(str),
+ kNoSourcePosition);
+ }
+
+ AstValueFactory ast_value_factory_;
+ AstNodeFactory ast_node_factory_;
+};
+
+TEST_F(AstValueTest, BigIntToBooleanIsTrue) {
+ EXPECT_FALSE(NewBigInt("0")->ToBooleanIsTrue());
+ EXPECT_FALSE(NewBigInt("0b0")->ToBooleanIsTrue());
+ EXPECT_FALSE(NewBigInt("0o0")->ToBooleanIsTrue());
+ EXPECT_FALSE(NewBigInt("0x0")->ToBooleanIsTrue());
+ EXPECT_FALSE(NewBigInt("0b000")->ToBooleanIsTrue());
+ EXPECT_FALSE(NewBigInt("0o00000")->ToBooleanIsTrue());
+ EXPECT_FALSE(NewBigInt("0x000000000")->ToBooleanIsTrue());
+
+ EXPECT_TRUE(NewBigInt("3")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0b1")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0o6")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0xa")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0b0000001")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0o00005000")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0x0000d00c0")->ToBooleanIsTrue());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
index 26f5cd5ab3..37dda73822 100644
--- a/deps/v8/test/unittests/parser/preparser-unittest.cc
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-class PreParserTest : public TestWithContext {
+class PreParserTest : public TestWithNativeContext {
public:
PreParserTest() {}
@@ -22,16 +22,12 @@ class PreParserTest : public TestWithContext {
TEST_F(PreParserTest, LazyFunctionLength) {
const char* script_source = "function lazy(a, b, c) { } lazy";
- Handle<Object> lazy_object = test::RunJS(isolate(), script_source);
+ Handle<JSFunction> lazy_function = RunJS<JSFunction>(script_source);
- Handle<SharedFunctionInfo> shared(
- Handle<JSFunction>::cast(lazy_object)->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared(lazy_function->shared());
CHECK_EQ(shared->length(), SharedFunctionInfo::kInvalidLength);
- const char* get_length_source = "lazy.length";
-
- Handle<Object> length = test::RunJS(isolate(), get_length_source);
- CHECK(length->IsSmi());
+ Handle<Smi> length = RunJS<Smi>("lazy.length");
int32_t value;
CHECK(length->ToInt32(&value));
CHECK_EQ(3, value);
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 55217ae2bb..f353e83ecf 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -11,27 +11,24 @@ namespace {
class DefaultPlatformEnvironment final : public ::testing::Environment {
public:
- DefaultPlatformEnvironment() : platform_(NULL) {}
+ DefaultPlatformEnvironment() {}
void SetUp() override {
- EXPECT_EQ(NULL, platform_);
- platform_ = v8::platform::CreateDefaultPlatform(
+ platform_ = v8::platform::NewDefaultPlatform(
0, v8::platform::IdleTaskSupport::kEnabled);
- ASSERT_TRUE(platform_ != NULL);
- v8::V8::InitializePlatform(platform_);
+ ASSERT_TRUE(platform_.get() != NULL);
+ v8::V8::InitializePlatform(platform_.get());
ASSERT_TRUE(v8::V8::Initialize());
}
void TearDown() override {
- ASSERT_TRUE(platform_ != NULL);
+ ASSERT_TRUE(platform_.get() != NULL);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform_;
- platform_ = NULL;
}
private:
- v8::Platform* platform_;
+ std::unique_ptr<v8::Platform> platform_;
};
} // namespace
diff --git a/deps/v8/test/unittests/source-position-table-unittest.cc b/deps/v8/test/unittests/source-position-table-unittest.cc
index 680e1be4c7..1ad6dec006 100644
--- a/deps/v8/test/unittests/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/source-position-table-unittest.cc
@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class SourcePositionTableTest : public TestWithIsolateAndZone {
+class SourcePositionTableTest : public TestWithIsolate {
public:
SourcePositionTableTest() {}
~SourcePositionTableTest() override {}
@@ -28,19 +28,18 @@ static int offsets[] = {0, 1, 2, 3, 4, 30, 31, 32,
129, 250, 1000, 9999, 12000, 31415926};
TEST_F(SourcePositionTableTest, EncodeStatement) {
- SourcePositionTableBuilder builder(zone());
+ SourcePositionTableBuilder builder;
for (size_t i = 0; i < arraysize(offsets); i++) {
builder.AddPosition(offsets[i], toPos(offsets[i]), true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
// (Also below.)
- CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
- .is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate()).is_null());
}
TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
- SourcePositionTableBuilder builder(zone());
+ SourcePositionTableBuilder builder;
for (size_t i = 0; i < arraysize(offsets); i++) {
builder.AddPosition(offsets[i], toPos(offsets[i]), true);
builder.AddPosition(offsets[i], toPos(offsets[i] + 1), true);
@@ -48,21 +47,19 @@ TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
// To test correctness, we rely on the assertions in ToSourcePositionTable().
// (Also below.)
- CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
- .is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate()).is_null());
}
TEST_F(SourcePositionTableTest, EncodeExpression) {
- SourcePositionTableBuilder builder(zone());
+ SourcePositionTableBuilder builder;
for (size_t i = 0; i < arraysize(offsets); i++) {
builder.AddPosition(offsets[i], toPos(offsets[i]), false);
}
- CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
- .is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate()).is_null());
}
TEST_F(SourcePositionTableTest, EncodeAscending) {
- SourcePositionTableBuilder builder(zone());
+ SourcePositionTableBuilder builder;
int code_offset = 0;
int source_position = 0;
@@ -87,8 +84,7 @@ TEST_F(SourcePositionTableTest, EncodeAscending) {
}
}
- CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
- .is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate()).is_null());
}
} // namespace interpreter
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 43fccc3567..46600c662d 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -15,17 +15,6 @@ namespace v8 {
namespace internal {
namespace test {
-Handle<Object> RunJS(v8::Isolate* isolate, const char* script) {
- return Utils::OpenHandle(
- *v8::Script::Compile(
- isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, script, v8::NewStringType::kNormal)
- .ToLocalChecked())
- .ToLocalChecked()
- ->Run(isolate->GetCurrentContext())
- .ToLocalChecked());
-}
-
Handle<String> CreateSource(Isolate* isolate,
ExternalOneByteString::Resource* maybe_resource) {
static const char test_script[] = "(x) { x*x; }";
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index a022a0b998..223b22e38e 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -40,7 +40,6 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
DISALLOW_COPY_AND_ASSIGN(ScriptResource);
};
-Handle<Object> RunJS(v8::Isolate* isolate, const char* script);
Handle<String> CreateSource(
Isolate* isolate,
v8::String::ExternalOneByteStringResource* maybe_resource);
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index f433926e53..354d1b7d2d 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -5,6 +5,8 @@
#include "test/unittests/test-utils.h"
#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+#include "src/api.h"
#include "src/base/platform/time.h"
#include "src/flags.h"
#include "src/isolate.h"
@@ -50,6 +52,15 @@ void TestWithIsolate::TearDownTestCase() {
Test::TearDownTestCase();
}
+Local<Value> TestWithIsolate::RunJS(const char* source) {
+ Local<Script> script =
+ v8::Script::Compile(
+ isolate()->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate(), source, v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked();
+ return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
+}
TestWithContext::TestWithContext()
: context_(Context::New(isolate())), context_scope_(context_) {}
@@ -57,25 +68,17 @@ TestWithContext::TestWithContext()
TestWithContext::~TestWithContext() {}
-
-namespace base {
-namespace {
-
-inline int64_t GetRandomSeedFromFlag(int random_seed) {
- return random_seed ? random_seed : TimeTicks::Now().ToInternalValue();
+void TestWithContext::SetGlobalProperty(const char* name,
+ v8::Local<v8::Value> value) {
+ v8::Local<v8::String> property_name =
+ v8::String::NewFromUtf8(v8_isolate(), name, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ CHECK(v8_context()
+ ->Global()
+ ->Set(v8_context(), property_name, value)
+ .FromJust());
}
-} // namespace
-
-TestWithRandomNumberGenerator::TestWithRandomNumberGenerator()
- : rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
-
-
-TestWithRandomNumberGenerator::~TestWithRandomNumberGenerator() {}
-
-} // namespace base
-
-
namespace internal {
TestWithIsolate::~TestWithIsolate() {}
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 3d832e6500..17a5eb7c21 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,8 +8,12 @@
#include <vector>
#include "include/v8.h"
+#include "src/api.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
#include "testing/gtest-support.h"
@@ -18,64 +22,54 @@ namespace v8 {
class ArrayBufferAllocator;
-
+// Use v8::internal::TestWithIsolate if you are testing internals,
+// aka. directly work with Handles.
class TestWithIsolate : public virtual ::testing::Test {
public:
TestWithIsolate();
virtual ~TestWithIsolate();
- Isolate* isolate() const { return isolate_; }
+ v8::Isolate* isolate() const { return v8_isolate(); }
+
+ v8::Isolate* v8_isolate() const { return isolate_; }
v8::internal::Isolate* i_isolate() const {
return reinterpret_cast<v8::internal::Isolate*>(isolate());
}
+ Local<Value> RunJS(const char* source);
+
static void SetUpTestCase();
static void TearDownTestCase();
private:
static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
- static Isolate* isolate_;
- Isolate::Scope isolate_scope_;
- HandleScope handle_scope_;
+ static v8::Isolate* isolate_;
+ v8::Isolate::Scope isolate_scope_;
+ v8::HandleScope handle_scope_;
DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
};
-
-class TestWithContext : public virtual TestWithIsolate {
+// Use v8::internal::TestWithNativeContext if you are testing internals,
+// aka. directly work with Handles.
+class TestWithContext : public virtual v8::TestWithIsolate {
public:
TestWithContext();
virtual ~TestWithContext();
- const Local<Context>& context() const { return context_; }
+ const Local<Context>& context() const { return v8_context(); }
+ const Local<Context>& v8_context() const { return context_; }
+
+ void SetGlobalProperty(const char* name, v8::Local<v8::Value> value);
private:
Local<Context> context_;
- Context::Scope context_scope_;
+ v8::Context::Scope context_scope_;
DISALLOW_COPY_AND_ASSIGN(TestWithContext);
};
-
-namespace base {
-
-class TestWithRandomNumberGenerator : public ::testing::Test {
- public:
- TestWithRandomNumberGenerator();
- virtual ~TestWithRandomNumberGenerator();
-
- RandomNumberGenerator* rng() { return &rng_; }
-
- private:
- RandomNumberGenerator rng_;
-
- DISALLOW_COPY_AND_ASSIGN(TestWithRandomNumberGenerator);
-};
-
-} // namespace base
-
-
namespace internal {
// Forward declarations.
@@ -88,8 +82,12 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
virtual ~TestWithIsolate();
Factory* factory() const;
- Isolate* isolate() const {
- return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
+ Isolate* isolate() const { return i_isolate(); }
+ template <typename T = Object>
+ Handle<T> RunJS(const char* source) {
+ Handle<Object> result =
+ Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
+ return Handle<T>::cast(result);
}
base::RandomNumberGenerator* random_number_generator() const;
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 38d071d71e..575f550871 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -36,6 +36,7 @@
'base/sys-info-unittest.cc',
'base/template-utils-unittest.cc',
'base/utils/random-number-generator-unittest.cc',
+ 'bigint-unittest.cc',
'cancelable-tasks-unittest.cc',
'char-predicates-unittest.cc',
"code-stub-assembler-unittest.cc",
@@ -138,6 +139,7 @@
'libplatform/worker-thread-unittest.cc',
'locked-queue-unittest.cc',
'object-unittest.cc',
+ 'parser/ast-value-unittest.cc',
'parser/preparser-unittest.cc',
'register-configuration-unittest.cc',
'run-all-unittests.cc',
@@ -147,6 +149,7 @@
'test-utils.h',
'test-utils.cc',
'unicode-unittest.cc',
+ 'utils-unittest.cc',
'value-serializer-unittest.cc',
'zone/segmentpool-unittest.cc',
'zone/zone-allocator-unittest.cc',
@@ -160,6 +163,7 @@
'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
'wasm/streaming-decoder-unittest.cc',
+ 'wasm/trap-handler-unittest.cc',
'wasm/wasm-macro-gen-unittest.cc',
'wasm/wasm-module-builder-unittest.cc',
'wasm/wasm-opcodes-unittest.cc',
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index d8b7c1102d..d439913ccf 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -3,8 +3,4 @@
# found in the LICENSE file.
[
-[ALWAYS, {
- # BUG(5677): Real timers are flaky
- 'RuntimeCallStatsTest.*': [SKIP],
-}], # ALWAYS
]
diff --git a/deps/v8/test/unittests/utils-unittest.cc b/deps/v8/test/unittests/utils-unittest.cc
new file mode 100644
index 0000000000..65088d873b
--- /dev/null
+++ b/deps/v8/test/unittests/utils-unittest.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class UtilsTest : public ::testing::Test {};
+
+typedef ::testing::Types<signed char, unsigned char,
+ short, // NOLINT(runtime/int)
+ unsigned short, // NOLINT(runtime/int)
+ int, unsigned int, long, // NOLINT(runtime/int)
+ unsigned long, // NOLINT(runtime/int)
+ long long, // NOLINT(runtime/int)
+ unsigned long long, // NOLINT(runtime/int)
+ int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t>
+ IntegerTypes;
+
+TYPED_TEST_CASE(UtilsTest, IntegerTypes);
+
+TYPED_TEST(UtilsTest, SaturateSub) {
+ TypeParam min = std::numeric_limits<TypeParam>::min();
+ TypeParam max = std::numeric_limits<TypeParam>::max();
+ EXPECT_EQ(SaturateSub<TypeParam>(min, 0), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(max, 0), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(max, min), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(min, max), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(min, max / 3), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(min + 1, 2), min);
+ if (std::numeric_limits<TypeParam>::is_signed) {
+ EXPECT_EQ(SaturateSub<TypeParam>(min, min), static_cast<TypeParam>(0));
+ EXPECT_EQ(SaturateSub<TypeParam>(0, min), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(max / 3, min), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(max / 5, min), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(min / 3, max), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(min / 9, max), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(max, min / 3), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(min, max / 3), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(max / 3 * 2, min / 2), max);
+ EXPECT_EQ(SaturateSub<TypeParam>(min / 3 * 2, max / 2), min);
+ } else {
+ EXPECT_EQ(SaturateSub<TypeParam>(min, min), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(0, min), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(0, max), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(max / 3, max), min);
+ EXPECT_EQ(SaturateSub<TypeParam>(max - 3, max), min);
+ }
+ TypeParam test_cases[] = {static_cast<TypeParam>(min / 23),
+ static_cast<TypeParam>(max / 3),
+ 63,
+ static_cast<TypeParam>(min / 6),
+ static_cast<TypeParam>(max / 55),
+ static_cast<TypeParam>(min / 2),
+ static_cast<TypeParam>(max / 2),
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 42};
+ TRACED_FOREACH(TypeParam, x, test_cases) {
+ TRACED_FOREACH(TypeParam, y, test_cases) {
+ if (std::numeric_limits<TypeParam>::is_signed) {
+ EXPECT_EQ(SaturateSub<TypeParam>(x, y), x - y);
+ } else {
+ EXPECT_EQ(SaturateSub<TypeParam>(x, y), y > x ? min : x - y);
+ }
+ }
+ }
+}
+
+TYPED_TEST(UtilsTest, SaturateAdd) {
+ TypeParam min = std::numeric_limits<TypeParam>::min();
+ TypeParam max = std::numeric_limits<TypeParam>::max();
+ EXPECT_EQ(SaturateAdd<TypeParam>(min, min), min);
+ EXPECT_EQ(SaturateAdd<TypeParam>(max, max), max);
+ EXPECT_EQ(SaturateAdd<TypeParam>(min, min / 3), min);
+ EXPECT_EQ(SaturateAdd<TypeParam>(max / 8 * 7, max / 3 * 2), max);
+ EXPECT_EQ(SaturateAdd<TypeParam>(min / 3 * 2, min / 8 * 7), min);
+ EXPECT_EQ(SaturateAdd<TypeParam>(max / 20 * 18, max / 25 * 18), max);
+ EXPECT_EQ(SaturateAdd<TypeParam>(min / 3 * 2, min / 3 * 2), min);
+ EXPECT_EQ(SaturateAdd<TypeParam>(max - 1, 2), max);
+ EXPECT_EQ(SaturateAdd<TypeParam>(max - 100, 101), max);
+ TypeParam test_cases[] = {static_cast<TypeParam>(min / 23),
+ static_cast<TypeParam>(max / 3),
+ 63,
+ static_cast<TypeParam>(min / 6),
+ static_cast<TypeParam>(max / 55),
+ static_cast<TypeParam>(min / 2),
+ static_cast<TypeParam>(max / 2),
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 42};
+ TRACED_FOREACH(TypeParam, x, test_cases) {
+ TRACED_FOREACH(TypeParam, y, test_cases) {
+ EXPECT_EQ(SaturateAdd<TypeParam>(x, y), x + y);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index 45e7784e15..b3e656e917 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -11,6 +11,7 @@
#include "src/api.h"
#include "src/base/build_config.h"
#include "src/objects-inl.h"
+#include "src/wasm/wasm-objects.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -1298,14 +1299,16 @@ TEST_F(ValueSerializerTest, DecodeDenseArrayContainingUndefined) {
}
TEST_F(ValueSerializerTest, RoundTripDate) {
- RoundTripTest("new Date(1e6)", [](Local<Value> value) {
+ RoundTripTest("new Date(1e6)", [this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Date.prototype"));
});
- RoundTripTest("new Date(Date.UTC(1867, 6, 1))", [](Local<Value> value) {
+ RoundTripTest("new Date(Date.UTC(1867, 6, 1))", [this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toISOString() === '1867-07-01T00:00:00.000Z'"));
});
RoundTripTest("new Date(NaN)", [](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
@@ -1323,18 +1326,19 @@ TEST_F(ValueSerializerTest, DecodeDate) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84,
0x2e, 0x41, 0x00},
- [](Local<Value> value) {
+ [this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Date.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89,
+ 0x87, 0xc2, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toISOString() === '1867-07-01T00:00:00.000Z'"));
});
- DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89, 0x87,
- 0xc2, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
- });
DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf8, 0x7f, 0x00},
[](Local<Value> value) {
@@ -1344,18 +1348,19 @@ TEST_F(ValueSerializerTest, DecodeDate) {
#else
DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x41, 0x2e, 0x84, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00},
- [](Local<Value> value) {
+ [this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Date.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0xc2, 0x87, 0x89, 0x27, 0x45, 0x20,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toISOString() === '1867-07-01T00:00:00.000Z'"));
});
- DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x44, 0xc2, 0x87, 0x89, 0x27, 0x45, 0x20, 0x00,
- 0x00, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
- });
DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00},
[](Local<Value> value) {
@@ -1603,24 +1608,8 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
});
}
-// Tests that invalid flags are not accepted by the deserializer. In particular,
-// the dotAll flag ('s') is only valid when the corresponding flag is enabled.
+// Tests that invalid flags are not accepted by the deserializer.
TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
- i::FLAG_harmony_regexp_dotall = false;
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x1f},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === RegExp.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/foo/gimuy'"));
- });
- InvalidDecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x3f});
- InvalidDecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x7f});
-
- i::FLAG_harmony_regexp_dotall = true;
DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x1f},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
@@ -2233,21 +2222,20 @@ TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
class ValueSerializerTestWithSharedArrayBufferTransfer
: public ValueSerializerTest {
protected:
- static const size_t kTestByteLength = 4;
-
ValueSerializerTestWithSharedArrayBufferTransfer()
- : serializer_delegate_(this) {
- const uint8_t data[kTestByteLength] = {0x00, 0x01, 0x80, 0xff};
- memcpy(data_, data, kTestByteLength);
+ : serializer_delegate_(this) {}
+
+ void InitializeData(const std::vector<uint8_t>& data) {
+ data_ = data;
{
Context::Scope scope(serialization_context());
input_buffer_ =
- SharedArrayBuffer::New(isolate(), &data_, kTestByteLength);
+ SharedArrayBuffer::New(isolate(), data_.data(), data_.size());
}
{
Context::Scope scope(deserialization_context());
output_buffer_ =
- SharedArrayBuffer::New(isolate(), &data_, kTestByteLength);
+ SharedArrayBuffer::New(isolate(), data_.data(), data_.size());
}
}
@@ -2305,7 +2293,7 @@ class ValueSerializerTestWithSharedArrayBufferTransfer
private:
static bool flag_was_enabled_;
- uint8_t data_[kTestByteLength];
+ std::vector<uint8_t> data_;
Local<SharedArrayBuffer> input_buffer_;
Local<SharedArrayBuffer> output_buffer_;
};
@@ -2315,6 +2303,8 @@ bool ValueSerializerTestWithSharedArrayBufferTransfer::flag_was_enabled_ =
TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
RoundTripSharedArrayBufferTransfer) {
+ InitializeData({0x00, 0x01, 0x80, 0xff});
+
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
.WillRepeatedly(Return(Just(0U)));
@@ -2350,6 +2340,40 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
});
}
+TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
+ RoundTripWebAssemblyMemory) {
+ bool flag_was_enabled = i::FLAG_experimental_wasm_threads;
+ i::FLAG_experimental_wasm_threads = true;
+
+ std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xff};
+ data.resize(65536);
+ InitializeData(data);
+
+ EXPECT_CALL(serializer_delegate_,
+ GetSharedArrayBufferId(isolate(), input_buffer()))
+ .WillRepeatedly(Return(Just(0U)));
+
+ RoundTripTest(
+ [this]() -> Local<Value> {
+ const int32_t kMaxPages = 1;
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(*input_buffer());
+ return Utils::Convert<i::WasmMemoryObject, Value>(
+ i::WasmMemoryObject::New(i_isolate, obj, kMaxPages));
+ },
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result instanceof WebAssembly.Memory"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.buffer.byteLength === 65536"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("new Uint8Array(result.buffer, 0, "
+ "4).toString() === '0,1,128,255'"));
+ });
+
+ i::FLAG_experimental_wasm_threads = flag_was_enabled;
+}
+
TEST_F(ValueSerializerTest, UnsupportedHostObject) {
InvalidEncodeTest("new ExampleHostObject()");
InvalidEncodeTest("({ a: new ExampleHostObject() })");
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 0f11933383..24606a43fd 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -19,50 +19,54 @@ class DecoderTest : public TestWithZone {
Decoder decoder;
};
-#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(static_cast<uint32_t>(expected), \
- decoder.read_u32v<true>(decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ( \
+ static_cast<uint32_t>(expected), \
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i32v<true>(decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(expected, decoder.consume_i32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(expected, decoder.read_i32v<Decoder::kValidate>(decoder.start(), \
+ &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(expected, decoder.consume_i32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(static_cast<uint64_t>(expected), \
- decoder.read_u64v<false>(decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ( \
+ static_cast<uint64_t>(expected), \
+ decoder.read_u64v<Decoder::kValidate>(decoder.start(), &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
-#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i64v<false>(decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(expected, decoder.read_i64v<Decoder::kValidate>(decoder.start(), \
+ &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
TEST_F(DecoderTest, ReadU32v_OneByte) {
@@ -374,7 +378,7 @@ TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
unsigned length = 0;
decoder.Reset(data, data);
- decoder.read_u32v<true>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(0u, length);
EXPECT_FALSE(decoder.ok());
}
@@ -384,7 +388,7 @@ TEST_F(DecoderTest, ReadU32v_off_end2) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<true>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(i, length);
EXPECT_FALSE(decoder.ok());
}
@@ -395,7 +399,7 @@ TEST_F(DecoderTest, ReadU32v_off_end3) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<true>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(i, length);
EXPECT_FALSE(decoder.ok());
}
@@ -406,7 +410,7 @@ TEST_F(DecoderTest, ReadU32v_off_end4) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<true>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(i, length);
EXPECT_FALSE(decoder.ok());
}
@@ -417,7 +421,7 @@ TEST_F(DecoderTest, ReadU32v_off_end5) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<true>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(i, length);
EXPECT_FALSE(decoder.ok());
}
@@ -429,7 +433,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
data[4] = static_cast<byte>(i << 4);
unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u32v<true>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(5u, length);
EXPECT_FALSE(decoder.ok());
}
@@ -440,7 +444,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<true>(decoder.start(), &length);
+ decoder.read_i32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(5u, length);
EXPECT_TRUE(decoder.ok());
}
@@ -450,7 +454,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<true>(decoder.start(), &length);
+ decoder.read_i32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(5u, length);
EXPECT_FALSE(decoder.ok());
}
@@ -485,7 +489,7 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
- uint32_t result = decoder.read_u32v<true>(data, &rlen);
+ uint32_t result = decoder.read_u32v<Decoder::kValidate>(data, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -541,7 +545,7 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned length;
- uint64_t result = decoder.read_u64v<true>(data, &length);
+ uint64_t result = decoder.read_u64v<Decoder::kValidate>(data, &length);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -582,7 +586,7 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
- uint64_t result = decoder.read_u64v<true>(data, &rlen);
+ uint64_t result = decoder.read_u64v<Decoder::kValidate>(data, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -624,7 +628,7 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
- int64_t result = decoder.read_i64v<true>(data, &rlen);
+ int64_t result = decoder.read_i64v<Decoder::kValidate>(data, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -643,7 +647,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
data[9] = static_cast<byte>(i << 1);
unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u64v<true>(decoder.start(), &length);
+ decoder.read_u64v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(10u, length);
EXPECT_FALSE(decoder.ok());
}
@@ -654,7 +658,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<true>(decoder.start(), &length);
+ decoder.read_i64v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(10u, length);
EXPECT_TRUE(decoder.ok());
}
@@ -664,7 +668,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<true>(decoder.start(), &length);
+ decoder.read_i64v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(10u, length);
EXPECT_FALSE(decoder.ok());
}
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index bda1073281..d02dca36be 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -198,12 +198,12 @@ class TestModuleBuilder {
}
byte AddGlobal(ValueType type, bool mutability = true) {
mod.globals.push_back({type, mutability, WasmInitExpr(), 0, false, false});
- CHECK(mod.globals.size() <= kMaxByteSizedLeb128);
+ CHECK_LE(mod.globals.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.globals.size() - 1);
}
byte AddSignature(FunctionSig* sig) {
mod.signatures.push_back(sig);
- CHECK(mod.signatures.size() <= kMaxByteSizedLeb128);
+ CHECK_LE(mod.signatures.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.signatures.size() - 1);
}
byte AddFunction(FunctionSig* sig) {
@@ -214,7 +214,7 @@ class TestModuleBuilder {
{0, 0}, // code
false, // import
false}); // export
- CHECK(mod.functions.size() <= kMaxByteSizedLeb128);
+ CHECK_LE(mod.functions.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.functions.size() - 1);
}
byte AddImport(FunctionSig* sig) {
@@ -224,7 +224,7 @@ class TestModuleBuilder {
}
byte AddException(WasmExceptionSig* sig) {
mod.exceptions.emplace_back(sig);
- CHECK(mod.signatures.size() <= kMaxByteSizedLeb128);
+ CHECK_LE(mod.signatures.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.exceptions.size() - 1);
}
@@ -2387,45 +2387,227 @@ TEST_F(FunctionBodyDecoderTest, TryCatch) {
TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
EXPERIMENTAL_FLAG_SCOPE(mv);
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1)),
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f0 = builder.AddSignature(sigs.ii_v());
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_NOP), kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0)), kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)),
+ kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ kExprF32Add);
}
TEST_F(FunctionBodyDecoderTest, MultiValBlock2) {
EXPERIMENTAL_FLAG_SCOPE(mv);
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1)),
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f0 = builder.AddSignature(sigs.ii_v());
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_F32_ADD(WASM_NOP, WASM_NOP));
+}
+
+TEST_F(FunctionBodyDecoderTest, MultiValBlockBr) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f0 = builder.AddSignature(sigs.ii_v());
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_BR(0)),
+ kExprI32Add);
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_BR(0)),
+ kExprI32Add);
}
-TEST_F(FunctionBodyDecoderTest, MultiValBlockBr1) {
+TEST_F(FunctionBodyDecoderTest, MultiValLoop1) {
EXPERIMENTAL_FLAG_SCOPE(mv);
- EXPECT_FAILURE(
- i_ii, WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0), WASM_BR(0)),
- kExprI32Add);
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1), WASM_BR(0)),
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f0 = builder.AddSignature(sigs.ii_v());
+ EXPECT_VERIFIES(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_NOP), kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0)), kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)),
+ kExprI32Add);
+ EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ kExprF32Add);
}
-TEST_F(FunctionBodyDecoderTest, MultiValIf1) {
+TEST_F(FunctionBodyDecoderTest, MultiValIf) {
EXPERIMENTAL_FLAG_SCOPE(mv);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f0 = builder.AddSignature(sigs.ii_v());
+ EXPECT_VERIFIES(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add);
EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP),
kExprI32Add);
- EXPECT_FAILURE(i_ii,
- WASM_IF_ELSE_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_SEQ(WASM_GET_LOCAL(1))),
- kExprI32Add);
- EXPECT_VERIFIES(
- i_ii, WASM_IF_ELSE_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_NOP,
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_NOP),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_GET_LOCAL(1)),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0))),
kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(1))),
+ kExprI32Add);
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprF32Add);
+}
+
+TEST_F(FunctionBodyDecoderTest, BlockParam) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f1 = builder.AddSignature(sigs.i_i());
+ byte f2 = builder.AddSignature(sigs.i_ii());
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_BLOCK_X(f1, WASM_GET_LOCAL(1),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_BLOCK_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_BLOCK_X(f1, WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f1, WASM_NOP),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f1, WASM_GET_LOCAL(0)),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
+ WASM_BLOCK_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
+ WASM_BLOCK_X(f1, WASM_F32_NEG(WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+}
+
+TEST_F(FunctionBodyDecoderTest, LoopParam) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f1 = builder.AddSignature(sigs.i_i());
+ byte f2 = builder.AddSignature(sigs.i_ii());
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_GET_LOCAL(1),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f1, WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_ii, WASM_LOOP_X(f1, WASM_NOP),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_LOOP_X(f1, WASM_GET_LOCAL(0)),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_F32_NEG(WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+}
+
+TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f1 = builder.AddSignature(sigs.i_i());
+ byte f2 = builder.AddSignature(sigs.i_ii());
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_BR(0)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_BRV(0, WASM_GET_LOCAL(1))));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f2, WASM_BR(0)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_BLOCK_X(f1, WASM_BR(1))));
+ EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_BLOCK(WASM_BR(1))),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f2, WASM_BLOCK_X(f1, WASM_BR(1))),
+ WASM_RETURN1(WASM_GET_LOCAL(0)));
+}
+
+TEST_F(FunctionBodyDecoderTest, IfParam) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte f1 = builder.AddSignature(sigs.i_i());
+ byte f2 = builder.AddSignature(sigs.i_ii());
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_IF_X(f1, WASM_GET_LOCAL(0),
+ WASM_I32_ADD(WASM_NOP, WASM_GET_LOCAL(1))));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
+ WASM_IF_ELSE_X(f1, WASM_GET_LOCAL(0),
+ WASM_I32_ADD(WASM_NOP, WASM_GET_LOCAL(1)),
+ WASM_I32_EQZ(WASM_NOP)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_IF_ELSE_X(f2, WASM_GET_LOCAL(0),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP),
+ WASM_I32_MUL(WASM_NOP, WASM_NOP)));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_IF_X(f1, WASM_GET_LOCAL(0), WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_IF_ELSE_X(f1, WASM_GET_LOCAL(0),
+ WASM_NOP, WASM_I32_EQZ(WASM_NOP)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
}
TEST_F(FunctionBodyDecoderTest, Regression709741) {
@@ -2451,15 +2633,15 @@ class BranchTableIteratorTest : public TestWithZone {
BranchTableIteratorTest() : TestWithZone() {}
void CheckBrTableSize(const byte* start, const byte* end) {
Decoder decoder(start, end);
- BranchTableOperand<true> operand(&decoder, start);
- BranchTableIterator<true> iterator(&decoder, operand);
+ BranchTableOperand<Decoder::kValidate> operand(&decoder, start);
+ BranchTableIterator<Decoder::kValidate> iterator(&decoder, operand);
EXPECT_EQ(end - start - 1u, iterator.length());
EXPECT_TRUE(decoder.ok());
}
void CheckBrTableError(const byte* start, const byte* end) {
Decoder decoder(start, end);
- BranchTableOperand<true> operand(&decoder, start);
- BranchTableIterator<true> iterator(&decoder, operand);
+ BranchTableOperand<Decoder::kValidate> operand(&decoder, start);
+ BranchTableIterator<Decoder::kValidate> iterator(&decoder, operand);
iterator.length();
EXPECT_FALSE(decoder.ok());
}
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index 474d49c1c5..ec9fd3efb3 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -88,19 +88,19 @@ TEST_F(LEBHelperTest, sizeof_i32v) {
}
}
-#define DECLARE_ENCODE_DECODE_CHECKER(ctype, name) \
- static void CheckEncodeDecode_##name(ctype val) { \
- static const int kSize = 16; \
- static byte buffer[kSize]; \
- byte* ptr = buffer; \
- LEBHelper::write_##name(&ptr, val); \
- EXPECT_EQ(LEBHelper::sizeof_##name(val), \
- static_cast<size_t>(ptr - buffer)); \
- Decoder decoder(buffer, buffer + kSize); \
- unsigned length = 0; \
- ctype result = decoder.read_##name<false>(buffer, &length); \
- EXPECT_EQ(val, result); \
- EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
+#define DECLARE_ENCODE_DECODE_CHECKER(ctype, name) \
+ static void CheckEncodeDecode_##name(ctype val) { \
+ static const int kSize = 16; \
+ static byte buffer[kSize]; \
+ byte* ptr = buffer; \
+ LEBHelper::write_##name(&ptr, val); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), \
+ static_cast<size_t>(ptr - buffer)); \
+ Decoder decoder(buffer, buffer + kSize); \
+ unsigned length = 0; \
+ ctype result = decoder.read_##name<Decoder::kNoValidate>(buffer, &length); \
+ EXPECT_EQ(val, result); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
}
DECLARE_ENCODE_DECODE_CHECKER(int32_t, i32v)
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 2e76d374d3..ae98bd9a70 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -494,7 +494,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
0, // mutability
SECTION(Memory, 4),
ENTRY_COUNT(1),
- kResizableMaximumFlag,
+ kHasMaximumFlag,
28,
28,
SECTION(Data, 9),
@@ -527,7 +527,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithMutableImportedGlobal) {
1, // mutability
SECTION(Memory, 4),
ENTRY_COUNT(1),
- kResizableMaximumFlag,
+ kHasMaximumFlag,
28,
28,
SECTION(Data, 9),
@@ -546,7 +546,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableGlobal) {
const byte data[] = {
SECTION(Memory, 4),
ENTRY_COUNT(1),
- kResizableMaximumFlag,
+ kHasMaximumFlag,
28,
28,
SECTION(Global, 8), // --
@@ -571,7 +571,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const byte data[] = {
SECTION(Memory, 4),
ENTRY_COUNT(1),
- kResizableMaximumFlag,
+ kHasMaximumFlag,
28,
28,
SECTION(Data, 11),
@@ -610,7 +610,7 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
const byte data[] = {
SECTION(Memory, 4),
ENTRY_COUNT(1),
- kResizableMaximumFlag,
+ kHasMaximumFlag,
28,
28,
SECTION(Data, 29),
@@ -678,15 +678,13 @@ TEST_F(WasmModuleVerifyTest, DataWithoutMemory) {
TEST_F(WasmModuleVerifyTest, MaxMaximumMemorySize) {
{
const byte data[] = {
- SECTION(Memory, 6), ENTRY_COUNT(1), kResizableMaximumFlag, 0,
- U32V_3(65536),
+ SECTION(Memory, 6), ENTRY_COUNT(1), kHasMaximumFlag, 0, U32V_3(65536),
};
EXPECT_VERIFIES(data);
}
{
const byte data[] = {
- SECTION(Memory, 6), ENTRY_COUNT(1), kResizableMaximumFlag, 0,
- U32V_3(65537),
+ SECTION(Memory, 6), ENTRY_COUNT(1), kHasMaximumFlag, 0, U32V_3(65537),
};
EXPECT_FAILURE(data);
}
@@ -696,7 +694,7 @@ TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
const byte data[] = {
SECTION(Memory, 4),
ENTRY_COUNT(1),
- kResizableMaximumFlag,
+ kHasMaximumFlag,
28,
28,
SECTION(Data, 11),
@@ -715,7 +713,7 @@ TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
TEST_F(WasmModuleVerifyTest, DataSegmentEndOverflow) {
const byte data[] = {
SECTION(Memory, 4), // memory section
- ENTRY_COUNT(1), kResizableMaximumFlag, 28, 28,
+ ENTRY_COUNT(1), kHasMaximumFlag, 28, 28,
SECTION(Data, 10), // data section
ENTRY_COUNT(1), // one entry
LINEAR_MEMORY_INDEX_0, // mem index
@@ -1505,6 +1503,31 @@ TEST_F(WasmModuleVerifyTest, Regression_738097) {
EXPECT_FAILURE(data);
}
+TEST_F(WasmModuleVerifyTest, FunctionBodySizeLimit) {
+ const uint32_t delta = 3;
+ for (uint32_t body_size = kV8MaxWasmFunctionSize - delta;
+ body_size < kV8MaxWasmFunctionSize + delta; body_size++) {
+ byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ kCodeSectionCode, // code section
+ U32V_5(1 + body_size + 5), // section size
+ 1, // # functions
+ U32V_5(body_size) // body size
+ };
+ size_t total = sizeof(data) + body_size;
+ byte* buffer = reinterpret_cast<byte*>(calloc(1, total));
+ memcpy(buffer, data, sizeof(data));
+ ModuleResult result = DecodeModule(buffer, buffer + total);
+ if (body_size <= kV8MaxWasmFunctionSize) {
+ EXPECT_TRUE(result.ok());
+ } else {
+ EXPECT_FALSE(result.ok());
+ }
+ free(buffer);
+ }
+}
+
TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
static const byte data[] = {
EMPTY_SIGNATURES_SECTION, // --
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index 2ed28125f0..41211ac960 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -461,6 +461,17 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
}
+TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0xd, // Section Length
+ 0x0, // Number of Functions
+ };
+ ExpectFailure(ArrayVector(data));
+}
+
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
const uint8_t data[] = {
U32_LE(kWasmMagic), // --
diff --git a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
new file mode 100644
index 0000000000..eb578647ad
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
@@ -0,0 +1,69 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/trap-handler/trap-handler.h"
+#include "include/v8.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_POSIX
+#include <setjmp.h>
+#include <signal.h>
+#endif
+
+namespace {
+
+#if V8_OS_POSIX
+
+void CrashOnPurpose() { *reinterpret_cast<volatile int*>(42); }
+
+// When using V8::RegisterDefaultSignalHandler, we save the old one to fall back
+// on if V8 doesn't handle the signal. This allows tools like ASan to register a
+// handler early on during the process startup and still generate stack traces
+// on failures.
+class SignalHandlerFallbackTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ struct sigaction action;
+ action.sa_sigaction = SignalHandler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &action, &old_segv_action_);
+ sigaction(SIGBUS, &action, &old_bus_action_);
+ }
+
+ virtual void TearDown() {
+ // be a good citizen and restore the old signal handler.
+ sigaction(SIGSEGV, &old_segv_action_, nullptr);
+ sigaction(SIGBUS, &old_bus_action_, nullptr);
+ }
+
+ static sigjmp_buf continuation_;
+
+ private:
+ static void SignalHandler(int signal, siginfo_t* info, void*) {
+ siglongjmp(continuation_, 1);
+ }
+ struct sigaction old_segv_action_;
+ struct sigaction old_bus_action_; // We get SIGBUS on Mac sometimes.
+};
+sigjmp_buf SignalHandlerFallbackTest::continuation_;
+
+TEST_F(SignalHandlerFallbackTest, DoTest) {
+ const int save_sigs = 1;
+ if (!sigsetjmp(continuation_, save_sigs)) {
+ v8::V8::RegisterDefaultSignalHandler();
+ CrashOnPurpose();
+ FAIL();
+ } else {
+ // Our signal handler ran.
+ v8::internal::trap_handler::RestoreOriginalSignalHandler();
+ SUCCEED();
+ return;
+ }
+ FAIL();
+}
+
+#endif
+
+} // namespace
diff --git a/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc b/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc
index 6e75e84b43..d0c9284f93 100644
--- a/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc
@@ -151,6 +151,241 @@ TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
}
+class WasmCodeManagerTest : public TestWithIsolate {
+ public:
+ using NativeModulePtr = std::unique_ptr<NativeModule>;
+ enum ModuleStyle : int { Fixed = 0, Growable = 1 };
+
+ const std::vector<ModuleStyle> styles() const {
+ return std::vector<ModuleStyle>({Fixed, Growable});
+ }
+ // We pretend all our modules have 10 functions and no imports, just so
+ // we can size up the code_table.
+ NativeModulePtr AllocFixedModule(WasmCodeManager* manager, size_t size) {
+ return manager->NewNativeModule(size, 10, 0, false);
+ }
+
+ NativeModulePtr AllocGrowableModule(WasmCodeManager* manager, size_t size) {
+ return manager->NewNativeModule(size, 10, 0, true);
+ }
+
+ NativeModulePtr AllocModule(WasmCodeManager* manager, size_t size,
+ ModuleStyle style) {
+ switch (style) {
+ case Fixed:
+ return AllocFixedModule(manager, size);
+ case Growable:
+ return AllocGrowableModule(manager, size);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
+ CodeDesc desc;
+ memset(reinterpret_cast<void*>(&desc), 0, sizeof(CodeDesc));
+ std::unique_ptr<byte[]> exec_buff(new byte[size]);
+ desc.buffer = exec_buff.get();
+ desc.instr_size = static_cast<int>(size);
+ return native_module->AddCode(desc, 0, index, 0, {}, false);
+ }
+
+ size_t page() const { return base::OS::AllocatePageSize(); }
+ v8::Isolate* v8_isolate() const {
+ return reinterpret_cast<v8::Isolate*>(isolate());
+ }
+};
+
+TEST_F(WasmCodeManagerTest, EmptyCase) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 0 * page());
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ NativeModulePtr native_module = AllocModule(&manager, 1 * page(), style);
+ CHECK(native_module);
+ WasmCode* code = AddCode(native_module.get(), 0, 10);
+ CHECK_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+ native_module.reset();
+ CHECK_EQ(0, manager.remaining_uncommitted());
+ }
+}
+
+TEST_F(WasmCodeManagerTest, AllocateAndGoOverLimit) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 1 * page());
+ CHECK_EQ(1 * page(), manager.remaining_uncommitted());
+ NativeModulePtr native_module = AllocModule(&manager, 1 * page(), style);
+ CHECK(native_module);
+ CHECK_EQ(1 * page(), manager.remaining_uncommitted());
+ uint32_t index = 0;
+ WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
+ CHECK_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ native_module.reset();
+ CHECK_EQ(1 * page(), manager.remaining_uncommitted());
+ }
+}
+
+TEST_F(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 1 * page());
+ NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
+ NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), style);
+ CHECK(nm1);
+ CHECK(nm2);
+ WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+ CHECK_NOT_NULL(code);
+ code = AddCode(nm2.get(), 0, 1 * page());
+ CHECK_NULL(code);
+ }
+}
+
+TEST_F(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
+ for (auto style : styles()) {
+ WasmCodeManager manager1(v8_isolate(), 1 * page());
+ WasmCodeManager manager2(v8_isolate(), 2 * page());
+ NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), style);
+ NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), style);
+ CHECK(nm1);
+ CHECK(nm2);
+ WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager1.remaining_uncommitted());
+ code = AddCode(nm2.get(), 0, 1 * page());
+ CHECK_NOT_NULL(code);
+ }
+}
+
+TEST_F(WasmCodeManagerTest, GrowingVsFixedModule) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 3 * page());
+ NativeModulePtr nm = AllocModule(&manager, 1 * page(), style);
+ WasmCode* code = AddCode(nm.get(), 0, 1 * page() + kCodeAlignment);
+ if (style == Fixed) {
+ CHECK_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 3 * page());
+ } else {
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 1 * page());
+ }
+ }
+}
+
+TEST_F(WasmCodeManagerTest, CommitIncrements) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 10 * page());
+ NativeModulePtr nm = AllocModule(&manager, 3 * page(), style);
+ WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 9 * page());
+ code = AddCode(nm.get(), 1, 2 * page());
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
+ code = AddCode(nm.get(), 2, page() - kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
+ }
+}
+
+TEST_F(WasmCodeManagerTest, Lookup) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 2 * page());
+
+ NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
+ NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), style);
+ WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
+ CHECK_EQ(nm1.get(), code1_0->owner());
+ WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
+ WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
+ CHECK_EQ(nm2.get(), code2_1->owner());
+
+ CHECK_EQ(0, code1_0->index());
+ CHECK_EQ(1, code1_1->index());
+ CHECK_EQ(0, code2_0->index());
+ CHECK_EQ(1, code2_1->index());
+
+ // we know the manager object is allocated here, so we shouldn't
+ // find any WasmCode* associated with that ptr.
+ WasmCode* not_found =
+ manager.LookupCode(reinterpret_cast<Address>(&manager));
+ CHECK_NULL(not_found);
+ WasmCode* found = manager.LookupCode(code1_0->instructions().start());
+ CHECK_EQ(found, code1_0);
+ found = manager.LookupCode(code2_1->instructions().start() +
+ (code2_1->instructions().size() / 2));
+ CHECK_EQ(found, code2_1);
+ found = manager.LookupCode(code2_1->instructions().start() +
+ code2_1->instructions().size() - 1);
+ CHECK_EQ(found, code2_1);
+ found = manager.LookupCode(code2_1->instructions().start() +
+ code2_1->instructions().size());
+ CHECK_NULL(found);
+ Address mid_code1_1 =
+ code1_1->instructions().start() + (code1_1->instructions().size() / 2);
+ CHECK_EQ(code1_1, manager.LookupCode(mid_code1_1));
+ nm1.reset();
+ CHECK_NULL(manager.LookupCode(mid_code1_1));
+ }
+}
+
+TEST_F(WasmCodeManagerTest, MultiManagerLookup) {
+ for (auto style : styles()) {
+ WasmCodeManager manager1(v8_isolate(), 2 * page());
+ WasmCodeManager manager2(v8_isolate(), 2 * page());
+
+ NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), style);
+ NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), style);
+
+ WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
+ CHECK_EQ(nm1.get(), code1_0->owner());
+ WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
+ WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
+ CHECK_EQ(nm2.get(), code2_1->owner());
+
+ CHECK_EQ(0, code1_0->index());
+ CHECK_EQ(1, code1_1->index());
+ CHECK_EQ(0, code2_0->index());
+ CHECK_EQ(1, code2_1->index());
+
+ CHECK_EQ(code1_0, manager1.LookupCode(code1_0->instructions().start()));
+ CHECK_NULL(manager2.LookupCode(code1_0->instructions().start()));
+ }
+}
+
+TEST_F(WasmCodeManagerTest, LookupWorksAfterRewrite) {
+ for (auto style : styles()) {
+ WasmCodeManager manager(v8_isolate(), 2 * page());
+
+ NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
+
+ WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
+ WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ CHECK_EQ(0, code0->index());
+ CHECK_EQ(1, code1->index());
+ CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
+ WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ CHECK_EQ(1, code1_1->index());
+ CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
+ CHECK_EQ(code1_1, manager.LookupCode(code1_1->instructions().start()));
+ }
+}
+
} // namespace wasm_heap_unittest
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index d3b06d076b..d1eae764e3 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -25,10 +25,10 @@ class WasmSpecTestsTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
- def GetFlagsForTestCase(self, testcase, context):
- flags = [] + context.mode_flags
- flags.append(os.path.join(self.root, testcase.path + self.suffix()))
- return testcase.flags + flags
+ def GetParametersForTestCase(self, testcase, context):
+ flags = testcase.flags + context.mode_flags
+ files = [os.path.join(self.root, testcase.path + self.suffix())]
+ return files, flags, {}
def GetSuite(name, root):
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 240b11b803..a0f2e20e1f 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-2c29039a7935576bb6d01f9b4b6c96b8861b5bbe \ No newline at end of file
+4ca2075a2ceb1c7b6e4d7b2d26d23fdb9998fd56 \ No newline at end of file
diff --git a/deps/v8/test/webkit/class-syntax-declaration-expected.txt b/deps/v8/test/webkit/class-syntax-declaration-expected.txt
index a424edfe90..c198f26914 100644
--- a/deps/v8/test/webkit/class-syntax-declaration-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-declaration-expected.txt
@@ -26,9 +26,9 @@ PASS class X { get constructor() {} } threw exception SyntaxError: Class constru
PASS class X { set constructor() {} } threw exception SyntaxError: Class constructor may not be an accessor.
PASS class X { constructor() {} static constructor() { return staticMethodValue; } } did not throw exception.
PASS class X { constructor() {} static constructor() { return staticMethodValue; } }; X.constructor() is staticMethodValue
-PASS class X { constructor() {} static prototype() {} } threw exception SyntaxError: Classes may not have static property named prototype.
-PASS class X { constructor() {} static get prototype() {} } threw exception SyntaxError: Classes may not have static property named prototype.
-PASS class X { constructor() {} static set prototype() {} } threw exception SyntaxError: Classes may not have static property named prototype.
+PASS class X { constructor() {} static prototype() {} } threw exception SyntaxError: Classes may not have a static property named 'prototype'.
+PASS class X { constructor() {} static get prototype() {} } threw exception SyntaxError: Classes may not have a static property named 'prototype'.
+PASS class X { constructor() {} static set prototype() {} } threw exception SyntaxError: Classes may not have a static property named 'prototype'.
PASS class X { constructor() {} prototype() { return instanceMethodValue; } } did not throw exception.
PASS class X { constructor() {} prototype() { return instanceMethodValue; } }; (new X).prototype() is instanceMethodValue
PASS class X { constructor() {} set foo(a) {} } did not throw exception.
diff --git a/deps/v8/test/webkit/class-syntax-declaration.js b/deps/v8/test/webkit/class-syntax-declaration.js
index 775a3353d0..bc6c31d312 100644
--- a/deps/v8/test/webkit/class-syntax-declaration.js
+++ b/deps/v8/test/webkit/class-syntax-declaration.js
@@ -64,9 +64,9 @@ shouldThrow("class X { set constructor() {} }", "'SyntaxError: Class constructor
shouldNotThrow("class X { constructor() {} static constructor() { return staticMethodValue; } }");
shouldBe("class X { constructor() {} static constructor() { return staticMethodValue; } }; X.constructor()", "staticMethodValue");
-shouldThrow("class X { constructor() {} static prototype() {} }", "'SyntaxError: Classes may not have static property named prototype'");
-shouldThrow("class X { constructor() {} static get prototype() {} }", "'SyntaxError: Classes may not have static property named prototype'");
-shouldThrow("class X { constructor() {} static set prototype() {} }", "'SyntaxError: Classes may not have static property named prototype'");
+shouldThrow("class X { constructor() {} static prototype() {} }", '"SyntaxError: Classes may not have a static property named \'prototype\'"');
+shouldThrow("class X { constructor() {} static get prototype() {} }", '"SyntaxError: Classes may not have a static property named \'prototype\'"');
+shouldThrow("class X { constructor() {} static set prototype() {} }", '"SyntaxError: Classes may not have a static property named \'prototype\'"');
shouldNotThrow("class X { constructor() {} prototype() { return instanceMethodValue; } }");
shouldBe("class X { constructor() {} prototype() { return instanceMethodValue; } }; (new X).prototype()", "instanceMethodValue");
diff --git a/deps/v8/test/webkit/class-syntax-expression-expected.txt b/deps/v8/test/webkit/class-syntax-expression-expected.txt
index 5bcaf002f8..acda6272f0 100644
--- a/deps/v8/test/webkit/class-syntax-expression-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-expression-expected.txt
@@ -24,9 +24,9 @@ PASS x = class { get constructor() {} } threw exception SyntaxError: Class const
PASS x = class { set constructor() {} } threw exception SyntaxError: Class constructor may not be an accessor.
PASS x = class { constructor() {} static constructor() { return staticMethodValue; } } did not throw exception.
PASS x = class { constructor() {} static constructor() { return staticMethodValue; } }; x.constructor() is staticMethodValue
-PASS x = class { constructor() {} static prototype() {} } threw exception SyntaxError: Classes may not have static property named prototype.
-PASS x = class { constructor() {} static get prototype() {} } threw exception SyntaxError: Classes may not have static property named prototype.
-PASS x = class { constructor() {} static set prototype() {} } threw exception SyntaxError: Classes may not have static property named prototype.
+PASS x = class { constructor() {} static prototype() {} } threw exception SyntaxError: Classes may not have a static property named 'prototype'.
+PASS x = class { constructor() {} static get prototype() {} } threw exception SyntaxError: Classes may not have a static property named 'prototype'.
+PASS x = class { constructor() {} static set prototype() {} } threw exception SyntaxError: Classes may not have a static property named 'prototype'.
PASS x = class { constructor() {} prototype() { return instanceMethodValue; } } did not throw exception.
PASS x = class { constructor() {} prototype() { return instanceMethodValue; } }; (new x).prototype() is instanceMethodValue
PASS x = class { constructor() {} set foo(a) {} } did not throw exception.
diff --git a/deps/v8/test/webkit/class-syntax-expression.js b/deps/v8/test/webkit/class-syntax-expression.js
index ab6dc0e49e..0cea5d70e4 100644
--- a/deps/v8/test/webkit/class-syntax-expression.js
+++ b/deps/v8/test/webkit/class-syntax-expression.js
@@ -62,9 +62,9 @@ shouldThrow("x = class { set constructor() {} }", "'SyntaxError: Class construct
shouldNotThrow("x = class { constructor() {} static constructor() { return staticMethodValue; } }");
shouldBe("x = class { constructor() {} static constructor() { return staticMethodValue; } }; x.constructor()", "staticMethodValue");
-shouldThrow("x = class { constructor() {} static prototype() {} }", "'SyntaxError: Classes may not have static property named prototype'");
-shouldThrow("x = class { constructor() {} static get prototype() {} }", "'SyntaxError: Classes may not have static property named prototype'");
-shouldThrow("x = class { constructor() {} static set prototype() {} }", "'SyntaxError: Classes may not have static property named prototype'");
+shouldThrow("x = class { constructor() {} static prototype() {} }", '"SyntaxError: Classes may not have a static property named \'prototype\'"');
+shouldThrow("x = class { constructor() {} static get prototype() {} }", '"SyntaxError: Classes may not have a static property named \'prototype\'"');
+shouldThrow("x = class { constructor() {} static set prototype() {} }", '"SyntaxError: Classes may not have a static property named \'prototype\'"');
shouldNotThrow("x = class { constructor() {} prototype() { return instanceMethodValue; } }");
shouldBe("x = class { constructor() {} prototype() { return instanceMethodValue; } }; (new x).prototype()", "instanceMethodValue");
diff --git a/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt b/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt
index 9e98e2608b..4ebd4cd97f 100644
--- a/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt
+++ b/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt
@@ -29,15 +29,15 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS Array.prototype.toString.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
PASS Array.prototype.toLocaleString.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
PASS Array.prototype.concat.call(undefined, []) threw exception TypeError: Array.prototype.concat called on null or undefined.
-PASS Array.prototype.join.call(undefined, []) threw exception TypeError: Array.prototype.join called on null or undefined.
-PASS Array.prototype.pop.call(undefined) threw exception TypeError: Array.prototype.pop called on null or undefined.
-PASS Array.prototype.push.call(undefined, {}) threw exception TypeError: Array.prototype.push called on null or undefined.
-PASS Array.prototype.reverse.call(undefined) threw exception TypeError: Array.prototype.reverse called on null or undefined.
-PASS Array.prototype.shift.call(undefined) threw exception TypeError: Array.prototype.shift called on null or undefined.
-PASS Array.prototype.slice.call(undefined, 0, 1) threw exception TypeError: Array.prototype.slice called on null or undefined.
-PASS Array.prototype.sort.call(undefined) threw exception TypeError: Array.prototype.sort called on null or undefined.
-PASS Array.prototype.splice.call(undefined, 0, 1) threw exception TypeError: Array.prototype.splice called on null or undefined.
-PASS Array.prototype.unshift.call(undefined, {}) threw exception TypeError: Array.prototype.unshift called on null or undefined.
+PASS Array.prototype.join.call(undefined, []) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.pop.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.push.call(undefined, {}) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.reverse.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.shift.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.slice.call(undefined, 0, 1) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.sort.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.splice.call(undefined, 0, 1) threw exception TypeError: Cannot convert undefined or null to object.
+PASS Array.prototype.unshift.call(undefined, {}) threw exception TypeError: Cannot convert undefined or null to object.
PASS Array.prototype.every.call(undefined, toString) threw exception TypeError: Array.prototype.every called on null or undefined.
PASS Array.prototype.forEach.call(undefined, toString) threw exception TypeError: Array.prototype.forEach called on null or undefined.
PASS Array.prototype.some.call(undefined, toString) threw exception TypeError: Array.prototype.some called on null or undefined.
diff --git a/deps/v8/test/webkit/fast/js/array-prototype-properties.js b/deps/v8/test/webkit/fast/js/array-prototype-properties.js
index 12f2f9c47d..79b9f81d30 100644
--- a/deps/v8/test/webkit/fast/js/array-prototype-properties.js
+++ b/deps/v8/test/webkit/fast/js/array-prototype-properties.js
@@ -22,7 +22,7 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
description(
-'This is a test case for <a https://bugs.webkit.org/show_bug.cgi?id=64679">bug 64679</a>.'
+'This is a test case for <a href="https://bugs.webkit.org/show_bug.cgi?id=64679">bug 64679</a>.'
);
// These calls pass undefined as this value, and as such should throw in toObject.
diff --git a/deps/v8/test/webkit/resources/JSON-stringify.js b/deps/v8/test/webkit/resources/JSON-stringify.js
index 24edafac09..8ea24c4fd9 100644
--- a/deps/v8/test/webkit/resources/JSON-stringify.js
+++ b/deps/v8/test/webkit/resources/JSON-stringify.js
@@ -475,7 +475,7 @@ function createTests() {
});
result.push(function (jsonObject){
var deepObject = {};
- for (var i = 0; i < 1024; i++)
+ for (var i = 0; i < 1000; i++)
deepObject = {next:deepObject};
return jsonObject.stringify(deepObject);
});
@@ -488,7 +488,7 @@ function createTests() {
result.push(function (jsonObject){
var depth = 0;
function toDeepVirtualJSONObject() {
- if (++depth >= 1024)
+ if (++depth >= 1000)
return {};
var r = {};
r.toJSON = toDeepVirtualJSONObject;
diff --git a/deps/v8/test/webkit/run-json-stringify-expected.txt b/deps/v8/test/webkit/run-json-stringify-expected.txt
index bbd9f2806b..f62adb07b2 100644
--- a/deps/v8/test/webkit/run-json-stringify-expected.txt
+++ b/deps/v8/test/webkit/run-json-stringify-expected.txt
@@ -499,7 +499,7 @@ function (jsonObject){
PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
var deepObject = {};
- for (var i = 0; i < 1024; i++)
+ for (var i = 0; i < 1000; i++)
deepObject = {next:deepObject};
return jsonObject.stringify(deepObject);
}
@@ -514,7 +514,7 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
var depth = 0;
function toDeepVirtualJSONObject() {
- if (++depth >= 1024)
+ if (++depth >= 1000)
return {};
var r = {};
r.toJSON = toDeepVirtualJSONObject;
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index c18120ca18..855a1327ba 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -62,9 +62,9 @@ class WebkitTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
- def GetFlagsForTestCase(self, testcase, context):
+ def GetParametersForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
- flags = [] + context.mode_flags
+ flags = testcase.flags + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += match.strip().split()
@@ -88,12 +88,11 @@ class WebkitTestSuite(testsuite.TestSuite):
files.append(testfilename)
files.append(os.path.join(self.root, "resources/standalone-post.js"))
- flags += files
+ all_files = list(files)
if context.isolates:
- flags.append("--isolate")
- flags += files
+ all_files += ["--isolate"] + files
- return testcase.flags + flags
+ return all_files, flags, {}
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
@@ -102,10 +101,11 @@ class WebkitTestSuite(testsuite.TestSuite):
# TODO(machenbach): Share with test/message/testcfg.py
def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output and Android output."""
+ """Ignore empty lines, valgrind output, Android output and trace
+ incremental marking output."""
if not string: return True
return (string.startswith("==") or string.startswith("**") or
- string.startswith("ANDROID") or
+ string.startswith("ANDROID") or "[IncrementalMarking]" in string or
# FIXME(machenbach): The test driver shouldn't try to use slow
# asserts if they weren't compiled. This fails in optdebug=2.
string == "Warning: unknown flag --enable-slow-asserts." or
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 322454ba17..0b6f5a5ff6 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -116,12 +116,6 @@
}], # 'gcov_coverage'
##############################################################################
-['variant == stress_asm_wasm', {
- # Stack size too large with asm.js validator.
- 'fast/js/excessive-comma-usage': [SKIP],
-}], # variant == stress_asm_wasm
-
-##############################################################################
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
diff --git a/deps/v8/third_party/colorama/LICENSE b/deps/v8/third_party/colorama/LICENSE
new file mode 100644
index 0000000000..3105888ec1
--- /dev/null
+++ b/deps/v8/third_party/colorama/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2010 Jonathan Hartley
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holders, nor those of its contributors
+ may be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/third_party/colorama/README.v8 b/deps/v8/third_party/colorama/README.v8
new file mode 100644
index 0000000000..6ee5c17045
--- /dev/null
+++ b/deps/v8/third_party/colorama/README.v8
@@ -0,0 +1,14 @@
+Name: colorama
+Short Name: colorama
+URL: https://github.com/tartley/colorama.git
+Version: 799604a104
+License: BSD
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Provides a simple cross-platform API to print colored terminal text from Python
+applications.
+
+Local modifications:
+None
diff --git a/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py b/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
index dd9acad898..c70162a2a4 100755
--- a/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
+++ b/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
@@ -160,7 +160,7 @@ def compare_params_list(context, kind, params_1, params_2, types_map_1, types_ma
def compare_types(context, kind, type_1, type_2, types_map_1, types_map_2, depth, errors, reverse):
- if depth > 10:
+ if depth > 5:
return
base_type_1 = type_1["type"]
diff --git a/deps/v8/third_party/inspector_protocol/CodeGenerator.py b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
index 8b28d13609..6be153d7e6 100644
--- a/deps/v8/third_party/inspector_protocol/CodeGenerator.py
+++ b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
@@ -59,12 +59,15 @@ def read_config():
jinja_dir = arg_options.jinja_dir
if not jinja_dir:
raise Exception("jinja directory must be specified")
+ jinja_dir = jinja_dir.decode('utf8')
output_base = arg_options.output_base
if not output_base:
raise Exception("Base output directory must be specified")
+ output_base = output_base.decode('utf8')
config_file = arg_options.config
if not config_file:
raise Exception("Config file name must be specified")
+ config_file = config_file.decode('utf8')
config_base = os.path.dirname(config_file)
config_values = arg_options.config_value
if not config_values:
@@ -440,6 +443,12 @@ class Protocol(object):
return self.check_options(self.config.protocol.options, domain, event, "include_events", "exclude_events", True)
+ def generate_type(self, domain, typename):
+ if not self.config.protocol.options:
+ return domain in self.generate_domains
+ return self.check_options(self.config.protocol.options, domain, typename, "include_types", "exclude_types", True)
+
+
def is_async_command(self, domain, command):
if not self.config.protocol.options:
return False
@@ -473,6 +482,10 @@ class Protocol(object):
return True
+ def is_imported_dependency(self, domain):
+ return domain in self.generate_domains or domain in self.imported_domains
+
+
def main():
jinja_dir, config_file, config = read_config()
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index b13c90694f..fdb24d41d4 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 1a7cbe8ba8fa0d622586f549a97c73d9b52efbea
+Revision: 65caa48c1d301e35f60b94ae770b0c68c34960d4
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
index fac555363b..287f306420 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -248,7 +248,13 @@ void UberDispatcher::registerBackend(const String& name, std::unique_ptr<protoco
m_dispatchers[name] = std::move(dispatcher);
}
-DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedMessage)
+void UberDispatcher::setupRedirects(const HashMap<String, String>& redirects)
+{
+ for (const auto& pair : redirects)
+ m_redirects[pair.first] = pair.second;
+}
+
+DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedMessage, int* outCallId, String* outMethod)
{
if (!parsedMessage) {
reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kParseError, "Message must be a valid JSON");
@@ -263,6 +269,8 @@ DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedM
int callId = 0;
protocol::Value* callIdValue = messageObject->get("id");
bool success = callIdValue && callIdValue->asInteger(&callId);
+ if (outCallId)
+ *outCallId = callId;
if (!success) {
reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have integer 'id' porperty");
return DispatchResponse::kError;
@@ -271,11 +279,17 @@ DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedM
protocol::Value* methodValue = messageObject->get("method");
String method;
success = methodValue && methodValue->asString(&method);
+ if (outMethod)
+ *outMethod = method;
if (!success) {
reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' porperty", nullptr);
return DispatchResponse::kError;
}
+ HashMap<String, String>::iterator redirectIt = m_redirects.find(method);
+ if (redirectIt != m_redirects.end())
+ method = redirectIt->second;
+
size_t dotIndex = StringUtil::find(method, ".");
if (dotIndex == StringUtil::kNotFound) {
if (m_fallThroughForNotFound)
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
index 3a6069b649..5404281dc6 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -113,7 +113,8 @@ class {{config.lib.export_macro}} UberDispatcher {
public:
explicit UberDispatcher(FrontendChannel*);
void registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase>);
- DispatchResponse::Status dispatch(std::unique_ptr<Value> message);
+ void setupRedirects(const HashMap<String, String>&);
+ DispatchResponse::Status dispatch(std::unique_ptr<Value> message, int* callId = nullptr, String* method = nullptr);
FrontendChannel* channel() { return m_frontendChannel; }
bool fallThroughForNotFound() { return m_fallThroughForNotFound; }
void setFallThroughForNotFound(bool);
@@ -122,6 +123,7 @@ public:
private:
FrontendChannel* m_frontendChannel;
bool m_fallThroughForNotFound;
+ HashMap<String, String> m_redirects;
protocol::HashMap<String, std::unique_ptr<protocol::DispatcherBase>> m_dispatchers;
};
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
index 2108262a12..7b858b8dc4 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
@@ -11,6 +11,11 @@ namespace {{namespace}} {
ErrorSupport::ErrorSupport() { }
ErrorSupport::~ErrorSupport() { }
+void ErrorSupport::setName(const char* name)
+{
+ setName(String(name));
+}
+
void ErrorSupport::setName(const String& name)
{
DCHECK(m_path.size());
@@ -27,6 +32,11 @@ void ErrorSupport::pop()
m_path.pop_back();
}
+void ErrorSupport::addError(const char* error)
+{
+ addError(String(error));
+}
+
void ErrorSupport::addError(const String& error)
{
StringBuilder builder;
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
index 0c98e3e0eb..083f2a5eb0 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
@@ -17,8 +17,10 @@ public:
~ErrorSupport();
void push();
+ void setName(const char*);
void setName(const String&);
void pop();
+ void addError(const char*);
void addError(const String&);
bool hasErrors();
String errors();
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
index 6f8b14c16c..b9f061346b 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
@@ -42,30 +42,22 @@ void appendUnsignedAsHex(uint16_t number, StringBuilder* dst)
}
}
-void escapeStringForJSON(const String& str, StringBuilder* dst)
-{
- for (unsigned i = 0; i < str.length(); ++i) {
- uint16_t c = str[i];
- if (!escapeChar(c, dst)) {
- if (c < 32 || c > 126 || c == '<' || c == '>') {
- // 1. Escaping <, > to prevent script execution.
- // 2. Technically, we could also pass through c > 126 as UTF8, but this
- // is also optional. It would also be a pain to implement here.
- appendUnsignedAsHex(c, dst);
- } else {
- StringUtil::builderAppend(*dst, c);
- }
+template <typename Char>
+void escapeStringForJSONInternal(const Char* str, unsigned len,
+ StringBuilder* dst)
+{
+ for (unsigned i = 0; i < len; ++i) {
+ Char c = str[i];
+ if (escapeChar(c, dst))
+ continue;
+ if (c < 32 || c > 126) {
+ appendUnsignedAsHex(c, dst);
+ } else {
+ StringUtil::builderAppend(*dst, c);
}
}
}
-void doubleQuoteStringForJSON(const String& str, StringBuilder* dst)
-{
- StringUtil::builderAppend(*dst, '"');
- escapeStringForJSON(str, dst);
- StringUtil::builderAppend(*dst, '"');
-}
-
} // anonymous namespace
bool Value::asBoolean(bool*) const
@@ -181,7 +173,7 @@ bool StringValue::asString(String* output) const
void StringValue::writeJSON(StringBuilder* output) const
{
DCHECK(type() == TypeString);
- doubleQuoteStringForJSON(m_stringValue, output);
+ StringUtil::builderAppendQuotedString(*output, m_stringValue);
}
std::unique_ptr<Value> StringValue::clone() const
@@ -336,7 +328,7 @@ void DictionaryValue::writeJSON(StringBuilder* output) const
CHECK(it != m_data.end());
if (i)
StringUtil::builderAppend(*output, ',');
- doubleQuoteStringForJSON(it->first, output);
+ StringUtil::builderAppendQuotedString(*output, it->first);
StringUtil::builderAppend(*output, ':');
it->second->writeJSON(output);
}
@@ -402,6 +394,16 @@ protocol::Value* ListValue::at(size_t index)
return m_data[index].get();
}
+void escapeLatinStringForJSON(const uint8_t* str, unsigned len, StringBuilder* dst)
+{
+ escapeStringForJSONInternal<uint8_t>(str, len, dst);
+}
+
+void escapeWideStringForJSON(const uint16_t* str, unsigned len, StringBuilder* dst)
+{
+ escapeStringForJSONInternal<uint16_t>(str, len, dst);
+}
+
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_h.template b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
index 646543af0b..3638b34b4e 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
@@ -239,6 +239,9 @@ private:
std::vector<std::unique_ptr<Value>> m_data;
};
+void escapeLatinStringForJSON(const uint8_t* str, unsigned len, StringBuilder* dst);
+void escapeWideStringForJSON(const uint16_t* str, unsigned len, StringBuilder* dst);
+
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
index 14b55b9794..026c1cdb8d 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -19,6 +19,7 @@ const char Metainfo::domainName[] = "{{domain.domain}}";
const char Metainfo::commandPrefix[] = "{{domain.domain}}.";
const char Metainfo::version[] = "{{domain.version}}";
{% for type in domain.types %}
+ {% if not protocol.generate_type(domain.domain, type.id) %}{% continue %} {% endif %}
{% if "enum" in type %}
namespace {{type.id}}Enum {
@@ -200,18 +201,23 @@ public:
, m_backend(backend)
, m_fallThroughForNotFound(fallThroughForNotFound) {
{% for command in domain.commands %}
- {% if "redirect" in command %}{% continue %}{% endif %}
+ {% if "redirect" in command %}
+ m_redirects["{{domain.domain}}.{{command.name}}"] = "{{command.redirect}}.{{command.name}}";
+ {% continue %}
+ {% endif %}
{% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
m_dispatchMap["{{domain.domain}}.{{command.name}}"] = &DispatcherImpl::{{command.name}};
{% endfor %}
}
~DispatcherImpl() override { }
DispatchResponse::Status dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
+ HashMap<String, String>& redirects() { return m_redirects; }
protected:
using CallHandler = DispatchResponse::Status (DispatcherImpl::*)(int callId, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
using DispatchMap = protocol::HashMap<String, CallHandler>;
DispatchMap m_dispatchMap;
+ HashMap<String, String> m_redirects;
{% for command in domain.commands %}
{% if "redirect" in command %}{% continue %}{% endif %}
@@ -337,9 +343,9 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
&out_{{parameter.name}}
{%- endfor %}
{% endif %});
- {% if "returns" in command %}
if (response.status() == DispatchResponse::kFallThrough)
return response.status();
+ {% if "returns" in command %}
std::unique_ptr<protocol::DictionaryValue> result = DictionaryValue::create();
if (response.status() == DispatchResponse::kSuccess) {
{% for parameter in command.returns %}
@@ -378,9 +384,11 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
{% endfor %}
// static
-void Dispatcher::wire(UberDispatcher* dispatcher, Backend* backend)
+void Dispatcher::wire(UberDispatcher* uber, Backend* backend)
{
- dispatcher->registerBackend("{{domain.domain}}", std::unique_ptr<protocol::DispatcherBase>(new DispatcherImpl(dispatcher->channel(), backend, dispatcher->fallThroughForNotFound())));
+ std::unique_ptr<DispatcherImpl> dispatcher(new DispatcherImpl(uber->channel(), backend, uber->fallThroughForNotFound()));
+ uber->setupRedirects(dispatcher->redirects());
+ uber->registerBackend("{{domain.domain}}", std::move(dispatcher));
}
} // {{domain.domain}}
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
index 81dd7f20e5..11d529bce9 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
@@ -14,7 +14,9 @@
// For each imported domain we generate a ValueConversions struct instead of a full domain definition
// and include Domain::API version from there.
{% for name in domain.dependencies %}
+ {% if protocol.is_imported_dependency(name) %}
#include {{format_include(config.protocol.package, name)}}
+ {% endif %}
{% endfor %}
{% if protocol.is_exported_domain(domain.domain) %}
#include {{format_include(config.exported.package, domain.domain)}}
@@ -27,6 +29,7 @@ namespace {{domain.domain}} {
// ------------- Forward and enum declarations.
{% for type in domain.types %}
+ {% if not protocol.generate_type(domain.domain, type.id) %}{% continue %}{% endif %}
{% if type.type == "object" %}
{% if "properties" in type %}
// {{type.description}}
@@ -41,6 +44,7 @@ using {{type.id}} = {{protocol.resolve_type(type).type}};
{% endif %}
{% endfor %}
{% for type in domain.types %}
+ {% if not protocol.generate_type(domain.domain, type.id) %}{% continue %}{% endif %}
{% if "enum" in type %}
namespace {{type.id}}Enum {
@@ -67,6 +71,7 @@ namespace {{param.name | to_title_case}}Enum {
// ------------- Type and builder declarations.
{% for type in domain.types %}
+ {% if not protocol.generate_type(domain.domain, type.id) %}{% continue %}{% endif %}
{% if not (type.type == "object") or not ("properties" in type) %}{% continue %}{% endif %}
// {{type.description}}
@@ -110,12 +115,8 @@ public:
public:
enum {
NoFieldsSet = 0,
- {% set count = 0 %}
- {% for property in type.properties %}
- {% if not(property.optional) %}
- {% set count = count + 1 %}
- {{property.name | to_title_case}}Set = 1 << {{count}},
- {% endif %}
+ {% for property in type.properties|rejectattr("optional") %}
+ {{property.name | to_title_case}}Set = 1 << {{loop.index}},
{% endfor %}
AllFieldsSet = (
{%- for property in type.properties %}
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 4f97777a6a..a15058a186 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -13,7 +13,7 @@ group("gn_all") {
":jsfunfuzz_run",
":run-deopt-fuzzer_run",
":run-gcmole_run",
- ":run-valgrind_run",
+ ":run-num-fuzzer_run",
]
}
}
@@ -50,10 +50,10 @@ v8_isolate_run("run-gcmole") {
isolate = "gcmole/run-gcmole.isolate"
}
-v8_isolate_run("run-valgrind") {
+v8_isolate_run("run-num-fuzzer") {
deps = [
"..:d8_run",
]
- isolate = "run-valgrind.isolate"
+ isolate = "run-num-fuzzer.isolate"
}
diff --git a/deps/v8/tools/android-sync.sh b/deps/v8/tools/android-sync.sh
index 6d9500fc52..825b3c27de 100755
--- a/deps/v8/tools/android-sync.sh
+++ b/deps/v8/tools/android-sync.sh
@@ -100,6 +100,7 @@ sync_file tools/profile.js
sync_file tools/splaytree.js
sync_file tools/profile_view.js
sync_file tools/logreader.js
+sync_file tools/arguments.js
sync_file tools/tickprocessor.js
echo ""
sync_dir tools/profviz
diff --git a/deps/v8/tools/arguments.js b/deps/v8/tools/arguments.js
new file mode 100644
index 0000000000..c2b3d1bfdb
--- /dev/null
+++ b/deps/v8/tools/arguments.js
@@ -0,0 +1,78 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class BaseArgumentsProcessor {
+ constructor(args) {
+ this.args_ = args;
+ this.result_ = this.getDefaultResults();
+ console.assert(this.result_ !== undefined)
+ console.assert(this.result_.logFileName !== undefined);
+ this.argsDispatch_ = this.getArgsDispatch();
+ console.assert(this.argsDispatch_ !== undefined);
+ }
+
+ getDefaultResults() {
+ throw "Implement in getDefaultResults in subclass";
+ }
+
+ getArgsDispatch() {
+ throw "Implement getArgsDispatch in subclass";
+ }
+
+ result() { return this.result_ }
+
+ printUsageAndExit() {
+ print('Cmdline args: [options] [log-file-name]\n' +
+ 'Default log file name is "' +
+ this.result_.logFileName + '".\n');
+ print('Options:');
+ for (var arg in this.argsDispatch_) {
+ var synonyms = [arg];
+ var dispatch = this.argsDispatch_[arg];
+ for (var synArg in this.argsDispatch_) {
+ if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
+ synonyms.push(synArg);
+ delete this.argsDispatch_[synArg];
+ }
+ }
+ print(' ' + synonyms.join(', ').padEnd(20) + " " + dispatch[2]);
+ }
+ quit(2);
+ }
+
+ parse() {
+ while (this.args_.length) {
+ var arg = this.args_.shift();
+ if (arg.charAt(0) != '-') {
+ this.result_.logFileName = arg;
+ continue;
+ }
+ var userValue = null;
+ var eqPos = arg.indexOf('=');
+ if (eqPos != -1) {
+ userValue = arg.substr(eqPos + 1);
+ arg = arg.substr(0, eqPos);
+ }
+ if (arg in this.argsDispatch_) {
+ var dispatch = this.argsDispatch_[arg];
+ var property = dispatch[0];
+ var defaultValue = dispatch[1];
+ if (typeof defaultValue == "function") {
+ userValue = defaultValue(userValue);
+ } else if (userValue == null) {
+ userValue = defaultValue;
+ }
+ this.result_[property] = userValue;
+ } else {
+ return false;
+ }
+ }
+ return true;
+ }
+}
+
+function parseBool(str) {
+ if (str == "true" || str == "1") return true;
+ return false;
+}
diff --git a/deps/v8/tools/bigint-tester.py b/deps/v8/tools/bigint-tester.py
new file mode 100755
index 0000000000..0452a0d1db
--- /dev/null
+++ b/deps/v8/tools/bigint-tester.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import math
+import multiprocessing
+import os
+import random
+import subprocess
+import sys
+import tempfile
+
+# Configuration.
+kChars = "0123456789abcdefghijklmnopqrstuvwxyz"
+kBase = 16
+kLineLength = 71 # A bit less than 80.
+kNumInputsGenerate = 20
+kNumInputsStress = 1000
+
+# Internally used sentinels.
+kNo = 0
+kYes = 1
+kRandom = 2
+
+TEST_HEADER = """\
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by %s.
+
+// Flags: --harmony-bigint
+""" % sys.argv[0]
+
+TEST_BODY = """
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+ var d = data[i];
+%s
+}
+if (error_count !== 0) {
+ print("Finished with " + error_count + " errors.")
+ quit(1);
+}"""
+
+def GenRandom(length, negative=kRandom):
+ if length == 0: return "0"
+ s = []
+ if negative == kYes or (negative == kRandom and (random.randint(0, 1) == 0)):
+ s.append("-") # 50% chance of negative.
+ s.append(kChars[random.randint(1, kBase - 1)]) # No leading zero.
+ for i in range(1, length):
+ s.append(kChars[random.randint(0, kBase - 1)])
+ return "".join(s)
+
+def Format(x, base):
+ original = x
+ negative = False
+ if x == 0: return "0"
+ if x < 0:
+ negative = True
+ x = -x
+ s = ""
+ while x > 0:
+ s = kChars[x % base] + s
+ x = x / base
+ if negative:
+ s = "-" + s
+ assert int(s, base) == original
+ return s
+
+class TestGenerator(object):
+ # Subclasses must implement these.
+ # Returns a JSON snippet defining inputs and expected output for one test.
+ def EmitOne(self): raise NotImplementedError
+ # Returns a snippet of JavaScript that will operate on a variable "d"
+ # whose content is defined by the result of a call to "EmitOne".
+ def EmitTestCore(self): raise NotImplementedError
+
+ def EmitHeader(self):
+ return TEST_HEADER
+
+ def EmitData(self, count):
+ s = []
+ for i in range(count):
+ s.append(self.EmitOne())
+ return "var data = [" + ", ".join(s) + "];"
+
+ def EmitTestBody(self):
+ return TEST_BODY % self.EmitTestCore()
+
+ def PrintTest(self, count):
+ print(self.EmitHeader())
+ print(self.EmitData(count))
+ print(self.EmitTestBody())
+
+ def RunTest(self, count, binary):
+ try:
+ fd, path = tempfile.mkstemp(suffix=".js", prefix="bigint-test-")
+ with open(path, "w") as f:
+ f.write(self.EmitData(count))
+ f.write(self.EmitTestBody())
+ return subprocess.call("%s --harmony-bigint %s" % (binary, path),
+ shell=True)
+ finally:
+ os.close(fd)
+ os.remove(path)
+
+class UnaryOp(TestGenerator):
+ # Subclasses must implement these two.
+ def GetOpString(self): raise NotImplementedError
+ def GenerateResult(self, x): raise NotImplementedError
+
+ # Subclasses may override this:
+ def GenerateInput(self):
+ return GenRandom(random.randint(0, kLineLength))
+
+ # Subclasses should not override anything below.
+ def EmitOne(self):
+ x_str = self.GenerateInput()
+ x_num = int(x_str, kBase)
+ result_num = self.GenerateResult(x_num)
+ result_str = Format(result_num, kBase)
+ return "{\n a: \"%s\",\n r: \"%s\"\n}" % (x_str, result_str)
+
+ def EmitTestCore(self):
+ return """\
+ var a = BigInt.parseInt(d.a, %(base)d);
+ var r = %(op)sa;
+ if (d.r !== r.toString(%(base)d)) {
+ print("Input: " + a.toString(%(base)d));
+ print("Result: " + r.toString(%(base)d));
+ print("Expected: " + d.r);
+ error_count++;
+ }""" % {"op": self.GetOpString(), "base": kBase}
+
+class BinaryOp(TestGenerator):
+ # Subclasses must implement these two.
+ def GetOpString(self): raise NotImplementedError
+ def GenerateResult(self, left, right): raise NotImplementedError
+
+ # Subclasses may override these:
+ def GenerateInputLengths(self):
+ return random.randint(0, kLineLength), random.randint(0, kLineLength)
+
+ def GenerateInputs(self):
+ left_length, right_length = self.GenerateInputLengths()
+ return GenRandom(left_length), GenRandom(right_length)
+
+ # Subclasses should not override anything below.
+ def EmitOne(self):
+ left_str, right_str = self.GenerateInputs()
+ left_num = int(left_str, kBase)
+ right_num = int(right_str, kBase)
+ result_num = self.GenerateResult(left_num, right_num)
+ result_str = Format(result_num, kBase)
+ return ("{\n a: \"%s\",\n b: \"%s\",\n r: \"%s\"\n}" %
+ (left_str, right_str, result_str))
+
+ def EmitTestCore(self):
+ return """\
+ var a = BigInt.parseInt(d.a, %(base)d);
+ var b = BigInt.parseInt(d.b, %(base)d);
+ var r = a %(op)s b;
+ if (d.r !== r.toString(%(base)d)) {
+ print("Input A: " + a.toString(%(base)d));
+ print("Input B: " + b.toString(%(base)d));
+ print("Result: " + r.toString(%(base)d));
+ print("Expected: " + d.r);
+ print("Op: %(op)s");
+ error_count++;
+ }""" % {"op": self.GetOpString(), "base": kBase}
+
+class Neg(UnaryOp):
+ def GetOpString(self): return "-"
+ def GenerateResult(self, x): return -x
+
+class BitNot(UnaryOp):
+ def GetOpString(self): return "~"
+ def GenerateResult(self, x): return ~x
+
+class Inc(UnaryOp):
+ def GetOpString(self): return "++"
+ def GenerateResult(self, x): return x + 1
+
+class Dec(UnaryOp):
+ def GetOpString(self): return "--"
+ def GenerateResult(self, x): return x - 1
+
+class Add(BinaryOp):
+ def GetOpString(self): return "+"
+ def GenerateResult(self, a, b): return a + b
+
+class Sub(BinaryOp):
+ def GetOpString(self): return "-"
+ def GenerateResult(self, a, b): return a - b
+
+class Mul(BinaryOp):
+ def GetOpString(self): return "*"
+ def GenerateResult(self, a, b): return a * b
+ def GenerateInputLengths(self):
+ left_length = random.randint(1, kLineLength)
+ return left_length, kLineLength - left_length
+
+class Div(BinaryOp):
+ def GetOpString(self): return "/"
+ def GenerateResult(self, a, b):
+ result = abs(a) / abs(b)
+ if (a < 0) != (b < 0): result = -result
+ return result
+ def GenerateInputLengths(self):
+ # Let the left side be longer than the right side with high probability,
+ # because that case is more interesting.
+ min_left = kLineLength * 6 / 10
+ max_right = kLineLength * 7 / 10
+ return random.randint(min_left, kLineLength), random.randint(1, max_right)
+
+class Mod(Div): # Sharing GenerateInputLengths.
+ def GetOpString(self): return "%"
+ def GenerateResult(self, a, b):
+ result = a % b
+ if a < 0 and result > 0:
+ result -= abs(b)
+ if a > 0 and result < 0:
+ result += abs(b)
+ return result
+
+class Shl(BinaryOp):
+ def GetOpString(self): return "<<"
+ def GenerateInputsInternal(self, small_shift_positive):
+ left_length = random.randint(0, kLineLength - 1)
+ left = GenRandom(left_length)
+ small_shift = random.randint(0, 1) == 0
+ if small_shift:
+ right_length = 1 + int(math.log((kLineLength - left_length), kBase))
+ neg = kNo if small_shift_positive else kYes
+ else:
+ right_length = random.randint(0, 3)
+ neg = kYes if small_shift_positive else kNo
+ right = GenRandom(right_length, negative=neg)
+ return left, right
+
+ def GenerateInputs(self): return self.GenerateInputsInternal(True)
+ def GenerateResult(self, a, b):
+ if b < 0: return a >> -b
+ return a << b
+
+class Sar(Shl): # Sharing GenerateInputsInternal.
+ def GetOpString(self): return ">>"
+ def GenerateInputs(self):
+ return self.GenerateInputsInternal(False)
+ def GenerateResult(self, a, b):
+ if b < 0: return a << -b
+ return a >> b
+
+class BitAnd(BinaryOp):
+ def GetOpString(self): return "&"
+ def GenerateResult(self, a, b): return a & b
+
+class BitOr(BinaryOp):
+ def GetOpString(self): return "|"
+ def GenerateResult(self, a, b): return a | b
+
+class BitXor(BinaryOp):
+ def GetOpString(self): return "^"
+ def GenerateResult(self, a, b): return a ^ b
+
+OPS = {
+ "add": Add,
+ "sub": Sub,
+ "mul": Mul,
+ "div": Div,
+ "mod": Mod,
+ "inc": Inc,
+ "dec": Dec,
+ "neg": Neg,
+ "not": BitNot,
+ "shl": Shl,
+ "sar": Sar,
+ "and": BitAnd,
+ "or": BitOr,
+ "xor": BitXor
+}
+
+OPS_NAMES = ", ".join(sorted(OPS.keys()))
+
+def RunOne(op, num_inputs, binary):
+ return OPS[op]().RunTest(num_inputs, binary)
+def WrapRunOne(args):
+ return RunOne(*args)
+def RunAll(args):
+ for op in args.op:
+ for r in xrange(args.runs):
+ yield (op, args.num_inputs, args.binary)
+
+def Main():
+ parser = argparse.ArgumentParser(
+ description="Helper for generating or running BigInt tests.")
+ parser.add_argument(
+ "action", help="Action to perform: 'generate' or 'stress'")
+ parser.add_argument(
+ "op", nargs="+",
+ help="Operation(s) to test, one or more of: %s. In 'stress' mode, "
+ "special op 'all' tests all ops." % OPS_NAMES)
+ parser.add_argument(
+ "-n", "--num-inputs", type=int, default=-1,
+ help="Number of input/output sets in each generated test. Defaults to "
+ "%d for 'generate' and '%d' for 'stress' mode." %
+ (kNumInputsGenerate, kNumInputsStress))
+
+ stressopts = parser.add_argument_group("'stress' mode arguments")
+ stressopts.add_argument(
+ "-r", "--runs", type=int, default=1000,
+ help="Number of tests (with NUM_INPUTS each) to generate and run. "
+ "Default: %(default)s.")
+ stressopts.add_argument(
+ "-b", "--binary", default="out/x64.debug/d8",
+ help="The 'd8' binary to use. Default: %(default)s.")
+ args = parser.parse_args()
+
+ for op in args.op:
+ if op not in OPS.keys() and op != "all":
+ print("Invalid op '%s'. See --help." % op)
+ return 1
+
+ if len(args.op) == 1 and args.op[0] == "all":
+ args.op = OPS.keys()
+
+ if args.action == "generate":
+ if args.num_inputs < 0: args.num_inputs = kNumInputsGenerate
+ for op in args.op:
+ OPS[op]().PrintTest(args.num_inputs)
+ elif args.action == "stress":
+ if args.num_inputs < 0: args.num_inputs = kNumInputsStress
+ result = 0
+ pool = multiprocessing.Pool(multiprocessing.cpu_count())
+ for r in pool.imap_unordered(WrapRunOne, RunAll(args)):
+ result = result or r
+ return result
+ else:
+ print("Invalid action '%s'. See --help." % args.action)
+ return 1
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/deps/v8/tools/csvparser.js b/deps/v8/tools/csvparser.js
index c7d46b535c..f0f8680cf8 100644
--- a/deps/v8/tools/csvparser.js
+++ b/deps/v8/tools/csvparser.js
@@ -29,50 +29,74 @@
/**
* Creates a CSV lines parser.
*/
-function CsvParser() {
-};
+class CsvParser {
+ /**
+ * Converts \x00 and \u0000 escape sequences in the given string.
+ *
+ * @param {string} input field.
+ **/
+ escapeField(string) {
+ let nextPos = string.indexOf("\\");
+ if (nextPos === -1) return string;
+ let result = string.substring(0, nextPos);
+ // Escape sequences of the form \x00 and \u0000;
+ let endPos = string.length;
+ let pos = 0;
+ while (nextPos !== -1) {
+ let escapeIdentifier = string.charAt(nextPos + 1);
+ pos = nextPos + 2;
+ if (escapeIdentifier == 'n') {
+ result += '\n';
+ nextPos = pos;
+ } else {
+ if (escapeIdentifier == 'x') {
+ // \x00 ascii range escapes consume 2 chars.
+ nextPos = pos + 2;
+ } else {
+ // \u0000 unicode range escapes consume 4 chars.
+ nextPos = pos + 4;
+ }
+ // Convert the selected escape sequence to a single character.
+ let escapeChars = string.substring(pos, nextPos);
+ result += String.fromCharCode(parseInt(escapeChars, 16));
+ }
-/**
- * A regex for matching a CSV field.
- * @private
- */
-CsvParser.CSV_FIELD_RE_ = /^"((?:[^"]|"")*)"|([^,]*)/;
-
-
-/**
- * A regex for matching a double quote.
- * @private
- */
-CsvParser.DOUBLE_QUOTE_RE_ = /""/g;
-
+ // Continue looking for the next escape sequence.
+ pos = nextPos;
+ nextPos = string.indexOf("\\", pos);
+ // If there are no more escape sequences consume the rest of the string.
+ if (nextPos === -1) {
+ result += string.substr(pos);
+ } else if (pos != nextPos) {
+ result += string.substring(pos, nextPos);
+ }
+ }
+ return result;
+ }
-/**
- * Parses a line of CSV-encoded values. Returns an array of fields.
- *
- * @param {string} line Input line.
- */
-CsvParser.prototype.parseLine = function(line) {
- var fieldRe = CsvParser.CSV_FIELD_RE_;
- var doubleQuoteRe = CsvParser.DOUBLE_QUOTE_RE_;
- var pos = 0;
- var endPos = line.length;
- var fields = [];
- if (endPos > 0) {
- do {
- var fieldMatch = fieldRe.exec(line.substr(pos));
- if (typeof fieldMatch[1] === "string") {
- var field = fieldMatch[1];
- pos += field.length + 3; // Skip comma and quotes.
- fields.push(field.replace(doubleQuoteRe, '"'));
+ /**
+ * Parses a line of CSV-encoded values. Returns an array of fields.
+ *
+ * @param {string} line Input line.
+ */
+ parseLine(line) {
+ var pos = 0;
+ var endPos = line.length;
+ var fields = [];
+ if (endPos == 0) return fields;
+ let nextPos = 0;
+ while(nextPos !== -1) {
+ nextPos = line.indexOf(',', pos);
+ let field;
+ if (nextPos === -1) {
+ field = line.substr(pos);
} else {
- // The second field pattern will match anything, thus
- // in the worst case the match will be an empty string.
- var field = fieldMatch[2];
- pos += field.length + 1; // Skip comma.
- fields.push(field);
+ field = line.substring(pos, nextPos);
}
- } while (pos <= endPos);
+ fields.push(this.escapeField(field));
+ pos = nextPos + 1;
+ };
+ return fields
}
- return fields;
-};
+}
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 1ce17e4f31..c3dab0a870 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -43,7 +43,7 @@ MODES = ["release", "debug", "optdebug"]
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump",
- "generate-bytecode-expectations"]
+ "generate-bytecode-expectations", "inspector-test"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
@@ -75,6 +75,7 @@ TESTSUITES_TARGETS = {"benchmarks": "d8",
"cctest": "cctest",
"debugger": "d8",
"fuzzer": "v8_fuzzers",
+ "inspector": "inspector-test",
"intl": "d8",
"message": "d8",
"mjsunit": "d8",
diff --git a/deps/v8/tools/dump-cpp.py b/deps/v8/tools/dump-cpp.py
index 5198ecab21..1fc15d9fc1 100644
--- a/deps/v8/tools/dump-cpp.py
+++ b/deps/v8/tools/dump-cpp.py
@@ -17,8 +17,8 @@ def is_file_executable(fPath):
if __name__ == '__main__':
JS_FILES = ['splaytree.js', 'codemap.js', 'csvparser.js', 'consarray.js',
- 'profile.js', 'logreader.js', 'tickprocessor.js', 'SourceMap.js',
- 'dumpcpp.js', 'dumpcpp-driver.js']
+ 'profile.js', 'logreader.js', 'arguments.js', 'tickprocessor.js',
+ 'SourceMap.js', 'dumpcpp.js', 'dumpcpp-driver.js']
tools_path = os.path.dirname(os.path.realpath(__file__))
on_windows = platform.system() == 'Windows'
JS_FILES = [os.path.join(tools_path, f) for f in JS_FILES]
diff --git a/deps/v8/tools/foozzie/testdata/failure_output.txt b/deps/v8/tools/foozzie/testdata/failure_output.txt
index eff2caddc9..654a84fb98 100644
--- a/deps/v8/tools/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/foozzie/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --turbo-filter=~ --noopt
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --turbo-filter=~ --noopt --suppress-asm-messages
# Flags of x64,ignition_turbo:
---abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --suppress-asm-messages
#
# Difference:
- unknown
diff --git a/deps/v8/tools/foozzie/v8_foozzie.py b/deps/v8/tools/foozzie/v8_foozzie.py
index 2178178652..2b61410ce4 100755
--- a/deps/v8/tools/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/foozzie/v8_foozzie.py
@@ -127,7 +127,7 @@ def infer_arch(d8):
executable.
"""
with open(os.path.join(os.path.dirname(d8), 'v8_build_config.json')) as f:
- arch = json.load(f)['v8_target_cpu']
+ arch = json.load(f)['v8_current_cpu']
return 'ia32' if arch == 'x86' else arch
@@ -173,9 +173,9 @@ def parse_args():
options.second_arch = infer_arch(options.second_d8)
# Ensure we make a sane comparison.
- assert (options.first_arch != options.second_arch or
- options.first_config != options.second_config), (
- 'Need either arch or config difference.')
+ if (options.first_arch == options.second_arch and
+ options.first_config == options.second_config):
+ parser.error('Need either arch or config difference.')
assert options.first_arch in SUPPORTED_ARCHS
assert options.second_arch in SUPPORTED_ARCHS
assert options.first_config in CONFIGS
@@ -260,7 +260,7 @@ def main():
args = [sys.executable] + args
return v8_commands.Execute(
args,
- cwd=os.path.dirname(options.testcase),
+ cwd=os.path.dirname(os.path.abspath(options.testcase)),
timeout=TIMEOUT,
)
diff --git a/deps/v8/tools/foozzie/v8_suppressions.py b/deps/v8/tools/foozzie/v8_suppressions.py
index a84cee6296..42fdc7a241 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.py
+++ b/deps/v8/tools/foozzie/v8_suppressions.py
@@ -64,6 +64,10 @@ IGNORE_SOURCES = {
'crbug.com/691587': [
'/v8/test/mjsunit/asm/regress-674089.js',
],
+
+ 'crbug.com/774805': [
+ '/v8/test/mjsunit/console.js',
+ ],
}
# Ignore by test case pattern. Map from bug->regexp.
diff --git a/deps/v8/tools/gcmole/download_gcmole_tools.py b/deps/v8/tools/gcmole/download_gcmole_tools.py
index 7183d28f34..af27723da6 100755
--- a/deps/v8/tools/gcmole/download_gcmole_tools.py
+++ b/deps/v8/tools/gcmole/download_gcmole_tools.py
@@ -18,5 +18,3 @@ if re.search(r'\bgcmole=1', os.environ.get('GYP_DEFINES', '')):
'-s', SHA1_PATH,
'--platform=linux*'
])
-else:
- print 'Skipping gcmole download as gcmole is not set in gyp flags.'
diff --git a/deps/v8/tools/gcov.sh b/deps/v8/tools/gcov.sh
index 90f3974c85..d7277043d4 100755
--- a/deps/v8/tools/gcov.sh
+++ b/deps/v8/tools/gcov.sh
@@ -57,7 +57,7 @@ do
echo "Building" $v8_target_arch
GYP_DEFINES="component=static_library use_goma=1 target_arch=$target_arch v8_target_arch=$v8_target_arch coverage=1 clang=0" python $v8_root/gypfiles/gyp_v8.py -G output_dir=$work_dir
ninja -C $build_dir -j2000
- $v8_root/tools/run-tests.py --gcov-coverage --arch=$v8_target_arch --mode=$mode --shell-dir=$build_dir --exhaustive-variants
+ $v8_root/tools/run-tests.py --gcov-coverage --arch=$v8_target_arch --mode=$mode --shell-dir=$build_dir --variants=exhaustive
fi
done
diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py
index 99616727e3..a0262f0a57 100644
--- a/deps/v8/tools/gdb-v8-support.py
+++ b/deps/v8/tools/gdb-v8-support.py
@@ -26,6 +26,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
+import tempfile
+import os
+import subprocess
+import time
+
kSmiTag = 0
kSmiTagSize = 1
@@ -184,3 +189,50 @@ class FindAnywhere (gdb.Command):
self.find(m.group(1), m.group(2), value)
FindAnywhere()
+
+
+class Redirect(gdb.Command):
+ """Redirect the subcommand's stdout to a temporary file.
+
+Usage: redirect subcommand...
+Example:
+ redirect job 0x123456789
+ redirect x/1024xg 0x12345678
+
+If provided, the generated temporary file is directly openend with the
+GDB_EXTERNAL_EDITOR environment variable.
+ """
+ def __init__(self):
+ super(Redirect, self).__init__("redirect", gdb.COMMAND_USER)
+
+ def invoke(self, subcommand, from_tty):
+ old_stdout = gdb.execute("p dup(1)", to_string=True).split("=")[-1].strip()
+ try:
+ time_suffix = time.strftime("%Y%m%d-%H%M%S")
+ fd, file = tempfile.mkstemp(suffix="-%s.gdbout" % time_suffix)
+ try:
+ # Temporaily redirect stdout to the created tmp file for the
+ # duration of the subcommand.
+ gdb.execute('p dup2(open("%s", 1), 1)' % file, to_string=True)
+ # Execute subcommand non interactively.
+ result = gdb.execute(subcommand, from_tty=False, to_string=True)
+ # Write returned string results to the temporary file as well.
+ with open(file, 'a') as f:
+ f.write(result)
+ # Open generated result.
+ if 'GDB_EXTERNAL_EDITOR' in os.environ:
+ open_cmd = os.environ['GDB_EXTERNAL_EDITOR']
+ print("Opening '%s' with %s" % (file, open_cmd))
+ subprocess.call([open_cmd, file])
+ else:
+ print("Open output:\n %s '%s'" % (os.environ['EDITOR'], file))
+ finally:
+ # Restore original stdout.
+ gdb.execute("p dup2(%s, 1)" % old_stdout, to_string=True)
+ # Close the temporary file.
+ os.close(fd)
+ finally:
+ # Close the originally duplicated stdout descriptor.
+ gdb.execute("p close(%s)" % old_stdout, to_string=True)
+
+Redirect()
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 22f0afbef3..a618d74ed3 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -60,7 +60,6 @@ consts_misc = [
{ 'name': 'IsNotStringMask', 'value': 'kIsNotStringMask' },
{ 'name': 'StringTag', 'value': 'kStringTag' },
- { 'name': 'NotStringTag', 'value': 'kNotStringTag' },
{ 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
{ 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
@@ -223,15 +222,10 @@ consts_misc = [
{ 'name': 'namedictionary_prefix_start_index',
'value': 'NameDictionary::kPrefixStartIndex' },
- { 'name': 'seedednumberdictionaryshape_prefix_size',
- 'value': 'SeededNumberDictionaryShape::kPrefixSize' },
- { 'name': 'seedednumberdictionaryshape_entry_size',
- 'value': 'SeededNumberDictionaryShape::kEntrySize' },
-
- { 'name': 'unseedednumberdictionaryshape_prefix_size',
- 'value': 'UnseededNumberDictionaryShape::kPrefixSize' },
- { 'name': 'unseedednumberdictionaryshape_entry_size',
- 'value': 'UnseededNumberDictionaryShape::kEntrySize' }
+ { 'name': 'numberdictionaryshape_prefix_size',
+ 'value': 'NumberDictionaryShape::kPrefixSize' },
+ { 'name': 'numberdictionaryshape_entry_size',
+ 'value': 'NumberDictionaryShape::kEntrySize' },
];
#
@@ -252,9 +246,9 @@ extras_accessors = [
'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
'JSTypedArray, length, Object, kLengthOffset',
- 'Map, instance_attributes, int, kInstanceAttributesOffset',
- 'Map, inobject_properties_or_constructor_function_index, int, kInObjectPropertiesOrConstructorFunctionIndexOffset',
- 'Map, instance_size, int, kInstanceSizeOffset',
+ 'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
+ 'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
+ 'Map, instance_type, uint16_t, kInstanceTypeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',
'Map, bit_field3, int, kBitField3Offset',
@@ -378,7 +372,7 @@ def load_objects_from_file(objfilename, checktypes):
# do so without the embedded newlines.
#
for line in objfile:
- if (line.startswith('enum InstanceType : uint8_t {')):
+ if (line.startswith('enum InstanceType : uint16_t {')):
in_insttype = True;
continue;
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index bb784ce806..5d9ffff607 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -579,6 +579,9 @@ MD_CPU_ARCHITECTURE_ARM = 5
MD_CPU_ARCHITECTURE_ARM64 = 0x8003
MD_CPU_ARCHITECTURE_AMD64 = 9
+OBJDUMP_BIN = None
+DEFAULT_OBJDUMP_BIN = '/usr/bin/objdump'
+
class FuncSymbol:
def __init__(self, start, size, name):
self.start = start
@@ -623,6 +626,11 @@ class MinidumpReader(object):
self.modules_with_symbols = []
self.symbols = []
+ self._ReadArchitecture(directories)
+ self._ReadDirectories(directories)
+ self._FindObjdump(options)
+
+ def _ReadArchitecture(self, directories):
# Find MDRawSystemInfo stream and determine arch.
for d in directories:
if d.stream_type == MD_SYSTEM_INFO_STREAM:
@@ -635,6 +643,7 @@ class MinidumpReader(object):
MD_CPU_ARCHITECTURE_X86]
assert not self.arch is None
+ def _ReadDirectories(self, directories):
for d in directories:
DebugPrint(d)
if d.stream_type == MD_EXCEPTION_STREAM:
@@ -680,6 +689,44 @@ class MinidumpReader(object):
assert ctypes.sizeof(self.memory_list64) == d.location.data_size
DebugPrint(self.memory_list64)
+ def _FindObjdump(self, options):
+ if options.objdump:
+ objdump_bin = options.objdump
+ else:
+ objdump_bin = self._FindThirdPartyObjdump()
+ if not objdump_bin or not os.path.exists(objdump_bin):
+ print "# Cannot find '%s', falling back to default objdump '%s'" % (
+ objdump_bin, DEFAULT_OBJDUMP_BIN)
+ objdump_bin = DEFAULT_OBJDUMP_BIN
+ global OBJDUMP_BIN
+ OBJDUMP_BIN = objdump_bin
+ disasm.OBJDUMP_BIN = objdump_bin
+
+ def _FindThirdPartyObjdump(self):
+ # Try to find the platform specific objdump
+ third_party_dir = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)), 'third_party')
+ objdumps = []
+ for root, dirs, files in os.walk(third_party_dir):
+ for file in files:
+ if file.endswith("objdump"):
+ objdumps.append(os.path.join(root, file))
+ if self.arch == MD_CPU_ARCHITECTURE_ARM:
+ platform_filter = 'arm-linux'
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ platform_filter = 'aarch64'
+ else:
+ # use default otherwise
+ return None
+ print ("# Looking for platform specific (%s) objdump in "
+ "third_party directory.") % platform_filter
+ objdumps = filter(lambda file: platform_filter in file >= 0, objdumps)
+ if len(objdumps) == 0:
+ print "# Could not find platform specific objdump in third_party."
+ print "# Make sure you installed the correct SDK."
+ return None
+ return objdumps[0]
+
def ContextDescriptor(self):
if self.arch == MD_CPU_ARCHITECTURE_X86:
return MINIDUMP_CONTEXT_X86
@@ -3008,9 +3055,16 @@ class InspectionWebFormatter(object):
marker = ""
if stack_slot:
marker = "=>"
- op_offset = 3 * num_bytes - 1
code = line[1]
+
+ # Some disassemblers insert spaces between each byte,
+ # while some do not.
+ if code[2] == " ":
+ op_offset = 3 * num_bytes - 1
+ else:
+ op_offset = 2 * num_bytes
+
# Compute the actual call target which the disassembler is too stupid
# to figure out (it adds the call offset to the disassembly offset rather
# than the absolute instruction address).
@@ -3807,15 +3861,10 @@ if __name__ == "__main__":
help="dump all information contained in the minidump")
parser.add_option("--symdir", dest="symdir", default=".",
help="directory containing *.pdb.sym file with symbols")
- parser.add_option("--objdump",
- default="/usr/bin/objdump",
- help="objdump tool to use [default: %default]")
+ parser.add_option("--objdump", default="",
+ help="objdump tool to use [default: %s]" % (
+ DEFAULT_OBJDUMP_BIN))
options, args = parser.parse_args()
- if os.path.exists(options.objdump):
- disasm.OBJDUMP_BIN = options.objdump
- OBJDUMP_BIN = options.objdump
- else:
- print "Cannot find %s, falling back to default objdump" % options.objdump
if len(args) != 1:
parser.print_help()
sys.exit(1)
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
index ee4e26cadd..f60a356dd4 100644
--- a/deps/v8/tools/ic-explorer.html
+++ b/deps/v8/tools/ic-explorer.html
@@ -53,6 +53,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<script src="./profile.js" type="text/javascript"></script>
<script src="./profile_view.js" type="text/javascript"></script>
<script src="./logreader.js" type="text/javascript"></script>
+ <script src="./arguments.js" type="text/javascript"></script>
<script src="./ic-processor.js" type="text/javascript"></script>
<script src="./SourceMap.js" type="text/javascript"></script>
@@ -254,16 +255,17 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let fragment = document.createDocumentFragment();
function td(tr, content, className) {
- let td = document.createElement("td");
+ let node = document.createElement("td");
if (typeof content == "object") {
- td.appendChild(content);
+ node.appendChild(content);
} else {
- td.innerHTML = content;
+ node.innerHTML = content;
}
- td.className = className
- tr.appendChild(td);
- return td
+ node.className = className
+ tr.appendChild(node);
+ return node
}
+
let max = Math.min(1000, entries.length)
for (let i = 0; i < max; i++) {
let entry = entries[i];
@@ -278,8 +280,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let omitted = entries.length - max;
if (omitted > 0) {
let tr = document.createElement("tr");
- let td = td(tr, 'Omitted ' + omitted + " entries.");
- td.colSpan = 4;
+ let tdNode = td(tr, 'Omitted ' + omitted + " entries.");
+ tdNode.colSpan = 4;
fragment.appendChild(tr);
}
parent.appendChild(fragment);
diff --git a/deps/v8/tools/ic-processor b/deps/v8/tools/ic-processor
index f41b447174..c33052cdf6 100755
--- a/deps/v8/tools/ic-processor
+++ b/deps/v8/tools/ic-processor
@@ -36,6 +36,6 @@ fi
cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
$tools_path/profile.js $tools_path/profile_view.js \
- $tools_path/logreader.js $tools_path/ic-processor.js \
- $tools_path/SourceMap.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/ic-processor.js $tools_path/SourceMap.js \
$tools_path/ic-processor-driver.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/ic-processor-driver.js b/deps/v8/tools/ic-processor-driver.js
index 58c608d020..2aa52006c7 100644
--- a/deps/v8/tools/ic-processor-driver.js
+++ b/deps/v8/tools/ic-processor-driver.js
@@ -12,7 +12,7 @@ function processArguments(args) {
}
function initSourceMapSupport() {
- // Pull dev tools source maps into our name space.
+ // Pull dev tools source maps into our name space.
SourceMap = WebInspector.SourceMap;
// Overwrite the load function to load scripts synchronously.
diff --git a/deps/v8/tools/ic-processor.js b/deps/v8/tools/ic-processor.js
index 9897de2c6c..93f40b38a0 100644
--- a/deps/v8/tools/ic-processor.js
+++ b/deps/v8/tools/ic-processor.js
@@ -156,94 +156,24 @@ IcProcessor.prototype.processPropertyIC = function (
var entry = this.profile_.findEntry(pc);
print(type + " (" + old_state + "->" + new_state + modifier + ") at " +
this.formatName(entry) + ":" + line + ":" + column + " " + name +
- " (map 0x" + map.toString(16) + ")");
+ " (map 0x" + map.toString(16) + ")" +
+ (slow_reason ? " " + slow_reason : ""));
}
-function padLeft(s, len) {
- s = s.toString();
- if (s.length < len) {
- var padLength = len - s.length;
- if (!(padLength in padLeft)) {
- padLeft[padLength] = new Array(padLength + 1).join(' ');
- }
- s = padLeft[padLength] + s;
- }
- return s;
-};
-
-
-function ArgumentsProcessor(args) {
- this.args_ = args;
- this.result_ = ArgumentsProcessor.DEFAULTS;
-
- this.argsDispatch_ = {
- '--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'],
- '--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output']
- };
-};
-
-ArgumentsProcessor.DEFAULTS = {
- logFileName: 'v8.log',
- range: 'auto,auto',
-};
-
-
-ArgumentsProcessor.prototype.parse = function() {
- while (this.args_.length) {
- var arg = this.args_.shift();
- if (arg.charAt(0) != '-') {
- this.result_.logFileName = arg;
- continue;
- }
- var userValue = null;
- var eqPos = arg.indexOf('=');
- if (eqPos != -1) {
- userValue = arg.substr(eqPos + 1);
- arg = arg.substr(0, eqPos);
- }
- if (arg in this.argsDispatch_) {
- var dispatch = this.argsDispatch_[arg];
- this.result_[dispatch[0]] = userValue == null ? dispatch[1] : userValue;
- } else {
- return false;
- }
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ return {
+ '--range': ['range', 'auto,auto',
+ 'Specify the range limit as [start],[end]'],
+ '--source-map': ['sourceMap', null,
+ 'Specify the source map that should be used for output']
+ };
}
- return true;
-};
-
-
-ArgumentsProcessor.prototype.result = function() {
- return this.result_;
-};
-
-
-ArgumentsProcessor.prototype.printUsageAndExit = function() {
-
- function padRight(s, len) {
- s = s.toString();
- if (s.length < len) {
- s = s + (new Array(len - s.length + 1).join(' '));
- }
- return s;
- }
-
- print('Cmdline args: [options] [log-file-name]\n' +
- 'Default log file name is "' +
- ArgumentsProcessor.DEFAULTS.logFileName + '".\n');
- print('Options:');
- for (var arg in this.argsDispatch_) {
- var synonyms = [arg];
- var dispatch = this.argsDispatch_[arg];
- for (var synArg in this.argsDispatch_) {
- if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
- synonyms.push(synArg);
- delete this.argsDispatch_[synArg];
- }
- }
- print(' ' + padRight(synonyms.join(', '), 20) + " " + dispatch[2]);
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ range: 'auto,auto',
+ };
}
- quit(2);
-};
+}
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 7c92a4ef6e..105be0c1b6 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -125,6 +125,9 @@ def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
end = end + 1
# Remember to add the last match.
add_arg(lines[last_match:end-1])
+ if arg_index[0] < len(macro.args) -1:
+ lineno = lines.count(os.linesep, 0, start) + 1
+ raise Error('line %s: Too few arguments for macro "%s"' % (lineno, name_pattern.pattern))
result = macro.expand(mapping)
# Replace the occurrence of the macro with the expansion
lines = lines[:start] + result + lines[end:]
diff --git a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py b/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
index 19eff02438..2925213ced 100644
--- a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
+++ b/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
@@ -18,5 +18,3 @@ if re.search(r'\bjsfunfuzz=1', os.environ.get('GYP_DEFINES', '')):
'-s', SHA1_PATH,
'--platform=linux*'
])
-else:
- print 'Skipping jsfunfuzz download as jsfunfuzz is not set in gyp flags.'
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 0b491c3633..705e07d514 100755
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -37,6 +37,6 @@ cat $log_file | $d8_exec --enable-os-system \
$tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
$tools_path/profile.js $tools_path/profile_view.js \
- $tools_path/logreader.js $tools_path/tickprocessor.js \
- $tools_path/SourceMap.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/tickprocessor.js $tools_path/SourceMap.js \
$tools_path/tickprocessor-driver.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/memory/asan/blacklist_win.txt b/deps/v8/tools/memory/asan/blacklist_win.txt
new file mode 100644
index 0000000000..2bb1aa9714
--- /dev/null
+++ b/deps/v8/tools/memory/asan/blacklist_win.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules. \ No newline at end of file
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 7c7da243b5..1a49223996 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -129,8 +129,8 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
int main(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeICUDefaultLocation(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv[0]);
@@ -184,7 +184,6 @@ int main(int argc, char* argv[]) {
}
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
delete create_params.array_buffer_allocator;
return 0;
}
diff --git a/deps/v8/tools/perf/statistics-for-json.R b/deps/v8/tools/perf/statistics-for-json.R
index fde2cd75db..b731ccc5d3 100644
--- a/deps/v8/tools/perf/statistics-for-json.R
+++ b/deps/v8/tools/perf/statistics-for-json.R
@@ -8,9 +8,9 @@
# To use the script, first get some benchmark results, for example via
# tools/run_perf.py ../v8-perf/benchmarks/Octane2.1/Octane2.1-TF.json
-# --outdir=out/x64.release-on --outdir-no-patch=out/x64.release-off
+# --outdir=out/x64.release-on --outdir-secondary=out/x64.release-off
# --json-test-results=results-on.json
-# --json-test-results-no-patch=results-off.json
+# --json-test-results-secondary=results-off.json
# then run this script
# Rscript statistics-for-json.R results-on.json results-off.json ~/SVG
# to produce graphs (and get stdio output of statistical tests).
diff --git a/deps/v8/tools/plot-timer-events b/deps/v8/tools/plot-timer-events
index b65937cfe6..3294e85862 100755
--- a/deps/v8/tools/plot-timer-events
+++ b/deps/v8/tools/plot-timer-events
@@ -78,8 +78,9 @@ fi
cat $log_file |
$d8_exec $tools_path/csvparser.js $tools_path/splaytree.js \
$tools_path/codemap.js $tools_path/profile.js $tools_path/profile_view.js \
- $tools_path/logreader.js $tools_path/tickprocessor.js \
- $tools_path/profviz/composer.js $tools_path/profviz/stdio.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/tickprocessor.js$tools_path/profviz/composer.js \
+ $tools_path/profviz/stdio.js \
-- $@ $options 2>/dev/null > timer-events.plot
success=$?
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index c4ee310ce9..2290422459 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -54,15 +54,11 @@ from testrunner.local import utils
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
-# TODO(bmeurer): Fix and re-enable readability/check
-# http://crrev.com/2199323003 relands.
LINT_RULES = """
-build/header_guard
-build/include_what_you_use
--readability/check
-readability/fn_size
-+readability/streams
-runtime/references
""".split()
@@ -512,11 +508,30 @@ class StatusFilesProcessor(SourceFileProcessor):
return True
def GetPathsToSearch(self):
- return ['test']
+ return ['test', 'tools/testrunner']
def ProcessFiles(self, files):
+ success = True
+ for status_file_path in sorted(self._GetStatusFiles(files)):
+ success &= statusfile.PresubmitCheck(status_file_path)
+ success &= _CheckStatusFileForDuplicateKeys(status_file_path)
+ return success
+
+ def _GetStatusFiles(self, files):
test_path = join(dirname(TOOLS_PATH), 'test')
- status_files = set([])
+ testrunner_path = join(TOOLS_PATH, 'testrunner')
+ status_files = set()
+
+ for file_path in files:
+ if file_path.startswith(testrunner_path):
+ for suitepath in os.listdir(test_path):
+ suitename = os.path.basename(suitepath)
+ status_file = os.path.join(
+ test_path, suitename, suitename + ".status")
+ if os.path.exists(status_file):
+ status_files.add(status_file)
+ return status_files
+
for file_path in files:
if file_path.startswith(test_path):
# Strip off absolute path prefix pointing to test suites.
@@ -530,12 +545,7 @@ class StatusFilesProcessor(SourceFileProcessor):
if not os.path.exists(status_file):
continue
status_files.add(status_file)
-
- success = True
- for status_file_path in sorted(status_files):
- success &= statusfile.PresubmitCheck(status_file_path)
- success &= _CheckStatusFileForDuplicateKeys(status_file_path)
- return success
+ return status_files
def CheckDeps(workspace):
diff --git a/deps/v8/tools/profview/profile-utils.js b/deps/v8/tools/profview/profile-utils.js
index 3ccf13ab7f..f5a85bed8d 100644
--- a/deps/v8/tools/profview/profile-utils.js
+++ b/deps/v8/tools/profview/profile-utils.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-"use strict"
+"use strict";
let codeKinds = [
"UNKNOWN",
@@ -80,13 +80,13 @@ function resolveCodeKindAndVmState(code, vmState) {
function codeEquals(code1, code2, allowDifferentKinds = false) {
if (!code1 || !code2) return false;
- if (code1.name != code2.name || code1.type != code2.type) return false;
+ if (code1.name !== code2.name || code1.type !== code2.type) return false;
- if (code1.type == 'CODE') {
- if (!allowDifferentKinds && code1.kind != code2.kind) return false;
- } else if (code1.type == 'JS') {
- if (!allowDifferentKinds && code1.kind != code2.kind) return false;
- if (code1.func != code2.func) return false;
+ if (code1.type === 'CODE') {
+ if (!allowDifferentKinds && code1.kind !== code2.kind) return false;
+ } else if (code1.type === 'JS') {
+ if (!allowDifferentKinds && code1.kind !== code2.kind) return false;
+ if (code1.func !== code2.func) return false;
}
return true;
}
@@ -409,7 +409,7 @@ class CategorySampler {
let { tm : timestamp, vm : vmState, s : stack } = file.ticks[tickIndex];
let i = Math.floor((timestamp - this.firstTime) / this.step);
- if (i == this.buckets.length) i--;
+ if (i === this.buckets.length) i--;
console.assert(i >= 0 && i < this.buckets.length);
let bucket = this.buckets[i];
@@ -440,7 +440,7 @@ class FunctionTimelineProcessor {
// ignoring any filtered entries.
let stackCode = undefined;
let functionPosInStack = -1;
- let filteredI = 0
+ let filteredI = 0;
for (let i = 0; i < stack.length - 1; i += 2) {
let codeId = stack[i];
let code = codeId >= 0 ? file.code[codeId] : undefined;
@@ -461,7 +461,7 @@ class FunctionTimelineProcessor {
if (functionPosInStack >= 0) {
let stackKind = resolveCodeKindAndVmState(stackCode, vmState);
- let codeIsTopOfStack = (functionPosInStack == 0);
+ let codeIsTopOfStack = (functionPosInStack === 0);
if (this.currentBlock !== null) {
this.currentBlock.end = timestamp;
diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js
index 96a6a68328..d480cd4a77 100644
--- a/deps/v8/tools/profview/profview.js
+++ b/deps/v8/tools/profview/profview.js
@@ -49,7 +49,7 @@ let main = {
currentState : emptyState(),
setMode(mode) {
- if (mode != main.currentState.mode) {
+ if (mode !== main.currentState.mode) {
function setCallTreeModifiers(attribution, categories, sort) {
let callTreeState = Object.assign({}, main.currentState.callTree);
@@ -84,7 +84,7 @@ let main = {
},
setCallTreeAttribution(attribution) {
- if (attribution != main.currentState.attribution) {
+ if (attribution !== main.currentState.attribution) {
let callTreeState = Object.assign({}, main.currentState.callTree);
callTreeState.attribution = attribution;
main.currentState = setCallTreeState(main.currentState, callTreeState);
@@ -93,7 +93,7 @@ let main = {
},
setCallTreeSort(sort) {
- if (sort != main.currentState.sort) {
+ if (sort !== main.currentState.sort) {
let callTreeState = Object.assign({}, main.currentState.callTree);
callTreeState.sort = sort;
main.currentState = setCallTreeState(main.currentState, callTreeState);
@@ -102,7 +102,7 @@ let main = {
},
setCallTreeCategories(categories) {
- if (categories != main.currentState.categories) {
+ if (categories !== main.currentState.categories) {
let callTreeState = Object.assign({}, main.currentState.callTree);
callTreeState.categories = categories;
main.currentState = setCallTreeState(main.currentState, callTreeState);
@@ -111,8 +111,8 @@ let main = {
},
setViewInterval(start, end) {
- if (start != main.currentState.start ||
- end != main.currentState.end) {
+ if (start !== main.currentState.start ||
+ end !== main.currentState.end) {
main.currentState = Object.assign({}, main.currentState);
main.currentState.start = start;
main.currentState.end = end;
@@ -121,8 +121,8 @@ let main = {
},
setTimeLineDimensions(width, height) {
- if (width != main.currentState.timeLine.width ||
- height != main.currentState.timeLine.height) {
+ if (width !== main.currentState.timeLine.width ||
+ height !== main.currentState.timeLine.height) {
let timeLine = Object.assign({}, main.currentState.timeLine);
timeLine.width = width;
timeLine.height = height;
@@ -133,7 +133,7 @@ let main = {
},
setFile(file) {
- if (file != main.currentState.file) {
+ if (file !== main.currentState.file) {
main.currentState = Object.assign({}, main.currentState);
main.currentState.file = file;
main.delayRender();
@@ -141,7 +141,7 @@ let main = {
},
setCurrentCode(codeId) {
- if (codeId != main.currentState.currentCodeId) {
+ if (codeId !== main.currentState.currentCodeId) {
main.currentState = Object.assign({}, main.currentState);
main.currentState.currentCodeId = codeId;
main.delayRender();
@@ -235,7 +235,7 @@ let bucketDescriptors =
text : "Unknown" }
];
-let kindToBucketDescriptor = {}
+let kindToBucketDescriptor = {};
for (let i = 0; i < bucketDescriptors.length; i++) {
let bucket = bucketDescriptors[i];
for (let j = 0; j < bucket.kinds.length; j++) {
@@ -335,11 +335,11 @@ function createTableExpander(indent) {
}
function createFunctionNode(name, codeId) {
- if (codeId == -1) {
+ if (codeId === -1) {
return document.createTextNode(name);
}
let nameElement = document.createElement("span");
- nameElement.classList.add("codeid-link")
+ nameElement.classList.add("codeid-link");
nameElement.onclick = function() {
main.setCurrentCode(codeId);
};
@@ -377,13 +377,13 @@ class CallTreeView {
if (c1.ticks < c2.ticks) return 1;
else if (c1.ticks > c2.ticks) return -1;
return c2.ownTicks - c1.ownTicks;
- }
+ };
case "own-time":
return (c1, c2) => {
if (c1.ownTicks < c2.ownTicks) return 1;
else if (c1.ownTicks > c2.ownTicks) return -1;
return c2.ticks - c1.ticks;
- }
+ };
case "category-time":
return (c1, c2) => {
if (c1.type === c2.type) return c2.ticks - c1.ticks;
@@ -439,7 +439,7 @@ class CallTreeView {
let row = this.rows.insertRow(index);
row.id = id + i + "/";
- if (node.type != "CAT") {
+ if (node.type !== "CAT") {
row.style.backgroundColor = bucketFromKind(node.type).backgroundColor;
}
@@ -631,7 +631,7 @@ class CallTreeView {
} else {
console.assert(mode === "bottom-up");
- if (this.currentState.callTree.categories == "none") {
+ if (this.currentState.callTree.categories === "none") {
stackProcessor =
new PlainCallTreeProcessor(filter, true);
} else {
diff --git a/deps/v8/tools/profviz/profviz.js b/deps/v8/tools/profviz/profviz.js
index 8ac0881eb6..a7593a6f65 100644
--- a/deps/v8/tools/profviz/profviz.js
+++ b/deps/v8/tools/profviz/profviz.js
@@ -33,6 +33,7 @@ var worker_scripts = [
"../profile.js",
"../profile_view.js",
"../logreader.js",
+ "../arguments.js",
"../tickprocessor.js",
"composer.js",
"gnuplot-4.6.3-emscripten.js"
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index da4cc7efea..b27675e60c 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -159,6 +159,7 @@ class UploadCL(Step):
force=True,
bypass_hooks=True,
cq=self._options.use_commit_queue,
+ cq_dry_run=self._options.use_dry_run,
cwd=cwd)
print "CL uploaded."
else:
@@ -195,9 +196,13 @@ class AutoRoll(ScriptsBase):
"specified."),
parser.add_argument("--roll", help="Deprecated.",
default=True, action="store_true")
- parser.add_argument("--use-commit-queue",
- help="Check the CQ bit on upload.",
- default=True, action="store_true")
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("--use-commit-queue",
+ help="Trigger the CQ full run on upload.",
+ default=False, action="store_true")
+ group.add_argument("--use-dry-run",
+ help="Trigger the CQ dry run on upload.",
+ default=True, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
index 0fdffd93ac..8af835136b 100755
--- a/deps/v8/tools/release/check_clusterfuzz.py
+++ b/deps/v8/tools/release/check_clusterfuzz.py
@@ -214,7 +214,8 @@ def Main():
issues = APIRequest(key, **args)
assert issues is not None
for issue in issues:
- if re.match(spec["crash_state"], issue["crash_state"]):
+ if (re.match(spec["crash_state"], issue["crash_state"]) and
+ not issue.get('has_bug_flag')):
results.append(issue["id"])
if options.results_file:
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index d831aa3a20..9dedae8a93 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -206,7 +206,8 @@ class GitRecipesMixin(object):
self.Git(MakeArgs(args), **kwargs)
def GitUpload(self, reviewer="", author="", force=False, cq=False,
- bypass_hooks=False, cc="", private=False, **kwargs):
+ cq_dry_run=False, bypass_hooks=False, cc="", private=False,
+ **kwargs):
args = ["cl upload --send-mail"]
if author:
args += ["--email", Quoted(author)]
@@ -216,6 +217,8 @@ class GitRecipesMixin(object):
args.append("-f")
if cq:
args.append("--use-commit-queue")
+ if cq_dry_run:
+ args.append("--cq-dry-run")
if bypass_hooks:
args.append("--bypass-hooks")
if cc:
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 42bbd5a0a1..759012d833 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -1119,7 +1119,8 @@ deps = {
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
- "--use-commit-queue --bypass-hooks --gerrit", "", cwd=chrome_dir),
+ "--cq-dry-run --bypass-hooks --gerrit", "",
+ cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
]
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/.gitignore b/deps/v8/tools/release/testdata/node/deps/v8/.gitignore
new file mode 100644
index 0000000000..23c2024827
--- /dev/null
+++ b/deps/v8/tools/release/testdata/node/deps/v8/.gitignore
@@ -0,0 +1,7 @@
+/unrelated
+/testing/gtest/*
+!/testing/gtest/include
+/testing/gtest/include/*
+!/testing/gtest/include/gtest
+/testing/gtest/include/gtest/*
+!/testing/gtest/include/gtest/gtest_prod.h
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me b/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me
new file mode 100644
index 0000000000..eb1ae458f8
--- /dev/null
+++ b/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me
@@ -0,0 +1 @@
+...
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo b/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo
new file mode 100644
index 0000000000..eb1ae458f8
--- /dev/null
+++ b/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo
@@ -0,0 +1 @@
+...
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/delete_me b/deps/v8/tools/release/testdata/node/deps/v8/delete_me
new file mode 100644
index 0000000000..eb1ae458f8
--- /dev/null
+++ b/deps/v8/tools/release/testdata/node/deps/v8/delete_me
@@ -0,0 +1 @@
+...
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h b/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h
new file mode 100644
index 0000000000..fe8b2712e3
--- /dev/null
+++ b/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INCLUDE_VERSION_H_ // V8_VERSION_H_ conflicts with src/version.h
+#define V8_INCLUDE_VERSION_H_
+
+// These macros define the version number for the current version.
+// NOTE these macros are used by some of the tool scripts and the build
+// system so their names cannot be changed without changing the scripts.
+#define V8_MAJOR_VERSION 1
+#define V8_MINOR_VERSION 2
+#define V8_BUILD_NUMBER 3
+#define V8_PATCH_LEVEL 4321
+
+// Use 1 for candidates and 0 otherwise.
+// (Boolean macro values are not supported by all preprocessors.)
+#define V8_IS_CANDIDATE_VERSION 0
+
+#endif // V8_INCLUDE_VERSION_H_
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/v8_foo b/deps/v8/tools/release/testdata/node/deps/v8/v8_foo
new file mode 100644
index 0000000000..eb1ae458f8
--- /dev/null
+++ b/deps/v8/tools/release/testdata/node/deps/v8/v8_foo
@@ -0,0 +1 @@
+...
diff --git a/deps/v8/tools/release/update_node.py b/deps/v8/tools/release/update_node.py
index 5ce32e4ec2..d060e5c615 100755
--- a/deps/v8/tools/release/update_node.py
+++ b/deps/v8/tools/release/update_node.py
@@ -28,6 +28,7 @@ import os
import shutil
import subprocess
import sys
+import stat
TARGET_SUBDIR = os.path.join("deps", "v8")
@@ -61,7 +62,11 @@ def UninitGit(path):
target = os.path.join(path, ".git")
if os.path.isdir(target):
print ">> Cleaning up %s" % path
- shutil.rmtree(target)
+ def OnRmError(func, path, exec_info):
+ # This might happen on Windows
+ os.chmod(path, stat.S_IWRITE)
+ os.unlink(path)
+ shutil.rmtree(target, onerror=OnRmError)
def CommitPatch(options):
"""Makes a dummy commit for the changes in the index.
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 1f50e02602..ac2344b530 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -1,492 +1,14 @@
#!/usr/bin/env python
#
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
-import json
-import math
-import multiprocessing
-import optparse
-import os
-from os.path import join
-import random
-import shlex
-import subprocess
import sys
-import time
-
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.local import verbose
-from testrunner.objects import context
-
-
-# Base dir of the v8 checkout to be used as cwd.
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-ARCH_GUESS = utils.DefaultArch()
-DEFAULT_TESTS = ["mjsunit", "webkit"]
-TIMEOUT_DEFAULT = 60
-TIMEOUT_SCALEFACTOR = {"debug" : 4,
- "release" : 1 }
-
-MODE_FLAGS = {
- "debug" : ["--nohard-abort", "--enable-slow-asserts",
- "--verify-heap", "--noconcurrent-recompilation"],
- "release" : ["--nohard-abort", "--noconcurrent-recompilation"]}
-
-SUPPORTED_ARCHS = ["android_arm",
- "android_ia32",
- "arm",
- "ia32",
- "ppc",
- "ppc64",
- "s390",
- "s390x",
- "mipsel",
- "x64"]
-# Double the timeout for these:
-SLOW_ARCHS = ["android_arm",
- "android_ia32",
- "arm",
- "mipsel"]
-MAX_DEOPT = 1000000000
-DISTRIBUTION_MODES = ["smooth", "random"]
-
-
-class RandomDistribution:
- def __init__(self, seed=None):
- seed = seed or random.randint(1, sys.maxint)
- print "Using random distribution with seed %d" % seed
- self._random = random.Random(seed)
-
- def Distribute(self, n, m):
- if n > m:
- n = m
- return self._random.sample(xrange(1, m + 1), n)
-
-
-class SmoothDistribution:
- """Distribute n numbers into the interval [1:m].
- F1: Factor of the first derivation of the distribution function.
- F2: Factor of the second derivation of the distribution function.
- With F1 and F2 set to 0, the distribution will be equal.
- """
- def __init__(self, factor1=2.0, factor2=0.2):
- self._factor1 = factor1
- self._factor2 = factor2
-
- def Distribute(self, n, m):
- if n > m:
- n = m
- if n <= 1:
- return [ 1 ]
-
- result = []
- x = 0.0
- dx = 1.0
- ddx = self._factor1
- dddx = self._factor2
- for i in range(0, n):
- result += [ x ]
- x += dx
- dx += ddx
- ddx += dddx
-
- # Project the distribution into the interval [0:M].
- result = [ x * m / result[-1] for x in result ]
-
- # Equalize by n. The closer n is to m, the more equal will be the
- # distribution.
- for (i, x) in enumerate(result):
- # The value of x if it was equally distributed.
- equal_x = i / float(n - 1) * float(m - 1) + 1
-
- # Difference factor between actual and equal distribution.
- diff = 1 - (x / equal_x)
-
- # Equalize x dependent on the number of values to distribute.
- result[i] = int(x + (i + 1) * diff)
- return result
-
-
-def Distribution(options):
- if options.distribution_mode == "random":
- return RandomDistribution(options.seed)
- if options.distribution_mode == "smooth":
- return SmoothDistribution(options.distribution_factor1,
- options.distribution_factor2)
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("--arch",
- help=("The architecture to run tests for, "
- "'auto' or 'native' for auto-detect"),
- default="ia32,x64,arm")
- result.add_option("--arch-and-mode",
- help="Architecture and mode in the format 'arch.mode'",
- default=None)
- result.add_option("--asan",
- help="Regard test expectations for ASAN",
- default=False, action="store_true")
- result.add_option("--buildbot",
- help="Adapt to path structure used on buildbots",
- default=False, action="store_true")
- result.add_option("--dcheck-always-on",
- help="Indicates that V8 was compiled with DCHECKs enabled",
- default=False, action="store_true")
- result.add_option("--command-prefix",
- help="Prepended to each shell command used to run a test",
- default="")
- result.add_option("--coverage", help=("Exponential test coverage "
- "(range 0.0, 1.0) -- 0.0: one test, 1.0 all tests (slow)"),
- default=0.4, type="float")
- result.add_option("--coverage-lift", help=("Lifts test coverage for tests "
- "with a small number of deopt points (range 0, inf)"),
- default=20, type="int")
- result.add_option("--download-data", help="Download missing test suite data",
- default=False, action="store_true")
- result.add_option("--distribution-factor1", help=("Factor of the first "
- "derivation of the distribution function"), default=2.0,
- type="float")
- result.add_option("--distribution-factor2", help=("Factor of the second "
- "derivation of the distribution function"), default=0.7,
- type="float")
- result.add_option("--distribution-mode", help=("How to select deopt points "
- "for a given test (smooth|random)"),
- default="smooth")
- result.add_option("--dump-results-file", help=("Dump maximum number of "
- "deopt points per test to a file"))
- result.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- default="")
- result.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- result.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- result.add_option("-m", "--mode",
- help="The test modes in which to run (comma-separated)",
- default="release,debug")
- result.add_option("--outdir", help="Base directory with compile output",
- default="out")
- result.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
- result.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- result.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
- result.add_option("--shell-dir", help="Directory containing executables",
- default="")
- result.add_option("--seed", help="The seed for the random distribution",
- type="int")
- result.add_option("-t", "--timeout", help="Timeout in seconds",
- default= -1, type="int")
- result.add_option("-v", "--verbose", help="Verbose output",
- default=False, action="store_true")
- result.add_option("--random-seed", default=0, dest="random_seed",
- help="Default seed for initializing random generator")
- return result
-
-
-def ProcessOptions(options):
- global VARIANT_FLAGS
-
- # Architecture and mode related stuff.
- if options.arch_and_mode:
- tokens = options.arch_and_mode.split(".")
- options.arch = tokens[0]
- options.mode = tokens[1]
- options.mode = options.mode.split(",")
- for mode in options.mode:
- if not mode.lower() in ["debug", "release"]:
- print "Unknown mode %s" % mode
- return False
- if options.arch in ["auto", "native"]:
- options.arch = ARCH_GUESS
- options.arch = options.arch.split(",")
- for arch in options.arch:
- if not arch in SUPPORTED_ARCHS:
- print "Unknown architecture %s" % arch
- return False
-
- # Special processing of other options, sorted alphabetically.
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = shlex.split(options.extra_flags)
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
- while options.random_seed == 0:
- options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
- if not options.distribution_mode in DISTRIBUTION_MODES:
- print "Unknown distribution mode %s" % options.distribution_mode
- return False
- if options.distribution_factor1 < 0.0:
- print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
- % options.distribution_factor1)
- options.distribution_factor1 = 0.0
- if options.distribution_factor2 < 0.0:
- print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
- % options.distribution_factor2)
- options.distribution_factor2 = 0.0
- if options.coverage < 0.0 or options.coverage > 1.0:
- print ("Coverage %s is out of range. Defaulting to 0.4"
- % options.coverage)
- options.coverage = 0.4
- if options.coverage_lift < 0:
- print ("Coverage lift %s is out of range. Defaulting to 0"
- % options.coverage_lift)
- options.coverage_lift = 0
- return True
-
-
-def ShardTests(tests, shard_count, shard_run):
- if shard_count < 2:
- return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
-
-def Main():
- # Use the v8 root as cwd as some test cases use "load" with relative paths.
- os.chdir(BASE_DIR)
-
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- return 1
-
- exit_code = 0
-
- suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
-
- if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
- else:
- args_suites = set()
- for arg in args:
- suite = arg.split(os.path.sep)[0]
- if not suite in args_suites:
- args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- if options.download_data:
- for s in suites:
- s.DownloadData()
-
- for mode in options.mode:
- for arch in options.arch:
- try:
- code = Execute(arch, mode, args, options, suites, BASE_DIR)
- exit_code = exit_code or code
- except KeyboardInterrupt:
- return 2
- return exit_code
-
-
-def CalculateNTests(m, options):
- """Calculates the number of tests from m deopt points with exponential
- coverage.
- The coverage is expected to be between 0.0 and 1.0.
- The 'coverage lift' lifts the coverage for tests with smaller m values.
- """
- c = float(options.coverage)
- l = float(options.coverage_lift)
- return int(math.pow(m, (m * c + l) / (m + l)))
-
-
-def Execute(arch, mode, args, options, suites, workspace):
- print(">>> Running tests for %s.%s" % (arch, mode))
-
- dist = Distribution(options)
-
- shell_dir = options.shell_dir
- if not shell_dir:
- if options.buildbot:
- shell_dir = os.path.join(workspace, options.outdir, mode)
- mode = mode.lower()
- else:
- shell_dir = os.path.join(workspace, options.outdir,
- "%s.%s" % (arch, mode))
- shell_dir = os.path.relpath(shell_dir)
-
- # Populate context object.
- mode_flags = MODE_FLAGS[mode]
- timeout = options.timeout
- if timeout == -1:
- # Simulators are slow, therefore allow a longer default timeout.
- if arch in SLOW_ARCHS:
- timeout = 2 * TIMEOUT_DEFAULT;
- else:
- timeout = TIMEOUT_DEFAULT;
-
- timeout *= TIMEOUT_SCALEFACTOR[mode]
- ctx = context.Context(arch, mode, shell_dir,
- mode_flags, options.verbose,
- timeout, options.isolates,
- options.command_prefix,
- options.extra_flags,
- False, # Keep i18n on by default.
- options.random_seed,
- True, # No sorting of test cases.
- 0, # Don't rerun failing tests.
- 0, # No use of a rerun-failing-tests maximum.
- False, # No predictable mode.
- False, # No no_harness mode.
- False, # Don't use perf data.
- False) # Coverage not supported.
-
- # Find available test suites and read test cases from them.
- variables = {
- "arch": arch,
- "asan": options.asan,
- "deopt_fuzzer": True,
- "gc_stress": False,
- "gcov_coverage": False,
- "isolates": options.isolates,
- "mode": mode,
- "no_i18n": False,
- "no_snap": False,
- "simulator": utils.UseSimulator(arch),
- "system": utils.GuessOS(),
- "tsan": False,
- "msan": False,
- "dcheck_always_on": options.dcheck_always_on,
- "novfp3": False,
- "predictable": False,
- "byteorder": sys.byteorder,
- "no_harness": False,
- "ubsan_vptr": False,
- }
- all_tests = []
- num_tests = 0
- test_id = 0
-
- # Remember test case prototypes for the fuzzing phase.
- test_backup = dict((s, []) for s in suites)
-
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- all_tests += s.tests
- s.FilterTestCasesByStatus(False)
- test_backup[s] = s.tests
- analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
- "--print-deopt-stress"]
- s.tests = [ t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests ]
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- test_id += 1
-
- if num_tests == 0:
- print "No tests to run."
- return 0
-
- print(">>> Collection phase")
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- exit_code = runner.Run(options.j)
-
- print(">>> Analysis phase")
- num_tests = 0
- test_id = 0
- for s in suites:
- test_results = {}
- for t in s.tests:
- for line in t.output.stdout.splitlines():
- if line.startswith("=== Stress deopt counter: "):
- test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
- for t in s.tests:
- if t.path not in test_results:
- print "Missing results for %s" % t.path
- if options.dump_results_file:
- results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
- with file("%s.%d.txt" % (dump_results_file, time.time()), "w") as f:
- f.write(json.dumps(results_dict))
-
- # Reset tests and redistribute the prototypes from the collection phase.
- s.tests = []
- if options.verbose:
- print "Test distributions:"
- for t in test_backup[s]:
- max_deopt = test_results.get(t.path, 0)
- if max_deopt == 0:
- continue
- n_deopt = CalculateNTests(max_deopt, options)
- distribution = dist.Distribute(n_deopt, max_deopt)
- if options.verbose:
- print "%s %s" % (t.path, distribution)
- for i in distribution:
- fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
- s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- test_id += 1
-
- if num_tests == 0:
- print "No tests to run."
- return 0
-
- print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
- code = runner.Run(options.j)
- return exit_code or code
+from testrunner import deopt_fuzzer
if __name__ == "__main__":
- sys.exit(Main())
+ sys.exit(deopt_fuzzer.DeoptFuzzer().execute())
diff --git a/deps/v8/tools/run-gc-fuzzer.py b/deps/v8/tools/run-gc-fuzzer.py
new file mode 100755
index 0000000000..6311d4fd29
--- /dev/null
+++ b/deps/v8/tools/run-gc-fuzzer.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import sys
+
+from testrunner import gc_fuzzer
+
+
+if __name__ == "__main__":
+ sys.exit(gc_fuzzer.GCFuzzer().execute())
diff --git a/deps/v8/tools/run-valgrind.gyp b/deps/v8/tools/run-num-fuzzer.gyp
index 02dd26d22c..bd3b9d6423 100644
--- a/deps/v8/tools/run-valgrind.gyp
+++ b/deps/v8/tools/run-num-fuzzer.gyp
@@ -1,4 +1,4 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,7 +7,7 @@
['test_isolation_mode != "noop"', {
'targets': [
{
- 'target_name': 'run_valgrind_run',
+ 'target_name': 'run_num_fuzzer_run',
'type': 'none',
'dependencies': [
'../src/d8.gyp:d8_run',
@@ -17,7 +17,7 @@
'../gypfiles/isolate.gypi',
],
'sources': [
- 'run-valgrind.isolate',
+ 'run-num-fuzzer.isolate',
],
},
],
diff --git a/deps/v8/tools/run-num-fuzzer.isolate b/deps/v8/tools/run-num-fuzzer.isolate
new file mode 100644
index 0000000000..4bd3d8b6c0
--- /dev/null
+++ b/deps/v8/tools/run-num-fuzzer.isolate
@@ -0,0 +1,20 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'run-deopt-fuzzer.py',
+ ],
+ 'files': [
+ 'run-deopt-fuzzer.py',
+ 'run-gc-fuzzer.py',
+ ],
+ },
+ 'includes': [
+ 'testrunner/testrunner.isolate',
+ '../src/d8.isolate',
+ '../test/mjsunit/mjsunit.isolate',
+ '../test/webkit/webkit.isolate',
+ ],
+}
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 2dd3782ae5..2ca9385548 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -1,973 +1,14 @@
#!/usr/bin/env python
#
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
-from collections import OrderedDict
-import itertools
-import json
-import multiprocessing
-import optparse
-import os
-from os.path import getmtime, isdir, join
-import platform
-import random
-import shlex
-import subprocess
import sys
-import time
-
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
-from testrunner.local.variants import ALL_VARIANTS
-from testrunner.local import utils
-from testrunner.local import verbose
-from testrunner.network import network_execution
-from testrunner.objects import context
-
-
-# Base dir of the v8 checkout to be used as cwd.
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-DEFAULT_OUT_GN = "out.gn"
-
-ARCH_GUESS = utils.DefaultArch()
-
-# Map of test name synonyms to lists of test suites. Should be ordered by
-# expected runtimes (suites with slow test cases first). These groups are
-# invoked in separate steps on the bots.
-TEST_MAP = {
- # This needs to stay in sync with test/bot_default.isolate.
- "bot_default": [
- "debugger",
- "mjsunit",
- "cctest",
- "wasm-spec-tests",
- "inspector",
- "webkit",
- "mkgrokdump",
- "fuzzer",
- "message",
- "preparser",
- "intl",
- "unittests",
- ],
- # This needs to stay in sync with test/default.isolate.
- "default": [
- "debugger",
- "mjsunit",
- "cctest",
- "wasm-spec-tests",
- "inspector",
- "mkgrokdump",
- "fuzzer",
- "message",
- "preparser",
- "intl",
- "unittests",
- ],
- # This needs to stay in sync with test/optimize_for_size.isolate.
- "optimize_for_size": [
- "debugger",
- "mjsunit",
- "cctest",
- "inspector",
- "webkit",
- "intl",
- ],
- "unittests": [
- "unittests",
- ],
-}
-
-TIMEOUT_DEFAULT = 60
-
-# Variants ordered by expected runtime (slowest first).
-VARIANTS = ["default"]
-
-MORE_VARIANTS = [
- "stress",
- "stress_incremental_marking",
- "nooptimization",
- "stress_asm_wasm",
- "wasm_traps",
-]
-
-EXHAUSTIVE_VARIANTS = MORE_VARIANTS + VARIANTS
-
-VARIANT_ALIASES = {
- # The default for developer workstations.
- "dev": VARIANTS,
- # Additional variants, run on all bots.
- "more": MORE_VARIANTS,
- # TODO(machenbach): Deprecate this after the step is removed on infra side.
- # Additional variants, run on a subset of bots.
- "extra": [],
-}
-
-DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
-RELEASE_FLAGS = ["--nohard-abort"]
-
-MODES = {
- "debug": {
- "flags": DEBUG_FLAGS,
- "timeout_scalefactor": 4,
- "status_mode": "debug",
- "execution_mode": "debug",
- "output_folder": "debug",
- },
- "optdebug": {
- "flags": DEBUG_FLAGS,
- "timeout_scalefactor": 4,
- "status_mode": "debug",
- "execution_mode": "debug",
- "output_folder": "optdebug",
- },
- "release": {
- "flags": RELEASE_FLAGS,
- "timeout_scalefactor": 1,
- "status_mode": "release",
- "execution_mode": "release",
- "output_folder": "release",
- },
- # Normal trybot release configuration. There, dchecks are always on which
- # implies debug is set. Hence, the status file needs to assume debug-like
- # behavior/timeouts.
- "tryrelease": {
- "flags": RELEASE_FLAGS,
- "timeout_scalefactor": 1,
- "status_mode": "debug",
- "execution_mode": "release",
- "output_folder": "release",
- },
- # This mode requires v8 to be compiled with dchecks and slow dchecks.
- "slowrelease": {
- "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
- "timeout_scalefactor": 2,
- "status_mode": "debug",
- "execution_mode": "release",
- "output_folder": "release",
- },
-}
-
-GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
- "--concurrent-recompilation-queue-length=64",
- "--concurrent-recompilation-delay=500",
- "--concurrent-recompilation"]
-
-SUPPORTED_ARCHS = ["android_arm",
- "android_arm64",
- "android_ia32",
- "android_x64",
- "arm",
- "ia32",
- "mips",
- "mipsel",
- "mips64",
- "mips64el",
- "s390",
- "s390x",
- "ppc",
- "ppc64",
- "x64",
- "x32",
- "arm64"]
-# Double the timeout for these:
-SLOW_ARCHS = ["android_arm",
- "android_arm64",
- "android_ia32",
- "android_x64",
- "arm",
- "mips",
- "mipsel",
- "mips64",
- "mips64el",
- "s390",
- "s390x",
- "arm64"]
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.usage = '%prog [options] [tests]'
- result.description = """TESTS: %s""" % (TEST_MAP["default"])
- result.add_option("--arch",
- help=("The architecture to run tests for, "
- "'auto' or 'native' for auto-detect: %s" % SUPPORTED_ARCHS))
- result.add_option("--arch-and-mode",
- help="Architecture and mode in the format 'arch.mode'")
- result.add_option("--asan",
- help="Regard test expectations for ASAN",
- default=False, action="store_true")
- result.add_option("--sancov-dir",
- help="Directory where to collect coverage data")
- result.add_option("--cfi-vptr",
- help="Run tests with UBSAN cfi_vptr option.",
- default=False, action="store_true")
- result.add_option("--buildbot",
- help="Adapt to path structure used on buildbots",
- default=False, action="store_true")
- result.add_option("--dcheck-always-on",
- help="Indicates that V8 was compiled with DCHECKs enabled",
- default=False, action="store_true")
- result.add_option("--novfp3",
- help="Indicates that V8 was compiled without VFP3 support",
- default=False, action="store_true")
- result.add_option("--cat", help="Print the source of the tests",
- default=False, action="store_true")
- result.add_option("--slow-tests",
- help="Regard slow tests (run|skip|dontcare)",
- default="dontcare")
- result.add_option("--pass-fail-tests",
- help="Regard pass|fail tests (run|skip|dontcare)",
- default="dontcare")
- result.add_option("--gc-stress",
- help="Switch on GC stress mode",
- default=False, action="store_true")
- result.add_option("--gcov-coverage",
- help="Uses executables instrumented for gcov coverage",
- default=False, action="store_true")
- result.add_option("--command-prefix",
- help="Prepended to each shell command used to run a test",
- default="")
- result.add_option("--download-data", help="Download missing test suite data",
- default=False, action="store_true")
- result.add_option("--download-data-only",
- help="Deprecated",
- default=False, action="store_true")
- result.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- action="append", default=[])
- result.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- result.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- result.add_option("-m", "--mode",
- help="The test modes in which to run (comma-separated,"
- " uppercase for ninja and buildbot builds): %s" % MODES.keys())
- result.add_option("--no-harness", "--noharness",
- help="Run without test harness of a given suite",
- default=False, action="store_true")
- result.add_option("--no-i18n", "--noi18n",
- help="Skip internationalization tests",
- default=False, action="store_true")
- result.add_option("--network", help="Distribute tests on the network",
- default=False, dest="network", action="store_true")
- result.add_option("--no-network", "--nonetwork",
- help="Don't distribute tests on the network",
- dest="network", action="store_false")
- result.add_option("--no-presubmit", "--nopresubmit",
- help='Skip presubmit checks (deprecated)',
- default=False, dest="no_presubmit", action="store_true")
- result.add_option("--no-snap", "--nosnap",
- help='Test a build compiled without snapshot.',
- default=False, dest="no_snap", action="store_true")
- result.add_option("--no-sorting", "--nosorting",
- help="Don't sort tests according to duration of last run.",
- default=False, dest="no_sorting", action="store_true")
- result.add_option("--no-variants", "--novariants",
- help="Don't run any testing variants",
- default=False, dest="no_variants", action="store_true")
- result.add_option("--variants",
- help="Comma-separated list of testing variants;"
- " default: \"%s\"" % ",".join(VARIANTS))
- result.add_option("--exhaustive-variants",
- default=False, action="store_true",
- help="Use exhaustive set of default variants:"
- " \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
- result.add_option("--outdir", help="Base directory with compile output",
- default="out")
- result.add_option("--gn", help="Scan out.gn for the last built configuration",
- default=False, action="store_true")
- result.add_option("--predictable",
- help="Compare output of several reruns of each test",
- default=False, action="store_true")
- result.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
- result.add_option("--quickcheck", default=False, action="store_true",
- help=("Quick check mode (skip slow tests)"))
- result.add_option("--report", help="Print a summary of the tests to be run",
- default=False, action="store_true")
- result.add_option("--json-test-results",
- help="Path to a file for storing json results.")
- result.add_option("--flakiness-results",
- help="Path to a file for storing flakiness json.")
- result.add_option("--rerun-failures-count",
- help=("Number of times to rerun each failing test case. "
- "Very slow tests will be rerun only once."),
- default=0, type="int")
- result.add_option("--rerun-failures-max",
- help="Maximum number of failing test cases to rerun.",
- default=100, type="int")
- result.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- result.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
- result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
- result.add_option("--shell-dir", help="Directory containing executables",
- default="")
- result.add_option("--dont-skip-slow-simulator-tests",
- help="Don't skip more slow tests when using a simulator.",
- default=False, action="store_true",
- dest="dont_skip_simulator_slow_tests")
- result.add_option("--swarming",
- help="Indicates running test driver on swarming.",
- default=False, action="store_true")
- result.add_option("--time", help="Print timing information after running",
- default=False, action="store_true")
- result.add_option("-t", "--timeout", help="Timeout in seconds",
- default=TIMEOUT_DEFAULT, type="int")
- result.add_option("--tsan",
- help="Regard test expectations for TSAN",
- default=False, action="store_true")
- result.add_option("-v", "--verbose", help="Verbose output",
- default=False, action="store_true")
- result.add_option("--valgrind", help="Run tests through valgrind",
- default=False, action="store_true")
- result.add_option("--warn-unused", help="Report unused rules",
- default=False, action="store_true")
- result.add_option("--junitout", help="File name of the JUnit output")
- result.add_option("--junittestsuite",
- help="The testsuite name in the JUnit output file",
- default="v8tests")
- result.add_option("--random-seed", default=0, dest="random_seed", type="int",
- help="Default seed for initializing random generator")
- result.add_option("--random-seed-stress-count", default=1, type="int",
- dest="random_seed_stress_count",
- help="Number of runs with different random seeds")
- result.add_option("--ubsan-vptr",
- help="Regard test expectations for UBSanVptr",
- default=False, action="store_true")
- result.add_option("--msan",
- help="Regard test expectations for UBSanVptr",
- default=False, action="store_true")
- return result
-
-
-def RandomSeed():
- seed = 0
- while not seed:
- seed = random.SystemRandom().randint(-2147483648, 2147483647)
- return seed
-
-
-def BuildbotToV8Mode(config):
- """Convert buildbot build configs to configs understood by the v8 runner.
-
- V8 configs are always lower case and without the additional _x64 suffix for
- 64 bit builds on windows with ninja.
- """
- mode = config[:-4] if config.endswith('_x64') else config
- return mode.lower()
-
-def SetupEnvironment(options):
- """Setup additional environment variables."""
-
- # Many tests assume an English interface.
- os.environ['LANG'] = 'en_US.UTF-8'
-
- symbolizer = 'external_symbolizer_path=%s' % (
- os.path.join(
- BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
- 'llvm-symbolizer',
- )
- )
-
- if options.asan:
- asan_options = [symbolizer, "allow_user_segv_handler=1"]
- if not utils.GuessOS() == 'macos':
- # LSAN is not available on mac.
- asan_options.append('detect_leaks=1')
- os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
-
- if options.sancov_dir:
- assert os.path.exists(options.sancov_dir)
- os.environ['ASAN_OPTIONS'] = ":".join([
- 'coverage=1',
- 'coverage_dir=%s' % options.sancov_dir,
- symbolizer,
- "allow_user_segv_handler=1",
- ])
-
- if options.cfi_vptr:
- os.environ['UBSAN_OPTIONS'] = ":".join([
- 'print_stacktrace=1',
- 'print_summary=1',
- 'symbolize=1',
- symbolizer,
- ])
-
- if options.ubsan_vptr:
- os.environ['UBSAN_OPTIONS'] = ":".join([
- 'print_stacktrace=1',
- symbolizer,
- ])
-
- if options.msan:
- os.environ['MSAN_OPTIONS'] = symbolizer
-
- if options.tsan:
- suppressions_file = os.path.join(
- BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
- os.environ['TSAN_OPTIONS'] = " ".join([
- symbolizer,
- 'suppressions=%s' % suppressions_file,
- 'exit_code=0',
- 'report_thread_leaks=0',
- 'history_size=7',
- 'report_destroy_locked=0',
- ])
-
-def ProcessOptions(options):
- global VARIANTS
-
- # First try to auto-detect configurations based on the build if GN was
- # used. This can't be overridden by cmd-line arguments.
- options.auto_detect = False
- if options.gn:
- gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
- latest_timestamp = -1
- latest_config = None
- for gn_config in os.listdir(gn_out_dir):
- gn_config_dir = os.path.join(gn_out_dir, gn_config)
- if not isdir(gn_config_dir):
- continue
- if os.path.getmtime(gn_config_dir) > latest_timestamp:
- latest_timestamp = os.path.getmtime(gn_config_dir)
- latest_config = gn_config
- if latest_config:
- print(">>> Latest GN build found is %s" % latest_config)
- options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)
-
- if options.buildbot:
- build_config_path = os.path.join(
- BASE_DIR, options.outdir, options.mode, "v8_build_config.json")
- else:
- build_config_path = os.path.join(
- BASE_DIR, options.outdir, "v8_build_config.json")
-
- # Auto-detect test configurations based on the build (GN only).
- if os.path.exists(build_config_path):
- try:
- with open(build_config_path) as f:
- build_config = json.load(f)
- except Exception:
- print ("%s exists but contains invalid json. Is your build up-to-date?" %
- build_config_path)
- return False
- options.auto_detect = True
-
- # In auto-detect mode the outdir is always where we found the build config.
- # This ensures that we'll also take the build products from there.
- options.outdir = os.path.dirname(build_config_path)
- options.arch_and_mode = None
- if options.mode:
- # In auto-detect mode we don't use the mode for more path-magic.
- # Therefore transform the buildbot mode here to fit to the GN build
- # config.
- options.mode = BuildbotToV8Mode(options.mode)
-
- # In V8 land, GN's x86 is called ia32.
- if build_config["v8_target_cpu"] == "x86":
- build_config["v8_target_cpu"] = "ia32"
-
- # Update options based on the build config. Sanity check that we're not
- # trying to use inconsistent options.
- for param, value in (
- ('arch', build_config["v8_target_cpu"]),
- ('asan', build_config["is_asan"]),
- ('dcheck_always_on', build_config["dcheck_always_on"]),
- ('gcov_coverage', build_config["is_gcov_coverage"]),
- ('mode', 'debug' if build_config["is_debug"] else 'release'),
- ('msan', build_config["is_msan"]),
- ('no_i18n', not build_config["v8_enable_i18n_support"]),
- ('no_snap', not build_config["v8_use_snapshot"]),
- ('tsan', build_config["is_tsan"]),
- ('ubsan_vptr', build_config["is_ubsan_vptr"])):
- cmd_line_value = getattr(options, param)
- if cmd_line_value not in [None, True, False] and cmd_line_value != value:
- # TODO(machenbach): This is for string options only. Requires options
- # to not have default values. We should make this more modular and
- # implement it in our own version of the option parser.
- print "Attempted to set %s to %s, while build is %s." % (
- param, cmd_line_value, value)
- return False
- if cmd_line_value == True and value == False:
- print "Attempted to turn on %s, but it's not available." % (
- param)
- return False
- if cmd_line_value != value:
- print ">>> Auto-detected %s=%s" % (param, value)
- setattr(options, param, value)
-
- else:
- # Non-GN build without auto-detect. Set default values for missing
- # parameters.
- if not options.mode:
- options.mode = "release,debug"
- if not options.arch:
- options.arch = "ia32,x64,arm"
-
- # Architecture and mode related stuff.
- if options.arch_and_mode:
- options.arch_and_mode = [arch_and_mode.split(".")
- for arch_and_mode in options.arch_and_mode.split(",")]
- options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
- options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
- options.mode = options.mode.split(",")
- for mode in options.mode:
- if not BuildbotToV8Mode(mode) in MODES:
- print "Unknown mode %s" % mode
- return False
- if options.arch in ["auto", "native"]:
- options.arch = ARCH_GUESS
- options.arch = options.arch.split(",")
- for arch in options.arch:
- if not arch in SUPPORTED_ARCHS:
- print "Unknown architecture %s" % arch
- return False
-
- # Store the final configuration in arch_and_mode list. Don't overwrite
- # predefined arch_and_mode since it is more expressive than arch and mode.
- if not options.arch_and_mode:
- options.arch_and_mode = itertools.product(options.arch, options.mode)
-
- # Special processing of other options, sorted alphabetically.
-
- if options.buildbot:
- options.network = False
- if options.command_prefix and options.network:
- print("Specifying --command-prefix disables network distribution, "
- "running tests locally.")
- options.network = False
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
-
- if options.gc_stress:
- options.extra_flags += GC_STRESS_FLAGS
-
- if options.asan:
- options.extra_flags.append("--invoke-weak-callbacks")
- options.extra_flags.append("--omit-quit")
-
- if options.novfp3:
- options.extra_flags.append("--noenable-vfp3")
-
- if options.exhaustive_variants:
- # This is used on many bots. It includes a larger set of default variants.
- # Other options for manipulating variants still apply afterwards.
- VARIANTS = EXHAUSTIVE_VARIANTS
-
- # TODO(machenbach): Figure out how to test a bigger subset of variants on
- # msan and tsan.
- if options.msan:
- VARIANTS = ["default"]
-
- if options.tsan:
- VARIANTS = ["default"]
-
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
-
- if options.random_seed_stress_count <= 1 and options.random_seed == 0:
- options.random_seed = RandomSeed()
-
- def excl(*args):
- """Returns true if zero or one of multiple arguments are true."""
- return reduce(lambda x, y: x + y, args) <= 1
-
- if not excl(options.no_variants, bool(options.variants)):
- print("Use only one of --no-variants or --variants.")
- return False
- if options.quickcheck:
- VARIANTS = ["default", "stress"]
- options.slow_tests = "skip"
- options.pass_fail_tests = "skip"
- if options.no_variants:
- VARIANTS = ["default"]
- if options.variants:
- VARIANTS = options.variants.split(",")
-
- # Resolve variant aliases.
- VARIANTS = reduce(
- list.__add__,
- (VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
- [],
- )
-
- if not set(VARIANTS).issubset(ALL_VARIANTS):
- print "All variants must be in %s" % str(ALL_VARIANTS)
- return False
- if options.predictable:
- VARIANTS = ["default"]
- options.extra_flags.append("--predictable")
- options.extra_flags.append("--verify_predictable")
- options.extra_flags.append("--no-inline-new")
-
- # Dedupe.
- VARIANTS = list(set(VARIANTS))
-
- if not options.shell_dir:
- if options.shell:
- print "Warning: --shell is deprecated, use --shell-dir instead."
- options.shell_dir = os.path.dirname(options.shell)
- if options.valgrind:
- run_valgrind = os.path.join("tools", "run-valgrind.py")
- # This is OK for distributed running, so we don't need to disable network.
- options.command_prefix = (["python", "-u", run_valgrind] +
- options.command_prefix)
- def CheckTestMode(name, option):
- if not option in ["run", "skip", "dontcare"]:
- print "Unknown %s mode %s" % (name, option)
- return False
- return True
- if not CheckTestMode("slow test", options.slow_tests):
- return False
- if not CheckTestMode("pass|fail test", options.pass_fail_tests):
- return False
- if options.no_i18n:
- TEST_MAP["bot_default"].remove("intl")
- TEST_MAP["default"].remove("intl")
- return True
-
-
-def ShardTests(tests, options):
- # Read gtest shard configuration from environment (e.g. set by swarming).
- # If none is present, use values passed on the command line.
- shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
- shard_run = os.environ.get('GTEST_SHARD_INDEX')
- if shard_run is not None:
- # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
- shard_run = int(shard_run) + 1
- else:
- shard_run = options.shard_run
-
- if options.shard_count > 1:
- # Log if a value was passed on the cmd line and it differs from the
- # environment variables.
- if options.shard_count != shard_count:
- print("shard_count from cmd line differs from environment variable "
- "GTEST_TOTAL_SHARDS")
- if options.shard_run > 1 and options.shard_run != shard_run:
- print("shard_run from cmd line differs from environment variable "
- "GTEST_SHARD_INDEX")
-
- if shard_count < 2:
- return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
-
-def Main():
- # Use the v8 root as cwd as some test cases use "load" with relative paths.
- os.chdir(BASE_DIR)
-
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- return 1
- SetupEnvironment(options)
-
- if options.swarming:
- # Swarming doesn't print how isolated commands are called. Lets make this
- # less cryptic by printing it ourselves.
- print ' '.join(sys.argv)
-
- exit_code = 0
-
- suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
-
- # Use default tests if no test configuration was provided at the cmd line.
- if len(args) == 0:
- args = ["default"]
-
- # Expand arguments with grouped tests. The args should reflect the list of
- # suites as otherwise filters would break.
- def ExpandTestGroups(name):
- if name in TEST_MAP:
- return [suite for suite in TEST_MAP[name]]
- else:
- return [name]
- args = reduce(lambda x, y: x + y,
- [ExpandTestGroups(arg) for arg in args],
- [])
-
- args_suites = OrderedDict() # Used as set
- for arg in args:
- args_suites[arg.split('/')[0]] = True
- suite_paths = [ s for s in args_suites if s in suite_paths ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- if options.download_data or options.download_data_only:
- for s in suites:
- s.DownloadData()
-
- if options.download_data_only:
- return exit_code
-
- for s in suites:
- s.PrepareSources()
-
- for (arch, mode) in options.arch_and_mode:
- try:
- code = Execute(arch, mode, args, options, suites)
- except KeyboardInterrupt:
- return 2
- exit_code = exit_code or code
- return exit_code
-
-
-def Execute(arch, mode, args, options, suites):
- print(">>> Running tests for %s.%s" % (arch, mode))
-
- shell_dir = options.shell_dir
- if not shell_dir:
- if options.auto_detect:
- # If an output dir with a build was passed, test directly in that
- # directory.
- shell_dir = os.path.join(BASE_DIR, options.outdir)
- elif options.buildbot:
- # TODO(machenbach): Get rid of different output folder location on
- # buildbot. Currently this is capitalized Release and Debug.
- shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
- mode = BuildbotToV8Mode(mode)
- else:
- shell_dir = os.path.join(
- BASE_DIR,
- options.outdir,
- "%s.%s" % (arch, MODES[mode]["output_folder"]),
- )
- if not os.path.exists(shell_dir):
- raise Exception('Could not find shell_dir: "%s"' % shell_dir)
-
- # Populate context object.
- mode_flags = MODES[mode]["flags"]
-
- # Simulators are slow, therefore allow a longer timeout.
- if arch in SLOW_ARCHS:
- options.timeout *= 2
-
- options.timeout *= MODES[mode]["timeout_scalefactor"]
-
- if options.predictable:
- # Predictable mode is slower.
- options.timeout *= 2
-
- ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
- mode_flags, options.verbose,
- options.timeout,
- options.isolates,
- options.command_prefix,
- options.extra_flags,
- options.no_i18n,
- options.random_seed,
- options.no_sorting,
- options.rerun_failures_count,
- options.rerun_failures_max,
- options.predictable,
- options.no_harness,
- use_perf_data=not options.swarming,
- sancov_dir=options.sancov_dir)
-
- # TODO(all): Combine "simulator" and "simulator_run".
- # TODO(machenbach): In GN we can derive simulator run from
- # target_arch != v8_target_arch in the dumped build config.
- simulator_run = not options.dont_skip_simulator_slow_tests and \
- arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
- 'ppc', 'ppc64', 's390', 's390x'] and \
- bool(ARCH_GUESS) and arch != ARCH_GUESS
- # Find available test suites and read test cases from them.
- variables = {
- "arch": arch,
- "asan": options.asan,
- "deopt_fuzzer": False,
- "gc_stress": options.gc_stress,
- "gcov_coverage": options.gcov_coverage,
- "isolates": options.isolates,
- "mode": MODES[mode]["status_mode"],
- "no_i18n": options.no_i18n,
- "no_snap": options.no_snap,
- "simulator_run": simulator_run,
- "simulator": utils.UseSimulator(arch),
- "system": utils.GuessOS(),
- "tsan": options.tsan,
- "msan": options.msan,
- "dcheck_always_on": options.dcheck_always_on,
- "novfp3": options.novfp3,
- "predictable": options.predictable,
- "byteorder": sys.byteorder,
- "no_harness": options.no_harness,
- "ubsan_vptr": options.ubsan_vptr,
- }
- all_tests = []
- num_tests = 0
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- all_tests += s.tests
-
- # First filtering by status applying the generic rules (independent of
- # variants).
- s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
- options.pass_fail_tests)
-
- if options.cat:
- verbose.PrintTestSource(s.tests)
- continue
- variant_gen = s.CreateVariantGenerator(VARIANTS)
- variant_tests = [ t.CopyAddingFlags(v, flags)
- for t in s.tests
- for v in variant_gen.FilterVariantsByTest(t)
- for flags in variant_gen.GetFlagSets(t, v) ]
-
- if options.random_seed_stress_count > 1:
- # Duplicate test for random seed stress mode.
- def iter_seed_flags():
- for i in range(0, options.random_seed_stress_count):
- # Use given random seed for all runs (set by default in execution.py)
- # or a new random seed if none is specified.
- if options.random_seed:
- yield []
- else:
- yield ["--random-seed=%d" % RandomSeed()]
- s.tests = [
- t.CopyAddingFlags(t.variant, flags)
- for t in variant_tests
- for flags in iter_seed_flags()
- ]
- else:
- s.tests = variant_tests
-
- # Second filtering by status applying the variant-dependent rules.
- s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
- options.pass_fail_tests, variants=True)
-
- s.tests = ShardTests(s.tests, options)
- num_tests += len(s.tests)
-
- if options.cat:
- return 0 # We're done here.
-
- if options.report:
- verbose.PrintReport(all_tests)
-
- # Run the tests, either locally or distributed on the network.
- start_time = time.time()
- progress_indicator = progress.IndicatorNotifier()
- progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
- if options.junitout:
- progress_indicator.Register(progress.JUnitTestProgressIndicator(
- options.junitout, options.junittestsuite))
- if options.json_test_results:
- progress_indicator.Register(progress.JsonTestProgressIndicator(
- options.json_test_results, arch, MODES[mode]["execution_mode"],
- ctx.random_seed))
- if options.flakiness_results:
- progress_indicator.Register(progress.FlakinessTestProgressIndicator(
- options.flakiness_results))
-
- run_networked = options.network
- if not run_networked:
- if options.verbose:
- print("Network distribution disabled, running tests locally.")
- elif utils.GuessOS() != "linux":
- print("Network distribution is only supported on Linux, sorry!")
- run_networked = False
- peers = []
- if run_networked:
- peers = network_execution.GetPeers()
- if not peers:
- print("No connection to distribution server; running tests locally.")
- run_networked = False
- elif len(peers) == 1:
- print("No other peers on the network; running tests locally.")
- run_networked = False
- elif num_tests <= 100:
- print("Less than 100 tests, running them locally.")
- run_networked = False
-
- if run_networked:
- runner = network_execution.NetworkedRunner(suites, progress_indicator,
- ctx, peers, BASE_DIR)
- else:
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- exit_code = runner.Run(options.j)
- overall_duration = time.time() - start_time
-
- if options.time:
- verbose.PrintTestDurations(suites, overall_duration)
-
- if num_tests == 0:
- print("Warning: no tests were run!")
-
- if exit_code == 1 and options.json_test_results:
- print("Force exit code 0 after failures. Json test results file generated "
- "with failure information.")
- exit_code = 0
-
- if options.sancov_dir:
- # If tests ran with sanitizer coverage, merge coverage files in the end.
- try:
- print "Merging sancov files."
- subprocess.check_call([
- sys.executable,
- join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
- "--coverage-dir=%s" % options.sancov_dir])
- except:
- print >> sys.stderr, "Error: Merging sancov files failed."
- exit_code = 1
- return exit_code
+from testrunner import standard_runner
if __name__ == "__main__":
- sys.exit(Main())
+ sys.exit(standard_runner.StandardTestRunner().execute())
diff --git a/deps/v8/tools/run-valgrind.isolate b/deps/v8/tools/run-valgrind.isolate
deleted file mode 100644
index 5947409e17..0000000000
--- a/deps/v8/tools/run-valgrind.isolate
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'command': [
- 'run-valgrind.py',
- ],
- 'files': [
- 'run-valgrind.py',
- ],
- },
- 'conditions': [
- ['has_valgrind==1', {
- 'variables': {
- 'files': [
- # This assumes vagrind binaries have been fetched as a custom deps
- # into v8/third_party/valgrind. It is not clear on which target
- # machine this will run, but grabbing both is cheap.
- '../third_party/valgrind/linux_x86/',
- '../third_party/valgrind/linux_x64/',
- ],
- },
- }],
- ],
- 'includes': [
- '../src/d8.isolate',
- ],
-}
diff --git a/deps/v8/tools/run-valgrind.py b/deps/v8/tools/run-valgrind.py
deleted file mode 100755
index e3f84f58fe..0000000000
--- a/deps/v8/tools/run-valgrind.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Simple wrapper for running valgrind and checking the output on
-# stderr for memory leaks.
-# Uses valgrind from third_party/valgrind. Assumes the executable is passed
-# with a path relative to the v8 root.
-
-
-from os import path
-import platform
-import re
-import subprocess
-import sys
-
-V8_ROOT = path.dirname(path.dirname(path.abspath(__file__)))
-MACHINE = 'linux_x64' if platform.machine() == 'x86_64' else 'linux_x86'
-VALGRIND_ROOT = path.join(V8_ROOT, 'third_party', 'valgrind', MACHINE)
-VALGRIND_BIN = path.join(VALGRIND_ROOT, 'bin', 'valgrind')
-VALGRIND_LIB = path.join(VALGRIND_ROOT, 'lib', 'valgrind')
-
-VALGRIND_ARGUMENTS = [
- VALGRIND_BIN,
- '--error-exitcode=1',
- '--leak-check=full',
- '--smc-check=all',
-]
-
-if len(sys.argv) < 2:
- print 'Please provide an executable to analyze.'
- sys.exit(1)
-
-executable = path.join(V8_ROOT, sys.argv[1])
-if not path.exists(executable):
- print 'Cannot find the file specified: %s' % executable
- sys.exit(1)
-
-# Compute the command line.
-command = VALGRIND_ARGUMENTS + [executable] + sys.argv[2:]
-
-# Run valgrind.
-process = subprocess.Popen(
- command,
- stderr=subprocess.PIPE,
- env={'VALGRIND_LIB': VALGRIND_LIB}
-)
-code = process.wait();
-errors = process.stderr.readlines();
-
-# If valgrind produced an error, we report that to the user.
-if code != 0:
- sys.stderr.writelines(errors)
- sys.exit(code)
-
-# Look through the leak details and make sure that we don't
-# have any definitely, indirectly, and possibly lost bytes.
-LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
-LEAK_LINE_MATCHER = re.compile(LEAK_RE)
-LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
-leaks = []
-for line in errors:
- if LEAK_LINE_MATCHER.search(line):
- leaks.append(line)
- if not LEAK_OKAY_MATCHER.search(line):
- sys.stderr.writelines(errors)
- sys.exit(1)
-
-# Make sure we found between 2 and 3 leak lines.
-if len(leaks) < 2 or len(leaks) > 3:
- sys.stderr.writelines(errors)
- sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
- sys.exit(1)
-
-# No leaks found.
-sys.stderr.writelines(errors)
-sys.exit(0)
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index b22a4f11ea..0f1646d9ea 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -221,8 +221,8 @@ class Measurement(object):
class NullMeasurement(object):
- """Null object to avoid having extra logic for configurations that didn't
- run like running without patch on trybots.
+ """Null object to avoid having extra logic for configurations that don't
+ require secondary run, e.g. CI bots.
"""
def ConsumeOutput(self, stdout):
pass
@@ -260,7 +260,7 @@ def RunResultsProcessor(results_processor, stdout, count):
def AccumulateResults(
- graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
+ graph_names, trace_configs, iter_output, perform_measurement, calc_total):
"""Iterates over the output of multiple benchmark reruns and accumulates
results for a configured list of traces.
@@ -270,14 +270,15 @@ def AccumulateResults(
trace_configs: List of "TraceConfig" instances. Each trace config defines
how to perform a measurement.
iter_output: Iterator over the standard output of each test run.
- trybot: Indicates that this is run in trybot mode, i.e. run twice, once
- with once without patch.
- no_patch: Indicates weather this is a trybot run without patch.
+ perform_measurement: Whether to actually run tests and perform measurements.
+ This is needed so that we reuse this script for both CI
+ and trybot, but want to ignore second run on CI without
+ having to spread this logic throughout the script.
calc_total: Boolean flag to speficy the calculation of a summary trace.
Returns: A "Results" object.
"""
measurements = [
- trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
+ trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
for stdout in iter_output():
for measurement in measurements:
measurement.ConsumeOutput(stdout)
@@ -451,9 +452,8 @@ class TraceConfig(GraphConfig):
super(TraceConfig, self).__init__(suite, parent, arch)
assert self.results_regexp
- def CreateMeasurement(self, trybot, no_patch):
- if not trybot and no_patch:
- # Use null object for no-patch logic if this is not a trybot run.
+ def CreateMeasurement(self, perform_measurement):
+ if not perform_measurement:
return NullMeasurement()
return Measurement(
@@ -505,22 +505,20 @@ class RunnableConfig(GraphConfig):
def Run(self, runner, trybot):
"""Iterates over several runs and handles the output for all traces."""
- stdout_with_patch, stdout_no_patch = Unzip(runner())
+ stdout, stdout_secondary = Unzip(runner())
return (
AccumulateResults(
self.graphs,
self._children,
- iter_output=self.PostProcess(stdout_with_patch),
- trybot=trybot,
- no_patch=False,
+ iter_output=self.PostProcess(stdout),
+ perform_measurement=True,
calc_total=self.total,
),
AccumulateResults(
self.graphs,
self._children,
- iter_output=self.PostProcess(stdout_no_patch),
- trybot=trybot,
- no_patch=True,
+ iter_output=self.PostProcess(stdout_secondary),
+ perform_measurement=trybot, # only run second time on trybots
calc_total=self.total,
),
)
@@ -533,14 +531,14 @@ class RunnableTraceConfig(TraceConfig, RunnableConfig):
def Run(self, runner, trybot):
"""Iterates over several runs and handles the output."""
- measurement_with_patch = self.CreateMeasurement(trybot, False)
- measurement_no_patch = self.CreateMeasurement(trybot, True)
- for stdout_with_patch, stdout_no_patch in runner():
- measurement_with_patch.ConsumeOutput(stdout_with_patch)
- measurement_no_patch.ConsumeOutput(stdout_no_patch)
+ measurement = self.CreateMeasurement(perform_measurement=True)
+ measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
+ for stdout, stdout_secondary in runner():
+ measurement.ConsumeOutput(stdout)
+ measurement_secondary.ConsumeOutput(stdout_secondary)
return (
- measurement_with_patch.GetResults(),
- measurement_no_patch.GetResults(),
+ measurement.GetResults(),
+ measurement_secondary.GetResults(),
)
@@ -550,10 +548,10 @@ class RunnableGenericConfig(RunnableConfig):
super(RunnableGenericConfig, self).__init__(suite, parent, arch)
def Run(self, runner, trybot):
- stdout_with_patch, stdout_no_patch = Unzip(runner())
+ stdout, stdout_secondary = Unzip(runner())
return (
- AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
- AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
+ AccumulateGenericResults(self.graphs, self.units, stdout),
+ AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
)
@@ -615,7 +613,7 @@ def FlattenRunnables(node, node_cb):
class Platform(object):
def __init__(self, options):
self.shell_dir = options.shell_dir
- self.shell_dir_no_patch = options.shell_dir_no_patch
+ self.shell_dir_secondary = options.shell_dir_secondary
self.extra_flags = options.extra_flags.split()
@staticmethod
@@ -625,24 +623,23 @@ class Platform(object):
else:
return DesktopPlatform(options)
- def _Run(self, runnable, count, no_patch=False):
+ def _Run(self, runnable, count, secondary=False):
raise NotImplementedError() # pragma: no cover
def Run(self, runnable, count):
"""Execute the benchmark's main file.
- If options.shell_dir_no_patch is specified, the benchmark is run once with
- and once without patch.
+ If options.shell_dir_secondary is specified, the benchmark is run twice,
+ e.g. with and without patch.
Args:
runnable: A Runnable benchmark instance.
count: The number of this (repeated) run.
- Returns: A tuple with the benchmark outputs with and without patch. The
- latter will be None if options.shell_dir_no_patch was not
- specified.
+ Returns: A tuple with the two benchmark outputs. The latter will be None if
+ options.shell_dir_secondary was not specified.
"""
- stdout = self._Run(runnable, count, no_patch=False)
- if self.shell_dir_no_patch:
- return stdout, self._Run(runnable, count, no_patch=True)
+ stdout = self._Run(runnable, count, secondary=False)
+ if self.shell_dir_secondary:
+ return stdout, self._Run(runnable, count, secondary=True)
else:
return stdout, None
@@ -676,9 +673,9 @@ class DesktopPlatform(Platform):
if isinstance(node, RunnableConfig):
node.ChangeCWD(path)
- def _Run(self, runnable, count, no_patch=False):
- suffix = ' - without patch' if no_patch else ''
- shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
+ def _Run(self, runnable, count, secondary=False):
+ suffix = ' - secondary' if secondary else ''
+ shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
if runnable.process_size:
command = ["/usr/bin/time", "--format=MaxMemory: %MKB"]
@@ -816,18 +813,18 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_abs = suite_dir
self._PushExecutable(self.shell_dir, "bin", node.binary)
- if self.shell_dir_no_patch:
+ if self.shell_dir_secondary:
self._PushExecutable(
- self.shell_dir_no_patch, "bin_no_patch", node.binary)
+ self.shell_dir_secondary, "bin_secondary", node.binary)
if isinstance(node, RunnableConfig):
self._PushFile(bench_abs, node.main, bench_rel)
for resource in node.resources:
self._PushFile(bench_abs, resource, bench_rel)
- def _Run(self, runnable, count, no_patch=False):
- suffix = ' - without patch' if no_patch else ''
- target_dir = "bin_no_patch" if no_patch else "bin"
+ def _Run(self, runnable, count, secondary=False):
+ suffix = ' - secondary' if secondary else ''
+ target_dir = "bin_secondary" if secondary else "bin"
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
cache = cache_control.CacheControl(self.device)
cache.DropRamCaches()
@@ -984,17 +981,20 @@ def Main(args):
default="")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--json-test-results-no-patch",
+ parser.add_option("--json-test-results-secondary",
+ "--json-test-results-no-patch", # TODO(sergiyb): Deprecate.
help="Path to a file for storing json results from run "
- "without patch.")
+ "without patch or for reference build run.")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
- parser.add_option("--outdir-no-patch",
- help="Base directory with compile output without patch")
+ parser.add_option("--outdir-secondary",
+ "--outdir-no-patch", # TODO(sergiyb): Deprecate.
+ help="Base directory with compile output without patch or "
+ "for reference build")
parser.add_option("--binary-override-path",
help="JavaScript engine binary. By default, d8 under "
"architecture-specific build dir. "
- "Not supported in conjunction with outdir-no-patch.")
+ "Not supported in conjunction with outdir-secondary.")
parser.add_option("--prioritize",
help="Raise the priority to nice -20 for the benchmarking "
"process.Requires Linux, schedtool, and sudo privileges.",
@@ -1040,10 +1040,10 @@ def Main(args):
print "Specifying a device requires Android build tools."
return 1
- if (options.json_test_results_no_patch and
- not options.outdir_no_patch): # pragma: no cover
- print("For writing json test results without patch, an outdir without "
- "patch must be specified.")
+ if (options.json_test_results_secondary and
+ not options.outdir_secondary): # pragma: no cover
+ print("For writing secondary json test results, a secondary outdir patch "
+ "must be specified.")
return 1
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
@@ -1060,25 +1060,25 @@ def Main(args):
if not os.path.isfile(options.binary_override_path):
print "binary-override-path must be a file name"
return 1
- if options.outdir_no_patch:
- print "specify either binary-override-path or outdir-no-patch"
+ if options.outdir_secondary:
+ print "specify either binary-override-path or outdir-secondary"
return 1
options.shell_dir = os.path.abspath(
os.path.dirname(options.binary_override_path))
default_binary_name = os.path.basename(options.binary_override_path)
- if options.outdir_no_patch:
- options.shell_dir_no_patch = os.path.join(
- workspace, options.outdir_no_patch, build_config)
+ if options.outdir_secondary:
+ options.shell_dir_secondary = os.path.join(
+ workspace, options.outdir_secondary, build_config)
else:
- options.shell_dir_no_patch = None
+ options.shell_dir_secondary = None
if options.json_test_results:
options.json_test_results = os.path.abspath(options.json_test_results)
- if options.json_test_results_no_patch:
- options.json_test_results_no_patch = os.path.abspath(
- options.json_test_results_no_patch)
+ if options.json_test_results_secondary:
+ options.json_test_results_secondary = os.path.abspath(
+ options.json_test_results_secondary)
# Ensure all arguments have absolute path before we start changing current
# directory.
@@ -1089,7 +1089,7 @@ def Main(args):
platform = Platform.GetPlatform(options)
results = Results()
- results_no_patch = Results()
+ results_secondary = Results()
with CustomMachineConfiguration(governor = options.cpu_governor,
disable_aslr = options.noaslr) as conf:
for path in args:
@@ -1129,10 +1129,10 @@ def Main(args):
yield platform.Run(runnable, i)
# Let runnable iterate over all runs and handle output.
- result, result_no_patch = runnable.Run(
- Runner, trybot=options.shell_dir_no_patch)
+ result, result_secondary = runnable.Run(
+ Runner, trybot=options.shell_dir_secondary)
results += result
- results_no_patch += result_no_patch
+ results_secondary += result_secondary
platform.PostExecution()
if options.json_test_results:
@@ -1140,10 +1140,10 @@ def Main(args):
else: # pragma: no cover
print results
- if options.json_test_results_no_patch:
- results_no_patch.WriteToFile(options.json_test_results_no_patch)
+ if options.json_test_results_secondary:
+ results_secondary.WriteToFile(options.json_test_results_secondary)
else: # pragma: no cover
- print results_no_patch
+ print results_secondary
return min(1, len(results.errors))
diff --git a/deps/v8/tools/test-server.py b/deps/v8/tools/test-server.py
deleted file mode 100755
index ab927de75f..0000000000
--- a/deps/v8/tools/test-server.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import subprocess
-import sys
-
-
-PIDFILE = "/tmp/v8-distributed-testing-server.pid"
-ROOT = os.path.abspath(os.path.dirname(sys.argv[0]))
-
-
-def _PrintUsage():
- print("""Usage: python %s COMMAND
-
-Where COMMAND can be any of:
- start Starts the server. Forks to the background.
- stop Stops the server.
- restart Stops, then restarts the server.
- setup Creates or updates the environment for the server to run.
- update Alias for "setup".
- trust <keyfile> Adds the given public key to the list of trusted keys.
- help Displays this help text.
- """ % sys.argv[0])
-
-
-def _IsDaemonRunning():
- return os.path.exists(PIDFILE)
-
-
-def _Cmd(cmd):
- code = subprocess.call(cmd, shell=True)
- if code != 0:
- print("Command '%s' returned error code %d" % (cmd, code))
- sys.exit(code)
-
-
-def Update():
- # Create directory for private data storage.
- data_dir = os.path.join(ROOT, "data")
- if not os.path.exists(data_dir):
- os.makedirs(data_dir)
-
- # Create directory for trusted public keys of peers (and self).
- trusted_dir = os.path.join(ROOT, "trusted")
- if not os.path.exists(trusted_dir):
- os.makedirs(trusted_dir)
-
- # Install UltraJSON. It is much faster than Python's builtin json.
- try:
- import ujson #@UnusedImport
- except ImportError:
- # Install pip if it doesn't exist.
- code = subprocess.call("which pip > /dev/null", shell=True)
- if code != 0:
- apt_get_code = subprocess.call("which apt-get > /dev/null", shell=True)
- if apt_get_code == 0:
- print("Installing pip...")
- _Cmd("sudo apt-get install python-pip")
- else:
- print("Please install pip on your machine. You can get it at: "
- "http://www.pip-installer.org/en/latest/installing.html "
- "or via your distro's package manager.")
- sys.exit(1)
- print("Using pip to install UltraJSON...")
- _Cmd("sudo pip install ujson")
-
- # Make sure we have a key pair for signing binaries.
- privkeyfile = os.path.expanduser("~/.ssh/v8_dtest")
- if not os.path.exists(privkeyfile):
- _Cmd("ssh-keygen -t rsa -f %s -N '' -q" % privkeyfile)
- fingerprint = subprocess.check_output("ssh-keygen -lf %s" % privkeyfile,
- shell=True)
- fingerprint = fingerprint.split(" ")[1].replace(":", "")[:16]
- pubkeyfile = os.path.join(trusted_dir, "%s.pem" % fingerprint)
- if (not os.path.exists(pubkeyfile) or
- os.path.getmtime(pubkeyfile) < os.path.getmtime(privkeyfile)):
- _Cmd("openssl rsa -in %s -out %s -pubout" % (privkeyfile, pubkeyfile))
- with open(pubkeyfile, "a") as f:
- f.write(fingerprint + "\n")
- datafile = os.path.join(data_dir, "mypubkey")
- with open(datafile, "w") as f:
- f.write(fingerprint + "\n")
-
- # Check out or update the server implementation in the current directory.
- testrunner_dir = os.path.join(ROOT, "testrunner")
- if os.path.exists(os.path.join(testrunner_dir, "server/daemon.py")):
- _Cmd("cd %s; svn up" % testrunner_dir)
- else:
- path = ("http://v8.googlecode.com/svn/branches/bleeding_edge/"
- "tools/testrunner")
- _Cmd("svn checkout --force %s %s" % (path, testrunner_dir))
-
- # Update this very script.
- path = ("http://v8.googlecode.com/svn/branches/bleeding_edge/"
- "tools/test-server.py")
- scriptname = os.path.abspath(sys.argv[0])
- _Cmd("svn cat %s > %s" % (path, scriptname))
-
- # Check out or update V8.
- v8_dir = os.path.join(ROOT, "v8")
- if os.path.exists(v8_dir):
- _Cmd("cd %s; git fetch" % v8_dir)
- else:
- _Cmd("git clone git://github.com/v8/v8.git %s" % v8_dir)
-
- print("Finished.")
-
-
-# Handle "setup" here, because when executing that we can't import anything
-# else yet.
-if __name__ == "__main__" and len(sys.argv) == 2:
- if sys.argv[1] in ("setup", "update"):
- if _IsDaemonRunning():
- print("Please stop the server before updating. Exiting.")
- sys.exit(1)
- Update()
- sys.exit(0)
- # Other parameters are handled below.
-
-
-#==========================================================
-# At this point we can assume that the implementation is available,
-# so we can import it.
-try:
- from testrunner.server import constants
- from testrunner.server import local_handler
- from testrunner.server import main
-except Exception, e:
- print(e)
- print("Failed to import implementation. Have you run 'setup'?")
- sys.exit(1)
-
-
-def _StartDaemon(daemon):
- if not os.path.isdir(os.path.join(ROOT, "v8")):
- print("No 'v8' working directory found. Have you run 'setup'?")
- sys.exit(1)
- daemon.start()
-
-
-if __name__ == "__main__":
- if len(sys.argv) == 2:
- arg = sys.argv[1]
- if arg == "start":
- daemon = main.Server(PIDFILE, ROOT)
- _StartDaemon(daemon)
- elif arg == "stop":
- daemon = main.Server(PIDFILE, ROOT)
- daemon.stop()
- elif arg == "restart":
- daemon = main.Server(PIDFILE, ROOT)
- daemon.stop()
- _StartDaemon(daemon)
- elif arg in ("help", "-h", "--help"):
- _PrintUsage()
- elif arg == "status":
- if not _IsDaemonRunning():
- print("Server not running.")
- else:
- print(local_handler.LocalQuery([constants.REQUEST_STATUS]))
- else:
- print("Unknown command")
- _PrintUsage()
- sys.exit(2)
- elif len(sys.argv) == 3:
- arg = sys.argv[1]
- if arg == "approve":
- filename = sys.argv[2]
- if not os.path.exists(filename):
- print("%s does not exist.")
- sys.exit(1)
- filename = os.path.abspath(filename)
- if _IsDaemonRunning():
- response = local_handler.LocalQuery([constants.ADD_TRUSTED, filename])
- else:
- daemon = main.Server(PIDFILE, ROOT)
- response = daemon.CopyToTrusted(filename)
- print("Added certificate %s to trusted certificates." % response)
- else:
- print("Unknown command")
- _PrintUsage()
- sys.exit(2)
- else:
- print("Unknown command")
- _PrintUsage()
- sys.exit(2)
- sys.exit(0)
diff --git a/deps/v8/tools/testrunner/README b/deps/v8/tools/testrunner/README
deleted file mode 100644
index 0771ef9dc2..0000000000
--- a/deps/v8/tools/testrunner/README
+++ /dev/null
@@ -1,168 +0,0 @@
-Test suite runner for V8, including support for distributed running.
-====================================================================
-
-
-Local usage instructions:
-=========================
-
-Run the main script with --help to get detailed usage instructions:
-
-$ tools/run-tests.py --help
-
-The interface is mostly the same as it was for the old test runner.
-You'll likely want something like this:
-
-$ tools/run-tests.py --nonetwork --arch ia32 --mode release
-
---nonetwork is the default on Mac and Windows. If you don't specify --arch
-and/or --mode, all available values will be used and run in turn (e.g.,
-omitting --mode from the above example will run ia32 in both Release and Debug
-modes).
-
-
-Networked usage instructions:
-=============================
-
-Networked running is only supported on Linux currently. Make sure that all
-machines participating in the cluster are binary-compatible (e.g. mixing
-Ubuntu Lucid and Precise doesn't work).
-
-Setup:
-------
-
-1.) Copy tools/test-server.py to a new empty directory anywhere on your hard
- drive (preferably not inside your V8 checkout just to keep things clean).
- Please do create a copy, not just a symlink.
-
-2.) Navigate to the new directory and let the server setup itself:
-
-$ ./test-server.py setup
-
- This will install PIP and UltraJSON, create a V8 working directory, and
- generate a keypair.
-
-3.) Swap public keys with someone who's already part of the networked cluster.
-
-$ cp trusted/`cat data/mypubkey`.pem /where/peers/can/see/it/myname.pem
-$ ./test-server.py approve /wherever/they/put/it/yourname.pem
-
-
-Usage:
-------
-
-1.) Start your server:
-
-$ ./test-server.py start
-
-2.) (Optionally) inspect the server's status:
-
-$ ./test-server.py status
-
-3.) From your regular V8 working directory, run tests:
-
-$ tool/run-tests.py --arch ia32 --mode debug
-
-4.) (Optionally) enjoy the speeeeeeeeeeeeeeeed
-
-
-Architecture overview:
-======================
-
-Code organization:
-------------------
-
-This section is written from the point of view of the tools/ directory.
-
-./run-tests.py:
- Main script. Parses command-line options and drives the test execution
- procedure from a high level. Imports the actual implementation of all
- steps from the testrunner/ directory.
-
-./test-server.py:
- Interface to interact with the server. Contains code to setup the server's
- working environment and can start and stop server daemon processes.
- Imports some stuff from the testrunner/server/ directory.
-
-./testrunner/local/*:
- Implementation needed to run tests locally. Used by run-tests.py. Inspired by
- (and partly copied verbatim from) the original test.py script.
-
-./testrunner/objects/*:
- A bunch of data container classes, used by the scripts in the various other
- directories; serializable for transmission over the network.
-
-./testrunner/network/*:
- Equivalents and extensions of some of the functionality in ./testrunner/local/
- as required when dispatching tests to peers on the network.
-
-./testrunner/network/network_execution.py:
- Drop-in replacement for ./testrunner/local/execution that distributes
- test jobs to network peers instead of running them locally.
-
-./testrunner/network/endpoint.py:
- Receiving end of a network distributed job, uses the implementation
- in ./testrunner/local/execution.py for actually running the tests.
-
-./testrunner/server/*:
- Implementation of the daemon that accepts and runs test execution jobs from
- peers on the network. Should ideally have no dependencies on any of the other
- directories, but that turned out to be impractical, so there are a few
- exceptions.
-
-./testrunner/server/compression.py:
- Defines a wrapper around Python TCP sockets that provides JSON based
- serialization, gzip based compression, and ensures message completeness.
-
-
-Networking architecture:
-------------------------
-
-The distribution stuff is designed to be a layer between deciding which tests
-to run on the one side, and actually running them on the other. The frontend
-that the user interacts with is the same for local and networked execution,
-and the actual test execution and result gathering code is the same too.
-
-The server daemon starts four separate servers, each listening on another port:
-- "Local": Communication with a run-tests.py script running on the same host.
- The test driving script e.g. needs to ask for available peers. It then talks
- to those peers directly (one of them will be the locally running server).
-- "Work": Listens for test job requests from run-tests.py scripts on the network
- (including localhost). Accepts an arbitrary number of connections at the
- same time, but only works on them in a serialized fashion.
-- "Status": Used for communication with other servers on the network, e.g. for
- exchanging trusted public keys to create the transitive trust closure.
-- "Discovery": Used to detect presence of other peers on the network.
- In contrast to the other three, this uses UDP (as opposed to TCP).
-
-
-Give us a diagram! We love diagrams!
-------------------------------------
- .
- Machine A . Machine B
- .
-+------------------------------+ .
-| run-tests.py | .
-| with flag: | .
-|--nonetwork --network | .
-| | / | | .
-| | / | | .
-| v / v | .
-|BACKEND / distribution | .
-+--------- / --------| \ ------+ .
- / | \_____________________
- / | . \
- / | . \
-+----- v ----------- v --------+ . +---- v -----------------------+
-| LocalHandler | WorkHandler | . | WorkHandler | LocalHandler |
-| | | | . | | | |
-| | v | . | v | |
-| | BACKEND | . | BACKEND | |
-|------------- +---------------| . |---------------+--------------|
-| Discovery | StatusHandler <----------> StatusHandler | Discovery |
-+---- ^ -----------------------+ . +-------------------- ^ -------+
- | . |
- +---------------------------------------------------------+
-
-Note that the three occurrences of "BACKEND" are the same code
-(testrunner/local/execution.py and its imports), but running from three
-distinct directories (and on two different machines).
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
new file mode 100644
index 0000000000..b6ef6fb5cd
--- /dev/null
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -0,0 +1,438 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import json
+import optparse
+import os
+import sys
+
+
+# Add testrunner to the path.
+sys.path.insert(
+ 0,
+ os.path.dirname(
+ os.path.dirname(os.path.abspath(__file__))))
+
+
+from local import utils
+
+
+BASE_DIR = (
+ os.path.dirname(
+ os.path.dirname(
+ os.path.dirname(
+ os.path.abspath(__file__)))))
+
+DEFAULT_OUT_GN = 'out.gn'
+
+ARCH_GUESS = utils.DefaultArch()
+
+# Map of test name synonyms to lists of test suites. Should be ordered by
+# expected runtimes (suites with slow test cases first). These groups are
+# invoked in separate steps on the bots.
+TEST_MAP = {
+ # This needs to stay in sync with test/bot_default.isolate.
+ "bot_default": [
+ "debugger",
+ "mjsunit",
+ "cctest",
+ "wasm-spec-tests",
+ "inspector",
+ "webkit",
+ "mkgrokdump",
+ "fuzzer",
+ "message",
+ "preparser",
+ "intl",
+ "unittests",
+ ],
+ # This needs to stay in sync with test/default.isolate.
+ "default": [
+ "debugger",
+ "mjsunit",
+ "cctest",
+ "wasm-spec-tests",
+ "inspector",
+ "mkgrokdump",
+ "fuzzer",
+ "message",
+ "preparser",
+ "intl",
+ "unittests",
+ ],
+ # This needs to stay in sync with test/optimize_for_size.isolate.
+ "optimize_for_size": [
+ "debugger",
+ "mjsunit",
+ "cctest",
+ "inspector",
+ "webkit",
+ "intl",
+ ],
+ "unittests": [
+ "unittests",
+ ],
+}
+
+
+class ModeConfig(object):
+ def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
+ self.flags = flags
+ self.timeout_scalefactor = timeout_scalefactor
+ self.status_mode = status_mode
+ self.execution_mode = execution_mode
+
+
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort"]
+MODES = {
+ "debug": ModeConfig(
+ flags=DEBUG_FLAGS,
+ timeout_scalefactor=4,
+ status_mode="debug",
+ execution_mode="debug",
+ ),
+ "optdebug": ModeConfig(
+ flags=DEBUG_FLAGS,
+ timeout_scalefactor=4,
+ status_mode="debug",
+ execution_mode="debug",
+ ),
+ "release": ModeConfig(
+ flags=RELEASE_FLAGS,
+ timeout_scalefactor=1,
+ status_mode="release",
+ execution_mode="release",
+ ),
+ # Normal trybot release configuration. There, dchecks are always on which
+ # implies debug is set. Hence, the status file needs to assume debug-like
+ # behavior/timeouts.
+ "tryrelease": ModeConfig(
+ flags=RELEASE_FLAGS,
+ timeout_scalefactor=1,
+ status_mode="debug",
+ execution_mode="release",
+ ),
+ # This mode requires v8 to be compiled with dchecks and slow dchecks.
+ "slowrelease": ModeConfig(
+ flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
+ timeout_scalefactor=2,
+ status_mode="debug",
+ execution_mode="release",
+ ),
+}
+
+
+class TestRunnerError(Exception):
+ pass
+
+
+class BuildConfig(object):
+ def __init__(self, build_config):
+ # In V8 land, GN's x86 is called ia32.
+ if build_config['v8_target_cpu'] == 'x86':
+ self.arch = 'ia32'
+ else:
+ self.arch = build_config['v8_target_cpu']
+
+ self.is_debug = build_config['is_debug']
+ self.asan = build_config['is_asan']
+ self.cfi_vptr = build_config['is_cfi']
+ self.dcheck_always_on = build_config['dcheck_always_on']
+ self.gcov_coverage = build_config['is_gcov_coverage']
+ self.msan = build_config['is_msan']
+ self.no_i18n = not build_config['v8_enable_i18n_support']
+ self.no_snap = not build_config['v8_use_snapshot']
+ self.predictable = build_config['v8_enable_verify_predictable']
+ self.tsan = build_config['is_tsan']
+ self.ubsan_vptr = build_config['is_ubsan_vptr']
+
+ def __str__(self):
+ detected_options = []
+
+ if self.asan:
+ detected_options.append('asan')
+ if self.cfi_vptr:
+ detected_options.append('cfi_vptr')
+ if self.dcheck_always_on:
+ detected_options.append('dcheck_always_on')
+ if self.gcov_coverage:
+ detected_options.append('gcov_coverage')
+ if self.msan:
+ detected_options.append('msan')
+ if self.no_i18n:
+ detected_options.append('no_i18n')
+ if self.no_snap:
+ detected_options.append('no_snap')
+ if self.predictable:
+ detected_options.append('predictable')
+ if self.tsan:
+ detected_options.append('tsan')
+ if self.ubsan_vptr:
+ detected_options.append('ubsan_vptr')
+
+ return '\n'.join(detected_options)
+
+
+class BaseTestRunner(object):
+ def __init__(self):
+ self.outdir = None
+ self.build_config = None
+ self.mode_name = None
+ self.mode_options = None
+
+ def execute(self):
+ try:
+ parser = self._create_parser()
+ options, args = self._parse_args(parser)
+
+ self._load_build_config(options)
+
+ try:
+ self._process_default_options(options)
+ self._process_options(options)
+ except TestRunnerError:
+ parser.print_help()
+ raise
+
+ self._setup_env()
+ return self._do_execute(options, args)
+ except TestRunnerError:
+ return 1
+
+ def _create_parser(self):
+ parser = optparse.OptionParser()
+ parser.usage = '%prog [options] [tests]'
+ parser.description = """TESTS: %s""" % (TEST_MAP["default"])
+ self._add_parser_default_options(parser)
+ self._add_parser_options(parser)
+ return parser
+
+ def _add_parser_default_options(self, parser):
+ parser.add_option("--gn", help="Scan out.gn for the last built"
+ " configuration",
+ default=False, action="store_true")
+ parser.add_option("--outdir", help="Base directory with compile output",
+ default="out")
+ parser.add_option("--buildbot", help="DEPRECATED!",
+ default=False, action="store_true")
+ parser.add_option("--arch",
+ help="The architecture to run tests for")
+ parser.add_option("-m", "--mode",
+ help="The test mode in which to run (uppercase for ninja"
+ " and buildbot builds): %s" % MODES.keys())
+ parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
+ "directory will be used")
+ parser.add_option("-v", "--verbose", help="Verbose output",
+ default=False, action="store_true")
+
+ def _add_parser_options(self, parser):
+ pass
+
+ def _parse_args(self, parser):
+ options, args = parser.parse_args()
+
+ if any(map(lambda v: v and ',' in v,
+ [options.arch, options.mode])):
+ print 'Multiple arch/mode are deprecated'
+ raise TestRunnerError()
+
+ return options, args
+
+ def _load_build_config(self, options):
+ for outdir in self._possible_outdirs(options):
+ try:
+ self.build_config = self._do_load_build_config(outdir, options.verbose)
+ except TestRunnerError:
+ pass
+
+ if not self.build_config:
+ print 'Failed to load build config'
+ raise TestRunnerError
+
+ print 'Build found: %s' % self.outdir
+ if str(self.build_config):
+ print '>>> Autodetected:'
+ print self.build_config
+
+ # Returns possible build paths in order:
+ # gn
+ # outdir
+ # outdir/arch.mode
+ # Each path is provided in two versions: <path> and <path>/mode for buildbot.
+ def _possible_outdirs(self, options):
+ def outdirs():
+ if options.gn:
+ yield self._get_gn_outdir()
+ return
+
+ yield options.outdir
+ if options.arch and options.mode:
+ yield os.path.join(options.outdir,
+ '%s.%s' % (options.arch, options.mode))
+
+ for outdir in outdirs():
+ yield os.path.join(BASE_DIR, outdir)
+
+ # buildbot option
+ if options.mode:
+ yield os.path.join(BASE_DIR, outdir, options.mode)
+
+ def _get_gn_outdir(self):
+ gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
+ latest_timestamp = -1
+ latest_config = None
+ for gn_config in os.listdir(gn_out_dir):
+ gn_config_dir = os.path.join(gn_out_dir, gn_config)
+ if not os.path.isdir(gn_config_dir):
+ continue
+ if os.path.getmtime(gn_config_dir) > latest_timestamp:
+ latest_timestamp = os.path.getmtime(gn_config_dir)
+ latest_config = gn_config
+ if latest_config:
+ print(">>> Latest GN build found: %s" % latest_config)
+ return os.path.join(DEFAULT_OUT_GN, latest_config)
+
+ def _do_load_build_config(self, outdir, verbose=False):
+ build_config_path = os.path.join(outdir, "v8_build_config.json")
+ if not os.path.exists(build_config_path):
+ if verbose:
+ print("Didn't find build config: %s" % build_config_path)
+ raise TestRunnerError()
+
+ with open(build_config_path) as f:
+ try:
+ build_config_json = json.load(f)
+ except Exception:
+ print("%s exists but contains invalid json. Is your build up-to-date?"
+ % build_config_path)
+ raise TestRunnerError()
+
+ # In auto-detect mode the outdir is always where we found the build config.
+ # This ensures that we'll also take the build products from there.
+ self.outdir = os.path.dirname(build_config_path)
+
+ return BuildConfig(build_config_json)
+
+ def _process_default_options(self, options):
+ # We don't use the mode for more path-magic.
+ # Therefore transform the buildbot mode here to fix build_config value.
+ if options.mode:
+ options.mode = self._buildbot_to_v8_mode(options.mode)
+
+ build_config_mode = 'debug' if self.build_config.is_debug else 'release'
+ if options.mode:
+ if options.mode not in MODES:
+ print '%s mode is invalid' % options.mode
+ raise TestRunnerError()
+ if MODES[options.mode].execution_mode != build_config_mode:
+ print ('execution mode (%s) for %s is inconsistent with build config '
+ '(%s)' % (
+ MODES[options.mode].execution_mode,
+ options.mode,
+ build_config_mode))
+ raise TestRunnerError()
+
+ self.mode_name = options.mode
+ else:
+ self.mode_name = build_config_mode
+
+ self.mode_options = MODES[self.mode_name]
+
+ if options.arch and options.arch != self.build_config.arch:
+ print('--arch value (%s) inconsistent with build config (%s).' % (
+ options.arch, self.build_config.arch))
+ raise TestRunnerError()
+
+ if options.shell_dir:
+ print('Warning: --shell-dir is deprecated. Searching for executables in '
+ 'build directory (%s) instead.' % self.outdir)
+
+ def _buildbot_to_v8_mode(self, config):
+ """Convert buildbot build configs to configs understood by the v8 runner.
+
+ V8 configs are always lower case and without the additional _x64 suffix
+ for 64 bit builds on windows with ninja.
+ """
+ mode = config[:-4] if config.endswith('_x64') else config
+ return mode.lower()
+
+ def _process_options(self, options):
+ pass
+
+ def _setup_env(self):
+ # Use the v8 root as cwd as some test cases use "load" with relative paths.
+ os.chdir(BASE_DIR)
+
+ # Many tests assume an English interface.
+ os.environ['LANG'] = 'en_US.UTF-8'
+
+ symbolizer_option = self._get_external_symbolizer_option()
+
+ if self.build_config.asan:
+ asan_options = [
+ symbolizer_option,
+ 'allow_user_segv_handler=1',
+ 'allocator_may_return_null=1',
+ ]
+ if not utils.GuessOS() in ['macos', 'windows']:
+ # LSAN is not available on mac and windows.
+ asan_options.append('detect_leaks=1')
+ else:
+ asan_options.append('detect_leaks=0')
+ os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
+
+ if self.build_config.cfi_vptr:
+ os.environ['UBSAN_OPTIONS'] = ":".join([
+ 'print_stacktrace=1',
+ 'print_summary=1',
+ 'symbolize=1',
+ symbolizer_option,
+ ])
+
+ if self.build_config.ubsan_vptr:
+ os.environ['UBSAN_OPTIONS'] = ":".join([
+ 'print_stacktrace=1',
+ symbolizer_option,
+ ])
+
+ if self.build_config.msan:
+ os.environ['MSAN_OPTIONS'] = symbolizer_option
+
+ if self.build_config.tsan:
+ suppressions_file = os.path.join(
+ BASE_DIR,
+ 'tools',
+ 'sanitizers',
+ 'tsan_suppressions.txt')
+ os.environ['TSAN_OPTIONS'] = " ".join([
+ symbolizer_option,
+ 'suppressions=%s' % suppressions_file,
+ 'exit_code=0',
+ 'report_thread_leaks=0',
+ 'history_size=7',
+ 'report_destroy_locked=0',
+ ])
+
+ def _get_external_symbolizer_option(self):
+ external_symbolizer_path = os.path.join(
+ BASE_DIR,
+ 'third_party',
+ 'llvm-build',
+ 'Release+Asserts',
+ 'bin',
+ 'llvm-symbolizer',
+ )
+
+ if utils.IsWindows():
+ # Quote, because sanitizers might confuse colon as option separator.
+ external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
+
+ return 'external_symbolizer_path=%s' % external_symbolizer_path
+
+
+ # TODO(majeski): remove options & args parameters
+ def _do_execute(self, options, args):
+ raise NotImplementedError()
diff --git a/deps/v8/tools/testrunner/deopt_fuzzer.py b/deps/v8/tools/testrunner/deopt_fuzzer.py
new file mode 100755
index 0000000000..75878d442c
--- /dev/null
+++ b/deps/v8/tools/testrunner/deopt_fuzzer.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from os.path import join
+import json
+import math
+import multiprocessing
+import os
+import random
+import shlex
+import sys
+import time
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import execution
+from testrunner.local import progress
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.local import verbose
+from testrunner.objects import context
+
+
+DEFAULT_TESTS = ["mjsunit", "webkit"]
+TIMEOUT_DEFAULT = 60
+
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mipsel"]
+MAX_DEOPT = 1000000000
+DISTRIBUTION_MODES = ["smooth", "random"]
+
+
+class DeoptFuzzer(base_runner.BaseTestRunner):
+ def __init__(self):
+ super(DeoptFuzzer, self).__init__()
+
+ class RandomDistribution:
+ def __init__(self, seed=None):
+ seed = seed or random.randint(1, sys.maxint)
+ print "Using random distribution with seed %d" % seed
+ self._random = random.Random(seed)
+
+ def Distribute(self, n, m):
+ if n > m:
+ n = m
+ return self._random.sample(xrange(1, m + 1), n)
+
+ class SmoothDistribution:
+ """Distribute n numbers into the interval [1:m].
+ F1: Factor of the first derivative of the distribution function.
+ F2: Factor of the second derivative of the distribution function.
+ With F1 and F2 set to 0, the distribution will be equal.
+ """
+ def __init__(self, factor1=2.0, factor2=0.2):
+ self._factor1 = factor1
+ self._factor2 = factor2
+
+ def Distribute(self, n, m):
+ if n > m:
+ n = m
+ if n <= 1:
+ return [ 1 ]
+
+ result = []
+ x = 0.0
+ dx = 1.0
+ ddx = self._factor1
+ dddx = self._factor2
+ for i in range(0, n):
+ result += [ x ]
+ x += dx
+ dx += ddx
+ ddx += dddx
+
+ # Project the distribution into the interval [0:M].
+ result = [ x * m / result[-1] for x in result ]
+
+ # Equalize by n. The closer n is to m, the more equal will be the
+ # distribution.
+ for (i, x) in enumerate(result):
+ # The value of x if it was equally distributed.
+ equal_x = i / float(n - 1) * float(m - 1) + 1
+
+ # Difference factor between actual and equal distribution.
+ diff = 1 - (x / equal_x)
+
+ # Equalize x dependent on the number of values to distribute.
+ result[i] = int(x + (i + 1) * diff)
+ return result
+
+
+ def _distribution(self, options):
+ if options.distribution_mode == "random":
+ return self.RandomDistribution(options.seed)
+ if options.distribution_mode == "smooth":
+ return self.SmoothDistribution(options.distribution_factor1,
+ options.distribution_factor2)
+
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--command-prefix",
+ help="Prepended to each shell command used to run a test",
+ default="")
+ parser.add_option("--coverage", help=("Exponential test coverage "
+ "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
+ default=0.4, type="float")
+ parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
+ "with a small number of deopt points (range 0, inf)"),
+ default=20, type="int")
+ parser.add_option("--distribution-factor1", help=("Factor of the first "
+ "derivation of the distribution function"), default=2.0,
+ type="float")
+ parser.add_option("--distribution-factor2", help=("Factor of the second "
+ "derivation of the distribution function"), default=0.7,
+ type="float")
+ parser.add_option("--distribution-mode", help=("How to select deopt points "
+ "for a given test (smooth|random)"),
+ default="smooth")
+ parser.add_option("--dump-results-file", help=("Dump maximum number of "
+ "deopt points per test to a file"))
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to each test command",
+ default="")
+ parser.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type="int")
+ parser.add_option("-p", "--progress",
+ help=("The style of progress indicator"
+ " (verbose, dots, color, mono)"),
+ choices=progress.PROGRESS_INDICATORS.keys(),
+ default="mono")
+ parser.add_option("--shard-count",
+ help="Split testsuites into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
+ parser.add_option("--seed", help="The seed for the random distribution",
+ type="int")
+ parser.add_option("-t", "--timeout", help="Timeout in seconds",
+ default= -1, type="int")
+ parser.add_option("--random-seed", default=0, dest="random_seed",
+ help="Default seed for initializing random generator")
+ parser.add_option("--fuzzer-random-seed", default=0,
+ help="Default seed for initializing fuzzer random "
+ "generator")
+ return parser
+
+
+ def _process_options(self, options):
+ # Special processing of other options, sorted alphabetically.
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = shlex.split(options.extra_flags)
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+ while options.random_seed == 0:
+ options.random_seed = random.SystemRandom().randint(-2147483648,
+ 2147483647)
+ if not options.distribution_mode in DISTRIBUTION_MODES:
+ print "Unknown distribution mode %s" % options.distribution_mode
+ return False
+ if options.distribution_factor1 < 0.0:
+ print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
+ % options.distribution_factor1)
+ options.distribution_factor1 = 0.0
+ if options.distribution_factor2 < 0.0:
+ print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
+ % options.distribution_factor2)
+ options.distribution_factor2 = 0.0
+ if options.coverage < 0.0 or options.coverage > 1.0:
+ print ("Coverage %s is out of range. Defaulting to 0.4"
+ % options.coverage)
+ options.coverage = 0.4
+ if options.coverage_lift < 0:
+ print ("Coverage lift %s is out of range. Defaulting to 0"
+ % options.coverage_lift)
+ options.coverage_lift = 0
+ return True
+
+ def _shard_tests(self, tests, shard_count, shard_run):
+ if shard_count < 2:
+ return tests
+ if shard_run < 1 or shard_run > shard_count:
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return tests
+ count = 0
+ shard = []
+ for test in tests:
+ if count % shard_count == shard_run - 1:
+ shard.append(test)
+ count += 1
+ return shard
+
+ def _do_execute(self, options, args):
+ suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+
+ if len(args) == 0:
+ suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
+ else:
+ args_suites = set()
+ for arg in args:
+ suite = arg.split(os.path.sep)[0]
+ if not suite in args_suites:
+ args_suites.add(suite)
+ suite_paths = [ s for s in suite_paths if s in args_suites ]
+
+ suites = []
+ for root in suite_paths:
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(base_runner.BASE_DIR, "test", root))
+ if suite:
+ suites.append(suite)
+
+ try:
+ return self._execute(args, options, suites)
+ except KeyboardInterrupt:
+ return 2
+
+
+ def _calculate_n_tests(self, m, options):
+ """Calculates the number of tests from m deopt points with exponential
+ coverage.
+ The coverage is expected to be between 0.0 and 1.0.
+ The 'coverage lift' lifts the coverage for tests with smaller m values.
+ """
+ c = float(options.coverage)
+ l = float(options.coverage_lift)
+ return int(math.pow(m, (m * c + l) / (m + l)))
+
+
+ def _execute(self, args, options, suites):
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+
+ dist = self._distribution(options)
+
+ # Populate context object.
+ timeout = options.timeout
+ if timeout == -1:
+ # Simulators are slow, therefore allow a longer default timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ timeout = TIMEOUT_DEFAULT;
+
+ timeout *= self.mode_options.timeout_scalefactor
+ ctx = context.Context(self.build_config.arch,
+ self.mode_options.execution_mode,
+ self.outdir,
+ self.mode_options.flags, options.verbose,
+ timeout, options.isolates,
+ options.command_prefix,
+ options.extra_flags,
+ False, # Keep i18n on by default.
+ options.random_seed,
+ True, # No sorting of test cases.
+ 0, # Don't rerun failing tests.
+ 0, # No use of a rerun-failing-tests maximum.
+ False, # No predictable mode.
+ False, # No no_harness mode.
+ False, # Don't use perf data.
+ False) # Coverage not supported.
+
+ # Find available test suites and read test cases from them.
+ variables = {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": True,
+ "gc_fuzzer": False,
+ "gc_stress": False,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": False,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": False,
+ "predictable": self.build_config.predictable,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": False,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+ num_tests = 0
+ test_id = 0
+
+ # Remember test case prototypes for the fuzzing phase.
+ test_backup = dict((s, []) for s in suites)
+
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+ if len(args) > 0:
+ s.FilterTestCasesByArgs(args)
+ s.FilterTestCasesByStatus(False)
+ for t in s.tests:
+ t.flags += s.GetStatusfileFlags(t)
+
+ test_backup[s] = s.tests
+ analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
+ "--print-deopt-stress"]
+ s.tests = [t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests]
+ num_tests += len(s.tests)
+ for t in s.tests:
+ t.id = test_id
+ test_id += 1
+
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ print(">>> Collection phase")
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+
+ exit_code = runner.Run(options.j)
+
+ print(">>> Analysis phase")
+ num_tests = 0
+ test_id = 0
+ for s in suites:
+ test_results = {}
+ for t in s.tests:
+ for line in t.output.stdout.splitlines():
+ if line.startswith("=== Stress deopt counter: "):
+ test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
+ for t in s.tests:
+ if t.path not in test_results:
+ print "Missing results for %s" % t.path
+ if options.dump_results_file:
+ results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
+ with file("%s.%d.txt" % (options.dump_results_file, time.time()),
+ "w") as f:
+ f.write(json.dumps(results_dict))
+
+ # Reset tests and redistribute the prototypes from the collection phase.
+ s.tests = []
+ if options.verbose:
+ print "Test distributions:"
+ for t in test_backup[s]:
+ max_deopt = test_results.get(t.path, 0)
+ if max_deopt == 0:
+ continue
+ n_deopt = self._calculate_n_tests(max_deopt, options)
+ distribution = dist.Distribute(n_deopt, max_deopt)
+ if options.verbose:
+ print "%s %s" % (t.path, distribution)
+ for i in distribution:
+ fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
+ s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ num_tests += len(s.tests)
+ for t in s.tests:
+ t.id = test_id
+ test_id += 1
+
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+
+ code = runner.Run(options.j)
+ return exit_code or code
+
+
+if __name__ == '__main__':
+ sys.exit(DeoptFuzzer().execute())
diff --git a/deps/v8/tools/testrunner/gc_fuzzer.py b/deps/v8/tools/testrunner/gc_fuzzer.py
new file mode 100755
index 0000000000..4130fff8be
--- /dev/null
+++ b/deps/v8/tools/testrunner/gc_fuzzer.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from os.path import join
+import itertools
+import json
+import math
+import multiprocessing
+import os
+import random
+import shlex
+import sys
+import time
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import execution
+from testrunner.local import progress
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.local import verbose
+from testrunner.objects import context
+
+
+DEFAULT_TESTS = ["mjsunit", "webkit"]
+TIMEOUT_DEFAULT = 60
+
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mipsel"]
+
+
+class GCFuzzer(base_runner.BaseTestRunner):
+ def __init__(self):
+ super(GCFuzzer, self).__init__()
+
+ self.fuzzer_rng = None
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--command-prefix",
+ help="Prepended to each shell command used to run a test",
+ default="")
+ parser.add_option("--coverage", help=("Exponential test coverage "
+ "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
+ default=0.4, type="float")
+ parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
+ "with a low memory size reached (range 0, inf)"),
+ default=20, type="int")
+ parser.add_option("--dump-results-file", help="Dump maximum limit reached")
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to each test command",
+ default="")
+ parser.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type="int")
+ parser.add_option("-p", "--progress",
+ help=("The style of progress indicator"
+ " (verbose, dots, color, mono)"),
+ choices=progress.PROGRESS_INDICATORS.keys(),
+ default="mono")
+ parser.add_option("--shard-count",
+ help="Split testsuites into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
+ parser.add_option("-t", "--timeout", help="Timeout in seconds",
+ default= -1, type="int")
+ parser.add_option("--random-seed", default=0,
+ help="Default seed for initializing random generator")
+ parser.add_option("--fuzzer-random-seed", default=0,
+ help="Default seed for initializing fuzzer random "
+ "generator")
+ parser.add_option("--stress-compaction", default=False, action="store_true",
+ help="Enable stress_compaction_percentage flag")
+
+ parser.add_option("--distribution-factor1", help="DEPRECATED")
+ parser.add_option("--distribution-factor2", help="DEPRECATED")
+ parser.add_option("--distribution-mode", help="DEPRECATED")
+ parser.add_option("--seed", help="DEPRECATED")
+ return parser
+
+
+ def _process_options(self, options):
+ # Special processing of other options, sorted alphabetically.
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = shlex.split(options.extra_flags)
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+ while options.random_seed == 0:
+ options.random_seed = random.SystemRandom().randint(-2147483648,
+ 2147483647)
+ while options.fuzzer_random_seed == 0:
+ options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
+ 2147483647)
+ self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
+ return True
+
+ def _shard_tests(self, tests, shard_count, shard_run):
+ if shard_count < 2:
+ return tests
+ if shard_run < 1 or shard_run > shard_count:
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return tests
+ count = 0
+ shard = []
+ for test in tests:
+ if count % shard_count == shard_run - 1:
+ shard.append(test)
+ count += 1
+ return shard
+
+ def _do_execute(self, options, args):
+ suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+
+ if len(args) == 0:
+ suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
+ else:
+ args_suites = set()
+ for arg in args:
+ suite = arg.split(os.path.sep)[0]
+ if not suite in args_suites:
+ args_suites.add(suite)
+ suite_paths = [ s for s in suite_paths if s in args_suites ]
+
+ suites = []
+ for root in suite_paths:
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(base_runner.BASE_DIR, "test", root))
+ if suite:
+ suites.append(suite)
+
+ try:
+ return self._execute(args, options, suites)
+ except KeyboardInterrupt:
+ return 2
+
+
+ def _calculate_n_tests(self, m, options):
+ """Calculates the number of tests from m points with exponential coverage.
+ The coverage is expected to be between 0.0 and 1.0.
+ The 'coverage lift' lifts the coverage for tests with smaller m values.
+ """
+ c = float(options.coverage)
+ l = float(options.coverage_lift)
+ return int(math.pow(m, (m * c + l) / (m + l)))
+
+
+ def _execute(self, args, options, suites):
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+
+ # Populate context object.
+ timeout = options.timeout
+ if timeout == -1:
+ # Simulators are slow, therefore allow a longer default timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ timeout = TIMEOUT_DEFAULT;
+
+ timeout *= self.mode_options.timeout_scalefactor
+ ctx = context.Context(self.build_config.arch,
+ self.mode_options.execution_mode,
+ self.outdir,
+ self.mode_options.flags, options.verbose,
+ timeout, options.isolates,
+ options.command_prefix,
+ options.extra_flags,
+ False, # Keep i18n on by default.
+ options.random_seed,
+ True, # No sorting of test cases.
+ 0, # Don't rerun failing tests.
+ 0, # No use of a rerun-failing-tests maximum.
+ False, # No predictable mode.
+ False, # No no_harness mode.
+ False, # Don't use perf data.
+ False) # Coverage not supported.
+
+ num_tests = self._load_tests(args, options, suites, ctx)
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ test_backup = dict(map(lambda s: (s, s.tests), suites))
+
+ print('>>> Collection phase')
+ for s in suites:
+ analysis_flags = [
+ # > 100% to not influence default incremental marking, but we need this
+ # flag to print reached incremental marking limit.
+ '--stress_marking', '1000',
+ '--trace_incremental_marking',
+ ]
+ s.tests = map(lambda t: t.CopyAddingFlags(t.variant, analysis_flags),
+ s.tests)
+
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+ exit_code = runner.Run(options.j)
+
+ print('>>> Analysis phase')
+ test_results = dict()
+ for s in suites:
+ for t in s.tests:
+ # Skip failed tests.
+ if s.HasUnexpectedOutput(t):
+ print '%s failed, skipping' % t.path
+ continue
+ max_limit = self._get_max_limit_reached(t)
+ if max_limit:
+ test_results[t.path] = max_limit
+
+ if options.dump_results_file:
+ with file("%s.%d.txt" % (options.dump_results_file, time.time()),
+ "w") as f:
+ f.write(json.dumps(test_results))
+
+ num_tests = 0
+ for s in suites:
+ s.tests = []
+ for t in test_backup[s]:
+ max_percent = test_results.get(t.path, 0)
+ if not max_percent or max_percent < 1.0:
+ continue
+ max_percent = int(max_percent)
+
+ subtests_count = self._calculate_n_tests(max_percent, options)
+
+ if options.verbose:
+ print ('%s [x%d] (max marking limit=%.02f)' %
+ (t.path, subtests_count, max_percent))
+ for _ in xrange(0, subtests_count):
+ fuzzer_seed = self._next_fuzzer_seed()
+ fuzzing_flags = [
+ '--stress_marking', str(max_percent),
+ '--fuzzer_random_seed', str(fuzzer_seed),
+ ]
+ if options.stress_compaction:
+ fuzzing_flags.append('--stress_compaction_random')
+ s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ num_tests += len(s.tests)
+
+ if num_tests == 0:
+ print "No tests to run."
+ return 0
+
+ print(">>> Fuzzing phase (%d test cases)" % num_tests)
+ progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ runner = execution.Runner(suites, progress_indicator, ctx)
+
+ return runner.Run(options.j) or exit_code
+
+ def _load_tests(self, args, options, suites, ctx):
+ # Find available test suites and read test cases from them.
+ variables = {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": False,
+ "gc_fuzzer": True,
+ "gc_stress": False,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": False,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": False,
+ "predictable": self.build_config.predictable,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": False,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+
+ num_tests = 0
+ test_id = 0
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+ if len(args) > 0:
+ s.FilterTestCasesByArgs(args)
+ s.FilterTestCasesByStatus(False)
+ for t in s.tests:
+ t.flags += s.GetStatusfileFlags(t)
+
+ num_tests += len(s.tests)
+ for t in s.tests:
+ t.id = test_id
+ test_id += 1
+
+ return num_tests
+
+ # Parses test stdout and returns what was the highest reached percent of the
+ # incremental marking limit (0-100).
+ # Skips values >=100% since they already trigger incremental marking.
+ @staticmethod
+ def _get_max_limit_reached(test):
+ def is_im_line(l):
+ return 'IncrementalMarking' in l and '% of the memory limit reached' in l
+
+ def line_to_percent(l):
+ return filter(lambda part: '%' in part, l.split(' '))[0]
+
+ def percent_str_to_float(s):
+ return float(s[:-1])
+
+ if not (test.output and test.output.stdout):
+ return None
+
+ im_lines = filter(is_im_line, test.output.stdout.splitlines())
+ percents_str = map(line_to_percent, im_lines)
+ percents = map(percent_str_to_float, percents_str)
+
+ # Skip >= 100%.
+ percents = filter(lambda p: p < 100, percents)
+
+ if not percents:
+ return None
+ return max(percents)
+
+ def _next_fuzzer_seed(self):
+ fuzzer_seed = None
+ while not fuzzer_seed:
+ fuzzer_seed = self.fuzzer_rng.randint(-2147483648, 2147483647)
+ return fuzzer_seed
+
+
+if __name__ == '__main__':
+ sys.exit(GCFuzzer().execute())
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
index b2dc74e4d4..4afd450d2f 100644
--- a/deps/v8/tools/testrunner/local/commands.py
+++ b/deps/v8/tools/testrunner/local/commands.py
@@ -106,7 +106,24 @@ def RunProcess(verbose, timeout, args, additional_env, **rest):
print "Return code: %d" % tk.returncode
sys.stdout.flush()
else:
+ if utils.GuessOS() == "macos":
+ # TODO(machenbach): Temporary output for investigating hanging test
+ # driver on mac.
+ print "Attempting to kill process %d - cmd %s" % (process.pid, args)
+ try:
+ print subprocess.check_output(
+ "ps -e | egrep 'd8|cctest|unittests'", shell=True)
+ except Exception:
+ pass
+ sys.stdout.flush()
process.kill()
+ if utils.GuessOS() == "macos":
+ # TODO(machenbach): Temporary output for investigating hanging test
+ # driver on mac. This will probably not print much, since kill only
+ # sends the signal.
+ print "Return code after signalling the kill: %s" % process.returncode
+ sys.stdout.flush()
+
except OSError:
sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
@@ -127,6 +144,9 @@ def RunProcess(verbose, timeout, args, additional_env, **rest):
)
+# TODO(machenbach): Instead of passing args around, we should introduce an
+# immutable Command class (that just represents the command with all flags and
+# is pretty-printable) and a member method for running such a command.
def Execute(args, verbose=False, timeout=None, env=None):
args = [ c for c in args if c != "" ]
return RunProcess(verbose, timeout, args, env or {})
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index dc55129a14..8cc3556cae 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -85,23 +85,28 @@ def MakeProcessContext(context, suite_names):
def GetCommand(test, context):
d8testflag = []
- shell = test.shell()
+ shell = test.suite.GetShellForTestCase(test)
if shell == "d8":
d8testflag = ["--test"]
if utils.IsWindows():
shell += ".exe"
if context.random_seed:
d8testflag += ["--random-seed=%s" % context.random_seed]
- cmd = (context.command_prefix +
- [os.path.abspath(os.path.join(context.shell_dir, shell))] +
- d8testflag +
- test.suite.GetFlagsForTestCase(test, context) +
- context.extra_flags)
- return cmd
+ files, flags, env = test.suite.GetParametersForTestCase(test, context)
+ cmd = (
+ context.command_prefix +
+ [os.path.abspath(os.path.join(context.shell_dir, shell))] +
+ d8testflag +
+ files +
+ context.extra_flags +
+ # Flags from test cases can overwrite extra cmd-line flags.
+ flags
+ )
+ return cmd, env
def _GetInstructions(test, context):
- command = GetCommand(test, context)
+ command, env = GetCommand(test, context)
timeout = context.timeout
if ("--stress-opt" in test.flags or
"--stress-opt" in context.mode_flags or
@@ -109,11 +114,10 @@ def _GetInstructions(test, context):
timeout *= 4
if "--noenable-vfp3" in context.extra_flags:
timeout *= 2
- # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
- # the like.
- if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
- timeout *= 2
- return Instructions(command, test.id, timeout, context.verbose, test.env)
+
+ # TODO(majeski): make it slow outcome dependent.
+ timeout *= 2
+ return Instructions(command, test.id, timeout, context.verbose, env)
class Job(object):
@@ -156,8 +160,9 @@ class TestJob(Job):
failures).
"""
if context.sancov_dir and output.pid is not None:
+ shell = self.test.suite.GetShellForTestCase(self.test)
sancov_file = os.path.join(
- context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))
+ context.sancov_dir, "%s.%d.sancov" % (shell, output.pid))
# Some tests are expected to fail and don't produce coverage data.
if os.path.exists(sancov_file):
@@ -177,6 +182,7 @@ class TestJob(Job):
self.test.SetSuiteObject(process_context.suites)
instr = _GetInstructions(self.test, process_context.context)
except Exception, e:
+ # TODO(majeski): Better exception reporting.
return SetupProblem(e, self.test)
start_time = time.time()
@@ -203,7 +209,7 @@ class Runner(object):
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
- slow_key = lambda t: statusfile.IsSlow(t.outcomes)
+ slow_key = lambda t: statusfile.IsSlow(t.suite.GetStatusFileOutcomes(t))
self.tests.sort(key=slow_key, reverse=True)
# Sort by stored duration of not opted out.
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 6321cadece..e57a6e36c9 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -71,7 +71,7 @@ class ProgressIndicator(object):
}
def _EscapeCommand(self, test):
- command = execution.GetCommand(test, self.runner.context)
+ command, _ = execution.GetCommand(test, self.runner.context)
parts = []
for part in command:
if ' ' in part:
@@ -336,7 +336,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
"flags": test.flags,
"command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration,
- "marked_slow": statusfile.IsSlow(test.outcomes),
+ "marked_slow": statusfile.IsSlow(
+ test.suite.GetStatusFileOutcomes(test)),
} for test in timed_tests[:20]
]
@@ -369,13 +370,13 @@ class JsonTestProgressIndicator(ProgressIndicator):
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
- "expected": list(test.outcomes or ["PASS"]),
+ "expected": test.suite.GetExpectedOutcomes(test),
"duration": test.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
- "target_name": test.suite.shell(),
+ "target_name": test.suite.GetShellForTestCase(test),
"variant": test.variant,
})
@@ -414,11 +415,7 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
if test.run == 1:
# First run of this test.
- expected_outcomes = ([
- expected
- for expected in (test.outcomes or ["PASS"])
- if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
- ] or ["PASS"])
+ expected_outcomes = test.suite.GetExpectedOutcomes(test)
self.results[key] = {
"actual": outcome,
"expected": " ".join(expected_outcomes),
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 880837b8a7..7caf0711ca 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -31,31 +31,29 @@ import re
from variants import ALL_VARIANTS
from utils import Freeze
-# These outcomes can occur in a TestCase's outcomes list:
-SKIP = "SKIP"
+# Possible outcomes
FAIL = "FAIL"
PASS = "PASS"
-OKAY = "OKAY"
-TIMEOUT = "TIMEOUT"
-CRASH = "CRASH"
+TIMEOUT = "TIMEOUT" # TODO(majeski): unused in status files
+CRASH = "CRASH" # TODO(majeski): unused in status files
+
+# Outcomes only for status file, need special handling
+FAIL_OK = "FAIL_OK"
+FAIL_SLOPPY = "FAIL_SLOPPY"
+
+# Modifiers
+SKIP = "SKIP"
SLOW = "SLOW"
FAST_VARIANTS = "FAST_VARIANTS"
NO_VARIANTS = "NO_VARIANTS"
-# These are just for the status files and are mapped below in DEFS:
-FAIL_OK = "FAIL_OK"
-PASS_OR_FAIL = "PASS_OR_FAIL"
-FAIL_SLOPPY = "FAIL_SLOPPY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, CRASH, SLOW, FAIL_OK,
- FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY, ALWAYS]:
+for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, FAST_VARIANTS, NO_VARIANTS,
+ FAIL_SLOPPY, ALWAYS]:
KEYWORDS[key] = key
-DEFS = {FAIL_OK: [FAIL, OKAY],
- PASS_OR_FAIL: [PASS, FAIL]}
-
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
@@ -87,25 +85,13 @@ def OnlyFastVariants(outcomes):
def IsPassOrFail(outcomes):
- return ((PASS in outcomes) and (FAIL in outcomes) and
- (not CRASH in outcomes) and (not OKAY in outcomes))
+ return (PASS in outcomes and
+ FAIL in outcomes and
+ CRASH not in outcomes)
def IsFailOk(outcomes):
- return (FAIL in outcomes) and (OKAY in outcomes)
-
-
-def _AddOutcome(result, new):
- global DEFS
- if new in DEFS:
- mapped = DEFS[new]
- if type(mapped) == list:
- for m in mapped:
- _AddOutcome(result, m)
- elif type(mapped) == str:
- _AddOutcome(result, mapped)
- else:
- result.add(new)
+ return FAIL_OK in outcomes
def _JoinsPassAndFail(outcomes1, outcomes2):
@@ -114,13 +100,17 @@ def _JoinsPassAndFail(outcomes1, outcomes2):
"""
return (
PASS in outcomes1 and
- not FAIL in outcomes1 and
- FAIL in outcomes2
+ not (FAIL in outcomes1 or FAIL_OK in outcomes1) and
+ (FAIL in outcomes2 or FAIL_OK in outcomes2)
)
VARIANT_EXPRESSION = object()
def _EvalExpression(exp, variables):
+ """Evaluates expression and returns its result. In case of NameError caused by
+ undefined "variant" identifier returns VARIANT_EXPRESSION marker.
+ """
+
try:
return eval(exp, variables)
except NameError as e:
@@ -129,32 +119,35 @@ def _EvalExpression(exp, variables):
return VARIANT_EXPRESSION
-def _EvalVariantExpression(section, rules, wildcards, variant, variables):
- variables_with_variant = {}
- variables_with_variant.update(variables)
+def _EvalVariantExpression(
+ condition, section, variables, variant, rules, prefix_rules):
+ variables_with_variant = dict(variables)
variables_with_variant["variant"] = variant
- result = _EvalExpression(section[0], variables_with_variant)
+ result = _EvalExpression(condition, variables_with_variant)
assert result != VARIANT_EXPRESSION
if result is True:
_ReadSection(
- section[1],
- rules[variant],
- wildcards[variant],
+ section,
variables_with_variant,
+ rules[variant],
+ prefix_rules[variant],
)
else:
assert result is False, "Make sure expressions evaluate to boolean values"
-def _ParseOutcomeList(rule, outcomes, target_dict, variables):
+def _ParseOutcomeList(rule, outcomes, variables, target_dict):
+ """Outcome list format: [condition, outcome, outcome, ...]"""
+
result = set([])
if type(outcomes) == str:
outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
- _AddOutcome(result, item)
+ result.add(item)
elif type(item) == list:
- exp = _EvalExpression(item[0], variables)
+ condition = item[0]
+ exp = _EvalExpression(condition, variables)
assert exp != VARIANT_EXPRESSION, (
"Nested variant expressions are not supported")
if exp is False:
@@ -166,10 +159,11 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
for outcome in item[1:]:
assert type(outcome) == str
- _AddOutcome(result, outcome)
+ result.add(outcome)
else:
assert False
- if len(result) == 0: return
+ if len(result) == 0:
+ return
if rule in target_dict:
# A FAIL without PASS in one rule has always precedence over a single
# PASS (without FAIL) in another. Otherwise the default PASS expectation
@@ -186,51 +180,69 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
def ReadContent(content):
- global KEYWORDS
return eval(content, KEYWORDS)
def ReadStatusFile(content, variables):
- # Empty defaults for rules and wildcards. Variant-independent
+ """Status file format
+ Status file := [section]
+ section = [CONDITION, section_rules]
+ section_rules := {path: outcomes}
+ outcomes := outcome | [outcome, ...]
+ outcome := SINGLE_OUTCOME | [CONDITION, SINGLE_OUTCOME, SINGLE_OUTCOME, ...]
+ """
+
+ # Empty defaults for rules and prefix_rules. Variant-independent
# rules are mapped by "", others by the variant name.
rules = {variant: {} for variant in ALL_VARIANTS}
rules[""] = {}
- wildcards = {variant: {} for variant in ALL_VARIANTS}
- wildcards[""] = {}
+ prefix_rules = {variant: {} for variant in ALL_VARIANTS}
+ prefix_rules[""] = {}
variables.update(VARIABLES)
- for section in ReadContent(content):
- assert type(section) == list
- assert len(section) == 2
- exp = _EvalExpression(section[0], variables)
+ for conditional_section in ReadContent(content):
+ assert type(conditional_section) == list
+ assert len(conditional_section) == 2
+ condition, section = conditional_section
+ exp = _EvalExpression(condition, variables)
+
+ # The expression is variant-independent and evaluates to False.
if exp is False:
- # The expression is variant-independent and evaluates to False.
continue
- elif exp == VARIANT_EXPRESSION:
- # If the expression contains one or more "variant" keywords, we evaluate
- # it for all possible variants and create rules for those that apply.
- for variant in ALL_VARIANTS:
- _EvalVariantExpression(section, rules, wildcards, variant, variables)
- else:
- # The expression is variant-independent and evaluates to True.
- assert exp is True, "Make sure expressions evaluate to boolean values"
+
+ # The expression is variant-independent and evaluates to True.
+ if exp is True:
_ReadSection(
- section[1],
- rules[""],
- wildcards[""],
+ section,
variables,
+ rules[''],
+ prefix_rules[''],
)
- return Freeze(rules), Freeze(wildcards)
+ continue
+ # The expression is variant-dependent (contains "variant" keyword)
+ if exp == VARIANT_EXPRESSION:
+ # If the expression contains one or more "variant" keywords, we evaluate
+ # it for all possible variants and create rules for those that apply.
+ for variant in ALL_VARIANTS:
+ _EvalVariantExpression(
+ condition, section, variables, variant, rules, prefix_rules)
+ continue
-def _ReadSection(section, rules, wildcards, variables):
+ assert False, "Make sure expressions evaluate to boolean values"
+
+ return Freeze(rules), Freeze(prefix_rules)
+
+
+def _ReadSection(section, variables, rules, prefix_rules):
assert type(section) == dict
- for rule in section:
+ for rule, outcome_list in section.iteritems():
assert type(rule) == str
+
if rule[-1] == '*':
- _ParseOutcomeList(rule, section[rule], wildcards, variables)
+ _ParseOutcomeList(rule[:-1], outcome_list, variables, prefix_rules)
else:
- _ParseOutcomeList(rule, section[rule], rules, variables)
+ _ParseOutcomeList(rule, outcome_list, variables, rules)
JS_TEST_PATHS = {
'debugger': [[]],
@@ -266,6 +278,8 @@ def PresubmitCheck(path):
"Suite name prefix must not be used in rule keys")
_assert(not rule.endswith('.js'),
".js extension must not be used in rule keys.")
+ _assert('*' not in rule or (rule.count('*') == 1 and rule[-1] == '*'),
+ "Only the last character of a rule key can be a wildcard")
if basename in JS_TEST_PATHS and '*' not in rule:
_assert(any(os.path.exists(os.path.join(os.path.dirname(path),
*(paths + [rule + ".js"])))
diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_unittest.py
index f64ab3425e..299e332c1c 100755
--- a/deps/v8/tools/testrunner/local/statusfile_unittest.py
+++ b/deps/v8/tools/testrunner/local/statusfile_unittest.py
@@ -87,7 +87,7 @@ class StatusFileTest(unittest.TestCase):
)
def test_read_statusfile_section_true(self):
- rules, wildcards = statusfile.ReadStatusFile(
+ rules, prefix_rules = statusfile.ReadStatusFile(
TEST_STATUS_FILE % 'system==linux', make_variables())
self.assertEquals(
@@ -99,15 +99,15 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['SLOW', 'FAIL']),
+ 'foo/': set(['SLOW', 'FAIL']),
},
- wildcards[''],
+ prefix_rules[''],
)
self.assertEquals({}, rules['default'])
- self.assertEquals({}, wildcards['default'])
+ self.assertEquals({}, prefix_rules['default'])
def test_read_statusfile_section_false(self):
- rules, wildcards = statusfile.ReadStatusFile(
+ rules, prefix_rules = statusfile.ReadStatusFile(
TEST_STATUS_FILE % 'system==windows', make_variables())
self.assertEquals(
@@ -119,15 +119,15 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
- wildcards[''],
+ prefix_rules[''],
)
self.assertEquals({}, rules['default'])
- self.assertEquals({}, wildcards['default'])
+ self.assertEquals({}, prefix_rules['default'])
def test_read_statusfile_section_variant(self):
- rules, wildcards = statusfile.ReadStatusFile(
+ rules, prefix_rules = statusfile.ReadStatusFile(
TEST_STATUS_FILE % 'system==linux and variant==default',
make_variables(),
)
@@ -141,9 +141,9 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
- wildcards[''],
+ prefix_rules[''],
)
self.assertEquals(
{
@@ -153,9 +153,9 @@ class StatusFileTest(unittest.TestCase):
)
self.assertEquals(
{
- 'foo/*': set(['FAIL']),
+ 'foo/': set(['FAIL']),
},
- wildcards['default'],
+ prefix_rules['default'],
)
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 3b8f956a7f..946e89a3fc 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -50,15 +50,17 @@ class VariantGenerator(object):
def FilterVariantsByTest(self, testcase):
result = self.all_variants
- if testcase.outcomes:
- if statusfile.OnlyStandardVariant(testcase.outcomes):
+ outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
+ if outcomes:
+ if statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant
- if statusfile.OnlyFastVariants(testcase.outcomes):
+ if statusfile.OnlyFastVariants(outcomes):
result = self.fast_variants
return result
def GetFlagSets(self, testcase, variant):
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
+ if outcomes and statusfile.OnlyFastVariants(outcomes):
return FAST_VARIANT_FLAGS[variant]
else:
return ALL_VARIANT_FLAGS[variant]
@@ -86,12 +88,11 @@ class TestSuite(object):
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
- self.rules = None # dictionary mapping test path to list of outcomes
- self.wildcards = None # dictionary mapping test paths to list of outcomes
+ self.rules = None # {variant: {test name: [rule]}}
+ self.prefix_rules = None # {variant: {test name prefix: [rule]}}
self.total_duration = None # float, assigned on demand
- def shell(self):
- return "d8"
+ self._outcomes_cache = dict()
def suffix(self):
return ".js"
@@ -131,109 +132,104 @@ class TestSuite(object):
"""
pass
- def DownloadData(self):
- pass
-
def ReadStatusFile(self, variables):
with open(self.status_file()) as f:
- self.rules, self.wildcards = (
+ self.rules, self.prefix_rules = (
statusfile.ReadStatusFile(f.read(), variables))
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
- @staticmethod
- def _FilterSlow(slow, mode):
- return (mode == "run" and not slow) or (mode == "skip" and slow)
+ def GetStatusfileFlags(self, test):
+ """Gets runtime flags from a status file.
- @staticmethod
- def _FilterPassFail(pass_fail, mode):
- return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
-
- def FilterTestCasesByStatus(self, warn_unused_rules,
- slow_tests="dontcare",
- pass_fail_tests="dontcare",
- variants=False):
-
- # Use only variants-dependent rules and wildcards when filtering
- # respective test cases and generic rules when filtering generic test
- # cases.
- if not variants:
- rules = self.rules[""]
- wildcards = self.wildcards[""]
- else:
- # We set rules and wildcards to a variant-specific version for each test
- # below.
- rules = {}
- wildcards = {}
+ Every outcome that starts with "--" is a flag. Status file has to be loaded
+ before using this function.
+ """
+ flags = []
+ for outcome in self.GetStatusFileOutcomes(test):
+ if outcome.startswith('--'):
+ flags.append(outcome)
+ return flags
- filtered = []
+ def FilterTestCasesByStatus(self,
+ slow_tests_mode=None,
+ pass_fail_tests_mode=None):
+ """Filters tests by outcomes from status file.
+
+ Status file has to be loaded before using this function.
+
+ Args:
+ slow_tests_mode: What to do with slow tests.
+ pass_fail_tests_mode: What to do with pass or fail tests.
- # Remember used rules as tuples of (rule, variant), where variant is "" for
- # variant-independent rules.
+ Mode options:
+ None (default) - don't skip
+ "skip" - skip if slow/pass_fail
+ "run" - skip if not slow/pass_fail
+ """
+ def _skip_slow(is_slow, mode):
+ return (
+ (mode == 'run' and not is_slow) or
+ (mode == 'skip' and is_slow))
+
+ def _skip_pass_fail(pass_fail, mode):
+ return (
+ (mode == 'run' and not pass_fail) or
+ (mode == 'skip' and pass_fail))
+
+ def _compliant(test):
+ outcomes = self.GetStatusFileOutcomes(test)
+ if statusfile.DoSkip(outcomes):
+ return False
+ if _skip_slow(statusfile.IsSlow(outcomes), slow_tests_mode):
+ return False
+ if _skip_pass_fail(statusfile.IsPassOrFail(outcomes),
+ pass_fail_tests_mode):
+ return False
+ return True
+
+ self.tests = filter(_compliant, self.tests)
+
+ def WarnUnusedRules(self, check_variant_rules=False):
+ """Finds and prints unused rules in status file.
+
+ Rule X is unused when it doesn't apply to any tests, which can also mean
+ that all matching tests were skipped by another rule before evaluating X.
+
+ Status file has to be loaded before using this function.
+ """
+
+ if check_variant_rules:
+ variants = list(ALL_VARIANTS)
+ else:
+ variants = ['']
used_rules = set()
for t in self.tests:
- slow = False
- pass_fail = False
testname = self.CommonTestName(t)
variant = t.variant or ""
- if variants:
- rules = self.rules[variant]
- wildcards = self.wildcards[variant]
- if testname in rules:
- used_rules.add((testname, variant))
- # Even for skipped tests, as the TestCase object stays around and
- # PrintReport() uses it.
- t.outcomes = t.outcomes | rules[testname]
- if statusfile.DoSkip(t.outcomes):
- continue # Don't add skipped tests to |filtered|.
- for outcome in t.outcomes:
- if outcome.startswith('Flags: '):
- t.flags += outcome[7:].split()
- slow = statusfile.IsSlow(t.outcomes)
- pass_fail = statusfile.IsPassOrFail(t.outcomes)
- skip = False
- for rule in wildcards:
- assert rule[-1] == '*'
- if testname.startswith(rule[:-1]):
- used_rules.add((rule, variant))
- t.outcomes = t.outcomes | wildcards[rule]
- if statusfile.DoSkip(t.outcomes):
- skip = True
- break # "for rule in wildcards"
- slow = slow or statusfile.IsSlow(t.outcomes)
- pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
- if (skip
- or self._FilterSlow(slow, slow_tests)
- or self._FilterPassFail(pass_fail, pass_fail_tests)):
- continue # "for t in self.tests"
- filtered.append(t)
- self.tests = filtered
-
- if not warn_unused_rules:
- return
-
- if not variants:
- for rule in self.rules[""]:
- if (rule, "") not in used_rules:
- print("Unused rule: %s -> %s (variant independent)" % (
- rule, self.rules[""][rule]))
- for rule in self.wildcards[""]:
- if (rule, "") not in used_rules:
- print("Unused rule: %s -> %s (variant independent)" % (
- rule, self.wildcards[""][rule]))
- else:
- for variant in ALL_VARIANTS:
- for rule in self.rules[variant]:
- if (rule, variant) not in used_rules:
- print("Unused rule: %s -> %s (variant: %s)" % (
- rule, self.rules[variant][rule], variant))
- for rule in self.wildcards[variant]:
- if (rule, variant) not in used_rules:
- print("Unused rule: %s -> %s (variant: %s)" % (
- rule, self.wildcards[variant][rule], variant))
+ if testname in self.rules.get(variant, {}):
+ used_rules.add((testname, variant))
+ if statusfile.DoSkip(self.rules[variant][testname]):
+ continue
+
+ for prefix in self.prefix_rules.get(variant, {}):
+ if testname.startswith(prefix):
+ used_rules.add((prefix, variant))
+ if statusfile.DoSkip(self.prefix_rules[variant][prefix]):
+ break
+
+ for variant in variants:
+ for rule, value in (list(self.rules.get(variant, {}).iteritems()) +
+ list(self.prefix_rules.get(variant, {}).iteritems())):
+ if (rule, variant) not in used_rules:
+ if variant == '':
+ variant_desc = 'variant independent'
+ else:
+ variant_desc = 'variant: %s' % variant
+ print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
@@ -260,7 +256,66 @@ class TestSuite(object):
break
self.tests = filtered
- def GetFlagsForTestCase(self, testcase, context):
+ def GetExpectedOutcomes(self, testcase):
+ """Gets expected outcomes from status file.
+
+ It differs from GetStatusFileOutcomes by selecting only outcomes that can
+ be result of test execution.
+ Status file has to be loaded before using this function.
+ """
+ outcomes = self.GetStatusFileOutcomes(testcase)
+
+ expected = []
+ if (statusfile.FAIL in outcomes or
+ statusfile.FAIL_OK in outcomes):
+ expected.append(statusfile.FAIL)
+
+ if statusfile.CRASH in outcomes:
+ expected.append(statusfile.CRASH)
+
+ if statusfile.PASS in outcomes:
+ expected.append(statusfile.PASS)
+
+ return expected or [statusfile.PASS]
+
+ def GetStatusFileOutcomes(self, testcase):
+ """Gets outcomes from status file.
+
+ Merges variant dependent and independent rules. Status file has to be loaded
+ before using this function.
+ """
+ variant = testcase.variant or ''
+ testname = self.CommonTestName(testcase)
+ cache_key = '%s$%s' % (testname, variant)
+
+ if cache_key not in self._outcomes_cache:
+ # Load statusfile to get outcomes for the first time.
+ assert(self.rules is not None)
+ assert(self.prefix_rules is not None)
+
+ outcomes = frozenset()
+
+ for key in set([variant, '']):
+ rules = self.rules.get(key, {})
+ prefix_rules = self.prefix_rules.get(key, {})
+
+ if testname in rules:
+ outcomes |= rules[testname]
+
+ for prefix in prefix_rules:
+ if testname.startswith(prefix):
+ outcomes |= prefix_rules[prefix]
+
+ self._outcomes_cache[cache_key] = outcomes
+
+ return self._outcomes_cache[cache_key]
+
+ def GetShellForTestCase(self, testcase):
+ """Returns shell to be executed for this test case."""
+ return 'd8'
+
+ def GetParametersForTestCase(self, testcase, context):
+ """Returns a tuple of (files, flags, env) for this test case."""
raise NotImplementedError
def GetSourceForTest(self, testcase):
@@ -290,8 +345,7 @@ class TestSuite(object):
return statusfile.PASS
def HasUnexpectedOutput(self, testcase):
- outcome = self.GetOutcome(testcase)
- return not outcome in (testcase.outcomes or [statusfile.PASS])
+ return self.GetOutcome(testcase) not in self.GetExpectedOutcomes(testcase)
def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase):
@@ -315,18 +369,24 @@ class GoogleTestSuite(TestSuite):
super(GoogleTestSuite, self).__init__(name, root)
def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+ shell = os.path.abspath(
+ os.path.join(context.shell_dir, self.GetShellForTestCase(None)))
if utils.IsWindows():
shell += ".exe"
output = None
for i in xrange(3): # Try 3 times in case of errors.
- output = commands.Execute(context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags)
+ cmd = (
+ context.command_prefix +
+ [shell, "--gtest_list_tests"] +
+ context.extra_flags
+ )
+ output = commands.Execute(cmd)
if output.exit_code == 0:
break
- print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
+ print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
+ print ' '.join(cmd)
+ print "\nStdout:"
print output.stdout
print "\nStderr:"
print output.stderr
@@ -346,14 +406,17 @@ class GoogleTestSuite(TestSuite):
tests.sort(key=lambda t: t.path)
return tests
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + ["--gtest_filter=" + testcase.path] +
- ["--gtest_random_seed=%s" % context.random_seed] +
- ["--gtest_print_time=0"] +
- context.mode_flags)
+ def GetParametersForTestCase(self, testcase, context):
+ flags = (
+ testcase.flags +
+ ["--gtest_filter=" + testcase.path] +
+ ["--gtest_random_seed=%s" % context.random_seed] +
+ ["--gtest_print_time=0"] +
+ context.mode_flags)
+ return [], flags, {}
def _VariantGeneratorFactory(self):
return StandardVariantGenerator
- def shell(self):
+ def GetShellForTestCase(self, testcase):
return self.name
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
index 1e10ef5564..a8483b9fc0 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -29,17 +29,18 @@ class TestSuiteTest(unittest.TestCase):
'baz/bar': set(['PASS', 'FAIL']),
},
}
- suite.wildcards = {
+ suite.prefix_rules = {
'': {
- 'baz/*': set(['PASS', 'SLOW']),
+ 'baz/': set(['PASS', 'SLOW']),
},
}
- suite.FilterTestCasesByStatus(warn_unused_rules=False)
+ suite.FilterTestCasesByStatus()
self.assertEquals(
[TestCase(suite, 'baz/bar')],
suite.tests,
)
- self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), suite.tests[0].outcomes)
+ outcomes = suite.GetStatusFileOutcomes(suite.tests[0])
+ self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
def test_filter_testcases_by_status_second_pass(self):
suite = TestSuite('foo', 'bar')
@@ -47,10 +48,6 @@ class TestSuiteTest(unittest.TestCase):
test1 = TestCase(suite, 'foo/bar')
test2 = TestCase(suite, 'baz/bar')
- # Contrived outcomes from filtering by variant-independent rules.
- test1.outcomes = set(['PREV'])
- test2.outcomes = set(['PREV'])
-
suite.tests = [
test1.CopyAddingFlags(variant='default', flags=[]),
test1.CopyAddingFlags(variant='stress', flags=['-v']),
@@ -59,6 +56,9 @@ class TestSuiteTest(unittest.TestCase):
]
suite.rules = {
+ '': {
+ 'foo/bar': set(['PREV']),
+ },
'default': {
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
@@ -67,15 +67,18 @@ class TestSuiteTest(unittest.TestCase):
'baz/bar': set(['SKIP']),
},
}
- suite.wildcards = {
+ suite.prefix_rules = {
+ '': {
+ 'baz/': set(['PREV']),
+ },
'default': {
- 'baz/*': set(['PASS', 'SLOW']),
+ 'baz/': set(['PASS', 'SLOW']),
},
'stress': {
- 'foo/*': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
}
- suite.FilterTestCasesByStatus(warn_unused_rules=False, variants=True)
+ suite.FilterTestCasesByStatus()
self.assertEquals(
[
TestCase(suite, 'foo/bar', flags=['-v']),
@@ -85,14 +88,32 @@ class TestSuiteTest(unittest.TestCase):
)
self.assertEquals(
- set(['PASS', 'SLOW', 'PREV']),
- suite.tests[0].outcomes,
+ set(['PREV', 'PASS', 'SLOW']),
+ suite.GetStatusFileOutcomes(suite.tests[0]),
)
self.assertEquals(
- set(['PASS', 'FAIL', 'SLOW', 'PREV']),
- suite.tests[1].outcomes,
+ set(['PREV', 'PASS', 'FAIL', 'SLOW']),
+ suite.GetStatusFileOutcomes(suite.tests[1]),
)
+ def test_fail_ok_outcome(self):
+ suite = TestSuite('foo', 'bar')
+ suite.tests = [
+ TestCase(suite, 'foo/bar'),
+ TestCase(suite, 'baz/bar'),
+ ]
+ suite.rules = {
+ '': {
+ 'foo/bar': set(['FAIL_OK']),
+ 'baz/bar': set(['FAIL']),
+ },
+ }
+ suite.prefix_rules = {}
+
+ for t in suite.tests:
+ expected_outcomes = suite.GetExpectedOutcomes(t)
+ self.assertEquals(['FAIL'], expected_outcomes)
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 9efa060bba..c8c7ce64a8 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -5,28 +5,37 @@
# Use this to run several variants of the tests.
ALL_VARIANT_FLAGS = {
"default": [[]],
+ "future": [["--future"]],
+ "liftoff": [["--liftoff"]],
"stress": [["--stress-opt", "--always-opt"]],
- "stress_incremental_marking": [["--stress-incremental-marking"]],
+ # TODO(6792): Write protected code has been temporary added to the below
+ # variant until the feature has been enabled (or staged) by default.
+ "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
- "stress_asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks"]],
+ "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+ "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
}
# FAST_VARIANTS implies no --always-opt.
FAST_VARIANT_FLAGS = {
"default": [[]],
+ "future": [["--future"]],
+ "liftoff": [["--liftoff"]],
"stress": [["--stress-opt"]],
- "stress_incremental_marking": [["--stress-incremental-marking"]],
+ # TODO(6792): Write protected code has been temporary added to the below
+ # variant until the feature has been enabled (or staged) by default.
+ "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
- "stress_asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks"]],
+ "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+ "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
}
-ALL_VARIANTS = set(["default", "stress", "stress_incremental_marking",
- "nooptimization", "stress_asm_wasm", "wasm_traps"])
+ALL_VARIANTS = set(["default", "future", "liftoff", "stress",
+ "stress_incremental_marking", "nooptimization",
+ "stress_background_compile", "wasm_traps"])
diff --git a/deps/v8/tools/testrunner/local/verbose.py b/deps/v8/tools/testrunner/local/verbose.py
index 00c330d2d9..f28398fa42 100644
--- a/deps/v8/tools/testrunner/local/verbose.py
+++ b/deps/v8/tools/testrunner/local/verbose.py
@@ -35,7 +35,6 @@ from . import statusfile
REPORT_TEMPLATE = (
"""Total: %(total)i tests
* %(skipped)4d tests will be skipped
- * %(timeout)4d tests are expected to timeout sometimes
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
@@ -44,24 +43,27 @@ REPORT_TEMPLATE = (
def PrintReport(tests):
total = len(tests)
- skipped = timeout = nocrash = passes = fail_ok = fail = 0
+ skipped = nocrash = passes = fail_ok = fail = 0
for t in tests:
- if "outcomes" not in dir(t) or not t.outcomes:
+ outcomes = t.suite.GetStatusFileOutcomes(t)
+ if not outcomes:
passes += 1
continue
- o = t.outcomes
- if statusfile.DoSkip(o):
+ if statusfile.DoSkip(outcomes):
skipped += 1
continue
- if statusfile.TIMEOUT in o: timeout += 1
- if statusfile.IsPassOrFail(o): nocrash += 1
- if list(o) == [statusfile.PASS]: passes += 1
- if statusfile.IsFailOk(o): fail_ok += 1
- if list(o) == [statusfile.FAIL]: fail += 1
+ if statusfile.IsPassOrFail(outcomes):
+ nocrash += 1
+ if list(outcomes) == [statusfile.PASS]:
+ passes += 1
+ if statusfile.IsFailOk(outcomes):
+ fail_ok += 1
+ if list(outcomes) == [statusfile.FAIL]:
+ fail += 1
+
print REPORT_TEMPLATE % {
"total": total,
"skipped": skipped,
- "timeout": timeout,
"nocrash": nocrash,
"pass": passes,
"fail_ok": fail_ok,
diff --git a/deps/v8/tools/testrunner/network/__init__.py b/deps/v8/tools/testrunner/network/__init__.py
deleted file mode 100644
index 202a262709..0000000000
--- a/deps/v8/tools/testrunner/network/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/tools/testrunner/network/distro.py b/deps/v8/tools/testrunner/network/distro.py
deleted file mode 100644
index 9d5a471d44..0000000000
--- a/deps/v8/tools/testrunner/network/distro.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class Shell(object):
- def __init__(self, shell):
- self.shell = shell
- self.tests = []
- self.total_duration = 0.0
-
- def AddSuite(self, suite):
- self.tests += suite.tests
- self.total_duration += suite.total_duration
-
- def SortTests(self):
- self.tests.sort(cmp=lambda x, y: cmp(x.duration, y.duration))
-
-
-def Assign(suites, peers):
- total_work = 0.0
- for s in suites:
- total_work += s.CalculateTotalDuration()
-
- total_power = 0.0
- for p in peers:
- p.assigned_work = 0.0
- total_power += p.jobs * p.relative_performance
- for p in peers:
- p.needed_work = total_work * p.jobs * p.relative_performance / total_power
-
- shells = {}
- for s in suites:
- shell = s.shell()
- if not shell in shells:
- shells[shell] = Shell(shell)
- shells[shell].AddSuite(s)
- # Convert |shells| to list and sort it, shortest total_duration first.
- shells = [ shells[s] for s in shells ]
- shells.sort(cmp=lambda x, y: cmp(x.total_duration, y.total_duration))
- # Sort tests within each shell, longest duration last (so it's
- # pop()'ed first).
- for s in shells: s.SortTests()
- # Sort peers, least needed_work first.
- peers.sort(cmp=lambda x, y: cmp(x.needed_work, y.needed_work))
- index = 0
- for shell in shells:
- while len(shell.tests) > 0:
- while peers[index].needed_work <= 0:
- index += 1
- if index == len(peers):
- print("BIG FAT WARNING: Assigning tests to peers failed. "
- "Remaining tests: %d. Going to slow mode." % len(shell.tests))
- # Pick the least-busy peer. Sorting the list for each test
- # is terribly slow, but this is just an emergency fallback anyway.
- peers.sort(cmp=lambda x, y: cmp(x.needed_work, y.needed_work))
- peers[0].ForceAddOneTest(shell.tests.pop(), shell)
- # If the peer already has a shell assigned and would need this one
- # and then yet another, try to avoid it.
- peer = peers[index]
- if (shell.total_duration < peer.needed_work and
- len(peer.shells) > 0 and
- index < len(peers) - 1 and
- shell.total_duration <= peers[index + 1].needed_work):
- peers[index + 1].AddTests(shell)
- else:
- peer.AddTests(shell)
diff --git a/deps/v8/tools/testrunner/network/endpoint.py b/deps/v8/tools/testrunner/network/endpoint.py
deleted file mode 100644
index 516578ace4..0000000000
--- a/deps/v8/tools/testrunner/network/endpoint.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import multiprocessing
-import os
-import Queue
-import threading
-import time
-
-from ..local import execution
-from ..local import progress
-from ..local import testsuite
-from ..local import utils
-from ..server import compression
-
-
-class EndpointProgress(progress.ProgressIndicator):
- def __init__(self, sock, server, ctx):
- super(EndpointProgress, self).__init__()
- self.sock = sock
- self.server = server
- self.context = ctx
- self.results_queue = [] # Accessors must synchronize themselves.
- self.sender_lock = threading.Lock()
- self.senderthread = threading.Thread(target=self._SenderThread)
- self.senderthread.start()
-
- def HasRun(self, test, has_unexpected_output):
- # The runners that call this have a lock anyway, so this is safe.
- self.results_queue.append(test)
-
- def _SenderThread(self):
- keep_running = True
- tests = []
- self.sender_lock.acquire()
- while keep_running:
- time.sleep(0.1)
- # This should be "atomic enough" without locking :-)
- # (We don't care which list any new elements get appended to, as long
- # as we don't lose any and the last one comes last.)
- current = self.results_queue
- self.results_queue = []
- for c in current:
- if c is None:
- keep_running = False
- else:
- tests.append(c)
- if keep_running and len(tests) < 1:
- continue # Wait for more results.
- if len(tests) < 1: break # We're done here.
- result = []
- for t in tests:
- result.append(t.PackResult())
- try:
- compression.Send(result, self.sock)
- except:
- self.runner.terminate = True
- for t in tests:
- self.server.CompareOwnPerf(t, self.context.arch, self.context.mode)
- tests = []
- self.sender_lock.release()
-
-
-def Execute(workspace, ctx, tests, sock, server):
- suite_paths = utils.GetSuitePaths(os.path.join(workspace, "test"))
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(workspace, "test", root))
- if suite:
- suite.SetupWorkingDirectory()
- suites.append(suite)
-
- suites_dict = {}
- for s in suites:
- suites_dict[s.name] = s
- s.tests = []
- for t in tests:
- suite = suites_dict[t.suite]
- t.suite = suite
- suite.tests.append(t)
-
- suites = [ s for s in suites if len(s.tests) > 0 ]
- for s in suites:
- s.DownloadData()
-
- progress_indicator = EndpointProgress(sock, server, ctx)
- runner = execution.Runner(suites, progress_indicator, ctx)
- try:
- runner.Run(server.jobs)
- except IOError, e:
- if e.errno == 2:
- message = ("File not found: %s, maybe you forgot to 'git add' it?" %
- e.filename)
- else:
- message = "%s" % e
- compression.Send([[-1, message]], sock)
- progress_indicator.HasRun(None, None) # Sentinel to signal the end.
- progress_indicator.sender_lock.acquire() # Released when sending is done.
- progress_indicator.sender_lock.release()
diff --git a/deps/v8/tools/testrunner/network/network_execution.py b/deps/v8/tools/testrunner/network/network_execution.py
deleted file mode 100644
index a95440178b..0000000000
--- a/deps/v8/tools/testrunner/network/network_execution.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import socket
-import subprocess
-import threading
-import time
-
-from . import distro
-from ..local import execution
-from ..local import perfdata
-from ..objects import peer
-from ..objects import workpacket
-from ..server import compression
-from ..server import constants
-from ..server import local_handler
-from ..server import signatures
-
-
-def GetPeers():
- data = local_handler.LocalQuery([constants.REQUEST_PEERS])
- if not data: return []
- return [ peer.Peer.Unpack(p) for p in data ]
-
-
-class NetworkedRunner(execution.Runner):
- def __init__(self, suites, progress_indicator, context, peers, workspace):
- self.suites = suites
- datapath = os.path.join("out", "testrunner_data")
- # TODO(machenbach): These fields should exist now in the superclass.
- # But there is no super constructor call. Check if this is a problem.
- self.perf_data_manager = perfdata.PerfDataManager(datapath)
- self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
- for s in suites:
- for t in s.tests:
- t.duration = self.perfdata.FetchPerfData(t) or 1.0
- self._CommonInit(suites, progress_indicator, context)
- self.tests = [] # Only used if we need to fall back to local execution.
- self.tests_lock = threading.Lock()
- self.peers = peers
- self.pubkey_fingerprint = None # Fetched later.
- self.base_rev = subprocess.check_output(
- "cd %s; git log -1 --format=%%H --grep=git-svn-id" % workspace,
- shell=True).strip()
- self.base_svn_rev = subprocess.check_output(
- "cd %s; git log -1 %s" # Get commit description.
- " | grep -e '^\s*git-svn-id:'" # Extract "git-svn-id" line.
- " | awk '{print $2}'" # Extract "repository@revision" part.
- " | sed -e 's/.*@//'" % # Strip away "repository@".
- (workspace, self.base_rev), shell=True).strip()
- self.patch = subprocess.check_output(
- "cd %s; git diff %s" % (workspace, self.base_rev), shell=True)
- self.binaries = {}
- self.initialization_lock = threading.Lock()
- self.initialization_lock.acquire() # Released when init is done.
- self._OpenLocalConnection()
- self.local_receiver_thread = threading.Thread(
- target=self._ListenLocalConnection)
- self.local_receiver_thread.daemon = True
- self.local_receiver_thread.start()
- self.initialization_lock.acquire()
- self.initialization_lock.release()
-
- def _OpenLocalConnection(self):
- self.local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- code = self.local_socket.connect_ex(("localhost", constants.CLIENT_PORT))
- if code != 0:
- raise RuntimeError("Failed to connect to local server")
- compression.Send([constants.REQUEST_PUBKEY_FINGERPRINT], self.local_socket)
-
- def _ListenLocalConnection(self):
- release_lock_countdown = 1 # Pubkey.
- self.local_receiver = compression.Receiver(self.local_socket)
- while not self.local_receiver.IsDone():
- data = self.local_receiver.Current()
- if data[0] == constants.REQUEST_PUBKEY_FINGERPRINT:
- pubkey = data[1]
- if not pubkey: raise RuntimeError("Received empty public key")
- self.pubkey_fingerprint = pubkey
- release_lock_countdown -= 1
- if release_lock_countdown == 0:
- self.initialization_lock.release()
- release_lock_countdown -= 1 # Prevent repeated triggering.
- self.local_receiver.Advance()
-
- def Run(self, jobs):
- self.indicator.Starting()
- need_libv8 = False
- for s in self.suites:
- shell = s.shell()
- if shell not in self.binaries:
- path = os.path.join(self.context.shell_dir, shell)
- # Check if this is a shared library build.
- try:
- ldd = subprocess.check_output("ldd %s | grep libv8\\.so" % (path),
- shell=True)
- ldd = ldd.strip().split(" ")
- assert ldd[0] == "libv8.so"
- assert ldd[1] == "=>"
- need_libv8 = True
- binary_needs_libv8 = True
- libv8 = signatures.ReadFileAndSignature(ldd[2])
- except:
- binary_needs_libv8 = False
- binary = signatures.ReadFileAndSignature(path)
- if binary[0] is None:
- print("Error: Failed to create signature.")
- assert binary[1] != 0
- return binary[1]
- binary.append(binary_needs_libv8)
- self.binaries[shell] = binary
- if need_libv8:
- self.binaries["libv8.so"] = libv8
- distro.Assign(self.suites, self.peers)
- # Spawn one thread for each peer.
- threads = []
- for p in self.peers:
- thread = threading.Thread(target=self._TalkToPeer, args=[p])
- threads.append(thread)
- thread.start()
- try:
- for thread in threads:
- # Use a timeout so that signals (Ctrl+C) will be processed.
- thread.join(timeout=10000000)
- self._AnalyzePeerRuntimes()
- except KeyboardInterrupt:
- self.terminate = True
- raise
- except Exception, _e:
- # If there's an exception we schedule an interruption for any
- # remaining threads...
- self.terminate = True
- # ...and then reraise the exception to bail out.
- raise
- compression.Send(constants.END_OF_STREAM, self.local_socket)
- self.local_socket.close()
- if self.tests:
- self._RunInternal(jobs)
- self.indicator.Done()
- return not self.failed
-
- def _TalkToPeer(self, peer):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.settimeout(self.context.timeout + 10)
- code = sock.connect_ex((peer.address, constants.PEER_PORT))
- if code == 0:
- try:
- peer.runtime = None
- start_time = time.time()
- packet = workpacket.WorkPacket(peer=peer, context=self.context,
- base_revision=self.base_svn_rev,
- patch=self.patch,
- pubkey=self.pubkey_fingerprint)
- data, test_map = packet.Pack(self.binaries)
- compression.Send(data, sock)
- compression.Send(constants.END_OF_STREAM, sock)
- rec = compression.Receiver(sock)
- while not rec.IsDone() and not self.terminate:
- data_list = rec.Current()
- for data in data_list:
- test_id = data[0]
- if test_id < 0:
- # The peer is reporting an error.
- with self.lock:
- print("\nPeer %s reports error: %s" % (peer.address, data[1]))
- continue
- test = test_map.pop(test_id)
- test.MergeResult(data)
- try:
- self.perfdata.UpdatePerfData(test)
- except Exception, e:
- print("UpdatePerfData exception: %s" % e)
- pass # Just keep working.
- with self.lock:
- perf_key = self.perfdata.GetKey(test)
- compression.Send(
- [constants.INFORM_DURATION, perf_key, test.duration,
- self.context.arch, self.context.mode],
- self.local_socket)
- has_unexpected_output = test.suite.HasUnexpectedOutput(test)
- if has_unexpected_output:
- self.failed.append(test)
- if test.output.HasCrashed():
- self.crashed += 1
- else:
- self.succeeded += 1
- self.remaining -= 1
- self.indicator.HasRun(test, has_unexpected_output)
- rec.Advance()
- peer.runtime = time.time() - start_time
- except KeyboardInterrupt:
- sock.close()
- raise
- except Exception, e:
- print("Got exception: %s" % e)
- pass # Fall back to local execution.
- else:
- compression.Send([constants.UNRESPONSIVE_PEER, peer.address],
- self.local_socket)
- sock.close()
- if len(test_map) > 0:
- # Some tests have not received any results. Run them locally.
- print("\nNo results for %d tests, running them locally." % len(test_map))
- self._EnqueueLocally(test_map)
-
- def _EnqueueLocally(self, test_map):
- with self.tests_lock:
- for test in test_map:
- self.tests.append(test_map[test])
-
- def _AnalyzePeerRuntimes(self):
- total_runtime = 0.0
- total_work = 0.0
- for p in self.peers:
- if p.runtime is None:
- return
- total_runtime += p.runtime
- total_work += p.assigned_work
- for p in self.peers:
- p.assigned_work /= total_work
- p.runtime /= total_runtime
- perf_correction = p.assigned_work / p.runtime
- old_perf = p.relative_performance
- p.relative_performance = (old_perf + perf_correction) / 2.0
- compression.Send([constants.UPDATE_PERF, p.address,
- p.relative_performance],
- self.local_socket)
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index 6bcbfb67aa..fb5d717728 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -49,18 +49,3 @@ class Context():
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir
-
- def Pack(self):
- return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
- self.command_prefix, self.extra_flags, self.noi18n,
- self.random_seed, self.no_sorting, self.rerun_failures_count,
- self.rerun_failures_max, self.predictable, self.no_harness,
- self.use_perf_data, self.sancov_dir]
-
- @staticmethod
- def Unpack(packed):
- # For the order of the fields, refer to Pack() above.
- return Context(packed[0], packed[1], None, packed[2], False,
- packed[3], packed[4], packed[5], packed[6], packed[7],
- packed[8], packed[9], packed[10], packed[11], packed[12],
- packed[13], packed[14], packed[15])
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index b4bb01f797..99d6137698 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -51,11 +51,3 @@ class Output(object):
def HasTimedOut(self):
return self.timed_out
-
- def Pack(self):
- return [self.exit_code, self.timed_out, self.stdout, self.stderr, self.pid]
-
- @staticmethod
- def Unpack(packed):
- # For the order of the fields, refer to Pack() above.
- return Output(packed[0], packed[1], packed[2], packed[3], packed[4])
diff --git a/deps/v8/tools/testrunner/objects/peer.py b/deps/v8/tools/testrunner/objects/peer.py
deleted file mode 100644
index 18a6bec7a8..0000000000
--- a/deps/v8/tools/testrunner/objects/peer.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class Peer(object):
- def __init__(self, address, jobs, rel_perf, pubkey):
- self.address = address # string: IP address
- self.jobs = jobs # integer: number of CPUs
- self.relative_performance = rel_perf
- self.pubkey = pubkey # string: pubkey's fingerprint
- self.shells = set() # set of strings
- self.needed_work = 0
- self.assigned_work = 0
- self.tests = [] # list of TestCase objects
- self.trusting_me = False # This peer trusts my public key.
- self.trusted = False # I trust this peer's public key.
-
- def __str__(self):
- return ("Peer at %s, jobs: %d, performance: %.2f, trust I/O: %s/%s" %
- (self.address, self.jobs, self.relative_performance,
- self.trusting_me, self.trusted))
-
- def AddTests(self, shell):
- """Adds tests from |shell| to this peer.
-
- Stops when self.needed_work reaches zero, or when all of shell's tests
- are assigned."""
- assert self.needed_work > 0
- if shell.shell not in self.shells:
- self.shells.add(shell.shell)
- while len(shell.tests) > 0 and self.needed_work > 0:
- t = shell.tests.pop()
- self.needed_work -= t.duration
- self.assigned_work += t.duration
- shell.total_duration -= t.duration
- self.tests.append(t)
-
- def ForceAddOneTest(self, test, shell):
- """Forcibly adds another test to this peer, disregarding needed_work."""
- if shell.shell not in self.shells:
- self.shells.add(shell.shell)
- self.needed_work -= test.duration
- self.assigned_work += test.duration
- shell.total_duration -= test.duration
- self.tests.append(test)
-
-
- def Pack(self):
- """Creates a JSON serializable representation of this Peer."""
- return [self.address, self.jobs, self.relative_performance]
-
- @staticmethod
- def Unpack(packed):
- """Creates a Peer object built from a packed representation."""
- pubkey_dummy = "" # Callers of this don't care (only the server does).
- return Peer(packed[0], packed[1], packed[2], pubkey_dummy)
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 37e3cb4ec2..fd8c27bc59 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -26,76 +26,29 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from . import output
-
class TestCase(object):
- def __init__(self, suite, path, variant=None, flags=None,
- override_shell=None):
+ def __init__(self, suite, path, variant=None, flags=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
- self.override_shell = override_shell
- self.outcomes = frozenset([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
- self.env = {}
def CopyAddingFlags(self, variant, flags):
- copy = TestCase(self.suite, self.path, variant, self.flags + flags,
- self.override_shell)
- copy.outcomes = self.outcomes
- copy.env = self.env
- return copy
-
- def PackTask(self):
- """
- Extracts those parts of this object that are required to run the test
- and returns them as a JSON serializable object.
- """
- assert self.id is not None
- return [self.suitename(), self.path, self.variant, self.flags,
- self.override_shell, list(self.outcomes or []),
- self.id, self.env]
-
- @staticmethod
- def UnpackTask(task):
- """Creates a new TestCase object based on packed task data."""
- # For the order of the fields, refer to PackTask() above.
- test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
- test.outcomes = frozenset(task[5])
- test.id = task[6]
- test.run = 1
- test.env = task[7]
- return test
+ return TestCase(self.suite, self.path, variant, self.flags + flags)
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
- def PackResult(self):
- """Serializes the output of the TestCase after it has run."""
- self.suite.StripOutputForTransmit(self)
- return [self.id, self.output.Pack(), self.duration]
-
- def MergeResult(self, result):
- """Applies the contents of a Result to this object."""
- assert result[0] == self.id
- self.output = output.Output.Unpack(result[1])
- self.duration = result[2]
-
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
- def shell(self):
- if self.override_shell:
- return self.override_shell
- return self.suite.shell()
-
def __getstate__(self):
"""Representation to pickle test cases.
diff --git a/deps/v8/tools/testrunner/objects/workpacket.py b/deps/v8/tools/testrunner/objects/workpacket.py
deleted file mode 100644
index d07efe76ec..0000000000
--- a/deps/v8/tools/testrunner/objects/workpacket.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-from . import context
-from . import testcase
-
-class WorkPacket(object):
- def __init__(self, peer=None, context=None, tests=None, binaries=None,
- base_revision=None, patch=None, pubkey=None):
- self.peer = peer
- self.context = context
- self.tests = tests
- self.binaries = binaries
- self.base_revision = base_revision
- self.patch = patch
- self.pubkey_fingerprint = pubkey
-
- def Pack(self, binaries_dict):
- """
- Creates a JSON serializable object containing the data of this
- work packet.
- """
- need_libv8 = False
- binaries = []
- for shell in self.peer.shells:
- prefetched_binary = binaries_dict[shell]
- binaries.append({"name": shell,
- "blob": prefetched_binary[0],
- "sign": prefetched_binary[1]})
- if prefetched_binary[2]:
- need_libv8 = True
- if need_libv8:
- libv8 = binaries_dict["libv8.so"]
- binaries.append({"name": "libv8.so",
- "blob": libv8[0],
- "sign": libv8[1]})
- tests = []
- test_map = {}
- for t in self.peer.tests:
- test_map[t.id] = t
- tests.append(t.PackTask())
- result = {
- "binaries": binaries,
- "pubkey": self.pubkey_fingerprint,
- "context": self.context.Pack(),
- "base_revision": self.base_revision,
- "patch": self.patch,
- "tests": tests
- }
- return result, test_map
-
- @staticmethod
- def Unpack(packed):
- """
- Creates a WorkPacket object from the given packed representation.
- """
- binaries = packed["binaries"]
- pubkey_fingerprint = packed["pubkey"]
- ctx = context.Context.Unpack(packed["context"])
- base_revision = packed["base_revision"]
- patch = packed["patch"]
- tests = [ testcase.TestCase.UnpackTask(t) for t in packed["tests"] ]
- return WorkPacket(context=ctx, tests=tests, binaries=binaries,
- base_revision=base_revision, patch=patch,
- pubkey=pubkey_fingerprint)
diff --git a/deps/v8/tools/testrunner/server/__init__.py b/deps/v8/tools/testrunner/server/__init__.py
deleted file mode 100644
index 202a262709..0000000000
--- a/deps/v8/tools/testrunner/server/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/tools/testrunner/server/compression.py b/deps/v8/tools/testrunner/server/compression.py
deleted file mode 100644
index d5ed415976..0000000000
--- a/deps/v8/tools/testrunner/server/compression.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import cStringIO as StringIO
-try:
- import ujson as json
-except ImportError:
- import json
-import os
-import struct
-import zlib
-
-from . import constants
-
-def Send(obj, sock):
- """
- Sends a JSON encodable object over the specified socket (zlib-compressed).
- """
- obj = json.dumps(obj)
- compression_level = 2 # 1 = fastest, 9 = best compression
- compressed = zlib.compress(obj, compression_level)
- payload = struct.pack('>i', len(compressed)) + compressed
- sock.sendall(payload)
-
-
-class Receiver(object):
- def __init__(self, sock):
- self.sock = sock
- self.data = StringIO.StringIO()
- self.datalength = 0
- self._next = self._GetNext()
-
- def IsDone(self):
- return self._next == None
-
- def Current(self):
- return self._next
-
- def Advance(self):
- try:
- self._next = self._GetNext()
- except:
- raise
-
- def _GetNext(self):
- try:
- while self.datalength < constants.SIZE_T:
- try:
- chunk = self.sock.recv(8192)
- except:
- raise
- if not chunk: return None
- self._AppendData(chunk)
- size = self._PopData(constants.SIZE_T)
- size = struct.unpack(">i", size)[0]
- while self.datalength < size:
- try:
- chunk = self.sock.recv(8192)
- except:
- raise
- if not chunk: return None
- self._AppendData(chunk)
- result = self._PopData(size)
- result = zlib.decompress(result)
- result = json.loads(result)
- if result == constants.END_OF_STREAM:
- return None
- return result
- except:
- raise
-
- def _AppendData(self, new):
- self.data.seek(0, os.SEEK_END)
- self.data.write(new)
- self.datalength += len(new)
-
- def _PopData(self, length):
- self.data.seek(0)
- chunk = self.data.read(length)
- remaining = self.data.read()
- self.data.close()
- self.data = StringIO.StringIO()
- self.data.write(remaining)
- assert self.datalength - length == len(remaining)
- self.datalength = len(remaining)
- return chunk
diff --git a/deps/v8/tools/testrunner/server/constants.py b/deps/v8/tools/testrunner/server/constants.py
deleted file mode 100644
index 5aefcbad0d..0000000000
--- a/deps/v8/tools/testrunner/server/constants.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-CLIENT_PORT = 9991 # Port for the local client to connect to.
-PEER_PORT = 9992 # Port for peers on the network to connect to.
-PRESENCE_PORT = 9993 # Port for presence daemon.
-STATUS_PORT = 9994 # Port for network requests not related to workpackets.
-
-END_OF_STREAM = "end of dtest stream" # Marker for end of network requests.
-SIZE_T = 4 # Number of bytes used for network request size header.
-
-# Messages understood by the local request handler.
-ADD_TRUSTED = "add trusted"
-INFORM_DURATION = "inform about duration"
-REQUEST_PEERS = "get peers"
-UNRESPONSIVE_PEER = "unresponsive peer"
-REQUEST_PUBKEY_FINGERPRINT = "get pubkey fingerprint"
-REQUEST_STATUS = "get status"
-UPDATE_PERF = "update performance"
-
-# Messages understood by the status request handler.
-LIST_TRUSTED_PUBKEYS = "list trusted pubkeys"
-GET_SIGNED_PUBKEY = "pass on signed pubkey"
-NOTIFY_NEW_TRUSTED = "new trusted peer"
-TRUST_YOU_NOW = "trust you now"
-DO_YOU_TRUST = "do you trust"
diff --git a/deps/v8/tools/testrunner/server/daemon.py b/deps/v8/tools/testrunner/server/daemon.py
deleted file mode 100644
index baa66fbea9..0000000000
--- a/deps/v8/tools/testrunner/server/daemon.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-
-# This code has been written by Sander Marechal and published at:
-# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
-# where the author has placed it in the public domain (see comment #6 at
-# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/#c6
-# ).
-# Some minor modifications have been made by the V8 authors. The work remains
-# in the public domain.
-
-import atexit
-import os
-from signal import SIGTERM
-from signal import SIGINT
-import sys
-import time
-
-
-class Daemon(object):
- """
- A generic daemon class.
-
- Usage: subclass the Daemon class and override the run() method
- """
- def __init__(self, pidfile, stdin='/dev/null',
- stdout='/dev/null', stderr='/dev/null'):
- self.stdin = stdin
- self.stdout = stdout
- self.stderr = stderr
- self.pidfile = pidfile
-
- def daemonize(self):
- """
- do the UNIX double-fork magic, see Stevens' "Advanced
- Programming in the UNIX Environment" for details (ISBN 0201563177)
- http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
- """
- try:
- pid = os.fork()
- if pid > 0:
- # exit first parent
- sys.exit(0)
- except OSError, e:
- sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
- sys.exit(1)
-
- # decouple from parent environment
- os.chdir("/")
- os.setsid()
- os.umask(0)
-
- # do second fork
- try:
- pid = os.fork()
- if pid > 0:
- # exit from second parent
- sys.exit(0)
- except OSError, e:
- sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
- sys.exit(1)
-
- # redirect standard file descriptors
- sys.stdout.flush()
- sys.stderr.flush()
- si = file(self.stdin, 'r')
- so = file(self.stdout, 'a+')
- se = file(self.stderr, 'a+', 0)
- # TODO: (debug) re-enable this!
- #os.dup2(si.fileno(), sys.stdin.fileno())
- #os.dup2(so.fileno(), sys.stdout.fileno())
- #os.dup2(se.fileno(), sys.stderr.fileno())
-
- # write pidfile
- atexit.register(self.delpid)
- pid = str(os.getpid())
- file(self.pidfile, 'w+').write("%s\n" % pid)
-
- def delpid(self):
- os.remove(self.pidfile)
-
- def start(self):
- """
- Start the daemon
- """
- # Check for a pidfile to see if the daemon already runs
- try:
- pf = file(self.pidfile, 'r')
- pid = int(pf.read().strip())
- pf.close()
- except IOError:
- pid = None
-
- if pid:
- message = "pidfile %s already exist. Daemon already running?\n"
- sys.stderr.write(message % self.pidfile)
- sys.exit(1)
-
- # Start the daemon
- self.daemonize()
- self.run()
-
- def stop(self):
- """
- Stop the daemon
- """
- # Get the pid from the pidfile
- try:
- pf = file(self.pidfile, 'r')
- pid = int(pf.read().strip())
- pf.close()
- except IOError:
- pid = None
-
- if not pid:
- message = "pidfile %s does not exist. Daemon not running?\n"
- sys.stderr.write(message % self.pidfile)
- return # not an error in a restart
-
- # Try killing the daemon process
- try:
- # Give the process a one-second chance to exit gracefully.
- os.kill(pid, SIGINT)
- time.sleep(1)
- while 1:
- os.kill(pid, SIGTERM)
- time.sleep(0.1)
- except OSError, err:
- err = str(err)
- if err.find("No such process") > 0:
- if os.path.exists(self.pidfile):
- os.remove(self.pidfile)
- else:
- print str(err)
- sys.exit(1)
-
- def restart(self):
- """
- Restart the daemon
- """
- self.stop()
- self.start()
-
- def run(self):
- """
- You should override this method when you subclass Daemon. It will be
- called after the process has been daemonized by start() or restart().
- """
diff --git a/deps/v8/tools/testrunner/server/local_handler.py b/deps/v8/tools/testrunner/server/local_handler.py
deleted file mode 100644
index 3b3ac495d0..0000000000
--- a/deps/v8/tools/testrunner/server/local_handler.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import socket
-import SocketServer
-import StringIO
-
-from . import compression
-from . import constants
-
-
-def LocalQuery(query):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- code = sock.connect_ex(("localhost", constants.CLIENT_PORT))
- if code != 0: return None
- compression.Send(query, sock)
- compression.Send(constants.END_OF_STREAM, sock)
- rec = compression.Receiver(sock)
- data = None
- while not rec.IsDone():
- data = rec.Current()
- assert data[0] == query[0]
- data = data[1]
- rec.Advance()
- sock.close()
- return data
-
-
-class LocalHandler(SocketServer.BaseRequestHandler):
- def handle(self):
- rec = compression.Receiver(self.request)
- while not rec.IsDone():
- data = rec.Current()
- action = data[0]
-
- if action == constants.REQUEST_PEERS:
- with self.server.daemon.peer_list_lock:
- response = [ p.Pack() for p in self.server.daemon.peers
- if p.trusting_me ]
- compression.Send([action, response], self.request)
-
- elif action == constants.UNRESPONSIVE_PEER:
- self.server.daemon.DeletePeer(data[1])
-
- elif action == constants.REQUEST_PUBKEY_FINGERPRINT:
- compression.Send([action, self.server.daemon.pubkey_fingerprint],
- self.request)
-
- elif action == constants.REQUEST_STATUS:
- compression.Send([action, self._GetStatusMessage()], self.request)
-
- elif action == constants.ADD_TRUSTED:
- fingerprint = self.server.daemon.CopyToTrusted(data[1])
- compression.Send([action, fingerprint], self.request)
-
- elif action == constants.INFORM_DURATION:
- test_key = data[1]
- test_duration = data[2]
- arch = data[3]
- mode = data[4]
- self.server.daemon.AddPerfData(test_key, test_duration, arch, mode)
-
- elif action == constants.UPDATE_PERF:
- address = data[1]
- perf = data[2]
- self.server.daemon.UpdatePeerPerformance(data[1], data[2])
-
- rec.Advance()
- compression.Send(constants.END_OF_STREAM, self.request)
-
- def _GetStatusMessage(self):
- sio = StringIO.StringIO()
- sio.write("Peers:\n")
- with self.server.daemon.peer_list_lock:
- for p in self.server.daemon.peers:
- sio.write("%s\n" % p)
- sio.write("My own jobs: %d, relative performance: %.2f\n" %
- (self.server.daemon.jobs, self.server.daemon.relative_perf))
- # Low-priority TODO: Return more information. Ideas:
- # - currently running anything,
- # - time since last job,
- # - time since last repository fetch
- # - number of workpackets/testcases handled since startup
- # - slowest test(s)
- result = sio.getvalue()
- sio.close()
- return result
-
-
-class LocalSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- def __init__(self, daemon):
- SocketServer.TCPServer.__init__(self, ("localhost", constants.CLIENT_PORT),
- LocalHandler)
- self.daemon = daemon
diff --git a/deps/v8/tools/testrunner/server/main.py b/deps/v8/tools/testrunner/server/main.py
deleted file mode 100644
index c237e1adb4..0000000000
--- a/deps/v8/tools/testrunner/server/main.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import multiprocessing
-import os
-import shutil
-import subprocess
-import threading
-import time
-
-from . import daemon
-from . import local_handler
-from . import presence_handler
-from . import signatures
-from . import status_handler
-from . import work_handler
-from ..network import perfdata
-
-
-class Server(daemon.Daemon):
-
- def __init__(self, pidfile, root, stdin="/dev/null",
- stdout="/dev/null", stderr="/dev/null"):
- super(Server, self).__init__(pidfile, stdin, stdout, stderr)
- self.root = root
- self.local_handler = None
- self.local_handler_thread = None
- self.work_handler = None
- self.work_handler_thread = None
- self.status_handler = None
- self.status_handler_thread = None
- self.presence_daemon = None
- self.presence_daemon_thread = None
- self.peers = []
- self.jobs = multiprocessing.cpu_count()
- self.peer_list_lock = threading.Lock()
- self.perf_data_lock = None
- self.presence_daemon_lock = None
- self.datadir = os.path.join(self.root, "data")
- pubkey_fingerprint_filename = os.path.join(self.datadir, "mypubkey")
- with open(pubkey_fingerprint_filename) as f:
- self.pubkey_fingerprint = f.read().strip()
- self.relative_perf_filename = os.path.join(self.datadir, "myperf")
- if os.path.exists(self.relative_perf_filename):
- with open(self.relative_perf_filename) as f:
- try:
- self.relative_perf = float(f.read())
- except:
- self.relative_perf = 1.0
- else:
- self.relative_perf = 1.0
-
- def run(self):
- os.nice(20)
- self.ip = presence_handler.GetOwnIP()
- self.perf_data_manager = perfdata.PerfDataManager(self.datadir)
- self.perf_data_lock = threading.Lock()
-
- self.local_handler = local_handler.LocalSocketServer(self)
- self.local_handler_thread = threading.Thread(
- target=self.local_handler.serve_forever)
- self.local_handler_thread.start()
-
- self.work_handler = work_handler.WorkSocketServer(self)
- self.work_handler_thread = threading.Thread(
- target=self.work_handler.serve_forever)
- self.work_handler_thread.start()
-
- self.status_handler = status_handler.StatusSocketServer(self)
- self.status_handler_thread = threading.Thread(
- target=self.status_handler.serve_forever)
- self.status_handler_thread.start()
-
- self.presence_daemon = presence_handler.PresenceDaemon(self)
- self.presence_daemon_thread = threading.Thread(
- target=self.presence_daemon.serve_forever)
- self.presence_daemon_thread.start()
-
- self.presence_daemon.FindPeers()
- time.sleep(0.5) # Give those peers some time to reply.
-
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == self.ip: continue
- status_handler.RequestTrustedPubkeys(p, self)
-
- while True:
- try:
- self.PeriodicTasks()
- time.sleep(60)
- except Exception, e:
- print("MAIN LOOP EXCEPTION: %s" % e)
- self.Shutdown()
- break
- except KeyboardInterrupt:
- self.Shutdown()
- break
-
- def Shutdown(self):
- with open(self.relative_perf_filename, "w") as f:
- f.write("%s" % self.relative_perf)
- self.presence_daemon.shutdown()
- self.presence_daemon.server_close()
- self.local_handler.shutdown()
- self.local_handler.server_close()
- self.work_handler.shutdown()
- self.work_handler.server_close()
- self.status_handler.shutdown()
- self.status_handler.server_close()
-
- def PeriodicTasks(self):
- # If we know peers we don't trust, see if someone else trusts them.
- with self.peer_list_lock:
- for p in self.peers:
- if p.trusted: continue
- if self.IsTrusted(p.pubkey):
- p.trusted = True
- status_handler.ITrustYouNow(p)
- continue
- for p2 in self.peers:
- if not p2.trusted: continue
- status_handler.TryTransitiveTrust(p2, p.pubkey, self)
- # TODO: Ping for more peers waiting to be discovered.
- # TODO: Update the checkout (if currently idle).
-
- def AddPeer(self, peer):
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == peer.address:
- return
- self.peers.append(peer)
- if peer.trusted:
- status_handler.ITrustYouNow(peer)
-
- def DeletePeer(self, peer_address):
- with self.peer_list_lock:
- for i in xrange(len(self.peers)):
- if self.peers[i].address == peer_address:
- del self.peers[i]
- return
-
- def MarkPeerAsTrusting(self, peer_address):
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == peer_address:
- p.trusting_me = True
- break
-
- def UpdatePeerPerformance(self, peer_address, performance):
- with self.peer_list_lock:
- for p in self.peers:
- if p.address == peer_address:
- p.relative_performance = performance
-
- def CopyToTrusted(self, pubkey_filename):
- with open(pubkey_filename, "r") as f:
- lines = f.readlines()
- fingerprint = lines[-1].strip()
- target_filename = self._PubkeyFilename(fingerprint)
- shutil.copy(pubkey_filename, target_filename)
- with self.peer_list_lock:
- for peer in self.peers:
- if peer.address == self.ip: continue
- if peer.pubkey == fingerprint:
- status_handler.ITrustYouNow(peer)
- else:
- result = self.SignTrusted(fingerprint)
- status_handler.NotifyNewTrusted(peer, result)
- return fingerprint
-
- def _PubkeyFilename(self, pubkey_fingerprint):
- return os.path.join(self.root, "trusted", "%s.pem" % pubkey_fingerprint)
-
- def IsTrusted(self, pubkey_fingerprint):
- return os.path.exists(self._PubkeyFilename(pubkey_fingerprint))
-
- def ListTrusted(self):
- path = os.path.join(self.root, "trusted")
- if not os.path.exists(path): return []
- return [ f[:-4] for f in os.listdir(path) if f.endswith(".pem") ]
-
- def SignTrusted(self, pubkey_fingerprint):
- if not self.IsTrusted(pubkey_fingerprint):
- return []
- filename = self._PubkeyFilename(pubkey_fingerprint)
- result = signatures.ReadFileAndSignature(filename) # Format: [key, sig].
- return [pubkey_fingerprint, result[0], result[1], self.pubkey_fingerprint]
-
- def AcceptNewTrusted(self, data):
- # The format of |data| matches the return value of |SignTrusted()|.
- if not data: return
- fingerprint = data[0]
- pubkey = data[1]
- signature = data[2]
- signer = data[3]
- if not self.IsTrusted(signer):
- return
- if self.IsTrusted(fingerprint):
- return # Already trusted.
- filename = self._PubkeyFilename(fingerprint)
- signer_pubkeyfile = self._PubkeyFilename(signer)
- if not signatures.VerifySignature(filename, pubkey, signature,
- signer_pubkeyfile):
- return
- return # Nothing more to do.
-
- def AddPerfData(self, test_key, duration, arch, mode):
- data_store = self.perf_data_manager.GetStore(arch, mode)
- data_store.RawUpdatePerfData(str(test_key), duration)
-
- def CompareOwnPerf(self, test, arch, mode):
- data_store = self.perf_data_manager.GetStore(arch, mode)
- observed = data_store.FetchPerfData(test)
- if not observed: return
- own_perf_estimate = observed / test.duration
- with self.perf_data_lock:
- kLearnRateLimiter = 9999
- self.relative_perf *= kLearnRateLimiter
- self.relative_perf += own_perf_estimate
- self.relative_perf /= (kLearnRateLimiter + 1)
diff --git a/deps/v8/tools/testrunner/server/presence_handler.py b/deps/v8/tools/testrunner/server/presence_handler.py
deleted file mode 100644
index 1dc2ef163a..0000000000
--- a/deps/v8/tools/testrunner/server/presence_handler.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import socket
-import SocketServer
-import threading
-try:
- import ujson as json
-except:
- import json
-
-from . import constants
-from ..objects import peer
-
-
-STARTUP_REQUEST = "V8 test peer starting up"
-STARTUP_RESPONSE = "Let's rock some tests!"
-EXIT_REQUEST = "V8 testing peer going down"
-
-
-def GetOwnIP():
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.connect(("8.8.8.8", 80))
- ip = s.getsockname()[0]
- s.close()
- return ip
-
-
-class PresenceHandler(SocketServer.BaseRequestHandler):
-
- def handle(self):
- data = json.loads(self.request[0].strip())
-
- if data[0] == STARTUP_REQUEST:
- jobs = data[1]
- relative_perf = data[2]
- pubkey_fingerprint = data[3]
- trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
- response = [STARTUP_RESPONSE, self.server.daemon.jobs,
- self.server.daemon.relative_perf,
- self.server.daemon.pubkey_fingerprint, trusted]
- response = json.dumps(response)
- self.server.SendTo(self.client_address[0], response)
- p = peer.Peer(self.client_address[0], jobs, relative_perf,
- pubkey_fingerprint)
- p.trusted = trusted
- self.server.daemon.AddPeer(p)
-
- elif data[0] == STARTUP_RESPONSE:
- jobs = data[1]
- perf = data[2]
- pubkey_fingerprint = data[3]
- p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
- p.trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
- p.trusting_me = data[4]
- self.server.daemon.AddPeer(p)
-
- elif data[0] == EXIT_REQUEST:
- self.server.daemon.DeletePeer(self.client_address[0])
- if self.client_address[0] == self.server.daemon.ip:
- self.server.shutdown_lock.release()
-
-
-class PresenceDaemon(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
- def __init__(self, daemon):
- self.daemon = daemon
- address = (daemon.ip, constants.PRESENCE_PORT)
- SocketServer.UDPServer.__init__(self, address, PresenceHandler)
- self.shutdown_lock = threading.Lock()
-
- def shutdown(self):
- self.shutdown_lock.acquire()
- self.SendToAll(json.dumps([EXIT_REQUEST]))
- self.shutdown_lock.acquire()
- self.shutdown_lock.release()
- SocketServer.UDPServer.shutdown(self)
-
- def SendTo(self, target, message):
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- sock.sendto(message, (target, constants.PRESENCE_PORT))
- sock.close()
-
- def SendToAll(self, message):
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- ip = self.daemon.ip.split(".")
- for i in range(1, 254):
- ip[-1] = str(i)
- sock.sendto(message, (".".join(ip), constants.PRESENCE_PORT))
- sock.close()
-
- def FindPeers(self):
- request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
- self.daemon.pubkey_fingerprint]
- request = json.dumps(request)
- self.SendToAll(request)
diff --git a/deps/v8/tools/testrunner/server/signatures.py b/deps/v8/tools/testrunner/server/signatures.py
deleted file mode 100644
index 9957a18a26..0000000000
--- a/deps/v8/tools/testrunner/server/signatures.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import base64
-import os
-import subprocess
-
-
-def ReadFileAndSignature(filename):
- with open(filename, "rb") as f:
- file_contents = base64.b64encode(f.read())
- signature_file = filename + ".signature"
- if (not os.path.exists(signature_file) or
- os.path.getmtime(signature_file) < os.path.getmtime(filename)):
- private_key = "~/.ssh/v8_dtest"
- code = subprocess.call("openssl dgst -out %s -sign %s %s" %
- (signature_file, private_key, filename),
- shell=True)
- if code != 0: return [None, code]
- with open(signature_file) as f:
- signature = base64.b64encode(f.read())
- return [file_contents, signature]
-
-
-def VerifySignature(filename, file_contents, signature, pubkeyfile):
- with open(filename, "wb") as f:
- f.write(base64.b64decode(file_contents))
- signature_file = filename + ".foreign_signature"
- with open(signature_file, "wb") as f:
- f.write(base64.b64decode(signature))
- code = subprocess.call("openssl dgst -verify %s -signature %s %s" %
- (pubkeyfile, signature_file, filename),
- shell=True)
- matched = (code == 0)
- if not matched:
- os.remove(signature_file)
- os.remove(filename)
- return matched
diff --git a/deps/v8/tools/testrunner/server/status_handler.py b/deps/v8/tools/testrunner/server/status_handler.py
deleted file mode 100644
index 3f2271dc69..0000000000
--- a/deps/v8/tools/testrunner/server/status_handler.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import socket
-import SocketServer
-
-from . import compression
-from . import constants
-
-
-def _StatusQuery(peer, query):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- code = sock.connect_ex((peer.address, constants.STATUS_PORT))
- if code != 0:
- # TODO(jkummerow): disconnect (after 3 failures?)
- return
- compression.Send(query, sock)
- compression.Send(constants.END_OF_STREAM, sock)
- rec = compression.Receiver(sock)
- data = None
- while not rec.IsDone():
- data = rec.Current()
- assert data[0] == query[0]
- data = data[1]
- rec.Advance()
- sock.close()
- return data
-
-
-def RequestTrustedPubkeys(peer, server):
- pubkey_list = _StatusQuery(peer, [constants.LIST_TRUSTED_PUBKEYS])
- for pubkey in pubkey_list:
- if server.IsTrusted(pubkey): continue
- result = _StatusQuery(peer, [constants.GET_SIGNED_PUBKEY, pubkey])
- server.AcceptNewTrusted(result)
-
-
-def NotifyNewTrusted(peer, data):
- _StatusQuery(peer, [constants.NOTIFY_NEW_TRUSTED] + data)
-
-
-def ITrustYouNow(peer):
- _StatusQuery(peer, [constants.TRUST_YOU_NOW])
-
-
-def TryTransitiveTrust(peer, pubkey, server):
- if _StatusQuery(peer, [constants.DO_YOU_TRUST, pubkey]):
- result = _StatusQuery(peer, [constants.GET_SIGNED_PUBKEY, pubkey])
- server.AcceptNewTrusted(result)
-
-
-class StatusHandler(SocketServer.BaseRequestHandler):
- def handle(self):
- rec = compression.Receiver(self.request)
- while not rec.IsDone():
- data = rec.Current()
- action = data[0]
-
- if action == constants.LIST_TRUSTED_PUBKEYS:
- response = self.server.daemon.ListTrusted()
- compression.Send([action, response], self.request)
-
- elif action == constants.GET_SIGNED_PUBKEY:
- response = self.server.daemon.SignTrusted(data[1])
- compression.Send([action, response], self.request)
-
- elif action == constants.NOTIFY_NEW_TRUSTED:
- self.server.daemon.AcceptNewTrusted(data[1:])
- pass # No response.
-
- elif action == constants.TRUST_YOU_NOW:
- self.server.daemon.MarkPeerAsTrusting(self.client_address[0])
- pass # No response.
-
- elif action == constants.DO_YOU_TRUST:
- response = self.server.daemon.IsTrusted(data[1])
- compression.Send([action, response], self.request)
-
- rec.Advance()
- compression.Send(constants.END_OF_STREAM, self.request)
-
-
-class StatusSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- def __init__(self, daemon):
- address = (daemon.ip, constants.STATUS_PORT)
- SocketServer.TCPServer.__init__(self, address, StatusHandler)
- self.daemon = daemon
diff --git a/deps/v8/tools/testrunner/server/work_handler.py b/deps/v8/tools/testrunner/server/work_handler.py
deleted file mode 100644
index 6bf7d43cf9..0000000000
--- a/deps/v8/tools/testrunner/server/work_handler.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import SocketServer
-import stat
-import subprocess
-import threading
-
-from . import compression
-from . import constants
-from . import signatures
-from ..network import endpoint
-from ..objects import workpacket
-
-
-class WorkHandler(SocketServer.BaseRequestHandler):
-
- def handle(self):
- rec = compression.Receiver(self.request)
- while not rec.IsDone():
- data = rec.Current()
- with self.server.job_lock:
- self._WorkOnWorkPacket(data)
- rec.Advance()
-
- def _WorkOnWorkPacket(self, data):
- server_root = self.server.daemon.root
- v8_root = os.path.join(server_root, "v8")
- os.chdir(v8_root)
- packet = workpacket.WorkPacket.Unpack(data)
- self.ctx = packet.context
- self.ctx.shell_dir = os.path.join("out",
- "%s.%s" % (self.ctx.arch, self.ctx.mode))
- if not os.path.isdir(self.ctx.shell_dir):
- os.makedirs(self.ctx.shell_dir)
- for binary in packet.binaries:
- if not self._UnpackBinary(binary, packet.pubkey_fingerprint):
- return
-
- if not self._CheckoutRevision(packet.base_revision):
- return
-
- if not self._ApplyPatch(packet.patch):
- return
-
- tests = packet.tests
- endpoint.Execute(v8_root, self.ctx, tests, self.request, self.server.daemon)
- self._SendResponse()
-
- def _SendResponse(self, error_message=None):
- try:
- if error_message:
- compression.Send([[-1, error_message]], self.request)
- compression.Send(constants.END_OF_STREAM, self.request)
- return
- except Exception, e:
- pass # Peer is gone. There's nothing we can do.
- # Clean up.
- self._Call("git checkout -f")
- self._Call("git clean -f -d")
- self._Call("rm -rf %s" % self.ctx.shell_dir)
-
- def _UnpackBinary(self, binary, pubkey_fingerprint):
- binary_name = binary["name"]
- if binary_name == "libv8.so":
- libdir = os.path.join(self.ctx.shell_dir, "lib.target")
- if not os.path.exists(libdir): os.makedirs(libdir)
- target = os.path.join(libdir, binary_name)
- else:
- target = os.path.join(self.ctx.shell_dir, binary_name)
- pubkeyfile = "../trusted/%s.pem" % pubkey_fingerprint
- if not signatures.VerifySignature(target, binary["blob"],
- binary["sign"], pubkeyfile):
- self._SendResponse("Signature verification failed")
- return False
- os.chmod(target, stat.S_IRWXU)
- return True
-
- def _CheckoutRevision(self, base_svn_revision):
- get_hash_cmd = (
- "git log -1 --format=%%H --remotes --grep='^git-svn-id:.*@%s'" %
- base_svn_revision)
- try:
- base_revision = subprocess.check_output(get_hash_cmd, shell=True)
- if not base_revision: raise ValueError
- except:
- self._Call("git fetch")
- try:
- base_revision = subprocess.check_output(get_hash_cmd, shell=True)
- if not base_revision: raise ValueError
- except:
- self._SendResponse("Base revision not found.")
- return False
- code = self._Call("git checkout -f %s" % base_revision)
- if code != 0:
- self._SendResponse("Error trying to check out base revision.")
- return False
- code = self._Call("git clean -f -d")
- if code != 0:
- self._SendResponse("Failed to reset checkout")
- return False
- return True
-
- def _ApplyPatch(self, patch):
- if not patch: return True # Just skip if the patch is empty.
- patchfilename = "_dtest_incoming_patch.patch"
- with open(patchfilename, "w") as f:
- f.write(patch)
- code = self._Call("git apply %s" % patchfilename)
- if code != 0:
- self._SendResponse("Error applying patch.")
- return False
- return True
-
- def _Call(self, cmd):
- return subprocess.call(cmd, shell=True)
-
-
-class WorkSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- def __init__(self, daemon):
- address = (daemon.ip, constants.PEER_PORT)
- SocketServer.TCPServer.__init__(self, address, WorkHandler)
- self.job_lock = threading.Lock()
- self.daemon = daemon
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
new file mode 100755
index 0000000000..d838df783c
--- /dev/null
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -0,0 +1,553 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from collections import OrderedDict
+from os.path import join
+import multiprocessing
+import os
+import random
+import shlex
+import subprocess
+import sys
+import time
+
+# Adds testrunner to the path hence it has to be imported at the beggining.
+import base_runner
+
+from testrunner.local import execution
+from testrunner.local import progress
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.local import verbose
+from testrunner.local.variants import ALL_VARIANTS
+from testrunner.objects import context
+
+
+TIMEOUT_DEFAULT = 60
+
+# Variants ordered by expected runtime (slowest first).
+VARIANTS = ["default"]
+
+MORE_VARIANTS = [
+ "stress",
+ "stress_incremental_marking",
+ "nooptimization",
+ "stress_background_compile",
+ "wasm_traps",
+]
+
+VARIANT_ALIASES = {
+ # The default for developer workstations.
+ "dev": VARIANTS,
+ # Additional variants, run on all bots.
+ "more": MORE_VARIANTS,
+ # Shortcut for the two above ("more" first - it has the longer running tests).
+ "exhaustive": MORE_VARIANTS + VARIANTS,
+ # Additional variants, run on a subset of bots.
+ "extra": ["future", "liftoff"],
+}
+
+GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
+ "--concurrent-recompilation-queue-length=64",
+ "--concurrent-recompilation-delay=500",
+ "--concurrent-recompilation"]
+
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mips",
+ "mipsel",
+ "mips64",
+ "mips64el",
+ "s390",
+ "s390x",
+ "arm64"]
+
+
+class StandardTestRunner(base_runner.BaseTestRunner):
+ def __init__(self):
+ super(StandardTestRunner, self).__init__()
+
+ self.sancov_dir = None
+
+ def _do_execute(self, options, args):
+ if options.swarming:
+ # Swarming doesn't print how isolated commands are called. Lets make
+ # this less cryptic by printing it ourselves.
+ print ' '.join(sys.argv)
+
+ if utils.GuessOS() == "macos":
+ # TODO(machenbach): Temporary output for investigating hanging test
+ # driver on mac.
+ print "V8 related processes running on this host:"
+ try:
+ print subprocess.check_output(
+ "ps -e | egrep 'd8|cctest|unittests'", shell=True)
+ except Exception:
+ pass
+
+ suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+
+ # Use default tests if no test configuration was provided at the cmd line.
+ if len(args) == 0:
+ args = ["default"]
+
+ # Expand arguments with grouped tests. The args should reflect the list
+ # of suites as otherwise filters would break.
+ def ExpandTestGroups(name):
+ if name in base_runner.TEST_MAP:
+ return [suite for suite in base_runner.TEST_MAP[name]]
+ else:
+ return [name]
+ args = reduce(lambda x, y: x + y,
+ [ExpandTestGroups(arg) for arg in args],
+ [])
+
+ args_suites = OrderedDict() # Used as set
+ for arg in args:
+ args_suites[arg.split('/')[0]] = True
+ suite_paths = [ s for s in args_suites if s in suite_paths ]
+
+ suites = []
+ for root in suite_paths:
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(base_runner.BASE_DIR, "test", root))
+ if suite:
+ suites.append(suite)
+
+ for s in suites:
+ s.PrepareSources()
+
+ try:
+ return self._execute(args, options, suites)
+ except KeyboardInterrupt:
+ return 2
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--sancov-dir",
+ help="Directory where to collect coverage data")
+ parser.add_option("--cfi-vptr",
+ help="Run tests with UBSAN cfi_vptr option.",
+ default=False, action="store_true")
+ parser.add_option("--novfp3",
+ help="Indicates that V8 was compiled without VFP3"
+ " support",
+ default=False, action="store_true")
+ parser.add_option("--cat", help="Print the source of the tests",
+ default=False, action="store_true")
+ parser.add_option("--slow-tests",
+ help="Regard slow tests (run|skip|dontcare)",
+ default="dontcare")
+ parser.add_option("--pass-fail-tests",
+ help="Regard pass|fail tests (run|skip|dontcare)",
+ default="dontcare")
+ parser.add_option("--gc-stress",
+ help="Switch on GC stress mode",
+ default=False, action="store_true")
+ parser.add_option("--command-prefix",
+ help="Prepended to each shell command used to run a"
+ " test",
+ default="")
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to each test command",
+ action="append", default=[])
+ parser.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type="int")
+ parser.add_option("--no-harness", "--noharness",
+ help="Run without test harness of a given suite",
+ default=False, action="store_true")
+ parser.add_option("--no-presubmit", "--nopresubmit",
+ help='Skip presubmit checks (deprecated)',
+ default=False, dest="no_presubmit", action="store_true")
+ parser.add_option("--no-sorting", "--nosorting",
+ help="Don't sort tests according to duration of last"
+ " run.",
+ default=False, dest="no_sorting", action="store_true")
+ parser.add_option("--no-variants", "--novariants",
+ help="Deprecated. "
+ "Equivalent to passing --variants=default",
+ default=False, dest="no_variants", action="store_true")
+ parser.add_option("--variants",
+ help="Comma-separated list of testing variants;"
+ " default: \"%s\"" % ",".join(VARIANTS))
+ parser.add_option("--exhaustive-variants",
+ default=False, action="store_true",
+ help="Deprecated. "
+ "Equivalent to passing --variants=exhaustive")
+ parser.add_option("-p", "--progress",
+ help=("The style of progress indicator"
+ " (verbose, dots, color, mono)"),
+ choices=progress.PROGRESS_INDICATORS.keys(),
+ default="mono")
+ parser.add_option("--quickcheck", default=False, action="store_true",
+ help=("Quick check mode (skip slow tests)"))
+ parser.add_option("--report", help="Print a summary of the tests to be"
+ " run",
+ default=False, action="store_true")
+ parser.add_option("--json-test-results",
+ help="Path to a file for storing json results.")
+ parser.add_option("--flakiness-results",
+ help="Path to a file for storing flakiness json.")
+ parser.add_option("--rerun-failures-count",
+ help=("Number of times to rerun each failing test case."
+ " Very slow tests will be rerun only once."),
+ default=0, type="int")
+ parser.add_option("--rerun-failures-max",
+ help="Maximum number of failing test cases to rerun.",
+ default=100, type="int")
+ parser.add_option("--shard-count",
+ help="Split testsuites into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
+ parser.add_option("--dont-skip-slow-simulator-tests",
+ help="Don't skip more slow tests when using a"
+ " simulator.",
+ default=False, action="store_true",
+ dest="dont_skip_simulator_slow_tests")
+ parser.add_option("--swarming",
+ help="Indicates running test driver on swarming.",
+ default=False, action="store_true")
+ parser.add_option("--time", help="Print timing information after running",
+ default=False, action="store_true")
+ parser.add_option("-t", "--timeout", help="Timeout in seconds",
+ default=TIMEOUT_DEFAULT, type="int")
+ parser.add_option("--warn-unused", help="Report unused rules",
+ default=False, action="store_true")
+ parser.add_option("--junitout", help="File name of the JUnit output")
+ parser.add_option("--junittestsuite",
+ help="The testsuite name in the JUnit output file",
+ default="v8tests")
+ parser.add_option("--random-seed", default=0, dest="random_seed",
+ help="Default seed for initializing random generator",
+ type=int)
+ parser.add_option("--random-seed-stress-count", default=1, type="int",
+ dest="random_seed_stress_count",
+ help="Number of runs with different random seeds")
+
+ def _process_options(self, options):
+ global VARIANTS
+
+ if options.sancov_dir:
+ self.sancov_dir = options.sancov_dir
+ if not os.path.exists(self.sancov_dir):
+ print("sancov-dir %s doesn't exist" % self.sancov_dir)
+ raise base_runner.TestRunnerError()
+
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
+ if options.gc_stress:
+ options.extra_flags += GC_STRESS_FLAGS
+
+ if self.build_config.asan:
+ options.extra_flags.append("--invoke-weak-callbacks")
+ options.extra_flags.append("--omit-quit")
+
+ if options.novfp3:
+ options.extra_flags.append("--noenable-vfp3")
+
+ if options.no_variants:
+ print ("Option --no-variants is deprecated. "
+ "Pass --variants=default instead.")
+ assert not options.variants
+ options.variants = "default"
+
+ if options.exhaustive_variants:
+ # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
+ print ("Option --exhaustive-variants is deprecated. "
+ "Pass --variants=exhaustive instead.")
+ # This is used on many bots. It includes a larger set of default
+ # variants.
+ # Other options for manipulating variants still apply afterwards.
+ assert not options.variants
+ options.variants = "exhaustive"
+
+ if options.quickcheck:
+ assert not options.variants
+ options.variants = "stress,default"
+ options.slow_tests = "skip"
+ options.pass_fail_tests = "skip"
+
+ if self.build_config.predictable:
+ options.variants = "default"
+ options.extra_flags.append("--predictable")
+ options.extra_flags.append("--verify_predictable")
+ options.extra_flags.append("--no-inline-new")
+
+ # TODO(machenbach): Figure out how to test a bigger subset of variants on
+ # msan.
+ if self.build_config.msan:
+ options.variants = "default"
+
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+
+ if options.random_seed_stress_count <= 1 and options.random_seed == 0:
+ options.random_seed = self._random_seed()
+
+ # Use developer defaults if no variant was specified.
+ options.variants = options.variants or "dev"
+
+ # Resolve variant aliases and dedupe.
+ # TODO(machenbach): Don't mutate global variable. Rather pass mutated
+ # version as local variable.
+ VARIANTS = list(set(reduce(
+ list.__add__,
+ (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
+ [],
+ )))
+
+ if not set(VARIANTS).issubset(ALL_VARIANTS):
+ print "All variants must be in %s" % str(ALL_VARIANTS)
+ raise base_runner.TestRunnerError()
+
+ def CheckTestMode(name, option):
+ if not option in ["run", "skip", "dontcare"]:
+ print "Unknown %s mode %s" % (name, option)
+ raise base_runner.TestRunnerError()
+ CheckTestMode("slow test", options.slow_tests)
+ CheckTestMode("pass|fail test", options.pass_fail_tests)
+ if self.build_config.no_i18n:
+ base_runner.TEST_MAP["bot_default"].remove("intl")
+ base_runner.TEST_MAP["default"].remove("intl")
+
+ def _setup_env(self):
+ super(StandardTestRunner, self)._setup_env()
+
+ symbolizer_option = self._get_external_symbolizer_option()
+
+ if self.sancov_dir:
+ os.environ['ASAN_OPTIONS'] = ":".join([
+ 'coverage=1',
+ 'coverage_dir=%s' % self.sancov_dir,
+ symbolizer_option,
+ "allow_user_segv_handler=1",
+ ])
+
+ def _random_seed(self):
+ seed = 0
+ while not seed:
+ seed = random.SystemRandom().randint(-2147483648, 2147483647)
+ return seed
+
+ def _execute(self, args, options, suites):
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+ # Populate context object.
+
+ # Simulators are slow, therefore allow a longer timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ options.timeout *= 2
+
+ options.timeout *= self.mode_options.timeout_scalefactor
+
+ if self.build_config.predictable:
+ # Predictable mode is slower.
+ options.timeout *= 2
+
+ ctx = context.Context(self.build_config.arch,
+ self.mode_options.execution_mode,
+ self.outdir,
+ self.mode_options.flags,
+ options.verbose,
+ options.timeout,
+ options.isolates,
+ options.command_prefix,
+ options.extra_flags,
+ self.build_config.no_i18n,
+ options.random_seed,
+ options.no_sorting,
+ options.rerun_failures_count,
+ options.rerun_failures_max,
+ self.build_config.predictable,
+ options.no_harness,
+ use_perf_data=not options.swarming,
+ sancov_dir=self.sancov_dir)
+
+ # TODO(all): Combine "simulator" and "simulator_run".
+ # TODO(machenbach): In GN we can derive simulator run from
+ # target_arch != v8_target_arch in the dumped build config.
+ simulator_run = (
+ not options.dont_skip_simulator_slow_tests and
+ self.build_config.arch in [
+ 'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
+ 'ppc64', 's390', 's390x'] and
+ bool(base_runner.ARCH_GUESS) and
+ self.build_config.arch != base_runner.ARCH_GUESS)
+ # Find available test suites and read test cases from them.
+ variables = {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": False,
+ "gc_fuzzer": False,
+ "gc_stress": options.gc_stress,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": options.no_harness,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": options.novfp3,
+ "predictable": self.build_config.predictable,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": simulator_run,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+ all_tests = []
+ num_tests = 0
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+ if len(args) > 0:
+ s.FilterTestCasesByArgs(args)
+ all_tests += s.tests
+
+ # First filtering by status applying the generic rules (tests without
+ # variants)
+ if options.warn_unused:
+ s.WarnUnusedRules(check_variant_rules=False)
+ s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+
+ if options.cat:
+ verbose.PrintTestSource(s.tests)
+ continue
+ variant_gen = s.CreateVariantGenerator(VARIANTS)
+ variant_tests = [ t.CopyAddingFlags(v, flags)
+ for t in s.tests
+ for v in variant_gen.FilterVariantsByTest(t)
+ for flags in variant_gen.GetFlagSets(t, v) ]
+
+ if options.random_seed_stress_count > 1:
+ # Duplicate test for random seed stress mode.
+ def iter_seed_flags():
+ for _ in range(0, options.random_seed_stress_count):
+ # Use given random seed for all runs (set by default in
+ # execution.py) or a new random seed if none is specified.
+ if options.random_seed:
+ yield []
+ else:
+ yield ["--random-seed=%d" % self._random_seed()]
+ s.tests = [
+ t.CopyAddingFlags(t.variant, flags)
+ for t in variant_tests
+ for flags in iter_seed_flags()
+ ]
+ else:
+ s.tests = variant_tests
+
+ # Second filtering by status applying also the variant-dependent rules.
+ if options.warn_unused:
+ s.WarnUnusedRules(check_variant_rules=True)
+ s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+
+ for t in s.tests:
+ t.flags += s.GetStatusfileFlags(t)
+
+ s.tests = self._shard_tests(s.tests, options)
+ num_tests += len(s.tests)
+
+ if options.cat:
+ return 0 # We're done here.
+
+ if options.report:
+ verbose.PrintReport(all_tests)
+
+ # Run the tests.
+ start_time = time.time()
+ progress_indicator = progress.IndicatorNotifier()
+ progress_indicator.Register(
+ progress.PROGRESS_INDICATORS[options.progress]())
+ if options.junitout:
+ progress_indicator.Register(progress.JUnitTestProgressIndicator(
+ options.junitout, options.junittestsuite))
+ if options.json_test_results:
+ progress_indicator.Register(progress.JsonTestProgressIndicator(
+ options.json_test_results,
+ self.build_config.arch,
+ self.mode_options.execution_mode,
+ ctx.random_seed))
+ if options.flakiness_results:
+ progress_indicator.Register(progress.FlakinessTestProgressIndicator(
+ options.flakiness_results))
+
+ runner = execution.Runner(suites, progress_indicator, ctx)
+ exit_code = runner.Run(options.j)
+ overall_duration = time.time() - start_time
+
+ if options.time:
+ verbose.PrintTestDurations(suites, overall_duration)
+
+ if num_tests == 0:
+ print("Warning: no tests were run!")
+
+ if exit_code == 1 and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file "
+ "generated with failure information.")
+ exit_code = 0
+
+ if self.sancov_dir:
+ # If tests ran with sanitizer coverage, merge coverage files in the end.
+ try:
+ print "Merging sancov files."
+ subprocess.check_call([
+ sys.executable,
+ join(
+ base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+ "--coverage-dir=%s" % self.sancov_dir])
+ except:
+ print >> sys.stderr, "Error: Merging sancov files failed."
+ exit_code = 1
+
+ return exit_code
+
+ def _shard_tests(self, tests, options):
+ # Read gtest shard configuration from environment (e.g. set by swarming).
+ # If none is present, use values passed on the command line.
+ shard_count = int(
+ os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ shard_run = os.environ.get('GTEST_SHARD_INDEX')
+ if shard_run is not None:
+ # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+ shard_run = int(shard_run) + 1
+ else:
+ shard_run = options.shard_run
+
+ if options.shard_count > 1:
+ # Log if a value was passed on the cmd line and it differs from the
+ # environment variables.
+ if options.shard_count != shard_count:
+ print("shard_count from cmd line differs from environment variable "
+ "GTEST_TOTAL_SHARDS")
+ if options.shard_run > 1 and options.shard_run != shard_run:
+ print("shard_run from cmd line differs from environment variable "
+ "GTEST_SHARD_INDEX")
+
+ if shard_count < 2:
+ return tests
+ if shard_run < 1 or shard_run > shard_count:
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return tests
+ count = 0
+ shard = []
+ for test in tests:
+ if count % shard_count == shard_run - 1:
+ shard.append(test)
+ count += 1
+ return shard
+
+
+if __name__ == '__main__':
+ sys.exit(StandardTestRunner().execute())
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
index 3cb4a0b2c3..b841cc0bd3 100644
--- a/deps/v8/tools/tick-processor.html
+++ b/deps/v8/tools/tick-processor.html
@@ -50,6 +50,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
<script src="profile.js"></script>
<script src="profile_view.js"></script>
<script src="logreader.js"></script>
+ <script src="arguments.js"></script>
<script src="tickprocessor.js"></script>
<script type="text/javascript">
@@ -80,7 +81,7 @@ function print(arg) {
}
function start_process() {
- ArgumentsProcessor.DEFAULTS = {
+ let DEFAULTS = {
logFileName: 'v8.log',
platform: 'unix',
stateFilter: null,
@@ -98,13 +99,10 @@ function start_process() {
};
var tickProcessor = new TickProcessor(
- new (entriesProviders[ArgumentsProcessor.DEFAULTS.platform])(
- ArgumentsProcessor.DEFAULTS.nm,
- ArgumentsProcessor.DEFAULTS.targetRootFS),
- ArgumentsProcessor.DEFAULTS.separateIc,
- ArgumentsProcessor.DEFAULTS.callGraphSize,
- ArgumentsProcessor.DEFAULTS.ignoreUnknown,
- ArgumentsProcessor.DEFAULTS.stateFilter);
+ new (entriesProviders[DEFAULTS.platform])(
+ DEFAULTS.nm, DEFAULTS.targetRootFS),
+ DEFAULTS.separateIc, DEFAULTS.callGraphSize,
+ DEFAULTS.ignoreUnknown, DEFAULTS.stateFilter);
tickProcessor.processLogChunk(v8log_content);
tickProcessor.printStatistics();
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 51e6dabf49..057d328659 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -842,159 +842,91 @@ WindowsCppEntriesProvider.prototype.unmangleName = function(name) {
};
-function ArgumentsProcessor(args) {
- this.args_ = args;
- this.result_ = ArgumentsProcessor.DEFAULTS;
- function parseBool(str) {
- if (str == "true" || str == "1") return true;
- return false;
- }
-
- this.argsDispatch_ = {
- '-j': ['stateFilter', TickProcessor.VmStates.JS,
- 'Show only ticks from JS VM state'],
- '-g': ['stateFilter', TickProcessor.VmStates.GC,
- 'Show only ticks from GC VM state'],
- '-p': ['stateFilter', TickProcessor.VmStates.PARSER,
- 'Show only ticks from PARSER VM state'],
- '-b': ['stateFilter', TickProcessor.VmStates.BYTECODE_COMPILER,
- 'Show only ticks from BYTECODE_COMPILER VM state'],
- '-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
- 'Show only ticks from COMPILER VM state'],
- '-o': ['stateFilter', TickProcessor.VmStates.OTHER,
- 'Show only ticks from OTHER VM state'],
- '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
- 'Show only ticks from EXTERNAL VM state'],
- '--filter-runtime-timer': ['runtimeTimerFilter', null,
- 'Show only ticks matching the given runtime timer scope'],
- '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
- 'Set the call graph size'],
- '--ignore-unknown': ['ignoreUnknown', true,
- 'Exclude ticks of unknown code entries from processing'],
- '--separate-ic': ['separateIc', parseBool,
- 'Separate IC entries'],
- '--separate-bytecodes': ['separateBytecodes', parseBool,
- 'Separate Bytecode entries'],
- '--separate-builtins': ['separateBuiltins', parseBool,
- 'Separate Builtin entries'],
- '--separate-stubs': ['separateStubs', parseBool,
- 'Separate Stub entries'],
- '--unix': ['platform', 'unix',
- 'Specify that we are running on *nix platform'],
- '--windows': ['platform', 'windows',
- 'Specify that we are running on Windows platform'],
- '--mac': ['platform', 'mac',
- 'Specify that we are running on Mac OS X platform'],
- '--nm': ['nm', 'nm',
- 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
- '--target': ['targetRootFS', '',
- 'Specify the target root directory for cross environment'],
- '--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'],
- '--distortion': ['distortion', 0,
- 'Specify the logging overhead in picoseconds'],
- '--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output'],
- '--timed-range': ['timedRange', true,
- 'Ignore ticks before first and after last Date.now() call'],
- '--pairwise-timed-range': ['pairwiseTimedRange', true,
- 'Ignore ticks outside pairs of Date.now() calls'],
- '--only-summary': ['onlySummary', true,
- 'Print only tick summary, exclude other information'],
- '--preprocess': ['preprocessJson', true,
- 'Preprocess for consumption with web interface']
- };
- this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
- this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
- this.argsDispatch_['--compiler'] = this.argsDispatch_['-c'];
- this.argsDispatch_['--other'] = this.argsDispatch_['-o'];
- this.argsDispatch_['--external'] = this.argsDispatch_['-e'];
- this.argsDispatch_['--ptr'] = this.argsDispatch_['--pairwise-timed-range'];
-};
-
-
-ArgumentsProcessor.DEFAULTS = {
- logFileName: 'v8.log',
- platform: 'unix',
- stateFilter: null,
- callGraphSize: 5,
- ignoreUnknown: false,
- separateIc: true,
- separateBytecodes: false,
- separateBuiltins: true,
- separateStubs: true,
- preprocessJson: null,
- targetRootFS: '',
- nm: 'nm',
- range: 'auto,auto',
- distortion: 0,
- timedRange: false,
- pairwiseTimedRange: false,
- onlySummary: false,
- runtimeTimerFilter: null,
-};
-
-
-ArgumentsProcessor.prototype.parse = function() {
- while (this.args_.length) {
- var arg = this.args_.shift();
- if (arg.charAt(0) != '-') {
- this.result_.logFileName = arg;
- continue;
- }
- var userValue = null;
- var eqPos = arg.indexOf('=');
- if (eqPos != -1) {
- userValue = arg.substr(eqPos + 1);
- arg = arg.substr(0, eqPos);
- }
- if (arg in this.argsDispatch_) {
- var dispatch = this.argsDispatch_[arg];
- var property = dispatch[0];
- var defaultValue = dispatch[1];
- if (typeof defaultValue == "function") {
- userValue = defaultValue(userValue);
- } else if (userValue == null) {
- userValue = defaultValue;
- }
- this.result_[property] = userValue;
- } else {
- return false;
- }
- }
- return true;
-};
-
-
-ArgumentsProcessor.prototype.result = function() {
- return this.result_;
-};
-
-
-ArgumentsProcessor.prototype.printUsageAndExit = function() {
-
- function padRight(s, len) {
- s = s.toString();
- if (s.length < len) {
- s = s + (new Array(len - s.length + 1).join(' '));
- }
- return s;
- }
-
- print('Cmdline args: [options] [log-file-name]\n' +
- 'Default log file name is "' +
- ArgumentsProcessor.DEFAULTS.logFileName + '".\n');
- print('Options:');
- for (var arg in this.argsDispatch_) {
- var synonyms = [arg];
- var dispatch = this.argsDispatch_[arg];
- for (var synArg in this.argsDispatch_) {
- if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
- synonyms.push(synArg);
- delete this.argsDispatch_[synArg];
- }
- }
- print(' ' + padRight(synonyms.join(', '), 20) + " " + dispatch[2]);
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ let dispatch = {
+ '-j': ['stateFilter', TickProcessor.VmStates.JS,
+ 'Show only ticks from JS VM state'],
+ '-g': ['stateFilter', TickProcessor.VmStates.GC,
+ 'Show only ticks from GC VM state'],
+ '-p': ['stateFilter', TickProcessor.VmStates.PARSER,
+ 'Show only ticks from PARSER VM state'],
+ '-b': ['stateFilter', TickProcessor.VmStates.BYTECODE_COMPILER,
+ 'Show only ticks from BYTECODE_COMPILER VM state'],
+ '-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
+ 'Show only ticks from COMPILER VM state'],
+ '-o': ['stateFilter', TickProcessor.VmStates.OTHER,
+ 'Show only ticks from OTHER VM state'],
+ '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
+ 'Show only ticks from EXTERNAL VM state'],
+ '--filter-runtime-timer': ['runtimeTimerFilter', null,
+ 'Show only ticks matching the given runtime timer scope'],
+ '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
+ 'Set the call graph size'],
+ '--ignore-unknown': ['ignoreUnknown', true,
+ 'Exclude ticks of unknown code entries from processing'],
+ '--separate-ic': ['separateIc', parseBool,
+ 'Separate IC entries'],
+ '--separate-bytecodes': ['separateBytecodes', parseBool,
+ 'Separate Bytecode entries'],
+ '--separate-builtins': ['separateBuiltins', parseBool,
+ 'Separate Builtin entries'],
+ '--separate-stubs': ['separateStubs', parseBool,
+ 'Separate Stub entries'],
+ '--unix': ['platform', 'unix',
+ 'Specify that we are running on *nix platform'],
+ '--windows': ['platform', 'windows',
+ 'Specify that we are running on Windows platform'],
+ '--mac': ['platform', 'mac',
+ 'Specify that we are running on Mac OS X platform'],
+ '--nm': ['nm', 'nm',
+ 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+ '--target': ['targetRootFS', '',
+ 'Specify the target root directory for cross environment'],
+ '--range': ['range', 'auto,auto',
+ 'Specify the range limit as [start],[end]'],
+ '--distortion': ['distortion', 0,
+ 'Specify the logging overhead in picoseconds'],
+ '--source-map': ['sourceMap', null,
+ 'Specify the source map that should be used for output'],
+ '--timed-range': ['timedRange', true,
+ 'Ignore ticks before first and after last Date.now() call'],
+ '--pairwise-timed-range': ['pairwiseTimedRange', true,
+ 'Ignore ticks outside pairs of Date.now() calls'],
+ '--only-summary': ['onlySummary', true,
+ 'Print only tick summary, exclude other information'],
+ '--preprocess': ['preprocessJson', true,
+ 'Preprocess for consumption with web interface']
+ };
+ dispatch['--js'] = dispatch['-j'];
+ dispatch['--gc'] = dispatch['-g'];
+ dispatch['--compiler'] = dispatch['-c'];
+ dispatch['--other'] = dispatch['-o'];
+ dispatch['--external'] = dispatch['-e'];
+ dispatch['--ptr'] = dispatch['--pairwise-timed-range'];
+ return dispatch;
+ }
+
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ platform: 'unix',
+ stateFilter: null,
+ callGraphSize: 5,
+ ignoreUnknown: false,
+ separateIc: true,
+ separateBytecodes: false,
+ separateBuiltins: true,
+ separateStubs: true,
+ preprocessJson: null,
+ targetRootFS: '',
+ nm: 'nm',
+ range: 'auto,auto',
+ distortion: 0,
+ timedRange: false,
+ pairwiseTimedRange: false,
+ onlySummary: false,
+ runtimeTimerFilter: null,
+ };
}
- quit(2);
-};
+}
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 17eb0706e6..cad836b2e3 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -14,7 +14,6 @@ BOTS = {
'--linux64': 'v8_linux64_perf_try',
'--linux64_atom': 'v8_linux64_atom_perf_try',
'--linux64_haswell': 'v8_linux64_haswell_perf_try',
- '--linux64_haswell_cm': 'v8_linux64_haswell_cm_perf_try',
'--nexus5': 'v8_nexus5_perf_try',
'--nexus7': 'v8_nexus7_perf_try',
'--nexus9': 'v8_nexus9_perf_try',
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index e7342e6a2c..fd1e36531a 100644
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -436,10 +436,10 @@ class PerfTest(unittest.TestCase):
"Richards: 200\nDeltaBlue: 20\n",
"Richards: 50\nDeltaBlue: 200\n",
"Richards: 100\nDeltaBlue: 20\n"])
- test_output_no_patch = path.join(TEST_WORKSPACE, "results_no_patch.json")
+ test_output_secondary = path.join(TEST_WORKSPACE, "results_secondary.json")
self.assertEquals(0, self._CallMain(
- "--outdir-no-patch", "out-no-patch",
- "--json-test-results-no-patch", test_output_no_patch,
+ "--outdir-secondary", "out-secondary",
+ "--json-test-results-secondary", test_output_secondary,
))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
@@ -448,13 +448,13 @@ class PerfTest(unittest.TestCase):
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
- ], test_output_no_patch)
+ ], test_output_secondary)
self._VerifyErrors([])
self._VerifyMockMultiple(
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
)
def testWrongBinaryWithProf(self):
@@ -545,3 +545,7 @@ class PerfTest(unittest.TestCase):
'stddev': '',
},
], results['traces'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 915365c25d..d5765a6a04 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -63,20 +63,20 @@ INSTANCE_TYPES = {
159: "ALIASED_ARGUMENTS_ENTRY_TYPE",
160: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
161: "PROMISE_REACTION_JOB_INFO_TYPE",
- 162: "PROMISE_CAPABILITY_TYPE",
- 163: "DEBUG_INFO_TYPE",
- 164: "STACK_FRAME_INFO_TYPE",
- 165: "PROTOTYPE_INFO_TYPE",
- 166: "TUPLE2_TYPE",
- 167: "TUPLE3_TYPE",
- 168: "CONTEXT_EXTENSION_TYPE",
- 169: "MODULE_TYPE",
- 170: "MODULE_INFO_ENTRY_TYPE",
- 171: "ASYNC_GENERATOR_REQUEST_TYPE",
- 172: "FIXED_ARRAY_TYPE",
- 173: "HASH_TABLE_TYPE",
- 174: "FEEDBACK_VECTOR_TYPE",
- 175: "TRANSITION_ARRAY_TYPE",
+ 162: "DEBUG_INFO_TYPE",
+ 163: "STACK_FRAME_INFO_TYPE",
+ 164: "PROTOTYPE_INFO_TYPE",
+ 165: "TUPLE2_TYPE",
+ 166: "TUPLE3_TYPE",
+ 167: "CONTEXT_EXTENSION_TYPE",
+ 168: "MODULE_TYPE",
+ 169: "MODULE_INFO_ENTRY_TYPE",
+ 170: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 171: "FIXED_ARRAY_TYPE",
+ 172: "HASH_TABLE_TYPE",
+ 173: "DESCRIPTOR_ARRAY_TYPE",
+ 174: "TRANSITION_ARRAY_TYPE",
+ 175: "FEEDBACK_VECTOR_TYPE",
176: "PROPERTY_ARRAY_TYPE",
177: "SHARED_FUNCTION_INFO_TYPE",
178: "CELL_TYPE",
@@ -84,79 +84,80 @@ INSTANCE_TYPES = {
180: "PROPERTY_CELL_TYPE",
181: "SMALL_ORDERED_HASH_MAP_TYPE",
182: "SMALL_ORDERED_HASH_SET_TYPE",
- 183: "JS_PROXY_TYPE",
- 184: "JS_GLOBAL_OBJECT_TYPE",
- 185: "JS_GLOBAL_PROXY_TYPE",
- 186: "JS_MODULE_NAMESPACE_TYPE",
- 187: "JS_SPECIAL_API_OBJECT_TYPE",
- 188: "JS_VALUE_TYPE",
- 189: "JS_MESSAGE_OBJECT_TYPE",
- 190: "JS_DATE_TYPE",
- 191: "JS_API_OBJECT_TYPE",
- 192: "JS_OBJECT_TYPE",
- 193: "JS_ARGUMENTS_TYPE",
- 194: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 195: "JS_GENERATOR_OBJECT_TYPE",
- 196: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 197: "JS_ARRAY_TYPE",
- 198: "JS_ARRAY_BUFFER_TYPE",
- 199: "JS_TYPED_ARRAY_TYPE",
- 200: "JS_DATA_VIEW_TYPE",
- 201: "JS_SET_TYPE",
- 202: "JS_MAP_TYPE",
- 203: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 204: "JS_SET_VALUE_ITERATOR_TYPE",
- 205: "JS_MAP_KEY_ITERATOR_TYPE",
- 206: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 207: "JS_MAP_VALUE_ITERATOR_TYPE",
- 208: "JS_WEAK_MAP_TYPE",
- 209: "JS_WEAK_SET_TYPE",
- 210: "JS_PROMISE_TYPE",
- 211: "JS_REGEXP_TYPE",
- 212: "JS_ERROR_TYPE",
- 213: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 214: "JS_STRING_ITERATOR_TYPE",
- 215: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
- 216: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
- 217: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
- 218: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 219: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 220: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 221: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 222: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 223: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 224: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 225: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 226: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 227: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 228: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 229: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 230: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 231: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 232: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 233: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 234: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 235: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 236: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 237: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 238: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 239: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 240: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 241: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 242: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 243: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 244: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 245: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 246: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 247: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 248: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 249: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 250: "WASM_INSTANCE_TYPE",
- 251: "WASM_MEMORY_TYPE",
- 252: "WASM_MODULE_TYPE",
- 253: "WASM_TABLE_TYPE",
- 254: "JS_BOUND_FUNCTION_TYPE",
- 255: "JS_FUNCTION_TYPE",
+ 183: "CODE_DATA_CONTAINER_TYPE",
+ 184: "JS_PROXY_TYPE",
+ 185: "JS_GLOBAL_OBJECT_TYPE",
+ 186: "JS_GLOBAL_PROXY_TYPE",
+ 187: "JS_MODULE_NAMESPACE_TYPE",
+ 188: "JS_SPECIAL_API_OBJECT_TYPE",
+ 189: "JS_VALUE_TYPE",
+ 190: "JS_MESSAGE_OBJECT_TYPE",
+ 191: "JS_DATE_TYPE",
+ 192: "JS_API_OBJECT_TYPE",
+ 193: "JS_OBJECT_TYPE",
+ 194: "JS_ARGUMENTS_TYPE",
+ 195: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 196: "JS_GENERATOR_OBJECT_TYPE",
+ 197: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 198: "JS_ARRAY_TYPE",
+ 199: "JS_ARRAY_BUFFER_TYPE",
+ 200: "JS_TYPED_ARRAY_TYPE",
+ 201: "JS_DATA_VIEW_TYPE",
+ 202: "JS_SET_TYPE",
+ 203: "JS_MAP_TYPE",
+ 204: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 205: "JS_SET_VALUE_ITERATOR_TYPE",
+ 206: "JS_MAP_KEY_ITERATOR_TYPE",
+ 207: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 208: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 209: "JS_WEAK_MAP_TYPE",
+ 210: "JS_WEAK_SET_TYPE",
+ 211: "JS_PROMISE_TYPE",
+ 212: "JS_REGEXP_TYPE",
+ 213: "JS_ERROR_TYPE",
+ 214: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 215: "JS_STRING_ITERATOR_TYPE",
+ 216: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
+ 217: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
+ 218: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
+ 219: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 220: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 221: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 222: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 223: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 224: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 225: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 226: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 227: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 228: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 229: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 230: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 231: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 232: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 233: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 234: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 235: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 236: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 237: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 238: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 239: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 240: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 241: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 242: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 243: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 244: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 245: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 246: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 247: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 248: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 249: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 250: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 251: "WASM_INSTANCE_TYPE",
+ 252: "WASM_MEMORY_TYPE",
+ 253: "WASM_MODULE_TYPE",
+ 254: "WASM_TABLE_TYPE",
+ 255: "JS_BOUND_FUNCTION_TYPE",
+ 256: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -164,112 +165,118 @@ KNOWN_MAPS = {
0x02201: (138, "FreeSpaceMap"),
0x02251: (132, "MetaMap"),
0x022a1: (131, "NullMap"),
- 0x022f1: (172, "FixedArrayMap"),
- 0x02341: (149, "OnePointerFillerMap"),
- 0x02391: (149, "TwoPointerFillerMap"),
- 0x023e1: (131, "UninitializedMap"),
- 0x02431: (8, "OneByteInternalizedStringMap"),
- 0x02481: (131, "UndefinedMap"),
- 0x024d1: (129, "HeapNumberMap"),
- 0x02521: (131, "TheHoleMap"),
- 0x02571: (131, "BooleanMap"),
- 0x025c1: (136, "ByteArrayMap"),
- 0x02611: (172, "FixedCOWArrayMap"),
- 0x02661: (173, "HashTableMap"),
- 0x026b1: (128, "SymbolMap"),
- 0x02701: (72, "OneByteStringMap"),
- 0x02751: (172, "ScopeInfoMap"),
- 0x027a1: (177, "SharedFunctionInfoMap"),
- 0x027f1: (133, "CodeMap"),
- 0x02841: (172, "FunctionContextMap"),
- 0x02891: (178, "CellMap"),
- 0x028e1: (179, "WeakCellMap"),
- 0x02931: (180, "GlobalPropertyCellMap"),
- 0x02981: (135, "ForeignMap"),
- 0x029d1: (175, "TransitionArrayMap"),
- 0x02a21: (131, "ArgumentsMarkerMap"),
- 0x02a71: (131, "ExceptionMap"),
- 0x02ac1: (131, "TerminationExceptionMap"),
- 0x02b11: (131, "OptimizedOutMap"),
- 0x02b61: (131, "StaleRegisterMap"),
- 0x02bb1: (172, "NativeContextMap"),
- 0x02c01: (172, "ModuleContextMap"),
- 0x02c51: (172, "EvalContextMap"),
- 0x02ca1: (172, "ScriptContextMap"),
- 0x02cf1: (172, "BlockContextMap"),
- 0x02d41: (172, "CatchContextMap"),
- 0x02d91: (172, "WithContextMap"),
- 0x02de1: (148, "FixedDoubleArrayMap"),
- 0x02e31: (134, "MutableHeapNumberMap"),
- 0x02e81: (173, "OrderedHashTableMap"),
- 0x02ed1: (172, "SloppyArgumentsElementsMap"),
- 0x02f21: (181, "SmallOrderedHashMapMap"),
- 0x02f71: (182, "SmallOrderedHashSetMap"),
- 0x02fc1: (189, "JSMessageObjectMap"),
- 0x03011: (137, "BytecodeArrayMap"),
- 0x03061: (172, "ModuleInfoMap"),
- 0x030b1: (178, "NoClosuresCellMap"),
- 0x03101: (178, "OneClosureCellMap"),
- 0x03151: (178, "ManyClosuresCellMap"),
- 0x031a1: (176, "PropertyArrayMap"),
- 0x031f1: (130, "BigIntMap"),
- 0x03241: (64, "StringMap"),
- 0x03291: (73, "ConsOneByteStringMap"),
- 0x032e1: (65, "ConsStringMap"),
- 0x03331: (77, "ThinOneByteStringMap"),
- 0x03381: (69, "ThinStringMap"),
- 0x033d1: (67, "SlicedStringMap"),
- 0x03421: (75, "SlicedOneByteStringMap"),
- 0x03471: (66, "ExternalStringMap"),
- 0x034c1: (82, "ExternalStringWithOneByteDataMap"),
- 0x03511: (74, "ExternalOneByteStringMap"),
- 0x03561: (98, "ShortExternalStringMap"),
- 0x035b1: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x03601: (0, "InternalizedStringMap"),
- 0x03651: (2, "ExternalInternalizedStringMap"),
- 0x036a1: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x036f1: (10, "ExternalOneByteInternalizedStringMap"),
- 0x03741: (34, "ShortExternalInternalizedStringMap"),
- 0x03791: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x037e1: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x03831: (106, "ShortExternalOneByteStringMap"),
- 0x03881: (140, "FixedUint8ArrayMap"),
- 0x038d1: (139, "FixedInt8ArrayMap"),
- 0x03921: (142, "FixedUint16ArrayMap"),
- 0x03971: (141, "FixedInt16ArrayMap"),
- 0x039c1: (144, "FixedUint32ArrayMap"),
- 0x03a11: (143, "FixedInt32ArrayMap"),
- 0x03a61: (145, "FixedFloat32ArrayMap"),
- 0x03ab1: (146, "FixedFloat64ArrayMap"),
- 0x03b01: (147, "FixedUint8ClampedArrayMap"),
- 0x03b51: (158, "ScriptMap"),
- 0x03ba1: (174, "FeedbackVectorMap"),
- 0x03bf1: (172, "DebugEvaluateContextMap"),
- 0x03c41: (172, "ScriptContextTableMap"),
- 0x03c91: (173, "UnseededNumberDictionaryMap"),
- 0x03ce1: (192, "ExternalMap"),
- 0x03d31: (106, "NativeSourceStringMap"),
- 0x03d81: (166, "Tuple2Map"),
- 0x03dd1: (153, "InterceptorInfoMap"),
- 0x03e21: (150, "AccessorInfoMap"),
- 0x03e71: (151, "AccessorPairMap"),
- 0x03ec1: (152, "AccessCheckInfoMap"),
- 0x03f11: (154, "FunctionTemplateInfoMap"),
- 0x03f61: (155, "ObjectTemplateInfoMap"),
- 0x03fb1: (156, "AllocationSiteMap"),
- 0x04001: (157, "AllocationMementoMap"),
- 0x04051: (159, "AliasedArgumentsEntryMap"),
- 0x040a1: (160, "PromiseResolveThenableJobInfoMap"),
- 0x040f1: (161, "PromiseReactionJobInfoMap"),
- 0x04141: (162, "PromiseCapabilityMap"),
- 0x04191: (163, "DebugInfoMap"),
- 0x041e1: (164, "StackFrameInfoMap"),
- 0x04231: (165, "PrototypeInfoMap"),
- 0x04281: (167, "Tuple3Map"),
- 0x042d1: (168, "ContextExtensionMap"),
- 0x04321: (169, "ModuleMap"),
- 0x04371: (170, "ModuleInfoEntryMap"),
- 0x043c1: (171, "AsyncGeneratorRequestMap"),
+ 0x022f1: (173, "DescriptorArrayMap"),
+ 0x02341: (171, "FixedArrayMap"),
+ 0x02391: (149, "OnePointerFillerMap"),
+ 0x023e1: (149, "TwoPointerFillerMap"),
+ 0x02431: (131, "UninitializedMap"),
+ 0x02481: (8, "OneByteInternalizedStringMap"),
+ 0x024d1: (131, "UndefinedMap"),
+ 0x02521: (129, "HeapNumberMap"),
+ 0x02571: (131, "TheHoleMap"),
+ 0x025c1: (131, "BooleanMap"),
+ 0x02611: (136, "ByteArrayMap"),
+ 0x02661: (171, "FixedCOWArrayMap"),
+ 0x026b1: (172, "HashTableMap"),
+ 0x02701: (128, "SymbolMap"),
+ 0x02751: (72, "OneByteStringMap"),
+ 0x027a1: (171, "ScopeInfoMap"),
+ 0x027f1: (177, "SharedFunctionInfoMap"),
+ 0x02841: (133, "CodeMap"),
+ 0x02891: (171, "FunctionContextMap"),
+ 0x028e1: (178, "CellMap"),
+ 0x02931: (179, "WeakCellMap"),
+ 0x02981: (180, "GlobalPropertyCellMap"),
+ 0x029d1: (135, "ForeignMap"),
+ 0x02a21: (174, "TransitionArrayMap"),
+ 0x02a71: (175, "FeedbackVectorMap"),
+ 0x02ac1: (131, "ArgumentsMarkerMap"),
+ 0x02b11: (131, "ExceptionMap"),
+ 0x02b61: (131, "TerminationExceptionMap"),
+ 0x02bb1: (131, "OptimizedOutMap"),
+ 0x02c01: (131, "StaleRegisterMap"),
+ 0x02c51: (171, "NativeContextMap"),
+ 0x02ca1: (171, "ModuleContextMap"),
+ 0x02cf1: (171, "EvalContextMap"),
+ 0x02d41: (171, "ScriptContextMap"),
+ 0x02d91: (171, "BlockContextMap"),
+ 0x02de1: (171, "CatchContextMap"),
+ 0x02e31: (171, "WithContextMap"),
+ 0x02e81: (171, "DebugEvaluateContextMap"),
+ 0x02ed1: (171, "ScriptContextTableMap"),
+ 0x02f21: (148, "FixedDoubleArrayMap"),
+ 0x02f71: (134, "MutableHeapNumberMap"),
+ 0x02fc1: (172, "OrderedHashMapMap"),
+ 0x03011: (172, "OrderedHashSetMap"),
+ 0x03061: (172, "NameDictionaryMap"),
+ 0x030b1: (172, "GlobalDictionaryMap"),
+ 0x03101: (172, "NumberDictionaryMap"),
+ 0x03151: (172, "StringTableMap"),
+ 0x031a1: (172, "WeakHashTableMap"),
+ 0x031f1: (171, "SloppyArgumentsElementsMap"),
+ 0x03241: (181, "SmallOrderedHashMapMap"),
+ 0x03291: (182, "SmallOrderedHashSetMap"),
+ 0x032e1: (183, "CodeDataContainerMap"),
+ 0x03331: (190, "JSMessageObjectMap"),
+ 0x03381: (193, "ExternalMap"),
+ 0x033d1: (137, "BytecodeArrayMap"),
+ 0x03421: (171, "ModuleInfoMap"),
+ 0x03471: (178, "NoClosuresCellMap"),
+ 0x034c1: (178, "OneClosureCellMap"),
+ 0x03511: (178, "ManyClosuresCellMap"),
+ 0x03561: (176, "PropertyArrayMap"),
+ 0x035b1: (130, "BigIntMap"),
+ 0x03601: (106, "NativeSourceStringMap"),
+ 0x03651: (64, "StringMap"),
+ 0x036a1: (73, "ConsOneByteStringMap"),
+ 0x036f1: (65, "ConsStringMap"),
+ 0x03741: (77, "ThinOneByteStringMap"),
+ 0x03791: (69, "ThinStringMap"),
+ 0x037e1: (67, "SlicedStringMap"),
+ 0x03831: (75, "SlicedOneByteStringMap"),
+ 0x03881: (66, "ExternalStringMap"),
+ 0x038d1: (82, "ExternalStringWithOneByteDataMap"),
+ 0x03921: (74, "ExternalOneByteStringMap"),
+ 0x03971: (98, "ShortExternalStringMap"),
+ 0x039c1: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x03a11: (0, "InternalizedStringMap"),
+ 0x03a61: (2, "ExternalInternalizedStringMap"),
+ 0x03ab1: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x03b01: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03b51: (34, "ShortExternalInternalizedStringMap"),
+ 0x03ba1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x03bf1: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03c41: (106, "ShortExternalOneByteStringMap"),
+ 0x03c91: (140, "FixedUint8ArrayMap"),
+ 0x03ce1: (139, "FixedInt8ArrayMap"),
+ 0x03d31: (142, "FixedUint16ArrayMap"),
+ 0x03d81: (141, "FixedInt16ArrayMap"),
+ 0x03dd1: (144, "FixedUint32ArrayMap"),
+ 0x03e21: (143, "FixedInt32ArrayMap"),
+ 0x03e71: (145, "FixedFloat32ArrayMap"),
+ 0x03ec1: (146, "FixedFloat64ArrayMap"),
+ 0x03f11: (147, "FixedUint8ClampedArrayMap"),
+ 0x03f61: (165, "Tuple2Map"),
+ 0x03fb1: (158, "ScriptMap"),
+ 0x04001: (153, "InterceptorInfoMap"),
+ 0x04051: (150, "AccessorInfoMap"),
+ 0x040a1: (151, "AccessorPairMap"),
+ 0x040f1: (152, "AccessCheckInfoMap"),
+ 0x04141: (154, "FunctionTemplateInfoMap"),
+ 0x04191: (155, "ObjectTemplateInfoMap"),
+ 0x041e1: (156, "AllocationSiteMap"),
+ 0x04231: (157, "AllocationMementoMap"),
+ 0x04281: (159, "AliasedArgumentsEntryMap"),
+ 0x042d1: (160, "PromiseResolveThenableJobInfoMap"),
+ 0x04321: (161, "PromiseReactionJobInfoMap"),
+ 0x04371: (162, "DebugInfoMap"),
+ 0x043c1: (163, "StackFrameInfoMap"),
+ 0x04411: (164, "PrototypeInfoMap"),
+ 0x04461: (166, "Tuple3Map"),
+ 0x044b1: (167, "ContextExtensionMap"),
+ 0x04501: (168, "ModuleMap"),
+ 0x04551: (169, "ModuleInfoEntryMap"),
+ 0x045a1: (170, "AsyncGeneratorRequestMap"),
}
# List of known V8 objects.
@@ -291,32 +298,34 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x02519): "TerminationException",
("OLD_SPACE", 0x02579): "OptimizedOut",
("OLD_SPACE", 0x025d1): "StaleRegister",
- ("OLD_SPACE", 0x02629): "EmptyByteArray",
- ("OLD_SPACE", 0x02639): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x02659): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x02679): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x02699): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x026b9): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x026d9): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x026f9): "EmptyFixedFloat32Array",
- ("OLD_SPACE", 0x02719): "EmptyFixedFloat64Array",
- ("OLD_SPACE", 0x02739): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x02759): "EmptyScript",
- ("OLD_SPACE", 0x027e1): "UndefinedCell",
- ("OLD_SPACE", 0x027f1): "EmptySloppyArgumentsElements",
- ("OLD_SPACE", 0x02811): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x02859): "EmptyPropertyCell",
- ("OLD_SPACE", 0x02881): "EmptyWeakCell",
- ("OLD_SPACE", 0x02891): "ArrayProtector",
- ("OLD_SPACE", 0x028b9): "IsConcatSpreadableProtector",
- ("OLD_SPACE", 0x028c9): "SpeciesProtector",
- ("OLD_SPACE", 0x028f1): "StringLengthProtector",
- ("OLD_SPACE", 0x02901): "FastArrayIterationProtector",
- ("OLD_SPACE", 0x02911): "ArrayIteratorProtector",
- ("OLD_SPACE", 0x02939): "ArrayBufferNeuteringProtector",
- ("OLD_SPACE", 0x02961): "InfinityValue",
- ("OLD_SPACE", 0x02971): "MinusZeroValue",
- ("OLD_SPACE", 0x02981): "MinusInfinityValue",
+ ("OLD_SPACE", 0x02651): "EmptyByteArray",
+ ("OLD_SPACE", 0x02661): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x02681): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x026a1): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x026c1): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x026e1): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x02701): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x02721): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x02741): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x02761): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x02781): "EmptyScript",
+ ("OLD_SPACE", 0x02809): "UndefinedCell",
+ ("OLD_SPACE", 0x02819): "EmptySloppyArgumentsElements",
+ ("OLD_SPACE", 0x02839): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x02881): "EmptyOrderedHashMap",
+ ("OLD_SPACE", 0x028a9): "EmptyOrderedHashSet",
+ ("OLD_SPACE", 0x028d1): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x028f9): "EmptyWeakCell",
+ ("OLD_SPACE", 0x02969): "NoElementsProtector",
+ ("OLD_SPACE", 0x02991): "IsConcatSpreadableProtector",
+ ("OLD_SPACE", 0x029a1): "SpeciesProtector",
+ ("OLD_SPACE", 0x029c9): "StringLengthProtector",
+ ("OLD_SPACE", 0x029d9): "FastArrayIterationProtector",
+ ("OLD_SPACE", 0x029e9): "ArrayIteratorProtector",
+ ("OLD_SPACE", 0x02a11): "ArrayBufferNeuteringProtector",
+ ("OLD_SPACE", 0x02a39): "InfinityValue",
+ ("OLD_SPACE", 0x02a49): "MinusZeroValue",
+ ("OLD_SPACE", 0x02a59): "MinusInfinityValue",
}
# List of known V8 Frame Markers.
@@ -327,6 +336,7 @@ FRAME_MARKERS = (
"OPTIMIZED",
"WASM_COMPILED",
"WASM_TO_JS",
+ "WASM_TO_WASM",
"JS_TO_WASM",
"WASM_INTERPRETER_ENTRY",
"C_WASM_ENTRY",
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 37b1d82553..a39b5f1e45 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,5 +7,5 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up......
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles..........
+The bartender starts to shake the bottles...............
.
diff --git a/deps/v8/tools/windows-tick-processor.bat b/deps/v8/tools/windows-tick-processor.bat
index 79b440fa6c..8f8ad0153b 100755
--- a/deps/v8/tools/windows-tick-processor.bat
+++ b/deps/v8/tools/windows-tick-processor.bat
@@ -27,4 +27,4 @@ IF NOT %arg8:~0,2% == 8 (IF NOT %arg8:~0,2% == 8- SET log_file=%8)
SET arg9=9%9
IF NOT %arg9:~0,2% == 9 (IF NOT %arg9:~0,2% == 9- SET log_file=%9)
-type %log_file% | %D8_PATH%\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%SourceMap.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*
+type %log_file% | %D8_PATH%\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%SourceMap.js %tools_dir%arguments.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*